From 11fc2de060815d7f73863b96c95c03b89c6bbb4e Mon Sep 17 00:00:00 2001 From: Kim De Mey Date: Fri, 17 Mar 2023 10:19:17 +0100 Subject: [PATCH 1/4] Prepare Fluffy for beacon light client bridge (#1506) --- fluffy/fluffy.nim | 213 ++++++++++-------- .../beacon_light_client.nim | 2 +- .../beacon_light_client_content.nim | 13 +- fluffy/rpc/rpc_calls/rpc_portal_calls.nim | 26 +++ 4 files changed, 157 insertions(+), 97 deletions(-) diff --git a/fluffy/fluffy.nim b/fluffy/fluffy.nim index 8bb89ef642..98a2f8458f 100644 --- a/fluffy/fluffy.nim +++ b/fluffy/fluffy.nim @@ -11,7 +11,7 @@ import std/os, confutils, confutils/std/net, chronicles, chronicles/topics_registry, chronos, metrics, metrics/chronos_httpserver, json_rpc/clients/httpclient, - json_rpc/rpcproxy, stew/[byteutils, io2], + json_rpc/rpcproxy, stew/[byteutils, io2, results], eth/keys, eth/net/nat, eth/p2p/discoveryv5/protocol as discv5_protocol, beacon_chain/beacon_clock, @@ -46,6 +46,70 @@ proc initializeBridgeClient(maybeUri: Option[string]): Option[BridgeClient] = notice "Failed to initialize bridge client", error = err.msg return none(BridgeClient) +proc initBeaconLightClient( + network: LightClientNetwork, networkData: NetworkInitData, + trustedBlockRoot: Option[Eth2Digest]): LightClient = + let + getBeaconTime = networkData.clock.getBeaconTimeFn() + + refDigests = newClone networkData.forks + + lc = LightClient.new( + network, + network.portalProtocol.baseProtocol.rng, + networkData.metadata.cfg, + refDigests, + getBeaconTime, + networkData.genesis_validators_root, + LightClientFinalizationMode.Optimistic + ) + + # TODO: For now just log new headers. Ultimately we should also use callbacks + # for each lc object to save them to db and offer them to the network. + # TODO-2: The above statement sounds that this work should really be done at a + # later lower, and these callbacks are rather for use for the "application". 
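+  #
+  # A rough sketch of what such a persistence callback could look like (kept
+  # as a comment only; `headerStore.put` and the network offer step are
+  # hypothetical placeholders, not existing APIs):
+  #
+  #   proc onFinalizedHeaderStore(
+  #       lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
+  #     withForkyHeader(finalizedHeader):
+  #       when lcDataFork > LightClientDataFork.None:
+  #         headerStore.put(forkyHeader)  # persist locally (hypothetical)
+  #         # ... then offer the encoded header to the Portal network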
+ proc onFinalizedHeader( + lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) = + withForkyHeader(finalizedHeader): + when lcDataFork > LightClientDataFork.None: + info "New LC finalized header", + finalized_header = shortLog(forkyHeader) + + proc onOptimisticHeader( + lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) = + withForkyHeader(optimisticHeader): + when lcDataFork > LightClientDataFork.None: + info "New LC optimistic header", + optimistic_header = shortLog(forkyHeader) + + lc.onFinalizedHeader = onFinalizedHeader + lc.onOptimisticHeader = onOptimisticHeader + lc.trustedBlockRoot = trustedBlockRoot + + # proc onSecond(time: Moment) = + # let wallSlot = getBeaconTime().slotOrZero() + # # TODO this is a place to enable/disable gossip based on the current status + # # of light client + # # lc.updateGossipStatus(wallSlot + 1) + + # proc runOnSecondLoop() {.async.} = + # let sleepTime = chronos.seconds(1) + # while true: + # let start = chronos.now(chronos.Moment) + # await chronos.sleepAsync(sleepTime) + # let afterSleep = chronos.now(chronos.Moment) + # let sleepTime = afterSleep - start + # onSecond(start) + # let finished = chronos.now(chronos.Moment) + # let processingTime = finished - afterSleep + # trace "onSecond task completed", sleepTime, processingTime + + # onSecond(Moment.now()) + + # asyncSpawn runOnSecondLoop() + + lc + proc run(config: PortalConf) {.raises: [CatchableError].} = # Make sure dataDir exists let pathExists = createPath(config.dataDir.string) @@ -113,8 +177,10 @@ proc run(config: PortalConf) {.raises: [CatchableError].} = ) streamManager = StreamManager.new(d) - stateNetwork = StateNetwork.new(d, db, streamManager, - bootstrapRecords = bootstrapRecords, portalConfig = portalConfig) + stateNetwork = Opt.some(StateNetwork.new( + d, db, streamManager, + bootstrapRecords = bootstrapRecords, + portalConfig = portalConfig)) accumulator = # Building an accumulator from header epoch files takes > 2m30s and is @@ -132,8 +198,31 @@ proc run(config: PortalConf) {.raises: [CatchableError].} = except SszError as err: raiseAssert "Invalid baked-in accumulator: " & err.msg - historyNetwork = HistoryNetwork.new(d, db, streamManager, accumulator, - bootstrapRecords = bootstrapRecords, portalConfig = portalConfig) + historyNetwork = Opt.some(HistoryNetwork.new( + d, db, streamManager, accumulator, + bootstrapRecords = bootstrapRecords, + portalConfig = portalConfig)) + + beaconLightClient = + # TODO: Currently disabled by default as it is not sufficiently polished. + # Eventually this should be always-on functionality. 
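+      # Note that, as the expression below shows, the whole beacon light
+      # client stack (Portal light client network, database and `LightClient`)
+      # is only constructed when a trusted block root is configured; otherwise
+      # the expression evaluates to `Opt.none(LightClient)`.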
+ if config.trustedBlockRoot.isSome(): + let + # Fluffy works only over mainnet data currently + networkData = loadNetworkData("mainnet") + beaconLightClientDb = LightClientDb.new( + config.dataDir / "lightClientDb") + lightClientNetwork = LightClientNetwork.new( + d, + beaconLightClientDb, + streamManager, + networkData.forks, + bootstrapRecords = bootstrapRecords) + + Opt.some(initBeaconLightClient( + lightClientNetwork, networkData, config.trustedBlockRoot)) + else: + Opt.none(LightClient) # TODO: If no new network key is generated then we should first check if an # enr file exists, and in the case it does read out the seqNum from it and @@ -143,6 +232,8 @@ proc run(config: PortalConf) {.raises: [CatchableError].} = fatal "Failed to write the enr file", file = enrFile quit 1 + + ## Start metrics HTTP server if config.metricsEnabled: let address = config.metricsAddress @@ -155,101 +246,39 @@ proc run(config: PortalConf) {.raises: [CatchableError].} = # TODO: Ideally we don't have the Exception here except Exception as exc: raiseAssert exc.msg + ## Starting the different networks. + d.start() + if stateNetwork.isSome(): + stateNetwork.get().start() + if historyNetwork.isSome(): + historyNetwork.get().start() + if beaconLightClient.isSome(): + let lc = beaconLightClient.get() + lc.network.start() + lc.start() + + ## Starting the JSON-RPC APIs if config.rpcEnabled: let ta = initTAddress(config.rpcAddress, config.rpcPort) var rpcHttpServerWithProxy = RpcProxy.new([ta], config.proxyUri) - rpcHttpServerWithProxy.installEthApiHandlers(historyNetwork) rpcHttpServerWithProxy.installDiscoveryApiHandlers(d) - rpcHttpServerWithProxy.installPortalApiHandlers(stateNetwork.portalProtocol, "state") - rpcHttpServerWithProxy.installPortalApiHandlers(historyNetwork.portalProtocol, "history") - rpcHttpServerWithProxy.installPortalDebugApiHandlers(stateNetwork.portalProtocol, "state") - rpcHttpServerWithProxy.installPortalDebugApiHandlers(historyNetwork.portalProtocol, "history") - # TODO for now we can only proxy to local node (or remote one without ssl) to make it possible - # to call infura https://github.com/status-im/nim-json-rpc/pull/101 needs to get merged for http client to support https/ + if stateNetwork.isSome(): + rpcHttpServerWithProxy.installPortalApiHandlers( + stateNetwork.get().portalProtocol, "state") + if historyNetwork.isSome(): + rpcHttpServerWithProxy.installEthApiHandlers(historyNetwork.get()) + rpcHttpServerWithProxy.installPortalApiHandlers( + historyNetwork.get().portalProtocol, "history") + rpcHttpServerWithProxy.installPortalDebugApiHandlers( + historyNetwork.get().portalProtocol, "history") + if beaconLightClient.isSome(): + rpcHttpServerWithProxy.installPortalApiHandlers( + beaconLightClient.get().network.portalProtocol, "beaconLightClient") + # TODO: Test proxy with remote node over HTTPS waitFor rpcHttpServerWithProxy.start() let bridgeClient = initializeBridgeClient(config.bridgeUri) - d.start() - - # TODO: Currently disabled by default as it is not stable/polished enough, - # ultimatetely this should probably be always on. 
- if config.trustedBlockRoot.isSome(): - # fluffy light client works only over mainnet data - let - networkData = loadNetworkData("mainnet") - - db = LightClientDb.new(config.dataDir / "lightClientDb") - - lightClientNetwork = LightClientNetwork.new( - d, - db, - streamManager, - networkData.forks, - bootstrapRecords = bootstrapRecords) - - getBeaconTime = networkData.clock.getBeaconTimeFn() - - refDigests = newClone networkData.forks - - lc = LightClient.new( - lightClientNetwork, - rng, - networkData.metadata.cfg, - refDigests, - getBeaconTime, - networkData.genesis_validators_root, - LightClientFinalizationMode.Optimistic - ) - - # TODO: For now just log headers. Ultimately we should also use callbacks for each - # lc object to save them to db and offer them to the network. - proc onFinalizedHeader( - lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) = - withForkyHeader(finalizedHeader): - when lcDataFork > LightClientDataFork.None: - info "New LC finalized header", - finalized_header = shortLog(forkyHeader) - - proc onOptimisticHeader( - lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) = - withForkyHeader(optimisticHeader): - when lcDataFork > LightClientDataFork.None: - info "New LC optimistic header", - optimistic_header = shortLog(forkyHeader) - - lc.onFinalizedHeader = onFinalizedHeader - lc.onOptimisticHeader = onOptimisticHeader - lc.trustedBlockRoot = config.trustedBlockRoot - - proc onSecond(time: Moment) = - let wallSlot = getBeaconTime().slotOrZero() - # TODO this is a place to enable/disable gossip based on the current status - # of light client - # lc.updateGossipStatus(wallSlot + 1) - - proc runOnSecondLoop() {.async.} = - let sleepTime = chronos.seconds(1) - while true: - let start = chronos.now(chronos.Moment) - await chronos.sleepAsync(sleepTime) - let afterSleep = chronos.now(chronos.Moment) - let sleepTime = afterSleep - start - onSecond(start) - let finished = chronos.now(chronos.Moment) - let processingTime = finished - afterSleep - trace "onSecond task completed", sleepTime, processingTime - - onSecond(Moment.now()) - - lightClientNetwork.start() - lc.start() - - asyncSpawn runOnSecondLoop() - - historyNetwork.start() - stateNetwork.start() - runForever() when isMainModule: diff --git a/fluffy/network/beacon_light_client/beacon_light_client.nim b/fluffy/network/beacon_light_client/beacon_light_client.nim index 068508b201..af328951aa 100644 --- a/fluffy/network/beacon_light_client/beacon_light_client.nim +++ b/fluffy/network/beacon_light_client/beacon_light_client.nim @@ -27,7 +27,7 @@ type gcsafe, raises: [].} LightClient* = ref object - network: LightClientNetwork + network*: LightClientNetwork cfg: RuntimeConfig forkDigests: ref ForkDigests getBeaconTime: GetBeaconTimeFn diff --git a/fluffy/network/beacon_light_client/beacon_light_client_content.nim b/fluffy/network/beacon_light_client/beacon_light_client_content.nim index 1965dd41dc..f64a198dfb 100644 --- a/fluffy/network/beacon_light_client/beacon_light_client_content.nim +++ b/fluffy/network/beacon_light_client/beacon_light_client_content.nim @@ -103,10 +103,15 @@ func decodeSsz*(input: openArray[byte], T: type): Result[T, string] = except SszError as e: err(e.msg) -# TODO: Not sure at this point how this API should look best, but the current -# version is a bit weird as it provides both a Forked object and a forkDigest -# Lets see when we get to used it in the bridge, might require something -# like `forkDigestAtEpoch` instead. 
+# Yes, this API is odd as you pass a SomeForkedLightClientObject yet still have +# to also pass the ForkDigest. This is because we can't just select the right +# digest through the LightClientDataFork here as LightClientDataFork and +# ConsensusFork are not mapped 1-to-1. There is loss of fork data. +# This means we need to get the ConsensusFork directly, which is possible by +# passing the epoch (slot) from the object through `forkDigestAtEpoch`. This +# however requires the runtime config which is part of the `Eth2Node` object. +# Not something we would like to include as a parameter here, so we stick with +# just passing the forkDigest and doing the work outside of this encode call. func encodeForkedLightClientObject*( obj: SomeForkedLightClientObject, forkDigest: ForkDigest): seq[byte] = diff --git a/fluffy/rpc/rpc_calls/rpc_portal_calls.nim b/fluffy/rpc/rpc_calls/rpc_portal_calls.nim index 73bb4e8876..54ac8852d2 100644 --- a/fluffy/rpc/rpc_calls/rpc_portal_calls.nim +++ b/fluffy/rpc/rpc_calls/rpc_portal_calls.nim @@ -49,3 +49,29 @@ proc portal_historyRecursiveFindContent(contentKey: string): string proc portal_historyStore(contentKey: string, contentValue: string): bool proc portal_historyLocalContent(contentKey: string): string proc portal_historyGossip(contentKey: string, contentValue: string): int + +## Portal Beacon Light Client Network json-rpc calls +proc portal_beaconLightClientNodeInfo(): NodeInfo +proc portal_beaconLightClientRoutingTableInfo(): RoutingTableInfo +proc portal_beaconLightClientAddEnr(enr: Record): bool +proc portal_beaconLightClientAddEnrs(enrs: seq[Record]): bool +proc portal_beaconLightClientGetEnr(nodeId: NodeId): Record +proc portal_beaconLightClientDeleteEnr(nodeId: NodeId): bool +proc portal_beaconLightClientLookupEnr(nodeId: NodeId): Record +proc portal_beaconLightClientPing(enr: Record): tuple[ + enrSeq: uint64, customPayload: string] +proc portal_beaconLightClientFindNodes(enr: Record): seq[Record] +proc portal_beaconLightClientFindContent(enr: Record, contentKey: string): tuple[ + connectionId: Option[string], + content: Option[string], + enrs: Option[seq[Record]]] +proc portal_beaconLightClientFindContentFull(enr: Record, contentKey: string): tuple[ + content: Option[string], + enrs: Option[seq[Record]]] +proc portal_beaconLightClientOffer( + enr: Record, contentKey: string, contentValue: string): string +proc portal_beaconLightClientRecursiveFindNodes(nodeId: NodeId): seq[Record] +proc portal_beaconLightClientRecursiveFindContent(contentKey: string): string +proc portal_beaconLightClientStore(contentKey: string, contentValue: string): bool +proc portal_beaconLightClientLocalContent(contentKey: string): string +proc portal_beaconLightClientGossip(contentKey: string, contentValue: string): int From 15d0ccb39ca35b30647fd87df20b88ca98418aca Mon Sep 17 00:00:00 2001 From: Jordan Hrycaj Date: Fri, 17 Mar 2023 14:46:50 +0000 Subject: [PATCH 2/4] Prepare snap server client test scenario cont4 (#1507) * Add state root to node steps path register `RPath` or `XPath` why: Typically, the first node in the path register is the state root. There are occasions, when the path register is empty (i.e. there are no node references) which typically applies to a zero node key. In order to find the next node key greater than zero, the state root is is needed which is now part of the `RPath` or `XPath` data types. 
* Extracted hexary tree debugging functions into separate files * Update empty path fringe case for left/right node neighbour why: When starting at zero, the node steps path register would be empty. So will any path that is before the fist non-zero link of a state root (if it is a `Branch` node.) The `hexaryNearbyRight()` or `hexaryNearbyLeft()` function required a non-zero node steps path register. Now the first node is to be advanced starting at the first state root link if necessary. * Simplify/reorg neighbour node finder why: There was too mach code repetition for the cases * persistent or in-memory database * left or right move details: Most algorithms apply for persistent and in-memory alike. Using templates/generic functions most of these algorithms can be stated in a unified way * Update storage slots snap/1 handler details: Minor changes to be more debugging friendly. * Fix detection of full database for snap sync * Docu: Snap sync test & debugging scenario --- nimbus/sync/handlers/snap.nim | 106 +- nimbus/sync/snap/README.txt | 62 ++ nimbus/sync/snap/worker.nim | 18 +- nimbus/sync/snap/worker/db/hexary_debug.nim | 666 +++++++++++++ nimbus/sync/snap/worker/db/hexary_desc.nim | 201 +--- .../sync/snap/worker/db/hexary_envelope.nim | 23 +- nimbus/sync/snap/worker/db/hexary_error.nim | 8 + .../snap/worker/db/hexary_interpolate.nim | 10 +- nimbus/sync/snap/worker/db/hexary_nearby.nim | 943 ++++++------------ nimbus/sync/snap/worker/db/hexary_paths.nim | 260 +---- nimbus/sync/snap/worker/db/hexary_range.nim | 14 +- .../sync/snap/worker/db/snapdb_accounts.nim | 76 +- nimbus/sync/snap/worker/db/snapdb_debug.nim | 180 ++++ nimbus/sync/snap/worker/db/snapdb_desc.nim | 121 +-- nimbus/sync/snap/worker_desc.nim | 1 - tests/replay/undump_accounts.nim | 2 - tests/replay/undump_blocks.nim | 5 - tests/replay/undump_storages.nim | 2 - tests/test_sync_snap.nim | 14 +- tests/test_sync_snap/test_accounts.nim | 48 +- tests/test_sync_snap/test_node_range.nim | 28 +- 21 files changed, 1442 insertions(+), 1346 deletions(-) create mode 100644 nimbus/sync/snap/README.txt create mode 100644 nimbus/sync/snap/worker/db/hexary_debug.nim create mode 100644 nimbus/sync/snap/worker/db/snapdb_debug.nim diff --git a/nimbus/sync/handlers/snap.nim b/nimbus/sync/handlers/snap.nim index 82bda987e9..dd6e07208d 100644 --- a/nimbus/sync/handlers/snap.nim +++ b/nimbus/sync/handlers/snap.nim @@ -11,7 +11,7 @@ {.push raises: [].} import - std/sequtils, + std/[sequtils, strutils], chronicles, chronos, eth/[p2p, trie/trie_defs], @@ -19,7 +19,7 @@ import ../../db/db_chain, ../../core/chain, ../snap/[constants, range_desc], - ../snap/worker/db/[hexary_desc, hexary_paths, hexary_range], + ../snap/worker/db/[hexary_desc, hexary_error, hexary_paths, hexary_range], ../protocol, ../protocol/snap/snap_types @@ -43,7 +43,7 @@ const emptySnapStorageList = seq[SnapStorage].default ## Dummy list for empty slots - defaultElaFetchMax = 1500.milliseconds + defaultElaFetchMax = 990.milliseconds ## Fetching accounts or slots can be extensive, stop in the middle if ## it takes too long @@ -70,7 +70,7 @@ proc getAccountFn( return proc(key: openArray[byte]): Blob = db.get(key) -proc getStorageSlotsFn( +proc getStoSlotFn( chain: ChainRef; accKey: NodeKey; ): HexaryGetFn @@ -106,26 +106,37 @@ proc to( proc mkNodeTagRange( origin: openArray[byte]; limit: openArray[byte]; + nAccounts = 1; ): Result[NodeTagRange,void] = var (minPt, maxPt) = (low(NodeTag), high(NodeTag)) if 0 < origin.len or 0 < limit.len: + + # Range applies only if there is exactly 
one account. A number of accounts + # different from 1 may be used by `getStorageRanges()` + if nAccounts == 0: + return err() # oops: no account + + # Veriify range atguments if not minPt.init(origin) or not maxPt.init(limit) or maxPt <= minPt: when extraTraceMessages: trace logTxt "mkNodeTagRange: malformed range", origin, limit return err() + if 1 < nAccounts: + return ok(NodeTagRange.new(low(NodeTag), high(NodeTag))) + ok(NodeTagRange.new(minPt, maxPt)) proc fetchLeafRange( ctx: SnapWireRef; # Handler descriptor - db: HexaryGetFn; # Database abstraction + getFn: HexaryGetFn; # Database abstraction root: Hash256; # State root iv: NodeTagRange; # Proofed range of leaf paths replySizeMax: int; # Updated size counter for the raw list stopAt: Moment; # Implies timeout - ): Result[RangeProof,void] + ): Result[RangeProof,HexaryError] {.gcsafe, raises: [CatchableError].} = # Assemble result Note that the size limit is the size of the leaf nodes @@ -136,44 +147,48 @@ proc fetchLeafRange( sizeMax = replySizeMax - estimatedProofSize now = Moment.now() timeout = if now < stopAt: stopAt - now else: 1.milliseconds - rc = db.hexaryRangeLeafsProof(rootKey, iv, sizeMax, timeout) + rc = getFn.hexaryRangeLeafsProof(rootKey, iv, sizeMax, timeout) if rc.isErr: - debug logTxt "fetchLeafRange: database problem", + error logTxt "fetchLeafRange: database problem", iv, replySizeMax, error=rc.error - return err() # database error - let sizeOnWire = rc.value.leafsSize + rc.value.proofSize + return rc # database error + let sizeOnWire = rc.value.leafsSize + rc.value.proofSize if sizeOnWire <= replySizeMax: - return ok(rc.value) + return rc + + # Estimate the overhead size on wire needed for a single leaf tail item + const leafExtraSize = (sizeof RangeLeaf()) - (sizeof newSeq[Blob](0)) + + let nLeafs = rc.value.leafs.len + when extraTraceMessages: + trace logTxt "fetchLeafRange: reducing reply sample", + iv, sizeOnWire, replySizeMax, nLeafs # Strip parts of leafs result and amend remainder by adding proof nodes - var - rpl = rc.value - leafsTop = rpl.leafs.len - 1 - tailSize = 0 - tailItems = 0 - reduceBy = replySizeMax - sizeOnWire - while tailSize <= reduceBy and tailItems < leafsTop: - # Estimate the size on wire needed for the tail item - const extraSize = (sizeof RangeLeaf()) - (sizeof newSeq[Blob](0)) - tailSize += rpl.leafs[leafsTop - tailItems].data.len + extraSize + var (tailSize, tailItems, reduceBy) = (0, 0, replySizeMax - sizeOnWire) + while tailSize <= reduceBy: tailItems.inc - if leafsTop <= tailItems: - debug logTxt "fetchLeafRange: stripping leaf list failed", - iv, replySizeMax, leafsTop, tailItems - return err() # package size too small + if nLeafs <= tailItems: + when extraTraceMessages: + trace logTxt "fetchLeafRange: stripping leaf list failed", + iv, replySizeMax, nLeafs, tailItems + return err(DataSizeError) # empty tail (package size too small) + tailSize += rc.value.leafs[^tailItems].data.len + leafExtraSize - rpl.leafs.setLen(leafsTop - tailItems - 1) # chop off one more for slack + # Provide truncated leafs list let - leafProof = db.hexaryRangeLeafsProof(rootKey, rpl) + leafProof = getFn.hexaryRangeLeafsProof( + rootKey, RangeProof(leafs: rc.value.leafs[0 ..< nLeafs - tailItems])) strippedSizeOnWire = leafProof.leafsSize + leafProof.proofSize if strippedSizeOnWire <= replySizeMax: return ok(leafProof) - debug logTxt "fetchLeafRange: data size problem", - iv, replySizeMax, leafsTop, tailItems, strippedSizeOnWire + when extraTraceMessages: + trace logTxt "fetchLeafRange: data size problem", 
+ iv, replySizeMax, nLeafs, tailItems, strippedSizeOnWire - err() + err(DataSizeError) # ------------------------------------------------------------------------------ # Private functions: peer observer @@ -254,8 +269,8 @@ method getAccountRange*( iv = block: # Calculate effective accounts range (if any) let rc = origin.mkNodeTagRange limit if rc.isErr: - return - rc.value # malformed interval + return # malformed interval + rc.value db = ctx.chain.getAccountFn stopAt = Moment.now() + ctx.elaFetchMax @@ -293,10 +308,10 @@ method getStorageRanges*( let iv = block: # Calculate effective slots range (if any) - let rc = origin.mkNodeTagRange limit + let rc = origin.mkNodeTagRange(limit, accounts.len) if rc.isErr: - return - rc.value # malformed interval + return # malformed interval + rc.value accGetFn = ctx.chain.getAccountFn rootKey = root.to(NodeKey) @@ -331,19 +346,30 @@ method getStorageRanges*( accDataLen=accData.len, stoRoot continue - # Collect data slots for this account + # Stop unless there is enough space left + if sizeMax - dataAllocated <= estimatedProofSize: + break + + # Prepare for data collection let - db = ctx.chain.getStorageSlotsFn(accKey) - rc = ctx.fetchLeafRange(db, stoRoot, iv, sizeMax - dataAllocated, stopAt) + slotsGetFn = ctx.chain.getStoSlotFn(accKey) + sizeLeft = sizeMax - dataAllocated + + # Collect data slots for this account + let rc = ctx.fetchLeafRange(slotsGetFn, stoRoot, iv, sizeLeft, stopAt) if rc.isErr: when extraTraceMessages: - trace logTxt "getStorageRanges: failed", iv, sizeMax, dataAllocated, - accDataLen=accData.len, stoRoot + trace logTxt "getStorageRanges: failed", iv, sizeMax, sizeLeft, + accDataLen=accData.len, stoRoot, error=rc.error return # extraction failed # Process data slots for this account dataAllocated += rc.value.leafsSize + when extraTraceMessages: + if accounts.len == 1: + trace logTxt "getStorageRanges: single account", iv, accKey, stoRoot + #trace logTxt "getStorageRanges: data slots", iv, sizeMax, dataAllocated, # accKey, stoRoot, nSlots=rc.value.leafs.len, nProof=rc.value.proof.len diff --git a/nimbus/sync/snap/README.txt b/nimbus/sync/snap/README.txt new file mode 100644 index 0000000000..ba84648b97 --- /dev/null +++ b/nimbus/sync/snap/README.txt @@ -0,0 +1,62 @@ +Snap sync test & debugging scenario +=================================== + + +Start snap/1 server +------------------- + + # Enter nimbus directory for snap/1 protocol server. + cd server + + # Tell nimbus to stop full sync after 2 mio blocks. + echo 2000000 > full-limit.txt + + # Tell nimbus to use this predefined key ID + echo 123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0 > full-id.key + + ./build/nimbus \ + --tcp-port:30319 --nat=None --sync-mode=full \ + --protocols=snap --discovery=none \ + --net-key=./full-id.key \ + --sync-ctrl-file=./full-limit.txt \ + --log-level:TRACE + + # Wait for several hours until enough blocks have been downloaded so that + # snap sync data are available. The full 2 mio blocks are available if the + # log ticker shows something like + # + # INF 2023-03-17 [..] Sync statistics (suspended) topics="full-tick" [..] persistent=#2000080 [..] + # + # where the persistent=#2000080 field might vary + + +Start snap/1 client +------------------- + + # Note: When the snap/1 server has enough blocks, the client can be started. + + # Enter nimbus directory for snap/1 protocol server + cd client + + # Tell nimbus to use this pivot block number. 
This number must be smaller + # than the 2000000 written into the file full-limit.txt above. + echo 600000 > snap/snap-update.txt. + + # Tell nimbus to use this hard coded peer enode. + echo enode://192d7e7a302bd4ff27f48d7852621e0d3cb863a6dd67dd44e0314a25a3aa866837f0d2460b4444dc66e7b7a2cd56a2de1c31b2a2ba4e23549bf3ba3b0c4f2eb5@127.0.0.1:30319 > snap/full-servers.txt + + ./build/nimbus \ + --tcp-port:30102 --nat=None --sync-mode=snap \ + --protocols=none --discovery=none \ + --static-peers-file=./full-servers.txt \ + --sync-ctrl-file=./snap-update.txt \ + --log-level:TRACE + + +Modifications while the programs are syncing +-------------------------------------------- + + # Increasing the number in the files full/full-limit.txt or + # snap/snap-update.txt will be recognised while running. Decreasing + # or removing will be ignored. + diff --git a/nimbus/sync/snap/worker.nim b/nimbus/sync/snap/worker.nim index b982da3191..bd365f8d50 100644 --- a/nimbus/sync/snap/worker.nim +++ b/nimbus/sync/snap/worker.nim @@ -225,14 +225,21 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} = return # nothing to do rc.value pivot = "#" & $env.stateHeader.blockNumber # for logging + nStorQuAtStart = env.fetchStorageFull.len + + env.fetchStoragePart.len + + env.parkedStorage.len buddy.only.pivotEnv = env # Full sync processsing based on current snapshot # ----------------------------------------------- - if env.storageDone: + + # Check whether this pivot is fully downloaded + if env.fetchAccounts.processed.isFull and nStorQuAtStart == 0: trace "Snap full sync -- not implemented yet", peer, pivot await sleepAsync(5.seconds) + # flip over to single mode for getting new instructins + buddy.ctrl.multiOk = false return # Snapshot sync processing @@ -248,9 +255,8 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} = nAccounts {.used.} = env.nAccounts nSlotLists {.used.} = env.nSlotLists processed {.used.} = env.fetchAccounts.processed.fullFactor.toPC(2) - nStoQu {.used.} = env.fetchStorageFull.len + env.fetchStoragePart.len trace "Multi sync runner", peer, pivot, nAccounts, nSlotLists, processed, - nStoQu + nStoQu=nStorQuAtStart # This one is the syncing work horse which downloads the database await env.execSnapSyncAction(buddy) @@ -260,7 +266,7 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} = nAccounts = env.nAccounts nSlotLists = env.nSlotLists processed = env.fetchAccounts.processed.fullFactor.toPC(2) - nStoQu = env.fetchStorageFull.len + env.fetchStoragePart.len + nStoQuLater = env.fetchStorageFull.len + env.fetchStoragePart.len if env.archived: # Archive pivot if it became stale @@ -273,11 +279,11 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} = let rc = env.saveCheckpoint(ctx) if rc.isErr: error "Failed to save recovery checkpoint", peer, pivot, nAccounts, - nSlotLists, processed, nStoQu, error=rc.error + nSlotLists, processed, nStoQu=nStoQuLater, error=rc.error else: when extraTraceMessages: trace "Saved recovery checkpoint", peer, pivot, nAccounts, nSlotLists, - processed, nStoQu, blobSize=rc.value + processed, nStoQu=nStoQuLater, blobSize=rc.value # ------------------------------------------------------------------------------ # End diff --git a/nimbus/sync/snap/worker/db/hexary_debug.nim b/nimbus/sync/snap/worker/db/hexary_debug.nim new file mode 100644 index 0000000000..baecef0348 --- /dev/null +++ b/nimbus/sync/snap/worker/db/hexary_debug.nim @@ -0,0 +1,666 @@ +# nimbus-eth1 +# Copyright (c) 2021 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, 
([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed +# except according to those terms. + +## Find node paths in hexary tries. + +{.push raises: [].} + +import + std/[algorithm, sequtils, sets, strutils, tables, times], + chronos, + eth/[common, trie/nibbles], + stew/results, + ../../range_desc, + "."/[hexary_desc, hexary_error] + +proc next*(path: XPath; getFn: HexaryGetFn; minDepth = 64): XPath + {.gcsafe, raises: [CatchableError].} + +proc prev*(path: XPath; getFn: HexaryGetFn; minDepth = 64): XPath + {.gcsafe, raises: [CatchableError].} + +# ------------------------------------------------------------------------------ +# Private pretty printing helpers +# ------------------------------------------------------------------------------ + +proc asDateTime(m: Moment): DateTime = + ## Approximate UTC based `DateTime` for a `Moment` + let + utcNow = times.now().utc + momNow = Moment.now() + utcNow + initDuration(nanoseconds = (m - momNow).nanoseconds) + +# -------------- + +proc toPfx(indent: int): string = + "\n" & " ".repeat(indent) + +proc ppImpl(s: string; hex = false): string = + ## For long strings print `begin..end` only + if hex: + let n = (s.len + 1) div 2 + (if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. s.len-1]) & + "[" & (if 0 < n: "#" & $n else: "") & "]" + elif s.len <= 30: + s + else: + (if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]) & + "..(" & $s.len & ").." & s[s.len-16 ..< s.len] + +proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string = + if key.isZero: + return "ø" + if not key.isNodekey: + var num: uint64 + (addr num).copyMem(unsafeAddr key.ByteArray33[25], 8) + return "%" & $num + try: + if not disablePrettyKeys and not db.keyPp.isNil: + return db.keyPp(key) + except CatchableError: + discard + key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii + +proc ppImpl(key: NodeKey; db: HexaryTreeDbRef): string = + key.to(RepairKey).ppImpl(db) + +proc ppImpl(w: openArray[RepairKey]; db: HexaryTreeDbRef): string = + w.mapIt(it.ppImpl(db)).join(",") + +proc ppImpl(w: openArray[Blob]; db: HexaryTreeDbRef): string = + var q: seq[RepairKey] + for a in w: + var key: RepairKey + discard key.init(a) + q.add key + q.ppImpl(db) + +proc ppStr(blob: Blob): string = + if blob.len == 0: "" + else: blob.mapIt(it.toHex(2)).join.toLowerAscii.ppImpl(hex = true) + +proc ppImpl(n: RNodeRef; db: HexaryTreeDbRef): string = + let so = n.state.ord + case n.kind: + of Leaf: + ["l","ł","L","R"][so] & "(" & $n.lPfx & "," & n.lData.ppStr & ")" + of Extension: + ["e","€","E","R"][so] & "(" & $n.ePfx & "," & n.eLink.ppImpl(db) & ")" + of Branch: + ["b","þ","B","R"][so] & "(" & n.bLink.ppImpl(db) & "," & n.bData.ppStr & ")" + +proc ppImpl(n: XNodeObj; db: HexaryTreeDbRef): string = + case n.kind: + of Leaf: + "l(" & $n.lPfx & "," & n.lData.ppStr & ")" + of Extension: + var key: RepairKey + discard key.init(n.eLink) + "e(" & $n.ePfx & "," & key.ppImpl(db) & ")" + of Branch: + "b(" & n.bLink[0..15].ppImpl(db) & "," & n.bLink[16].ppStr & ")" + +proc ppImpl(w: RPathStep; db: HexaryTreeDbRef): string = + let + nibble = if 0 <= w.nibble: w.nibble.toHex(1).toLowerAscii else: "ø" + key = w.key.ppImpl(db) + "(" & key & "," & nibble & "," & w.node.ppImpl(db) & ")" + +proc ppImpl(w: XPathStep; db: HexaryTreeDbRef): string = + let nibble = if 0 <= w.nibble: w.nibble.toHex(1).toLowerAscii else: "ø" + 
var key: RepairKey + discard key.init(w.key) + "(" & key.ppImpl(db) & "," & $nibble & "," & w.node.ppImpl(db) & ")" + +proc ppImpl(db: HexaryTreeDbRef; root: NodeKey): seq[string] = + ## Dump the entries from the a generic repair tree. This function assumes + ## that mapped keys are printed `$###` if a node is locked or static, and + ## some substitute for the first letter `$` otherwise (if they are mutable.) + proc toKey(s: string): uint64 = + try: + result = s[1 ..< s.len].parseUint + except ValueError as e: + raiseAssert "Ooops ppImpl(s=" & s & "): name=" & $e.name & " msg=" & e.msg + if s[0] != '$': + result = result or (1u64 shl 63) + proc cmpIt(x, y: (uint64,string)): int = + cmp(x[0],y[0]) + + var accu: seq[(uint64,string)] + if root.ByteArray32 != ByteArray32.default: + accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")] + for key,node in db.tab.pairs: + accu.add ( + key.ppImpl(db).tokey, + "(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")") + + accu.sorted(cmpIt).mapIt(it[1]) + +# ------------------------------------------------------------------------------ +# Private helpers +# ------------------------------------------------------------------------------ + +proc getNibblesImpl(path: XPath; start = 0): NibblesSeq = + ## Re-build the key path + for n in start ..< path.path.len: + let it = path.path[n] + case it.node.kind: + of Branch: + result = result & @[it.nibble.byte].initNibbleRange.slice(1) + of Extension: + result = result & it.node.ePfx + of Leaf: + result = result & it.node.lPfx + result = result & path.tail + +proc getLeafData(path: XPath): Blob = + ## Return the leaf data from a successful `XPath` computation (if any.) + ## Note that this function also exists as `hexary_paths.leafData()` but + ## the import of this file is avoided. + if path.tail.len == 0 and 0 < path.path.len: + let node = path.path[^1].node + case node.kind: + of Branch: + return node.bLink[16] + of Leaf: + return node.lData + of Extension: + discard + +proc toBranchNode( + rlp: Rlp + ): XNodeObj + {.gcsafe, raises: [RlpError].} = + var rlp = rlp + XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob])) + +proc toLeafNode( + rlp: Rlp; + pSegm: NibblesSeq + ): XNodeObj + {.gcsafe, raises: [RlpError].} = + XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes) + +proc toExtensionNode( + rlp: Rlp; + pSegm: NibblesSeq + ): XNodeObj + {.gcsafe, raises: [RlpError].} = + XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes) + + +proc to(node: XNodeObj; T: type RNodeRef): T = + case node.kind: + of Leaf: + result = T( + kind: Leaf, + lData: node.lData, + lPfx: node.lPfx) + of Extension: + result = T( + kind: Extension, + eLink: node.eLink.convertTo(RepairKey), + ePfx: node.ePfx) + of Branch: + result = T( + kind: Branch, + bData: node.bLink[16]) + for n in 0 .. 15: + result.bLink[n] = node.bLink[n].convertTo(RepairKey) + +# ------------------------------------------------------------------------------ +# Private functions +# ------------------------------------------------------------------------------ + +proc pathLeast( + path: XPath; + key: Blob; + getFn: HexaryGetFn; + ): XPath + {.gcsafe, raises: [CatchableError].} = + ## For the partial path given, extend by branch nodes with least node + ## indices. 
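+  ##
+  ## The walk below is iterative: leaf and extension nodes are followed
+  ## directly, while at a branch node the child with the smallest non-empty
+  ## link index is taken. When a sub-tree turns out to be a dead end, the
+  ## last branch node on the path is revisited and the next larger child
+  ## index is tried (see the `loopRecurse` block.)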
+ result = path + result.tail = EmptyNibbleRange + result.depth = result.getNibblesImpl.len + + var + key = key + value = key.getFn() + if value.len == 0: + return + + while true: + block loopContinue: + let nodeRlp = rlpFromBytes value + case nodeRlp.listLen: + of 2: + let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes + + # Leaf node + if isLeaf: + let node = nodeRlp.toLeafNode(pathSegment) + result.path.add XPathStep(key: key, node: node, nibble: -1) + result.depth += pathSegment.len + return # done ok + + let node = nodeRlp.toExtensionNode(pathSegment) + if 0 < node.eLink.len: + value = node.eLink.getFn() + if 0 < value.len: + result.path.add XPathStep(key: key, node: node, nibble: -1) + result.depth += pathSegment.len + key = node.eLink + break loopContinue + of 17: + # Branch node + let node = nodeRlp.toBranchNode + if node.bLink[16].len != 0 and 64 <= result.depth: + result.path.add XPathStep(key: key, node: node, nibble: -1) + return # done ok + + for inx in 0 .. 15: + let newKey = node.bLink[inx] + if 0 < newKey.len: + value = newKey.getFn() + if 0 < value.len: + result.path.add XPathStep(key: key, node: node, nibble: inx.int8) + result.depth.inc + key = newKey + break loopContinue + else: + discard + + # Recurse (iteratively) + while true: + block loopRecurse: + # Modify last branch node and try again + if result.path[^1].node.kind == Branch: + for inx in result.path[^1].nibble+1 .. 15: + let newKey = result.path[^1].node.bLink[inx] + if 0 < newKey.len: + value = newKey.getFn() + if 0 < value.len: + result.path[^1].nibble = inx.int8 + key = newKey + break loopContinue + # Failed, step back and try predecessor branch. + while path.path.len < result.path.len: + case result.path[^1].node.kind: + of Branch: + result.depth.dec + result.path.setLen(result.path.len - 1) + break loopRecurse + of Extension: + result.depth -= result.path[^1].node.ePfx.len + result.path.setLen(result.path.len - 1) + of Leaf: + return # Ooops + return # Failed + # Notreached + # End while + # Notreached + + +proc pathMost( + path: XPath; + key: Blob; + getFn: HexaryGetFn; + ): XPath + {.gcsafe, raises: [CatchableError].} = + ## For the partial path given, extend by branch nodes with greatest node + ## indices. 
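+  ##
+  ## Mirror image of `pathLeast()`: at a branch node the child with the
+  ## greatest non-empty link index is followed, and on backtracking the
+  ## next smaller child index is tried.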
+ result = path + result.tail = EmptyNibbleRange + result.depth = result.getNibblesImpl.len + + var + key = key + value = key.getFn() + if value.len == 0: + return + + while true: + block loopContinue: + let nodeRlp = rlpFromBytes value + case nodeRlp.listLen: + of 2: + let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes + + # Leaf node + if isLeaf: + let node = nodeRlp.toLeafNode(pathSegment) + result.path.add XPathStep(key: key, node: node, nibble: -1) + result.depth += pathSegment.len + return # done ok + + # Extension node + let node = nodeRlp.toExtensionNode(pathSegment) + if 0 < node.eLink.len: + value = node.eLink.getFn() + if 0 < value.len: + result.path.add XPathStep(key: key, node: node, nibble: -1) + result.depth += pathSegment.len + key = node.eLink + break loopContinue + of 17: + # Branch node + let node = nodeRlp.toBranchNode + if node.bLink[16].len != 0 and 64 <= result.depth: + result.path.add XPathStep(key: key, node: node, nibble: -1) + return # done ok + + for inx in 15.countDown(0): + let newKey = node.bLink[inx] + if 0 < newKey.len: + value = newKey.getFn() + if 0 < value.len: + result.path.add XPathStep(key: key, node: node, nibble: inx.int8) + result.depth.inc + key = newKey + break loopContinue + else: + discard + + # Recurse (iteratively) + while true: + block loopRecurse: + # Modify last branch node and try again + if result.path[^1].node.kind == Branch: + for inx in (result.path[^1].nibble-1).countDown(0): + let newKey = result.path[^1].node.bLink[inx] + if 0 < newKey.len: + value = newKey.getFn() + if 0 < value.len: + result.path[^1].nibble = inx.int8 + key = newKey + break loopContinue + # Failed, step back and try predecessor branch. + while path.path.len < result.path.len: + case result.path[^1].node.kind: + of Branch: + result.depth.dec + result.path.setLen(result.path.len - 1) + break loopRecurse + of Extension: + result.depth -= result.path[^1].node.ePfx.len + result.path.setLen(result.path.len - 1) + of Leaf: + return # Ooops + return # Failed + # Notreached + # End while + # Notreached + +# --------------- + +proc fillFromLeft( + db: HexaryTreeDbRef; # Target in-memory database + rootKey: NodeKey; # State root for persistent source database + getFn: HexaryGetFn; # Source database abstraction + maxLeafs = 5000; # Error if more than this many leaf nodes + ): Result[int,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Import persistent sub-tree into target database + + # Find first least path + var + here = XPath(root: rootKey).pathLeast(rootkey.to(Blob), getFn) + countSteps = 0 + + if 0 < here.path.len: + while true: + countSteps.inc + + # Import records + for step in here.path: + db.tab[step.key.convertTo(RepairKey)] = step.node.to(RNodeRef) + + # Get next path + let topKey = here.path[^1].key + here = here.next(getFn) + + # Check for end condition + if here.path.len == 0: + break + if topKey == here.path[^1].key: + return err(GarbledNextLeaf) # Ooops + if maxLeafs <= countSteps: + return err(LeafMaxExceeded) + + ok(countSteps) + +proc fillFromRight( + db: HexaryTreeDbRef; # Target in-memory database + rootKey: NodeKey; # State root for persistent source database + getFn: HexaryGetFn; # Source database abstraction + maxLeafs = 5000; # Error if more than this many leaf nodes + ): Result[int,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Import persistent sub-tree into target database + + # Find first least path + var + here = XPath(root: rootKey).pathMost(rootkey.to(Blob), getFn) + countSteps = 0 + + if 0 < 
here.path.len: + while true: + countSteps.inc + + # Import records + for step in here.path: + db.tab[step.key.convertTo(RepairKey)] = step.node.to(RNodeRef) + + # Get next path + let topKey = here.path[^1].key + here = here.prev(getFn) + + # Check for end condition + if here.path.len == 0: + break + if topKey == here.path[^1].key: + return err(GarbledNextLeaf) # Ooops + if maxLeafs <= countSteps: + return err(LeafMaxExceeded) + + ok(countSteps) + +# ------------------------------------------------------------------------------ +# Public functions, pretty printing +# ------------------------------------------------------------------------------ + +proc pp*(s: string; hex = false): string = + ## For long strings print `begin..end` only + s.ppImpl(hex) + +proc pp*(w: NibblesSeq): string = + $w + +proc pp*(key: RepairKey): string = + ## Raw key, for referenced key dump use `key.pp(db)` below + key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.tolowerAscii + +proc pp*(key: NodeKey): string = + ## Raw key, for referenced key dump use `key.pp(db)` below + key.ByteArray32.toSeq.mapIt(it.toHex(2)).join.tolowerAscii + +proc pp*(key: NodeKey|RepairKey; db: HexaryTreeDbRef): string = + key.ppImpl(db) + +proc pp*( + w: RNodeRef|XNodeObj|RPathStep|XPathStep; + db: HexaryTreeDbRef; + ): string = + w.ppImpl(db) + +proc pp*( + w: openArray[RPathStep|XPathStep]; + db:HexaryTreeDbRef; + delim: string; + ): string = + w.toSeq.mapIt(it.ppImpl(db)).join(delim) + +proc pp*( + w: openArray[RPathStep|XPathStep]; + db: HexaryTreeDbRef; + indent = 4; + ): string = + w.pp(db, indent.toPfx) + +proc pp*(w: RPath|XPath; db: HexaryTreeDbRef; delim: string): string = + result = "<" & w.root.pp(db) & ">" + if 0 < w.path.len: + result &= delim & w.path.pp(db, delim) + result &= delim & "(" & $w.tail + when typeof(w) is XPath: + result &= "," & $w.depth + result &= ")" + +proc pp*(w: RPath|XPath; db: HexaryTreeDbRef; indent=4): string = + w.pp(db, indent.toPfx) + + +proc pp*(db: HexaryTreeDbRef; root: NodeKey; delim: string): string = + ## Dump the entries from the a generic accounts trie. These are + ## key value pairs for + ## :: + ## Branch: ($1,b(<$2,$3,..,$17>,)) + ## Extension: ($18,e(832b5e..06e697,$19)) + ## Leaf: ($20,l(cc9b5d..1c3b4,f84401..f9e5129d[#70])) + ## + ## where keys are typically represented as `$` or `¶` or `ø` + ## depending on whether a key is final (`$`), temporary (`¶`) + ## or unset/missing (`ø`). + ## + ## The node types are indicated by a letter after the first key before + ## the round brackets + ## :: + ## Branch: 'b', 'þ', or 'B' + ## Extension: 'e', '€', or 'E' + ## Leaf: 'l', 'ł', or 'L' + ## + ## Here a small letter indicates a `Static` node which was from the + ## original `proofs` list, a capital letter indicates a `Mutable` node + ## added on the fly which might need some change, and the decorated + ## letters stand for `Locked` nodes which are like `Static` ones but + ## added later (typically these nodes are update `Mutable` nodes.) + ## + ## Beware: dumping a large database is not recommended + db.ppImpl(root).join(delim) + +proc pp*(db: HexaryTreeDbRef; root: NodeKey; indent=4): string = + ## Dump the entries from the a generic repair tree. + db.pp(root, indent.toPfx) + + +proc pp*(m: Moment): string = + ## Prints a moment in time similar to *chronicles* time format. 
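+  ## The output looks like e.g. `2023-03-17 10:19:17.123+00:00`.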
+ m.asDateTime.format "yyyy-MM-dd HH:mm:ss'.'fff'+00:00'" + +# ------------------------------------------------------------------------------ +# Public functions, traversal over partial tree in persistent database +# ------------------------------------------------------------------------------ + +proc next*( + path: XPath; + getFn: HexaryGetFn; + minDepth = 64; + ): XPath + {.gcsafe, raises: [CatchableError].} = + ## Advance the argument `path` to the next leaf node (if any.). The + ## `minDepth` argument requires the result of `next()` to satisfy + ## `minDepth <= next().getNibbles.len`. + var pLen = path.path.len + + # Find the last branch in the path, increase link and step down + while 0 < pLen: + + # Find branch none + pLen.dec + + let it = path.path[pLen] + if it.node.kind == Branch and it.nibble < 15: + + # Find the next item to the right in the branch list + for inx in (it.nibble + 1) .. 15: + let link = it.node.bLink[inx] + if link.len != 0: + let + branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8) + walk = path.path[0 ..< pLen] & branch + newPath = XPath(root: path.root, path: walk).pathLeast(link, getFn) + if minDepth <= newPath.depth and 0 < newPath.getLeafData.len: + return newPath + + +proc prev*( + path: XPath; + getFn: HexaryGetFn; + minDepth = 64; + ): XPath + {.gcsafe, raises: [CatchableError].} = + ## Advance the argument `path` to the previous leaf node (if any.) The + ## `minDepth` argument requires the result of `next()` to satisfy + ## `minDepth <= next().getNibbles.len`. + var pLen = path.path.len + + # Find the last branch in the path, decrease link and step down + while 0 < pLen: + + # Find branch none + pLen.dec + let it = path.path[pLen] + if it.node.kind == Branch and 0 < it.nibble: + + # Find the next item to the right in the branch list + for inx in (it.nibble - 1).countDown(0): + let link = it.node.bLink[inx] + if link.len != 0: + let + branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8) + walk = path.path[0 ..< pLen] & branch + newPath = XPath(root: path.root, path: walk).pathMost(link,getFn) + if minDepth <= newPath.depth and 0 < newPath.getLeafData.len: + return newPath + + +proc fromPersistent*( + db: HexaryTreeDbRef; # Target in-memory database + rootKey: NodeKey; # State root for persistent source database + getFn: HexaryGetFn; # Source database abstraction + maxLeafs = 5000; # Error if more than this many leaf nodes + reverse = false; # Fill left to right by default + ): Result[int,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Import persistent sub-tree into target database + if reverse: + db.fillFromLeft(rootKey, getFn, maxLeafs) + else: + db.fillFromRight(rootKey, getFn, maxLeafs) + +proc fromPersistent*( + rootKey: NodeKey; # State root for persistent source database + getFn: HexaryGetFn; # Source database abstraction + maxLeafs = 5000; # Error if more than this many leaf nodes + reverse = false; # Fill left to right by default + ): Result[HexaryTreeDbRef,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Variant of `fromPersistent()` for an ad-hoc table + let + db = HexaryTreeDbRef() + rc = db.fromPersistent(rootKey, getFn, maxLeafs, reverse) + if rc.isErr: + return err(rc.error) + ok(db) + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/hexary_desc.nim b/nimbus/sync/snap/worker/db/hexary_desc.nim index 644b685523..b9e347be1e 100644 --- 
a/nimbus/sync/snap/worker/db/hexary_desc.nim +++ b/nimbus/sync/snap/worker/db/hexary_desc.nim @@ -8,15 +8,15 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. +{.push raises: [].} + import - std/[algorithm, hashes, sequtils, sets, strutils, tables], - eth/[common, p2p, trie/nibbles], + std/[hashes, sequtils, sets, tables], + eth/[common, trie/nibbles], stint, ../../range_desc, ./hexary_error -{.push raises: [].} - type HexaryPpFn* = proc(key: RepairKey): string {.gcsafe, raises: [CatchableError].} @@ -113,6 +113,7 @@ type nibble*: int8 ## Branch node selector (if any) RPath* = object + root*: RepairKey ## Root node needed when `path.len == 0` path*: seq[RPathStep] tail*: NibblesSeq ## Stands for non completed leaf path @@ -123,6 +124,7 @@ type nibble*: int8 ## Branch node selector (if any) XPath* = object + root*: NodeKey ## Root node needed when `path.len == 0` path*: seq[XPathStep] tail*: NibblesSeq ## Stands for non completed leaf path depth*: int ## May indicate path length (typically 64) @@ -172,14 +174,6 @@ proc isZero*(a: RepairKey): bool {.gcsafe.} # Private helpers # ------------------------------------------------------------------------------ -proc initImpl(key: var RepairKey; data: openArray[byte]): bool = - key.reset - if 0 < data.len and data.len <= 33: - let trg = addr key.ByteArray33[33 - data.len] - trg.copyMem(unsafeAddr data[0], data.len) - return true - - proc append(writer: var RlpWriter, node: RNodeRef) = ## Mixin for RLP writer proc appendOk(writer: var RlpWriter; key: RepairKey): bool = @@ -225,167 +219,16 @@ proc append(writer: var RlpWriter, node: XNodeObj) = writer.append(node.lPfx.hexPrefixEncode(isleaf = true)) writer.append(node.lData) -# ------------------------------------------------------------------------------ -# Private debugging helpers -# ------------------------------------------------------------------------------ - -proc to*(key: NodeKey; T: type RepairKey): T {.gcsafe.} - -proc toPfx(indent: int): string = - "\n" & " ".repeat(indent) - -proc ppImpl(s: string; hex = false): string = - ## For long strings print `begin..end` only - if hex: - let n = (s.len + 1) div 2 - (if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. s.len-1]) & - "[" & (if 0 < n: "#" & $n else: "") & "]" - elif s.len <= 30: - s - else: - (if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]) & - "..(" & $s.len & ").." 
& s[s.len-16 ..< s.len] - -proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string = - if key.isZero: - return "ø" - if not key.isNodekey: - var num: uint64 - (addr num).copyMem(unsafeAddr key.ByteArray33[25], 8) - return "%" & $num - try: - if not disablePrettyKeys and not db.keyPp.isNil: - return db.keyPp(key) - except CatchableError: - discard - key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii - -proc ppImpl(key: NodeKey; db: HexaryTreeDbRef): string = - key.to(RepairKey).ppImpl(db) - -proc ppImpl(w: openArray[RepairKey]; db: HexaryTreeDbRef): string = - w.mapIt(it.ppImpl(db)).join(",") - -proc ppImpl(w: openArray[Blob]; db: HexaryTreeDbRef): string = - var q: seq[RepairKey] - for a in w: - var key: RepairKey - discard key.initImpl(a) - q.add key - q.ppImpl(db) - -proc ppStr(blob: Blob): string = - if blob.len == 0: "" - else: blob.mapIt(it.toHex(2)).join.toLowerAscii.ppImpl(hex = true) - -proc ppImpl(n: RNodeRef; db: HexaryTreeDbRef): string = - let so = n.state.ord - case n.kind: - of Leaf: - ["l","ł","L","R"][so] & "(" & $n.lPfx & "," & n.lData.ppStr & ")" - of Extension: - ["e","€","E","R"][so] & "(" & $n.ePfx & "," & n.eLink.ppImpl(db) & ")" - of Branch: - ["b","þ","B","R"][so] & "(" & n.bLink.ppImpl(db) & "," & n.bData.ppStr & ")" - -proc ppImpl(n: XNodeObj; db: HexaryTreeDbRef): string = - case n.kind: - of Leaf: - "l(" & $n.lPfx & "," & n.lData.ppStr & ")" - of Extension: - var key: RepairKey - discard key.initImpl(n.eLink) - "e(" & $n.ePfx & "," & key.ppImpl(db) & ")" - of Branch: - "b(" & n.bLink[0..15].ppImpl(db) & "," & n.bLink[16].ppStr & ")" - -proc ppImpl(w: RPathStep; db: HexaryTreeDbRef): string = - let - nibble = if 0 <= w.nibble: w.nibble.toHex(1).toLowerAscii else: "ø" - key = w.key.ppImpl(db) - "(" & key & "," & nibble & "," & w.node.ppImpl(db) & ")" - -proc ppImpl(w: XPathStep; db: HexaryTreeDbRef): string = - let nibble = if 0 <= w.nibble: w.nibble.toHex(1).toLowerAscii else: "ø" - var key: RepairKey - discard key.initImpl(w.key) - "(" & key.ppImpl(db) & "," & $nibble & "," & w.node.ppImpl(db) & ")" - -proc ppImpl(db: HexaryTreeDbRef; root: NodeKey): seq[string] = - ## Dump the entries from the a generic repair tree. This function assumes - ## that mapped keys are printed `$###` if a node is locked or static, and - ## some substitute for the first letter `$` otherwise (if they are mutable.) 
- proc toKey(s: string): uint64 = - try: - result = s[1 ..< s.len].parseUint - except ValueError as e: - raiseAssert "Ooops ppImpl(s=" & s & "): name=" & $e.name & " msg=" & e.msg - if s[0] != '$': - result = result or (1u64 shl 63) - proc cmpIt(x, y: (uint64,string)): int = - cmp(x[0],y[0]) - - var accu: seq[(uint64,string)] - if root.ByteArray32 != ByteArray32.default: - accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")] - for key,node in db.tab.pairs: - accu.add ( - key.ppImpl(db).tokey, - "(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")") - - accu.sorted(cmpIt).mapIt(it[1]) - -# ------------------------------------------------------------------------------ -# Public debugging helpers -# ------------------------------------------------------------------------------ - -proc pp*(s: string; hex = false): string = - ## For long strings print `begin..end` only - s.ppImpl(hex) - -proc pp*(w: NibblesSeq): string = - $w - -proc pp*(key: RepairKey): string = - ## Raw key, for referenced key dump use `key.pp(db)` below - key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.tolowerAscii - -proc pp*(key: NodeKey): string = - ## Raw key, for referenced key dump use `key.pp(db)` below - key.ByteArray32.toSeq.mapIt(it.toHex(2)).join.tolowerAscii - -proc pp*(key: NodeKey|RepairKey; db: HexaryTreeDbRef): string = - key.ppImpl(db) - -proc pp*( - w: RNodeRef|XNodeObj|RPathStep|XPathStep; - db: HexaryTreeDbRef; - ): string = - w.ppImpl(db) - -proc pp*(w:openArray[RPathStep|XPathStep];db:HexaryTreeDbRef;indent=4): string = - w.toSeq.mapIt(it.ppImpl(db)).join(indent.toPfx) - -proc pp*(w: RPath; db: HexaryTreeDbRef; indent=4): string = - w.path.pp(db,indent) & indent.toPfx & "(" & $w.tail & ")" - -proc pp*(w: XPath; db: HexaryTreeDbRef; indent=4): string = - w.path.pp(db,indent) & indent.toPfx & "(" & $w.tail & "," & $w.depth & ")" - -proc pp*(db: HexaryTreeDbRef; root: NodeKey; indent=4): string = - ## Dump the entries from the a generic repair tree. - db.ppImpl(root).join(indent.toPfx) - -proc pp*(db: HexaryTreeDbRef; indent=4): string = - ## varinat of `pp()` above - db.ppImpl(NodeKey.default).join(indent.toPfx) - # ------------------------------------------------------------------------------ # Public constructor (or similar) # ------------------------------------------------------------------------------ proc init*(key: var RepairKey; data: openArray[byte]): bool = - key.initImpl(data) + key.reset + if 0 < data.len and data.len <= 33: + let trg = addr key.ByteArray33[33 - data.len] + trg.copyMem(unsafeAddr data[0], data.len) + return true proc newRepairKey*(db: HexaryTreeDbRef): RepairKey = db.repairKeyGen.inc @@ -434,7 +277,7 @@ proc convertTo*(data: Blob; T: type NodeTag): T = proc convertTo*(data: Blob; T: type RepairKey): T = ## Probably lossy conversion, use `init()` for safe conversion - discard result.initImpl(data) + discard result.init(data) proc convertTo*(node: RNodeRef; T: type Blob): T = ## Write the node as an RLP-encoded blob @@ -455,6 +298,26 @@ proc convertTo*(nodeList: openArray[XNodeObj]; T: type Blob): T = writer.append w writer.finish +proc padPartialPath*(pfx: NibblesSeq; dblNibble: byte): NodeKey = + ## Extend (or cut) `partialPath` nibbles sequence and generate `NodeKey`. + ## This function must be handled with some care regarding a meaningful value + ## for the `dblNibble` argument. Using values `0` or `255` is typically used + ## to create the minimum or maximum envelope value from the `pfx` argument. 
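+  ##
+  ## For example (values made up for illustration), a three nibble prefix
+  ## `abc` yields the node key `abc000..0` for `dblNibble = 0` and
+  ## `abcfff..f` for `dblNibble = 255`, i.e. the smallest and the largest
+  ## 64 nibble key sharing that prefix.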
+ # Pad with zeroes + var padded: NibblesSeq + + let padLen = 64 - pfx.len + if 0 <= padLen: + padded = pfx & dblNibble.repeat(padlen div 2).initNibbleRange + if (padLen and 1) == 1: + padded = padded & @[dblNibble].initNibbleRange.slice(1) + else: + let nope = seq[byte].default.initNibbleRange + padded = pfx.slice(0,64) & nope # nope forces re-alignment + + let bytes = padded.getBytes + (addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len) + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/hexary_envelope.nim b/nimbus/sync/snap/worker/db/hexary_envelope.nim index c3b2e02e15..96c68fad22 100644 --- a/nimbus/sync/snap/worker/db/hexary_envelope.nim +++ b/nimbus/sync/snap/worker/db/hexary_envelope.nim @@ -70,6 +70,9 @@ ## * then there is a ``w = partialPath & w-ext`` in ``W`` with ## ``p-ext = w-ext & some-ext``. ## + +{.push raises: [].} + import std/[algorithm, sequtils, tables], eth/[common, trie/nibbles], @@ -77,8 +80,6 @@ import ../../range_desc, "."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths] -{.push raises: [].} - # ------------------------------------------------------------------------------ # Private helpers # ------------------------------------------------------------------------------ @@ -136,24 +137,6 @@ template noRlpErrorOops(info: static[string]; code: untyped) = # Private functions # ------------------------------------------------------------------------------ -proc padPartialPath(pfx: NibblesSeq; dblNibble: byte): NodeKey = - ## Extend (or cut) `partialPath` nibbles sequence and generate `NodeKey` - # Pad with zeroes - var padded: NibblesSeq - - let padLen = 64 - pfx.len - if 0 <= padLen: - padded = pfx & dblNibble.repeat(padlen div 2).initNibbleRange - if (padLen and 1) == 1: - padded = padded & @[dblNibble].initNibbleRange.slice(1) - else: - let nope = seq[byte].default.initNibbleRange - padded = pfx.slice(0,64) & nope # nope forces re-alignment - - let bytes = padded.getBytes - (addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len) - - proc doDecomposeLeft( envQ: RPath|XPath; ivQ: RPath|XPath; diff --git a/nimbus/sync/snap/worker/db/hexary_error.nim b/nimbus/sync/snap/worker/db/hexary_error.nim index 1751b13f95..5cf529a35c 100644 --- a/nimbus/sync/snap/worker/db/hexary_error.nim +++ b/nimbus/sync/snap/worker/db/hexary_error.nim @@ -28,6 +28,13 @@ type TooManySlotAccounts NoAccountsYet + # debug + LeafMaxExceeded + GarbledNextLeaf + + # snap handler + DataSizeError + # range LeafNodeExpected FailedNextNode @@ -42,6 +49,7 @@ type NearbyEmptyPath NearbyLeafExpected NearbyDanglingLink + NearbyPathTail # envelope DecomposeDegenerated diff --git a/nimbus/sync/snap/worker/db/hexary_interpolate.nim b/nimbus/sync/snap/worker/db/hexary_interpolate.nim index e0a813c054..e81dd4d60e 100644 --- a/nimbus/sync/snap/worker/db/hexary_interpolate.nim +++ b/nimbus/sync/snap/worker/db/hexary_interpolate.nim @@ -14,6 +14,8 @@ ## purposes, it should be replaced by the new facility of the upcoming ## re-factored database layer. 
+{.push raises: [].} + import std/[tables], eth/[common, trie/nibbles], @@ -21,8 +23,6 @@ import ../../range_desc, "."/[hexary_desc, hexary_error, hexary_paths] -{.push raises: [].} - type RPathXStep = object ## Extended `RPathStep` needed for `NodeKey` assignmant @@ -114,6 +114,7 @@ proc rTreeExtendLeaf( if not key.isNodeKey: rPath.path[^1].node.bLink[nibble] = key return RPath( + root: rPath.root, path: rPath.path & RPathStep(key: key, node: leaf, nibble: -1), tail: EmptyNibbleRange) @@ -129,7 +130,10 @@ proc rTreeExtendLeaf( let nibble = rPath.tail[0].int8 xStep = RPathStep(key: key, node: node, nibble: nibble) - xPath = RPath(path: rPath.path & xStep, tail: rPath.tail.slice(1)) + xPath = RPath( + root: rPath.root, + path: rPath.path & xStep, + tail: rPath.tail.slice(1)) return db.rTreeExtendLeaf(xPath, db.newRepairKey()) diff --git a/nimbus/sync/snap/worker/db/hexary_nearby.nim b/nimbus/sync/snap/worker/db/hexary_nearby.nim index 62c615ee3d..3855c1ddc7 100644 --- a/nimbus/sync/snap/worker/db/hexary_nearby.nim +++ b/nimbus/sync/snap/worker/db/hexary_nearby.nim @@ -17,20 +17,22 @@ import ../../range_desc, "."/[hexary_desc, hexary_error, hexary_paths] -proc hexaryNearbyRight*(path: RPath; db: HexaryTreeDbRef; - ): Result[RPath,HexaryError] {.gcsafe, raises: [KeyError]} - -proc hexaryNearbyRight*(path: XPath; getFn: HexaryGetFn; - ): Result[XPath,HexaryError] {.gcsafe, raises: [CatchableError]} - # ------------------------------------------------------------------------------ # Private helpers # ------------------------------------------------------------------------------ +proc isZeroLink(a: Blob): bool = + ## Persistent database has `Blob` as key + a.len == 0 + +proc isZeroLink(a: RepairKey): bool = + ## Persistent database has `RepairKey` as key + a.isZero + proc toBranchNode( rlp: Rlp ): XNodeObj - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [RlpError].} = var rlp = rlp XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob])) @@ -38,16 +40,78 @@ proc toLeafNode( rlp: Rlp; pSegm: NibblesSeq ): XNodeObj - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [RlpError].} = XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes) proc toExtensionNode( rlp: Rlp; pSegm: NibblesSeq ): XNodeObj - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [RlpError].} = XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes) +proc getNode( + nodeKey: RepairKey; # Node key + db: HexaryTreeDbRef; # Database + ): Result[RNodeRef,HexaryError] + {.gcsafe, raises: [KeyError].} = + ## Fetch root node for given path + if db.tab.hasKey(nodeKey): + return ok(db.tab[nodeKey]) + err(NearbyDanglingLink) + +proc getNode( + nodeKey: openArray[byte]; # Node key + getFn: HexaryGetFn; # Database abstraction + ): Result[XNodeObj,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Variant of `getRootNode()` + let nodeData = nodeKey.getFn + if 0 < nodeData.len: + let nodeRlp = rlpFromBytes nodeData + case nodeRlp.listLen: + of 17: + return ok(nodeRlp.toBranchNode) + of 2: + let (isLeaf,pfx) = hexPrefixDecode nodeRlp.listElem(0).toBytes + if isleaf: + return ok(nodeRlp.toLeafNode pfx) + else: + return ok(nodeRlp.toExtensionNode pfx) + else: + return err(NearbyGarbledNode) + err(NearbyDanglingLink) + +proc getNode( + nodeKey: NodeKey; # Node key + getFn: HexaryGetFn; # Database abstraction + ): Result[XNodeObj,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Variant of `getRootNode()` + nodeKey.ByteArray32.getNode(getFn) + +# -------------------- + +proc branchNibbleMin(node: 
XNodeObj|RNodeRef; minInx: int8): int8 = + ## Find the least index for an argument branch `node` link with index + ## greater or equal the argument `nibble`. + if node.kind == Branch: + for n in minInx .. 15: + if not node.bLink[n].isZeroLink: + return n + -1 + +proc branchNibbleMax(node: XNodeObj|RNodeRef; maxInx: int8): int8 = + ## Find the greatest index for an argument branch `node` link with index + ## less or equal the argument `nibble`. + if node.kind == Branch: + for n in maxInx.countDown 0: + if not node.bLink[n].isZeroLink: + return n + -1 + +# -------------------- + proc `<=`(a, b: NibblesSeq): bool = ## Compare nibbles, different lengths are padded to the right with zeros let abMin = min(a.len, b.len) @@ -68,720 +132,355 @@ proc `<=`(a, b: NibblesSeq): bool = proc `<`(a, b: NibblesSeq): bool = not (b <= a) - -template noKeyErrorOops(info: static[string]; code: untyped) = - try: - code - except KeyError as e: - raiseAssert "Impossible KeyError (" & info & "): " & e.msg - -template noRlpErrorOops(info: static[string]; code: untyped) = - try: - code - except RlpError as e: - raiseAssert "Impossible RlpError (" & info & "): " & e.msg - -# ------------------------------------------------------------------------------ -# Private functions, wrappers -# ------------------------------------------------------------------------------ - -proc hexaryNearbyRightImpl( - baseTag: NodeTag; # Some node - rootKey: NodeKey; # State root - db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction - ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [CatchableError]} = - ## Wrapper - let path = block: - let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyRight(db) - if rc.isErr: - return err(rc.error) - rc.value - - if 0 < path.path.len and path.path[^1].node.kind == Leaf: - let nibbles = path.getNibbles - if nibbles.len == 64: - return ok(nibbles.getBytes.convertTo(NodeTag)) - - err(NearbyLeafExpected) - -proc hexaryNearbyLeftImpl( - baseTag: NodeTag; # Some node - rootKey: NodeKey; # State root - db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction - ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [CatchableError]} = - ## Wrapper - let path = block: - let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyLeft(db) - if rc.isErr: - return err(rc.error) - rc.value - - if 0 < path.path.len and path.path[^1].node.kind == Leaf: - let nibbles = path.getNibbles - if nibbles.len == 64: - return ok(nibbles.getBytes.convertTo(NodeTag)) - - err(NearbyLeafExpected) - # ------------------------------------------------------------------------------ # Private functions # ------------------------------------------------------------------------------ -proc completeLeast( - path: RPath; - key: RepairKey; - db: HexaryTreeDbRef; - pathLenMax = 64; - ): Result[RPath,HexaryError] - {.gcsafe, raises: [KeyError].} = - ## Extend path using least nodes without recursion. - var rPath = RPath(path: path.path) +proc complete( + path: RPath|XPath; # Partially expanded path + key: RepairKey|NodeKey|Blob; # Start key + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + pathLenMax: int; # Beware of loops (if any) + doLeast: static[bool]; # Direction: *least* or *most* + ): auto + {.gcsafe, raises: [CatchableError].} = + ## Extend path using least or last nodes without recursion. 
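
As a bridge between the helpers above and the unified body that follows: `branchNibbleMin()`/`branchNibbleMax()` are what `complete()` (and `nearbyNext()` below) use to select the descent direction that used to be hard-coded in `completeLeast()`/`completeMost()`. A small sketch with a made-up branch node:

    # `node` is assumed to be an RNodeRef or XNodeObj of kind Branch
    let
      loInx = node.branchNibbleMin(0)    # lowest child index with a non-empty link, or -1
      hiInx = node.branchNibbleMax(15)   # highest child index with a non-empty link, or -1
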
+ var uPath = typeof(path)(root: path.root, path: path.path) - if not db.tab.hasKey(key): - return err(NearbyDanglingLink) + let firstNode = key.getNode(db) + if firstNode.isErr: + return Result[typeof(path),HexaryError].err(firstNode.error) var key = key - node = db.tab[key] + node = firstNode.value - while rPath.path.len < pathLenMax: + while uPath.path.len < pathLenMax: case node.kind: of Leaf: - rPath.path.add RPathStep(key: key, node: node, nibble: -1) - return ok(rPath) # done + uPath.path.add typeof(path.path[0])(key: key, node: node, nibble: -1) + return ok(uPath) # done of Extension: - block useExtensionLink: - let newKey = node.eLink - if not newkey.isZero: - if db.tab.hasKey(newKey): - rPath.path.add RPathStep(key: key, node: node, nibble: -1) - key = newKey - node = db.tab[key] - break useExtensionLink - return err(NearbyExtensionError) # Oops, no way + let newKey = node.eLink + if not newkey.isZeroLink: + let newNode = newKey.getNode(db) + if newNode.isOK: + uPath.path.add typeof(path.path[0])(key: key, node: node, nibble: -1) + key = newKey + node = newNode.value + continue + return err(NearbyExtensionError) # Oops, no way of Branch: - block findBranchLink: - for inx in 0 .. 15: - let newKey = node.bLink[inx] - if not newKey.isZero: - if db.tab.hasKey(newKey): - rPath.path.add RPathStep(key: key, node: node, nibble: inx.int8) - key = newKey - node = db.tab[key] - break findBranchLink - return err(NearbyBranchError) # Oops, no way + let n = block: + when doLeast: + node.branchNibbleMin 0 + else: + node.branchNibbleMax 15 + if 0 <= n: + let + newKey = node.bLink[n] + newNode = newKey.getNode(db) + if newNode.isOK: + uPath.path.add typeof(path.path[0])(key: key, node: node, nibble: n) + key = newKey + node = newNode.value + continue + return err(NearbyBranchError) # Oops, no way err(NearbyNestingTooDeep) -proc completeLeast( - path: XPath; - key: Blob; - getFn: HexaryGetFn; - pathLenMax = 64; - ): Result[XPath,HexaryError] - {.gcsafe, raises: [CatchableError].} = - ## Variant of `completeLeast()` for persistent database - var xPath = XPath(path: path.path) - - if key.getFn().len == 0: - return err(NearbyDanglingLink) - var - key = key - nodeRlp = rlpFromBytes key.getFn() - - while xPath.path.len < pathLenMax: - case nodeRlp.listLen: - of 2: - let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes - if isLeaf: - let node = nodeRlp.toLeafNode(pathSegment) - xPath.path.add XPathStep(key: key, node: node, nibble: -1) - return ok(xPath) # done - - # Extension - block useExtensionLink: - let - node = nodeRlp.toExtensionNode(pathSegment) - newKey = node.eLink - if 0 < newKey.len: - let newNode = newKey.getFn() - if 0 < newNode.len: - xPath.path.add XPathStep(key: key, node: node, nibble: -1) - key = newKey - nodeRlp = rlpFromBytes newNode - break useExtensionLink - return err(NearbyExtensionError) # Oops, no way - - of 17: - block findBranchLink: - let node = nodeRlp.toBranchNode() - for inx in 0 .. 
15: - let newKey = node.bLink[inx] - if 0 < newKey.len: - let newNode = newKey.getFn() - if 0 < newNode.len: - xPath.path.add XPathStep(key: key, node: node, nibble: inx.int8) - key = newKey - nodeRlp = rlpFromBytes newNode - break findBranchLink - return err(NearbyBranchError) # Oops, no way - +proc zeroAdjust( + path: XPath|RPath; # Partially expanded path + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + doLeast: static[bool]; # Direction: *least* or *most* + ): auto + {.gcsafe, raises: [CatchableError].} = + ## Adjust empty argument path to the first node entry to the right. Ths + ## applies is the argument path `path` is before the first entry in the + ## database. The result is a path which is aligned with the first entry. + proc accept(p: typeof(path); pfx: NibblesSeq): bool = + when doLeast: + p.tail <= pfx else: - return err(NearbyGarbledNode) # Oops, no way - - err(NearbyNestingTooDeep) + pfx <= p.tail + proc branchNibble(w: typeof(path.path[0].node); n: int8): int8 = + when doLeast: + w.branchNibbleMin n + else: + w.branchNibbleMax n -proc completeMost( - path: RPath; - key: RepairKey; - db: HexaryTreeDbRef; - pathLenMax = 64; - ): Result[RPath,HexaryError] - {.gcsafe, raises: [KeyError].} = - ## Extend path using max nodes without recursion. - var rPath = RPath(path: path.path) - - if not db.tab.hasKey(key): - return err(NearbyDanglingLink) - var - key = key - node = db.tab[key] + if path.path.len == 0: + let root = path.root.getNode(db) + if root.isOk: + block fail: + var pfx: NibblesSeq + case root.value.kind: + of Branch: + # Find first non-dangling link and assign it + if path.tail.len == 0: + break fail + let n = root.value.branchNibble path.tail[0].int8 + if n < 0: + break fail + pfx = @[n.byte].initNibbleRange.slice(1) - while rPath.path.len < pathLenMax: - case node.kind: - of Leaf: - rPath.path.add RPathStep(key: key, node: node, nibble: -1) - return ok(rPath) # done + of Extension: + let ePfx = root.value.ePfx + # Must be followed by a branch node + if path.tail.len < 2 or not path.accept(ePfx): + break fail + let node = root.value.eLink.getNode(db) + if node.isErr: + break fail + let n = node.value.branchNibble path.tail[1].int8 + if n < 0: + break fail + pfx = ePfx & @[n.byte].initNibbleRange.slice(1) - of Extension: - block useExtensionLink: - let newKey = node.eLink - if not newkey.isZero: - if db.tab.hasKey(newKey): - rPath.path.add RPathStep(key: key, node: node, nibble: -1) - key = newKey - node = db.tab[newKey] - break useExtensionLink - return err(NearbyExtensionError) # Oops, no way + of Leaf: + pfx = root.value.lPfx + if not path.accept(pfx): + break fail - of Branch: - block findBranchLink: - for inx in 15.countDown(0): - let newKey = node.bLink[inx] - if not newKey.isZero: - if db.tab.hasKey(newKey): - rPath.path.add RPathStep(key: key, node: node, nibble: inx.int8) - key = newKey - node = db.tab[key] - break findBranchLink - return err(NearbyBranchError) # Oops, no way + return pfx.padPartialPath(0).hexaryPath(path.root, db) + path - err(NearbyNestingTooDeep) -proc completeMost( - path: XPath; - key: Blob; - getFn: HexaryGetFn; - pathLenMax = 64; - ): Result[XPath,HexaryError] +proc finalise( + path: XPath|RPath; # Partially expanded path + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + ): auto {.gcsafe, raises: [CatchableError].} = - ## Variant of `completeLeast()` for persistent database - var xPath = XPath(path: path.path) + ## Handle some pathological cases after main processing failed + if path.path.len == 0: + return 
Result[typeof(path),HexaryError].err(NearbyEmptyPath) - if key.getFn().len == 0: - return err(NearbyDanglingLink) - var - key = key - nodeRlp = rlpFromBytes key.getFn() + # Pathological cases + # * finalise right: nfffff.. for n < f or + # * finalise left: n00000.. for 0 < n + if path.path[0].node.kind == Branch or + (1 < path.path.len and path.path[1].node.kind == Branch): + return err(NearbyFailed) # no more nodes - while xPath.path.len < pathLenMax: - case nodeRlp.listLen: - of 2: - let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes - if isLeaf: - let node = nodeRlp.toLeafNode(pathSegment) - xPath.path.add XPathStep(key: key, node: node, nibble: -1) - return ok(xPath) # done - - # Extension - block useExtensionLink: - let - node = nodeRlp.toExtensionNode(pathSegment) - newKey = node.eLink - if 0 < newKey.len: - let newNode = newKey.getFn() - if 0 < newNode.len: - xPath.path.add XPathStep(key: key, node: node, nibble: -1) - key = newKey - nodeRlp = rlpFromBytes newNode - break useExtensionLink - return err(NearbyExtensionError) # Oops, no way + err(NearbyUnexpectedNode) # error - of 17: - block findBranchLink: - let node = nodeRlp.toBranchNode() - for inx in 15.countDown(0): - let newKey = node.bLink[inx] - if 0 < newKey.len: - let newNode = newKey.getFn() - if 0 < newNode.len: - xPath.path.add XPathStep(key: key, node: node, nibble: inx.int8) - key = newKey - nodeRlp = rlpFromBytes newNode - break findBranchLink - return err(NearbyBranchError) # Oops, no way +proc nearbyNext( + path: RPath|XPath; # Partially expanded path + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + doLeast: static[bool]; # Direction: *least* or *most* + pathLenMax = 64; # Beware of loops (if any) + ): auto + {.gcsafe, raises: [CatchableError].} = + ## Unified implementation of `hexaryNearbyRight()` and `hexaryNearbyLeft()`. + proc accept(nibble: int8): bool = + ## Accept `nibble` unless on boundaty dependent on `doLeast` + when doLeast: + nibble < 15 else: - return err(NearbyGarbledNode) # Oops, no way + 0 < nibble - err(NearbyNestingTooDeep) - -# ------------------------------------------------------------------------------ -# Public functions, left boundary proofs (moving right) -# ------------------------------------------------------------------------------ + proc accept(p: typeof(path); pfx: NibblesSeq): bool = + when doLeast: + p.tail <= pfx + else: + pfx <= p.tail -proc hexaryNearbyRight*( - path: RPath; # Partially expanded path - db: HexaryTreeDbRef; # Database - ): Result[RPath,HexaryError] - {.gcsafe, raises: [KeyError]} = - ## Extends the maximally extended argument nodes `path` to the right (i.e. - ## with non-decreasing path value). This is similar to the - ## `hexary_path.next()` function, only that this algorithm does not - ## backtrack if there are dangling links in between and rather returns - ## an error. - ## - ## This code is intended to be used for verifying a left-bound proof to - ## verify that there is no leaf node *right* of a boundary path value. 
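
The `nearbyNext()` implementation that continues below replaces the four near-identical `hexaryNearbyRight()`/`hexaryNearbyLeft()` bodies removed in this hunk; only the compile-time `doLeast` flag differs between the two directions. A rough caller-side sketch using the `NodeTag` wrappers defined later in this file (variable names made up, `db` may be a `HexaryTreeDbRef` or a `HexaryGetFn`):

    let
      rightLeaf = baseTag.hexaryNearbyRight(rootKey, db)  # nearest leaf at or right of baseTag
      leftLeaf  = baseTag.hexaryNearbyLeft(rootKey, db)   # nearest leaf at or left of baseTag
    if rightLeaf.isErr:
      discard rightLeaf.error   # e.g. NearbyFailed when no leaf exists to the right
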
+ proc branchNibbleNext(w: typeof(path.path[0].node); n: int8): int8 = + when doLeast: + w.branchNibbleMin(n + 1) + else: + w.branchNibbleMax(n - 1) # Some easy cases + var path = path.zeroAdjust(db, doLeast) if path.path.len == 0: - return err(NearbyEmptyPath) # error - if path.path[^1].node.kind == Leaf: - return ok(path) + return Result[typeof(path),HexaryError].err(NearbyEmptyPath) # error var - rPath = path + uPath = path start = true - while 0 < rPath.path.len: - let top = rPath.path[^1] + while 0 < uPath.path.len: + let top = uPath.path[^1] case top.node.kind: of Leaf: - return err(NearbyUnexpectedNode) + return ok(uPath) of Branch: - if top.nibble < 0 or rPath.tail.len == 0: + if top.nibble < 0 or uPath.tail.len == 0: return err(NearbyUnexpectedNode) of Extension: - rPath.tail = top.node.ePfx & rPath.tail - rPath.path.setLen(rPath.path.len - 1) + uPath.tail = top.node.ePfx & uPath.tail + uPath.path.setLen(uPath.path.len - 1) continue var step = top let - rPathLen = rPath.path.len # in case of backtracking - rPathTail = rPath.tail # in case of backtracking + uPathLen = uPath.path.len # in case of backtracking + uPathTail = uPath.tail # in case of backtracking # Look ahead checking next node if start: - let topLink = top.node.bLink[top.nibble] - if topLink.isZero or not db.tab.hasKey(topLink): - return err(NearbyDanglingLink) # error + let + topLink = top.node.bLink[top.nibble] + nextNode = block: + if topLink.isZeroLink: + return err(NearbyDanglingLink) # error + let rc = topLink.getNode(db) + if rc.isErr: + return err(rc.error) # error + rc.value - let nextNode = db.tab[topLink] case nextNode.kind of Leaf: - if rPath.tail <= nextNode.lPfx: - return rPath.completeLeast(topLink, db) + if uPath.accept(nextNode.lPfx): + return uPath.complete(topLink, db, pathLenMax, doLeast) of Extension: - if rPath.tail <= nextNode.ePfx: - return rPath.completeLeast(topLink, db) + if uPath.accept(nextNode.ePfx): + return uPath.complete(topLink, db, pathLenMax, doLeast) of Branch: - let nextNibble = rPath.tail[0].int8 - if start and nextNibble < 15: + let nextNibble = uPath.tail[0].int8 + if start and accept(nextNibble): # Step down and complete with a branch link on the child node - step = RPathStep( + step = typeof(path.path[0])( key: topLink, node: nextNode, nibble: nextNibble) - rPath.path &= step + uPath.path &= step - # Find the next item to the right of the current top entry - for inx in (step.nibble + 1) .. 15: - let link = step.node.bLink[inx] - if not link.isZero: - rPath.path[^1].nibble = inx.int8 - return rPath.completeLeast(link, db) + # Find the next item to the right/left of the current top entry + let n = step.node.branchNibbleNext step.nibble + if 0 <= n: + uPath.path[^1].nibble = n + return uPath.complete(step.node.bLink[n], db, pathLenMax, doLeast) if start: # Retry without look ahead start = false - # Restore `rPath` (pop temporary extra step) - if rPathLen < rPath.path.len: - rPath.path.setLen(rPathLen) - rPath.tail = rPathTail + # Restore `uPath` (pop temporary extra step) + if uPathLen < uPath.path.len: + uPath.path.setLen(uPathLen) + uPath.tail = uPathTail else: # Pop current `Branch` node on top and append nibble to `tail` - rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail - rPath.path.setLen(rPath.path.len - 1) + uPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & uPath.tail + uPath.path.setLen(uPath.path.len - 1) # End while - # Pathological case: nfffff.. for n < f - var step = path.path[0] - for inx in (step.nibble + 1) .. 
15: - let link = step.node.bLink[inx] - if not link.isZero: - step.nibble = inx.int8 - rPath.path = @[step] - return rPath.completeLeast(link, db) + # Handle some pathological cases + return path.finalise(db) - err(NearbyFailed) # error -proc hexaryNearbyRight*( - path: XPath; # Partially expanded path - getFn: HexaryGetFn; # Database abstraction - ): Result[XPath,HexaryError] - {.gcsafe, raises: [CatchableError]} = - ## Variant of `hexaryNearbyRight()` for persistant database - - # Some easy cases - if path.path.len == 0: - return err(NearbyEmptyPath) # error - if path.path[^1].node.kind == Leaf: - return ok(path) - - var - xPath = path - start = true - while 0 < xPath.path.len: - let top = xPath.path[^1] - case top.node.kind: - of Leaf: - return err(NearbyUnexpectedNode) - of Branch: - if top.nibble < 0 or xPath.tail.len == 0: - return err(NearbyUnexpectedNode) - of Extension: - xPath.tail = top.node.ePfx & xPath.tail - xPath.path.setLen(xPath.path.len - 1) - continue - - var - step = top - let - xPathLen = xPath.path.len # in case of backtracking - xPathTail = xPath.tail # in case of backtracking - - # Look ahead checking next node - if start: - let topLink = top.node.bLink[top.nibble] - if topLink.len == 0 or topLink.getFn().len == 0: - return err(NearbyDanglingLink) # error - - let nextNodeRlp = rlpFromBytes topLink.getFn() - case nextNodeRlp.listLen: - of 2: - if xPath.tail <= nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1]: - return xPath.completeLeast(topLink, getFn) - of 17: - let nextNibble = xPath.tail[0].int8 - if nextNibble < 15: - # Step down and complete with a branch link on the child node - step = XPathStep( - key: topLink, - node: nextNodeRlp.toBranchNode, - nibble: nextNibble) - xPath.path &= step - else: - return err(NearbyGarbledNode) # error - - # Find the next item to the right of the current top entry - for inx in (step.nibble + 1) .. 15: - let link = step.node.bLink[inx] - if 0 < link.len: - xPath.path[^1].nibble = inx.int8 - return xPath.completeLeast(link, getFn) +proc nearbyNext( + baseTag: NodeTag; # Some node + rootKey: NodeKey; # State root + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + doLeast: static[bool]; # Direction: *least* or *most* + pathLenMax = 64; # Beware of loops (if any) + ): Result[NodeTag,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Variant of `nearbyNext()`, convenience wrapper + let rc = baseTag.hexaryPath(rootKey, db).nearbyNext(db, doLeast) + if rc.isErr: + return err(rc.error) - if start: - # Retry without look ahead - start = false + let path = rc.value + if 0 < path.path.len and path.path[^1].node.kind == Leaf: + let nibbles = path.getNibbles + if nibbles.len == 64: + return ok(nibbles.getBytes.convertTo(NodeTag)) - # Restore `xPath` (pop temporary extra step) - if xPathLen < xPath.path.len: - xPath.path.setLen(xPathLen) - xPath.tail = xPathTail - else: - # Pop current `Branch` node on top and append nibble to `tail` - xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail - xPath.path.setLen(xPath.path.len - 1) - # End while + err(NearbyLeafExpected) - # Pathological case: nfffff.. for n < f - var step = path.path[0] - for inx in (step.nibble + 1) .. 
15: - let link = step.node.bLink[inx] - if 0 < link.len: - step.nibble = inx.int8 - xPath.path = @[step] - return xPath.completeLeast(link, getFn) +# ------------------------------------------------------------------------------ +# Public functions, moving and right boundary proof +# ------------------------------------------------------------------------------ - err(NearbyFailed) # error +proc hexaryNearbyRight*( + path: RPath|XPath; # Partially expanded path + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + ): auto + {.gcsafe, raises: [CatchableError].} = + ## Extends the maximally extended argument nodes `path` to the right (i.e. + ## with non-decreasing path value). This is similar to the + ## `hexary_path.next()` function, only that this algorithm does not + ## backtrack if there are dangling links in between and rather returns + ## an error. + ## + ## This code is intended to be used for verifying a left-bound proof to + ## verify that there is no leaf node *right* of a boundary path value. + path.nearbyNext(db, doLeast=true) +proc hexaryNearbyRight*( + baseTag: NodeTag; # Some node + rootKey: NodeKey; # State root + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + ): Result[NodeTag,HexaryError] + {.gcsafe, raises: [CatchableError].} = + ## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather + ## than `RPath` or `XPath` ones. + baseTag.nearbyNext(rootKey, db, doLeast=true) proc hexaryNearbyRightMissing*( - path: RPath; - db: HexaryTreeDbRef; - ): bool - {.gcsafe, raises: [KeyError]} = + path: RPath|XPath; # Partially expanded path + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + ): Result[bool,HexaryError] + {.gcsafe, raises: [KeyError].} = ## Returns `true` if the maximally extended argument nodes `path` is the ## rightmost on the hexary trie database. It verifies that there is no more ## leaf entry to the right of the argument `path`. ## ## This code is intended be used for verifying a left-bound proof. - if 0 < path.path.len and 0 < path.tail.len: - let top = path.path[^1] - if top.node.kind == Branch and 0 <= top.nibble: - - let topLink = top.node.bLink[top.nibble] - if not topLink.isZero and db.tab.hasKey(topLink): - let - nextNibble = path.tail[0] - nextNode = db.tab[topLink] + if path.path.len == 0: + return err(NearbyEmptyPath) + if 0 < path.tail.len: + return err(NearbyPathTail) + + let top = path.path[^1] + if top.node.kind != Branch or top.nibble < 0: + return err(NearbyBranchError) + + let nextNode = block: + let topLink = top.node.bLink[top.nibble] + if topLink.isZeroLink: + return err(NearbyDanglingLink) # error + let rc = topLink.getNode(db) + if rc.isErr: + return err(rc.error) # error + rc.value - case nextNode.kind - of Leaf: - return nextNode.lPfx < path.tail + case nextNode.kind + of Leaf: + return ok(nextNode.lPfx < path.tail) + of Extension: + return ok(nextNode.ePfx < path.tail) + of Branch: + return ok(nextNode.branchNibbleMin(path.tail[0].int8) < 0) - of Extension: - return nextNode.ePfx < path.tail - - of Branch: - # Step down and verify that there is no branch link - for inx in nextNibble .. 
15: - if not nextNode.bLink[inx].isZero: - return false - return true - -# ------------------------------------------------------------------------------ -# Public functions, right boundary proofs (moving left) -# ------------------------------------------------------------------------------ proc hexaryNearbyLeft*( - path: RPath; # Partially expanded path - db: HexaryTreeDbRef; # Database - ): Result[RPath,HexaryError] - {.gcsafe, raises: [KeyError]} = + path: RPath|XPath; # Partially expanded path + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction + ): auto + {.gcsafe, raises: [CatchableError].} = ## Similar to `hexaryNearbyRight()`. ## ## This code is intended to be used for verifying a right-bound proof to ## verify that there is no leaf node *left* to a boundary path value. - - # Some easy cases - if path.path.len == 0: - return err(NearbyEmptyPath) # error - if path.path[^1].node.kind == Leaf: - return ok(path) - - var - rPath = path - start = true - while 0 < rPath.path.len: - let top = rPath.path[^1] - case top.node.kind: - of Leaf: - return err(NearbyUnexpectedNode) - of Branch: - if top.nibble < 0 or rPath.tail.len == 0: - return err(NearbyUnexpectedNode) - of Extension: - rPath.tail = top.node.ePfx & rPath.tail - rPath.path.setLen(rPath.path.len - 1) - continue - - var - step = top - let - rPathLen = rPath.path.len # in case of backtracking - rPathTail = rPath.tail # in case of backtracking - - # Look ahead checking next node - if start: - let topLink = top.node.bLink[top.nibble] - if topLink.isZero or not db.tab.hasKey(topLink): - return err(NearbyDanglingLink) # error - - let nextNode = db.tab[topLink] - case nextNode.kind - of Leaf: - if nextNode.lPfx <= rPath.tail: - return rPath.completeMost(topLink, db) - of Extension: - if nextNode.ePfx <= rPath.tail: - return rPath.completeMost(topLink, db) - of Branch: - let nextNibble = rPath.tail[0].int8 - if 0 < nextNibble: - # Step down and complete with a branch link on the child node - step = RPathStep( - key: topLink, - node: nextNode, - nibble: nextNibble) - rPath.path &= step - - # Find the next item to the right of the new top entry - for inx in (step.nibble - 1).countDown(0): - let link = step.node.bLink[inx] - if not link.isZero: - rPath.path[^1].nibble = inx.int8 - return rPath.completeMost(link, db) - - if start: - # Retry without look ahead - start = false - - # Restore `rPath` (pop temporary extra step) - if rPathLen < rPath.path.len: - rPath.path.setLen(rPathLen) - rPath.tail = rPathTail - else: - # Pop current `Branch` node on top and append nibble to `tail` - rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail - rPath.path.setLen(rPath.path.len - 1) - # End while - - # Pathological case: n0000.. 
for 0 < n - var step = path.path[0] - for inx in (step.nibble - 1).countDown(0): - let link = step.node.bLink[inx] - if not link.isZero: - step.nibble = inx.int8 - rPath.path = @[step] - return rPath.completeMost(link, db) - - err(NearbyFailed) # error - - -proc hexaryNearbyLeft*( - path: XPath; # Partially expanded path - getFn: HexaryGetFn; # Database abstraction - ): Result[XPath,HexaryError] - {.gcsafe, raises: [CatchableError]} = - ## Variant of `hexaryNearbyLeft()` for persistant database - - # Some easy cases - if path.path.len == 0: - return err(NearbyEmptyPath) # error - if path.path[^1].node.kind == Leaf: - return ok(path) - - var - xPath = path - start = true - while 0 < xPath.path.len: - let top = xPath.path[^1] - case top.node.kind: - of Leaf: - return err(NearbyUnexpectedNode) - of Branch: - if top.nibble < 0 or xPath.tail.len == 0: - return err(NearbyUnexpectedNode) - of Extension: - xPath.tail = top.node.ePfx & xPath.tail - xPath.path.setLen(xPath.path.len - 1) - continue - - var - step = top - let - xPathLen = xPath.path.len # in case of backtracking - xPathTail = xPath.tail # in case of backtracking - - # Look ahead checking next node - if start: - let topLink = top.node.bLink[top.nibble] - if topLink.len == 0 or topLink.getFn().len == 0: - return err(NearbyDanglingLink) # error - - let nextNodeRlp = rlpFromBytes topLink.getFn() - case nextNodeRlp.listLen: - of 2: - if nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1] <= xPath.tail: - return xPath.completeMost(topLink, getFn) - of 17: - let nextNibble = xPath.tail[0].int8 - if 0 < nextNibble: - # Step down and complete with a branch link on the child node - step = XPathStep( - key: topLink, - node: nextNodeRlp.toBranchNode, - nibble: nextNibble) - xPath.path &= step - else: - return err(NearbyGarbledNode) # error - - # Find the next item to the right of the new top entry - for inx in (step.nibble - 1).countDown(0): - let link = step.node.bLink[inx] - if 0 < link.len: - xPath.path[^1].nibble = inx.int8 - return xPath.completeMost(link, getFn) - - if start: - # Retry without look ahead - start = false - - # Restore `xPath` (pop temporary extra step) - if xPathLen < xPath.path.len: - xPath.path.setLen(xPathLen) - xPath.tail = xPathTail - else: - # Pop `Branch` node on top and append nibble to `tail` - xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail - xPath.path.setLen(xPath.path.len - 1) - # End while - - # Pathological case: n00000.. for 0 < n - var step = path.path[0] - for inx in (step.nibble - 1).countDown(0): - let link = step.node.bLink[inx] - if 0 < link.len: - step.nibble = inx.int8 - xPath.path = @[step] - return xPath.completeMost(link, getFn) - - err(NearbyFailed) # error - -# ------------------------------------------------------------------------------ -# Public functions, convenience wrappers -# ------------------------------------------------------------------------------ - -proc hexaryNearbyRight*( - baseTag: NodeTag; # Some node - rootKey: NodeKey; # State root - db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction - ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [CatchableError]} = - ## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather - ## than `RPath` or `XPath` ones. 
- noRlpErrorOops("hexaryNearbyRight"): - return baseTag.hexaryNearbyRightImpl(rootKey, db) - + path.nearbyNext(db, doLeast=false) proc hexaryNearbyLeft*( baseTag: NodeTag; # Some node rootKey: NodeKey; # State root db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [CatchableError]} = + {.gcsafe, raises: [CatchableError].} = ## Similar to `hexaryNearbyRight()` for `NodeKey` arguments. - noRlpErrorOops("hexaryNearbyLeft"): - return baseTag.hexaryNearbyLeftImpl(rootKey, db) + baseTag.nearbyNext(rootKey, db, doLeast=false) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/sync/snap/worker/db/hexary_paths.nim b/nimbus/sync/snap/worker/db/hexary_paths.nim index 3c82cbb988..ba189cb8a7 100644 --- a/nimbus/sync/snap/worker/db/hexary_paths.nim +++ b/nimbus/sync/snap/worker/db/hexary_paths.nim @@ -89,16 +89,15 @@ proc toExtensionNode( # Private functions # ------------------------------------------------------------------------------ -proc pathExtend( +proc rootPathExtend( path: RPath; - key: RepairKey; db: HexaryTreeDbRef; ): RPath {.gcsafe, raises: [KeyError].} = ## For the given path, extend to the longest possible repair tree `db` ## path following the argument `path.tail`. result = path - var key = key + var key = path.root while db.tab.hasKey(key): let node = db.tab[key] @@ -127,15 +126,14 @@ proc pathExtend( key = node.eLink -proc pathExtend( +proc rootPathExtend( path: XPath; - key: Blob; getFn: HexaryGetFn; ): XPath {.gcsafe, raises: [CatchableError]} = ## Ditto for `XPath` rather than `RPath` result = path - var key = key + var key = path.root.to(Blob) while true: let value = key.getFn() if value.len == 0: @@ -187,187 +185,6 @@ proc pathExtend( # end while # notreached - -proc pathLeast( - path: XPath; - key: Blob; - getFn: HexaryGetFn; - ): XPath - {.gcsafe, raises: [CatchableError]} = - ## For the partial path given, extend by branch nodes with least node - ## indices. - result = path - result.tail = EmptyNibbleRange - result.depth = result.getNibblesImpl.len - - var - key = key - value = key.getFn() - if value.len == 0: - return - - while true: - block loopContinue: - let nodeRlp = rlpFromBytes value - case nodeRlp.listLen: - of 2: - let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes - - # Leaf node - if isLeaf: - let node = nodeRlp.toLeafNode(pathSegment) - result.path.add XPathStep(key: key, node: node, nibble: -1) - result.depth += pathSegment.len - return # done ok - - let node = nodeRlp.toExtensionNode(pathSegment) - if 0 < node.eLink.len: - value = node.eLink.getFn() - if 0 < value.len: - result.path.add XPathStep(key: key, node: node, nibble: -1) - result.depth += pathSegment.len - key = node.eLink - break loopContinue - of 17: - # Branch node - let node = nodeRlp.toBranchNode - if node.bLink[16].len != 0 and 64 <= result.depth: - result.path.add XPathStep(key: key, node: node, nibble: -1) - return # done ok - - for inx in 0 .. 15: - let newKey = node.bLink[inx] - if 0 < newKey.len: - value = newKey.getFn() - if 0 < value.len: - result.path.add XPathStep(key: key, node: node, nibble: inx.int8) - result.depth.inc - key = newKey - break loopContinue - else: - discard - - # Recurse (iteratively) - while true: - block loopRecurse: - # Modify last branch node and try again - if result.path[^1].node.kind == Branch: - for inx in result.path[^1].nibble+1 .. 
15: - let newKey = result.path[^1].node.bLink[inx] - if 0 < newKey.len: - value = newKey.getFn() - if 0 < value.len: - result.path[^1].nibble = inx.int8 - key = newKey - break loopContinue - # Failed, step back and try predecessor branch. - while path.path.len < result.path.len: - case result.path[^1].node.kind: - of Branch: - result.depth.dec - result.path.setLen(result.path.len - 1) - break loopRecurse - of Extension: - result.depth -= result.path[^1].node.ePfx.len - result.path.setLen(result.path.len - 1) - of Leaf: - return # Ooops - return # Failed - # Notreached - # End while - # Notreached - - -proc pathMost( - path: XPath; - key: Blob; - getFn: HexaryGetFn; - ): XPath - {.gcsafe, raises: [CatchableError]} = - ## For the partial path given, extend by branch nodes with greatest node - ## indices. - result = path - result.tail = EmptyNibbleRange - result.depth = result.getNibblesImpl.len - - var - key = key - value = key.getFn() - if value.len == 0: - return - - while true: - block loopContinue: - let nodeRlp = rlpFromBytes value - case nodeRlp.listLen: - of 2: - let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes - - # Leaf node - if isLeaf: - let node = nodeRlp.toLeafNode(pathSegment) - result.path.add XPathStep(key: key, node: node, nibble: -1) - result.depth += pathSegment.len - return # done ok - - # Extension node - let node = nodeRlp.toExtensionNode(pathSegment) - if 0 < node.eLink.len: - value = node.eLink.getFn() - if 0 < value.len: - result.path.add XPathStep(key: key, node: node, nibble: -1) - result.depth += pathSegment.len - key = node.eLink - break loopContinue - of 17: - # Branch node - let node = nodeRlp.toBranchNode - if node.bLink[16].len != 0 and 64 <= result.depth: - result.path.add XPathStep(key: key, node: node, nibble: -1) - return # done ok - - for inx in 15.countDown(0): - let newKey = node.bLink[inx] - if 0 < newKey.len: - value = newKey.getFn() - if 0 < value.len: - result.path.add XPathStep(key: key, node: node, nibble: inx.int8) - result.depth.inc - key = newKey - break loopContinue - else: - discard - - # Recurse (iteratively) - while true: - block loopRecurse: - # Modify last branch node and try again - if result.path[^1].node.kind == Branch: - for inx in (result.path[^1].nibble-1).countDown(0): - let newKey = result.path[^1].node.bLink[inx] - if 0 < newKey.len: - value = newKey.getFn() - if 0 < value.len: - result.path[^1].nibble = inx.int8 - key = newKey - break loopContinue - # Failed, step back and try predecessor branch. - while path.path.len < result.path.len: - case result.path[^1].node.kind: - of Branch: - result.depth.dec - result.path.setLen(result.path.len - 1) - break loopRecurse - of Extension: - result.depth -= result.path[^1].node.ePfx.len - result.path.setLen(result.path.len - 1) - of Leaf: - return # Ooops - return # Failed - # Notreached - # End while - # Notreached - # ------------------------------------------------------------------------------ # Public helpers # ------------------------------------------------------------------------------ @@ -432,7 +249,7 @@ proc hexaryPath*( ## Compute the longest possible repair tree `db` path matching the `nodeKey` ## nibbles. The `nodeNey` path argument comes before the `db` one for ## supporting a more functional notation. 
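
With the `root` field now stored inside `RPath`/`XPath`, the constructors below no longer hand the root key to the extender, and the nearby routines can re-anchor at `path.root` themselves (see `zeroAdjust()` earlier in this patch). An illustrative call chain, assuming the documented argument order of this `hexaryPath()` variant and made-up names:

    let
      partial = @[byte 0xab].initNibbleRange                        # some partial path
      rightP  = partial.hexaryPath(rootKey, db).hexaryNearbyRight(db)
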
- RPath(tail: partialPath).pathExtend(rootKey.to(RepairKey), db) + RPath(root: rootKey.to(RepairKey), tail: partialPath).rootPathExtend(db) proc hexaryPath*( nodeKey: NodeKey; @@ -469,7 +286,7 @@ proc hexaryPath*( ): XPath {.gcsafe, raises: [CatchableError]} = ## Compute the longest possible path on an arbitrary hexary trie. - XPath(tail: partialPath).pathExtend(rootKey.to(Blob), getFn) + XPath(root: rootKey, tail: partialPath).rootPathExtend(getFn) proc hexaryPath*( nodeKey: NodeKey; @@ -583,71 +400,6 @@ proc hexaryPathNodeKeys*( .mapIt(it.value) .toHashSet -# ------------------------------------------------------------------------------ -# Public functions, traversal -# ------------------------------------------------------------------------------ - -proc next*( - path: XPath; - getFn: HexaryGetFn; - minDepth = 64; - ): XPath - {.gcsafe, raises: [CatchableError]} = - ## Advance the argument `path` to the next leaf node (if any.). The - ## `minDepth` argument requires the result of `next()` to satisfy - ## `minDepth <= next().getNibbles.len`. - var pLen = path.path.len - - # Find the last branch in the path, increase link and step down - while 0 < pLen: - - # Find branch none - pLen.dec - - let it = path.path[pLen] - if it.node.kind == Branch and it.nibble < 15: - - # Find the next item to the right in the branch list - for inx in (it.nibble + 1) .. 15: - let link = it.node.bLink[inx] - if link.len != 0: - let - branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8) - walk = path.path[0 ..< pLen] & branch - newPath = XPath(path: walk).pathLeast(link, getFn) - if minDepth <= newPath.depth and 0 < newPath.leafData.len: - return newPath - -proc prev*( - path: XPath; - getFn: HexaryGetFn; - minDepth = 64; - ): XPath - {.gcsafe, raises: [CatchableError]} = - ## Advance the argument `path` to the previous leaf node (if any.) The - ## `minDepth` argument requires the result of `next()` to satisfy - ## `minDepth <= next().getNibbles.len`. - var pLen = path.path.len - - # Find the last branch in the path, decrease link and step down - while 0 < pLen: - - # Find branch none - pLen.dec - let it = path.path[pLen] - if it.node.kind == Branch and 0 < it.nibble: - - # Find the next item to the right in the branch list - for inx in (it.nibble - 1).countDown(0): - let link = it.node.bLink[inx] - if link.len != 0: - let - branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8) - walk = path.path[0 ..< pLen] & branch - newPath = XPath(path: walk).pathMost(link, getFn) - if minDepth <= newPath.depth and 0 < newPath.leafData.len: - return newPath - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/hexary_range.nim b/nimbus/sync/snap/worker/db/hexary_range.nim index 01a3dd5a1e..1a6be91cee 100644 --- a/nimbus/sync/snap/worker/db/hexary_range.nim +++ b/nimbus/sync/snap/worker/db/hexary_range.nim @@ -12,6 +12,7 @@ import std/[sequtils, sets, tables], + chronicles, chronos, eth/[common, p2p, trie/nibbles], stew/[byteutils, interval_set], @@ -109,8 +110,9 @@ template collectLeafs( ): auto = ## Collect trie database leafs prototype. This directive is provided as ## `template` for avoiding varying exceprion annotations. 
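
In the `collectLeafs` body that follows, the left boundary of the requested range is now obtained through the `NodeTag` level wrapper, so no manual path-to-tag conversion is needed any more. Roughly, with the names used in the hunk below:

    # old: nodeTag.hexaryPath(rootKey, db).hexaryNearbyLeft(db) plus a convertTo/to chain
    # new: the wrapper already returns a NodeTag
    let rx = nodeTag.hexaryNearbyLeft(rootKey, db)
    if rx.isOk:
      let base = rx.value        # left bound fed into the range proof
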
- var rc: Result[RangeProof,HexaryError] - + var + rc: Result[RangeProof,HexaryError] + ttd = stopAt block body: let nodeMax = maxPt(iv) # `inject` is for debugging (if any) @@ -121,9 +123,9 @@ template collectLeafs( # Set up base node, the nearest node before `iv.minPt` if 0.to(NodeTag) < nodeTag: - let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyLeft(db) + let rx = nodeTag.hexaryNearbyLeft(rootKey, db) if rx.isOk: - rls.base = getPartialPath(rx.value).convertTo(NodeKey).to(NodeTag) + rls.base = rx.value elif rx.error notin {NearbyFailed,NearbyEmptyPath}: rc = typeof(rc).err(rx.error) break body @@ -149,7 +151,7 @@ template collectLeafs( # Prevents from semi-endless looping if rightTag <= prevTag and 0 < rls.leafs.len: - # Oops, should have been tackeled by `hexaryNearbyRight()` + # Oops, should have been tackled by `hexaryNearbyRight()` rc = typeof(rc).err(FailedNextNode) break body # stop here @@ -165,7 +167,7 @@ template collectLeafs( key: rightKey, data: xPath.leafData) - if timeIsOver(stopAt): + if timeIsOver(ttd): break # timout prevTag = nodeTag diff --git a/nimbus/sync/snap/worker/db/snapdb_accounts.nim b/nimbus/sync/snap/worker/db/snapdb_accounts.nim index 460e3afeee..71064cc630 100644 --- a/nimbus/sync/snap/worker/db/snapdb_accounts.nim +++ b/nimbus/sync/snap/worker/db/snapdb_accounts.nim @@ -11,7 +11,7 @@ {.push raises: [].} import - std/[algorithm, sequtils, tables], + std/tables, chronicles, eth/[common, p2p, rlp, trie/nibbles], stew/[byteutils, interval_set], @@ -43,15 +43,6 @@ proc getAccountFn*(ps: SnapDbAccountsRef): HexaryGetFn proc to(h: Hash256; T: type NodeKey): T = h.data.T -proc convertTo(data: openArray[byte]; T: type Hash256): T = - discard result.data.NodeKey.init(data) # size error => zero - -template noKeyError(info: static[string]; code: untyped) = - try: - code - except KeyError as e: - raiseAssert "Not possible (" & info & "): " & e.msg - template noExceptionOops(info: static[string]; code: untyped) = try: code @@ -449,71 +440,6 @@ proc getAccountsData*( SnapDbAccountsRef.init( pv, root, Peer()).getAccountsData(path, persistent=true) -# ------------------------------------------------------------------------------ -# Public functions: additional helpers -# ------------------------------------------------------------------------------ - -proc sortMerge*(base: openArray[NodeTag]): NodeTag = - ## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets - ## so that there are no overlap which would be rejected by `merge()`. - ## - ## This function selects a `NodeTag` from a list. - result = high(NodeTag) - for w in base: - if w < result: - result = w - -proc sortMerge*(acc: openArray[seq[PackedAccount]]): seq[PackedAccount] = - ## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets - ## so that there are no overlap which would be rejected by `merge()`. - ## - ## This function flattens and sorts the argument account lists. 
- noKeyError("sortMergeAccounts"): - var accounts: Table[NodeTag,PackedAccount] - for accList in acc: - for item in accList: - accounts[item.accKey.to(NodeTag)] = item - result = toSeq(accounts.keys).sorted(cmp).mapIt(accounts[it]) - -proc getAccountsChainDb*( - ps: SnapDbAccountsRef; - accKey: NodeKey; - ): Result[Account,HexaryError] = - ## Fetch account via `ChainDBRef` - ps.getAccountsData(accKey, persistent = true) - -proc nextAccountsChainDbKey*( - ps: SnapDbAccountsRef; - accKey: NodeKey; - ): Result[NodeKey,HexaryError] = - ## Fetch the account path on the `ChainDBRef`, the one next to the - ## argument account key. - noExceptionOops("getChainDbAccount()"): - let path = accKey - .hexaryPath(ps.root, ps.getAccountFn) - .next(ps.getAccountFn) - .getNibbles - if 64 == path.len: - return ok(path.getBytes.convertTo(Hash256).to(NodeKey)) - - err(AccountNotFound) - -proc prevAccountsChainDbKey*( - ps: SnapDbAccountsRef; - accKey: NodeKey; - ): Result[NodeKey,HexaryError] = - ## Fetch the account path on the `ChainDBRef`, the one before to the - ## argument account. - noExceptionOops("getChainDbAccount()"): - let path = accKey - .hexaryPath(ps.root, ps.getAccountFn) - .prev(ps.getAccountFn) - .getNibbles - if 64 == path.len: - return ok(path.getBytes.convertTo(Hash256).to(NodeKey)) - - err(AccountNotFound) - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/snapdb_debug.nim b/nimbus/sync/snap/worker/db/snapdb_debug.nim new file mode 100644 index 0000000000..1c6c3ef717 --- /dev/null +++ b/nimbus/sync/snap/worker/db/snapdb_debug.nim @@ -0,0 +1,180 @@ +# nimbus-eth1 +# Copyright (c) 2021 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed +# except according to those terms. 
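
The new `snapdb_debug.nim` module that begins here gathers the helpers removed from `snapdb_accounts.nim` above and from `snapdb_desc.nim` further down (`sortMerge()`, `nextAccountsChainDbKey()`, `prevAccountsChainDbKey()`, `assignPrettyKeys()` and the pretty printers), keeping them available for the test suite without burdening the production modules. A hedged sketch of the neighbour-key helpers, assuming an initialised `SnapDbBaseRef` named `ps`, an account key `accKey` and a getter `getFn` (all names made up here):

    let rc = ps.nextAccountsChainDbKey(accKey, getFn)
    if rc.isOk:
      let nextKey = rc.value     # NodeKey of the account to the right of accKey
    else:
      discard rc.error           # typically AccountNotFound past the last account
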
+ +{.push raises: [].} + +import + std/[algorithm, sequtils, tables], + eth/[common, trie/nibbles], + stew/results, + ../../range_desc, + "."/[hexary_debug, hexary_desc, hexary_error, hexary_paths, snapdb_desc] + +# ------------------------------------------------------------------------------ +# Private debugging helpers +# ------------------------------------------------------------------------------ + +template noPpError(info: static[string]; code: untyped) = + try: + code + except ValueError as e: + raiseAssert "Inconveivable (" & info & "): " & e.msg + except KeyError as e: + raiseAssert "Not possible (" & info & "): " & e.msg + except CatchableError as e: + raiseAssert "Ooops (" & info & ") " & $e.name & ": " & e.msg + +# ------------------------------------------------------------------------------ +# Private helpers +# ------------------------------------------------------------------------------ + +proc convertTo(data: openArray[byte]; T: type Hash256): T = + discard result.data.NodeKey.init(data) # size error => zero + +template noKeyError(info: static[string]; code: untyped) = + try: + code + except KeyError as e: + raiseAssert "Not possible (" & info & "): " & e.msg + +template noExceptionOops(info: static[string]; code: untyped) = + try: + code + except KeyError as e: + raiseAssert "Not possible -- " & info & ": " & e.msg + except RlpError: + return err(RlpEncoding) + except CatchableError as e: + return err(AccountNotFound) + +# ------------------------------------------------------------------------------ +# Public functions, pretty printing +# ------------------------------------------------------------------------------ + +proc pp*(a: RepairKey; ps: SnapDbBaseRef): string = + if not ps.isNil: + let toKey = ps.hexaDb.keyPp + if not toKey.isNil: + try: + return a.toKey + except CatchableError: + discard + $a.ByteArray33 + +proc pp*(a: NodeKey; ps: SnapDbBaseRef): string = + if not ps.isNil: + let toKey = ps.hexaDb.keyPp + if not toKey.isNil: + try: + return a.to(RepairKey).toKey + except CatchableError: + discard + $a.ByteArray32 + +proc pp*(a: NodeTag; ps: SnapDbBaseRef): string = + a.to(NodeKey).pp(ps) + +# ------------------------------------------------------------------------------ +# Public constructor +# ------------------------------------------------------------------------------ + +proc init*( + T: type HexaryTreeDbRef; + ): T = + ## Constructor variant. It provides a `HexaryTreeDbRef()` with a key cache + ## attached for pretty printing. So this one is mainly for debugging. + HexaryTreeDbRef.init(SnapDbRef()) + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc sortMerge*(base: openArray[NodeTag]): NodeTag = + ## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets + ## so that there are no overlap which would be rejected by `merge()`. + ## + ## This function selects a `NodeTag` from a list. + result = high(NodeTag) + for w in base: + if w < result: + result = w + +proc sortMerge*(acc: openArray[seq[PackedAccount]]): seq[PackedAccount] = + ## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets + ## so that there are no overlap which would be rejected by `merge()`. + ## + ## This function flattens and sorts the argument account lists. 
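
A short sketch of how the two `sortMerge()` overloads are meant to be used together when several downloaded account ranges are stitched into one `merge()` call (input names made up; `NodeTag` and `PackedAccount` come from `range_desc`):

    let
      mergedBase = sortMerge(@[base1, base2])         # smallest base NodeTag wins
      mergedAccs = sortMerge(@[accList1, accList2])   # flattened, de-duplicated, sorted by key
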
+ noKeyError("sortMergeAccounts"): + var accounts: Table[NodeTag,PackedAccount] + for accList in acc: + for item in accList: + accounts[item.accKey.to(NodeTag)] = item + result = toSeq(accounts.keys).sorted(cmp).mapIt(accounts[it]) + +proc nextAccountsChainDbKey*( + ps: SnapDbBaseRef; + accKey: NodeKey; + getFn: HexaryGetFn; + ): Result[NodeKey,HexaryError] = + ## Fetch the account path on the `ChainDBRef`, the one next to the + ## argument account key. + noExceptionOops("getChainDbAccount()"): + let path = accKey + .hexaryPath(ps.root, getFn) # ps.getAccountFn) + .next(getFn) # ps.getAccountFn) + .getNibbles + if 64 == path.len: + return ok(path.getBytes.convertTo(Hash256).to(NodeKey)) + + err(AccountNotFound) + +proc prevAccountsChainDbKey*( + ps: SnapDbBaseRef; + accKey: NodeKey; + getFn: HexaryGetFn; + ): Result[NodeKey,HexaryError] = + ## Fetch the account path on the `ChainDBRef`, the one before to the + ## argument account. + noExceptionOops("getChainDbAccount()"): + let path = accKey + .hexaryPath(ps.root, getFn) # ps.getAccountFn) + .prev(getFn) # ps.getAccountFn) + .getNibbles + if 64 == path.len: + return ok(path.getBytes.convertTo(Hash256).to(NodeKey)) + + err(AccountNotFound) + +# ------------------------------------------------------------------------------ +# More debugging (and playing with the hexary database) +# ------------------------------------------------------------------------------ + +proc assignPrettyKeys*(xDb: HexaryTreeDbRef; root: NodeKey) = + ## Prepare for pretty pringing/debugging. Run early enough this function + ## sets the root key to `"$"`, for instance. + if not xDb.keyPp.isNil: + noPpError("validate(1)"): + # Make keys assigned in pretty order for printing + let rootKey = root.to(RepairKey) + discard xDb.keyPp rootKey + var keysList = toSeq(xDb.tab.keys) + if xDb.tab.hasKey(rootKey): + keysList = @[rootKey] & keysList + for key in keysList: + let node = xDb.tab[key] + discard xDb.keyPp key + case node.kind: + of Branch: (for w in node.bLink: discard xDb.keyPp w) + of Extension: discard xDb.keyPp node.eLink + of Leaf: discard + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/snapdb_desc.nim b/nimbus/sync/snap/worker/db/snapdb_desc.nim index e49b9f21ef..5ceebb4467 100644 --- a/nimbus/sync/snap/worker/db/snapdb_desc.nim +++ b/nimbus/sync/snap/worker/db/snapdb_desc.nim @@ -11,13 +11,13 @@ {.push raises: [].} import - std/[sequtils, tables], + std/tables, chronicles, eth/[common, p2p, trie/db, trie/nibbles], ../../../../db/[select_backend, storage_types], ../../../protocol, ../../range_desc, - "."/[hexary_desc, hexary_error, hexary_import, hexary_nearby, + "."/[hexary_debug, hexary_desc, hexary_error, hexary_import, hexary_nearby, hexary_paths, rocky_bulk_load] logScope: @@ -46,47 +46,20 @@ type # Private debugging helpers # ------------------------------------------------------------------------------ -template noPpError(info: static[string]; code: untyped) = +template noKeyError(info: static[string]; code: untyped) = try: code - except ValueError as e: - raiseAssert "Inconveivable (" & info & "): " & e.msg except KeyError as e: raiseAssert "Not possible (" & info & "): " & e.msg - except CatchableError as e: - raiseAssert "Ooops (" & info & ") " & $e.name & ": " & e.msg -proc toKey(a: RepairKey; pv: SnapDbRef): uint = - if not a.isZero: - noPpError("pp(RepairKey)"): - if not 
pv.keyMap.hasKey(a): - pv.keyMap[a] = pv.keyMap.len.uint + 1 - result = pv.keyMap[a] - -proc toKey(a: RepairKey; ps: SnapDbBaseRef): uint = - a.toKey(ps.base) - -proc toKey(a: NodeKey; ps: SnapDbBaseRef): uint = - a.to(RepairKey).toKey(ps) - -#proc toKey(a: NodeTag; ps: SnapDbBaseRef): uint = -# a.to(NodeKey).toKey(ps) - -proc ppImpl(a: RepairKey; pv: SnapDbRef): string = - "$" & $a.toKey(pv) - -# ------------------------------------------------------------------------------ -# Debugging, pretty printing -# ------------------------------------------------------------------------------ - -proc pp*(a: NodeKey; ps: SnapDbBaseRef): string = - if a.isZero: "ø" else:"$" & $a.toKey(ps) - -proc pp*(a: RepairKey; ps: SnapDbBaseRef): string = - if a.isZero: "ø" elif a.isNodeKey: "$" & $a.toKey(ps) else: "@" & $a.toKey(ps) - -proc pp*(a: NodeTag; ps: SnapDbBaseRef): string = - a.to(NodeKey).pp(ps) +proc keyPp(a: RepairKey; pv: SnapDbRef): string = + if a.isZero: + return "ø" + if not pv.keyMap.hasKey(a): + pv.keyMap[a] = pv.keyMap.len.uint + 1 + result = if a.isNodeKey: "$" else: "@" + noKeyError("pp(RepairKey)"): + result &= $pv.keyMap[a] # ------------------------------------------------------------------------------ # Private helper @@ -127,7 +100,7 @@ proc init*( ): T = ## Constructor for inner hexary trie database let xDb = HexaryTreeDbRef() - xDb.keyPp = proc(key: RepairKey): string = key.ppImpl(pv) # will go away + xDb.keyPp = proc(key: RepairKey): string = key.keyPp(pv) # will go away return xDb proc init*( @@ -137,13 +110,6 @@ proc init*( ## Constructor variant HexaryTreeDbRef.init(ps.base) -proc init*( - T: type HexaryTreeDbRef; - ): T = - ## Constructor variant. It provides a `HexaryTreeDbRef()` with a key key cache attached - ## for pretty printing. So this one is mainly for debugging. - HexaryTreeDbRef.init(SnapDbRef()) - # --------------- proc init*( @@ -292,7 +258,10 @@ proc verifyNoMoreRight*( let root = root.to(RepairKey) base = base.to(NodeKey) - if base.hexaryPath(root, xDb).hexaryNearbyRightMissing(xDb): + rc = base.hexaryPath(root, xDb).hexaryNearbyRightMissing(xDb) + if rc.isErr: + return err(rc.error) + if rc.value: return ok() let error = LowerBoundProofError @@ -300,64 +269,6 @@ proc verifyNoMoreRight*( trace "verifyLeftmostBound()", peer, base=base.pp, error err(error) -# ------------------------------------------------------------------------------ -# Debugging (and playing with the hexary database) -# ------------------------------------------------------------------------------ - -proc assignPrettyKeys*(xDb: HexaryTreeDbRef; root: NodeKey) = - ## Prepare for pretty pringing/debugging. Run early enough this function - ## sets the root key to `"$"`, for instance. - if not xDb.keyPp.isNil: - noPpError("validate(1)"): - # Make keys assigned in pretty order for printing - let rootKey = root.to(RepairKey) - discard xDb.keyPp rootKey - var keysList = toSeq(xDb.tab.keys) - if xDb.tab.hasKey(rootKey): - keysList = @[rootKey] & keysList - for key in keysList: - let node = xDb.tab[key] - discard xDb.keyPp key - case node.kind: - of Branch: (for w in node.bLink: discard xDb.keyPp w) - of Extension: discard xDb.keyPp node.eLink - of Leaf: discard - -proc dumpPath*(ps: SnapDbBaseRef; key: NodeTag): seq[string] = - ## Pretty print helper compiling the path into the repair tree for the - ## argument `key`. 
- noPpError("dumpPath"): - let rPath= key.hexaryPath(ps.root, ps.hexaDb) - result = rPath.path.mapIt(it.pp(ps.hexaDb)) & @["(" & rPath.tail.pp & ")"] - -proc dumpHexaDB*(xDb: HexaryTreeDbRef; root: NodeKey; indent = 4): string = - ## Dump the entries from the a generic accounts trie. These are - ## key value pairs for - ## :: - ## Branch: ($1,b(<$2,$3,..,$17>,)) - ## Extension: ($18,e(832b5e..06e697,$19)) - ## Leaf: ($20,l(cc9b5d..1c3b4,f84401..f9e5129d[#70])) - ## - ## where keys are typically represented as `$` or `¶` or `ø` - ## depending on whether a key is final (`$`), temporary (`¶`) - ## or unset/missing (`ø`). - ## - ## The node types are indicated by a letter after the first key before - ## the round brackets - ## :: - ## Branch: 'b', 'þ', or 'B' - ## Extension: 'e', '€', or 'E' - ## Leaf: 'l', 'ł', or 'L' - ## - ## Here a small letter indicates a `Static` node which was from the - ## original `proofs` list, a capital letter indicates a `Mutable` node - ## added on the fly which might need some change, and the decorated - ## letters stand for `Locked` nodes which are like `Static` ones but - ## added later (typically these nodes are update `Mutable` nodes.) - ## - ## Beware: dumping a large database is not recommended - xDb.pp(root, indent) - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker_desc.nim b/nimbus/sync/snap/worker_desc.nim index 21f032ca25..e12662dca9 100644 --- a/nimbus/sync/snap/worker_desc.nim +++ b/nimbus/sync/snap/worker_desc.nim @@ -65,7 +65,6 @@ type fetchStorageFull*: SnapSlotsQueue ## Fetch storage trie for these accounts fetchStoragePart*: SnapSlotsQueue ## Partial storage trie to com[plete parkedStorage*: HashSet[NodeKey] ## Storage batch items in use - storageDone*: bool ## Done with storage, block sync next # Info nAccounts*: uint64 ## Imported # of accounts diff --git a/tests/replay/undump_accounts.nim b/tests/replay/undump_accounts.nim index 4e5a4f8b68..3525955f6f 100644 --- a/tests/replay/undump_accounts.nim +++ b/tests/replay/undump_accounts.nim @@ -99,8 +99,6 @@ proc dumpAccounts*( iterator undumpNextAccount*(gzFile: string): UndumpAccounts = var - line = "" - lno = 0 state = UndumpHeader data: UndumpAccounts nAccounts = 0u diff --git a/tests/replay/undump_blocks.nim b/tests/replay/undump_blocks.nim index 7a931e18ca..aad8fa35bb 100644 --- a/tests/replay/undump_blocks.nim +++ b/tests/replay/undump_blocks.nim @@ -24,9 +24,6 @@ template say(args: varargs[untyped]) = # echo args discard -proc toByteSeq(s: string): seq[byte] = - utils.fromHex(s) - # ------------------------------------------------------------------------------ # Public capture # ------------------------------------------------------------------------------ @@ -90,8 +87,6 @@ iterator undumpNextGroup*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) = var headerQ: seq[BlockHeader] bodyQ: seq[BlockBody] - line = "" - lno = 0 current = 0u start = 0u top = 0u diff --git a/tests/replay/undump_storages.nim b/tests/replay/undump_storages.nim index 61201e1614..290f3871a3 100644 --- a/tests/replay/undump_storages.nim +++ b/tests/replay/undump_storages.nim @@ -104,8 +104,6 @@ proc dumpStorages*( iterator undumpNextStorages*(gzFile: string): UndumpStorages = var - line = "" - lno = 0 state = UndumpStoragesHeader data: UndumpStorages nAccounts = 0u diff --git a/tests/test_sync_snap.nim b/tests/test_sync_snap.nim index e301145f64..95ebe7efca 100644 
--- a/tests/test_sync_snap.nim +++ b/tests/test_sync_snap.nim @@ -23,7 +23,7 @@ import ../nimbus/sync/snap/range_desc, ../nimbus/sync/snap/worker/db/[ hexary_desc, hexary_envelope, hexary_error, hexary_inspect, hexary_nearby, - hexary_paths, rocky_bulk_load, snapdb_accounts, snapdb_desc], + hexary_paths, rocky_bulk_load, snapdb_accounts, snapdb_debug, snapdb_desc], ./replay/[pp, undump_accounts, undump_storages], ./test_sync_snap/[ bulk_test_xx, snap_test_xx, @@ -91,12 +91,12 @@ proc findFilePath(file: string; proc getTmpDir(sampleDir = sampleDirRefFile): string = sampleDir.findFilePath(baseDir,repoDir).value.splitFile.dir -proc setTraceLevel = +proc setTraceLevel {.used.} = discard when defined(chronicles_runtime_filtering) and loggingEnabled: setLogLevel(LogLevel.TRACE) -proc setErrorLevel = +proc setErrorLevel {.used.} = discard when defined(chronicles_runtime_filtering) and loggingEnabled: setLogLevel(LogLevel.ERROR) @@ -144,12 +144,12 @@ proc flushDbDir(s: string; subDir = "") = let instDir = if subDir == "": baseDir / $n else: baseDir / subDir / $n if (instDir / "nimbus" / "data").dirExists: # Typically under Windows: there might be stale file locks. - try: instDir.removeDir except: discard - try: (baseDir / subDir).removeDir except: discard + try: instDir.removeDir except CatchableError: discard + try: (baseDir / subDir).removeDir except CatchableError: discard block dontClearUnlessEmpty: for w in baseDir.walkDir: break dontClearUnlessEmpty - try: baseDir.removeDir except: discard + try: baseDir.removeDir except CatchableError: discard proc flushDbs(db: TestDbs) = @@ -233,7 +233,7 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) = hexaDb.assignPrettyKeys(root.to(NodeKey)) # Beware: dumping a large database is not recommended - # true.say "***", "database dump\n ", hexaDb.dumpHexaDB(root) + # true.say "***", "database dump\n ", hexaDb.pp(root.to(NodeKey)) test &"Retrieve accounts & proofs for previous account ranges": if db.persistent: diff --git a/tests/test_sync_snap/test_accounts.nim b/tests/test_sync_snap/test_accounts.nim index 2885803ba6..53c05f347e 100644 --- a/tests/test_sync_snap/test_accounts.nim +++ b/tests/test_sync_snap/test_accounts.nim @@ -64,7 +64,9 @@ import ../../nimbus/db/select_backend, ../../nimbus/sync/protocol, ../../nimbus/sync/snap/range_desc, - ../../nimbus/sync/snap/worker/db/[snapdb_accounts, snapdb_desc], + ../../nimbus/sync/snap/worker/db/[ + hexary_debug, hexary_desc, hexary_error, + snapdb_accounts, snapdb_debug, snapdb_desc], ../replay/[pp, undump_accounts], ./test_helpers @@ -97,10 +99,13 @@ proc test_accountsMergeProofs*( ) = ## Merge account proofs # Load/accumulate data from several samples (needs some particular sort) - let baseTag = inList.mapIt(it.base).sortMerge - let packed = PackedAccountRange( - accounts: inList.mapIt(it.data.accounts).sortMerge, - proof: inList.mapIt(it.data.proof).flatten) + let + getFn = desc.getAccountFn + baseTag = inList.mapIt(it.base).sortMerge + packed = PackedAccountRange( + accounts: inList.mapIt(it.data.accounts).sortMerge, + proof: inList.mapIt(it.data.proof).flatten) + nAccounts = packed.accounts.len # Merging intervals will produce gaps, so the result is expected OK but # different from `.isImportOk` check desc.importAccounts(baseTag, packed, true).isOk @@ -114,21 +119,29 @@ proc test_accountsMergeProofs*( # need to check for additional records only on either end of a range. 
var keySet = packed.accounts.mapIt(it.accKey).toHashSet for w in inList: - var key = desc.prevAccountsChainDbKey(w.data.accounts[0].accKey) + var key = desc.prevAccountsChainDbKey(w.data.accounts[0].accKey, getFn) while key.isOk and key.value notin keySet: keySet.incl key.value - let newKey = desc.prevAccountsChainDbKey(key.value) + let newKey = desc.prevAccountsChainDbKey(key.value, getFn) check newKey != key key = newKey - key = desc.nextAccountsChainDbKey(w.data.accounts[^1].accKey) + key = desc.nextAccountsChainDbKey(w.data.accounts[^1].accKey, getFn) while key.isOk and key.value notin keySet: keySet.incl key.value - let newKey = desc.nextAccountsChainDbKey(key.value) + let newKey = desc.nextAccountsChainDbKey(key.value, getFn) check newKey != key key = newKey accKeys = toSeq(keySet).mapIt(it.to(NodeTag)).sorted(cmp) .mapIt(it.to(NodeKey)) - check packed.accounts.len <= accKeys.len + # Some database samples have a few more account keys which come in by the + # proof nodes. + check nAccounts <= accKeys.len + + # Verify against table importer + let + xDb = HexaryTreeDbRef.init() # Can dump database with `.pp(xDb)` + rc = xDb.fromPersistent(desc.root, getFn, accKeys.len + 100) + check rc == Result[int,HexaryError].ok(accKeys.len) proc test_accountsRevisitStoredItems*( @@ -137,6 +150,8 @@ proc test_accountsRevisitStoredItems*( noisy = false; ) = ## Revisit stored items on ChainDBRef + let + getFn = desc.getAccountFn var nextAccount = accKeys[0] prevAccount: NodeKey @@ -145,12 +160,13 @@ proc test_accountsRevisitStoredItems*( count.inc let pfx = $count & "#" - byChainDB = desc.getAccountsChainDb(accKey) - byNextKey = desc.nextAccountsChainDbKey(accKey) - byPrevKey = desc.prevAccountsChainDbKey(accKey) - noisy.say "*** find", - "<", count, "> byChainDb=", byChainDB.pp - check byChainDB.isOk + byChainDB = desc.getAccountsData(accKey, persistent=true) + byNextKey = desc.nextAccountsChainDbKey(accKey, getFn) + byPrevKey = desc.prevAccountsChainDbKey(accKey, getFn) + if byChainDB.isErr: + noisy.say "*** find", + "<", count, "> byChainDb=", byChainDB.pp + check byChainDB.isOk # Check `next` traversal funcionality. 
If `byNextKey.isOk` fails, the
       # `nextAccount` value is still the old one and will be different from
diff --git a/tests/test_sync_snap/test_node_range.nim b/tests/test_sync_snap/test_node_range.nim
index e149cb35d0..b70cd31e21 100644
--- a/tests/test_sync_snap/test_node_range.nim
+++ b/tests/test_sync_snap/test_node_range.nim
@@ -19,8 +19,9 @@ import
   ../../nimbus/sync/[handlers, protocol, types],
   ../../nimbus/sync/snap/range_desc,
   ../../nimbus/sync/snap/worker/db/[
-    hexary_desc, hexary_envelope, hexary_error, hexary_interpolate,
-    hexary_nearby, hexary_paths, hexary_range, snapdb_accounts, snapdb_desc],
+    hexary_debug, hexary_desc, hexary_envelope, hexary_error,
+    hexary_interpolate, hexary_nearby, hexary_paths, hexary_range,
+    snapdb_accounts, snapdb_debug, snapdb_desc],
   ../replay/[pp, undump_accounts],
   ./test_helpers
 
@@ -209,7 +210,6 @@ proc verifyRangeProof(
       ): Result[void,HexaryError] =
   ## Re-build temporary database and prove or disprove
   let
-    dumpOk = dbg.isNil.not
     noisy = dbg.isNil.not
     xDb = HexaryTreeDbRef()
   if not dbg.isNil:
@@ -252,7 +252,7 @@ proc verifyRangeProof(
       "\n\n last=", leafs[^1].key,
       "\n ", leafs[^1].key.hexaryPath(rootKey,xDb).pp(dbg),
       "\n\n database dump",
-      "\n ", xDb.dumpHexaDB(rootKey),
+      "\n ", xDb.pp(rootKey),
       "\n"
 
 # ------------------------------------------------------------------------------
@@ -270,8 +270,6 @@ proc test_NodeRangeDecompose*(
   # stray account nodes in the proof *before* the left boundary.
   doAssert 2 < accKeys.len
 
-  const
-    isPersistent = db.type is HexaryTreeDbRef
   let
     rootKey = root.to(NodeKey)
     baseTag = accKeys[0].to(NodeTag) + 1.u256
@@ -392,12 +390,16 @@ proc test_NodeRangeProof*(
   for n,w in inLst:
     doAssert 1 < w.data.accounts.len
     let
-      # Use the middle of the first two points as base
-      delta = (w.data.accounts[1].accKey.to(NodeTag) -
-               w.data.accounts[0].accKey.to(NodeTag)) div 2
-      base = w.data.accounts[0].accKey.to(NodeTag) + delta
+      first = w.data.accounts[0].accKey.to(NodeTag)
+      delta = (w.data.accounts[1].accKey.to(NodeTag) - first) div 2
+      # Use the middle of the first two points as base unless w.base is zero.
+      # This is needed as the range extractor needs the node before the `base`
+      # (if there is any) in order to assemble the proof. But this node might
+      # not be present in the partial database.
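+      # Example: with the first two account tags at 0x1000 and 0x3000 and a
+      # non-zero `w.base`, `delta` is 0x1000 and `base` becomes 0x2000, so the
+      # accounts list below starts at index 1. With a zero `w.base` there is
+      # no node in front of the range, hence the original base and index 0
+      # are kept.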
+ (base, start) = if w.base == 0.to(NodeTag): (w.base, 0) + else: (first + delta, 1) # Assemble accounts list starting at the second item - accounts = w.data.accounts[1 ..< min(w.data.accounts.len,maxLen)] + accounts = w.data.accounts[start ..< min(w.data.accounts.len,maxLen)] iv = NodeTagRange.new(base, accounts[^1].accKey.to(NodeTag)) rc = db.hexaryRangeLeafsProof(rootKey, iv) check rc.isOk @@ -486,7 +488,7 @@ proc test_NodeRangeLeftBoundary*( ## Verify left side boundary checks let rootKey = inLst[0].root.to(NodeKey) - noisy = not dbg.isNil + noisy {.used.} = not dbg.isNil # Assuming the `inLst` entries have been stored in the DB already for n,w in inLst: @@ -505,7 +507,7 @@ proc test_NodeRangeLeftBoundary*( check (n, j, leftKey) == (n, j, toLeftKey) rootKey.printCompareLeftNearby(leftKey, rightKey, db, dbg) return - noisy.say "***", "n=", n, " accounts=", accounts.len + # noisy.say "***", "n=", n, " accounts=", accounts.len # ------------------------------------------------------------------------------ # End From 8d9b2522eaa91c856ca9e23fae315d8f3398f8c2 Mon Sep 17 00:00:00 2001 From: Adam Spitz Date: Fri, 17 Mar 2023 14:16:24 -0400 Subject: [PATCH 3/4] More on withdrawals (#1508) * Gwei conversion should use u256 because u64 can overflow. * Make withdrawals follow the EIP-158 state-clearing rules. (i.e. Empty accounts should be deleted.) * Allow the zero address in normalizeNumber. (Necessary for one of the new withdrawals-related tests.) * Another fix with a withdrawals-related test. --- nimbus/core/executor/process_block.nim | 28 +++++++++++++++++--------- nimbus/evm/state.nim | 3 +++ nimbus/evm/state_transactions.nim | 2 +- nimbus/transaction/call_evm.nim | 2 +- nimbus/vm_state.nim | 1 + tests/test_blockchain_json.nim | 4 ++-- vendor/nim-eth | 2 +- 7 files changed, 28 insertions(+), 14 deletions(-) diff --git a/nimbus/core/executor/process_block.nim b/nimbus/core/executor/process_block.nim index deee5c3d04..f440348057 100644 --- a/nimbus/core/executor/process_block.nim +++ b/nimbus/core/executor/process_block.nim @@ -70,15 +70,25 @@ proc procBlkPreamble(vmState: BaseVMState; return false vmState.receipts[txIndex] = vmState.makeReceipt(tx.txType) - if header.withdrawalsRoot.isSome: - if body.withdrawals.get.calcWithdrawalsRoot != header.withdrawalsRoot.get: - debug "Mismatched withdrawalsRoot", - blockNumber = header.blockNumber - return false - - for withdrawal in body.withdrawals.get: - vmState.stateDB.addBalance(withdrawal.address, withdrawal.amount.gwei) - vmState.stateDB.deleteAccountIfEmpty(withdrawal.address) + if vmState.determineFork >= FkShanghai: + if header.withdrawalsRoot.isNone: + raise ValidationError.newException("Post-Shanghai block header must have withdrawalsRoot") + elif body.withdrawals.isNone: + raise ValidationError.newException("Post-Shanghai block body must have withdrawals") + else: + if body.withdrawals.get.calcWithdrawalsRoot != header.withdrawalsRoot.get: + debug "Mismatched withdrawalsRoot", + blockNumber = header.blockNumber + return false + + for withdrawal in body.withdrawals.get: + vmState.stateDB.addBalance(withdrawal.address, withdrawal.amount.gwei) + vmState.stateDB.deleteAccountIfEmpty(withdrawal.address) + else: + if header.withdrawalsRoot.isSome: + raise ValidationError.newException("Pre-Shanghai block header must not have withdrawalsRoot") + elif body.withdrawals.isSome: + raise ValidationError.newException("Pre-Shanghai block body must not have withdrawals") if vmState.cumulativeGasUsed != header.gasUsed: debug "gasUsed neq 
cumulativeGasUsed", diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim index d973dbd4cb..ac54b3b5a7 100644 --- a/nimbus/evm/state.nim +++ b/nimbus/evm/state.nim @@ -388,6 +388,9 @@ func forkDeterminationInfoForVMState*(vmState: BaseVMState): ForkDeterminationIn # Also, can I get the TD? Do I need to? forkDeterminationInfo(vmState.blockNumber, vmState.timestamp) +func determineFork*(vmState: BaseVMState): EVMFork = + vmState.com.toEVMFork(vmState.forkDeterminationInfoForVMState) + proc clearSelfDestructsAndEmptyAccounts*(vmState: BaseVMState, fork: EVMFork, miner: EthAddress): void = vmState.mutateStateDB: for deletedAccount in vmState.selfDestructs: diff --git a/nimbus/evm/state_transactions.nim b/nimbus/evm/state_transactions.nim index 0d74dbe125..1aa5084e61 100644 --- a/nimbus/evm/state_transactions.nim +++ b/nimbus/evm/state_transactions.nim @@ -33,7 +33,7 @@ proc setupTxContext*(vmState: BaseVMState, origin: EthAddress, gasPrice: GasInt, if forkOverride.isSome: forkOverride.get else: - vmState.com.toEVMFork(vmState.forkDeterminationInfoForVMState) + vmState.determineFork vmState.gasCosts = vmState.fork.forkToSchedule diff --git a/nimbus/transaction/call_evm.nim b/nimbus/transaction/call_evm.nim index da09b06af2..0f34bc995e 100644 --- a/nimbus/transaction/call_evm.nim +++ b/nimbus/transaction/call_evm.nim @@ -101,7 +101,7 @@ proc rpcEstimateGas*(cd: RpcCallData, header: BlockHeader, com: CommonRef, gasCa gasLimit: 0.GasInt, ## ??? fee: UInt256.none()) ## ??? let vmState = BaseVMState.new(topHeader, com) - let fork = com.toEVMFork(vmState.forkDeterminationInfoForVMState) + let fork = vmState.determineFork let txGas = gasFees[fork][GasTransaction] # txGas always 21000, use constants? var params = toCallParams(vmState, cd, gasCap, header.fee) diff --git a/nimbus/vm_state.nim b/nimbus/vm_state.nim index 625b36c586..20fb5c6d2b 100644 --- a/nimbus/vm_state.nim +++ b/nimbus/vm_state.nim @@ -20,6 +20,7 @@ export vms.buildWitness, vms.clearSelfDestructsAndEmptyAccounts, vms.coinbase, + vms.determineFork, vms.difficulty, vms.disableTracing, vms.enableTracing, diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index 8f8010be5c..782c224fe0 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -11,7 +11,7 @@ import std/[json, os, tables, strutils, options], unittest2, - eth/rlp, eth/trie/trie_defs, + eth/rlp, eth/trie/trie_defs, eth/common/eth_types_rlp, stew/byteutils, ./test_helpers, ./test_allowed_to_fail, ../premix/parser, test_config, @@ -127,6 +127,7 @@ proc parseWithdrawals(withdrawals: JsonNode): Option[seq[Withdrawal]] = proc parseBlocks(blocks: JsonNode): seq[TestBlock] = for fixture in blocks: var t: TestBlock + t.withdrawals = none[seq[Withdrawal]]() for key, value in fixture: case key of "blockHeader": @@ -231,7 +232,6 @@ proc applyFixtureBlockToChain(tester: var Tester, tb: var TestBlock, var rlp = rlpFromBytes(tb.blockRLP) tb.header = rlp.read(EthHeader).header tb.body = rlp.readRecordType(BlockBody, false) - tb.body.withdrawals = tb.withdrawals tester.importBlock(com, tb, checkSeal, validation) func shouldCheckSeal(tester: Tester): bool = diff --git a/vendor/nim-eth b/vendor/nim-eth index d2ba753792..9e89f0dccc 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit d2ba7537924e18c02b22bb1df3fc5c8a9380ff54 +Subproject commit 9e89f0dccc54e4c8a670d073175de720af3423dc From d3dad1f232daa06afb349c7475b442b498673511 Mon Sep 17 00:00:00 2001 From: jangko Date: Fri, 17 Mar 2023 20:20:52 +0700 Subject: 
[PATCH 4/4] align accounts cache with EIP158/161 Some nomenclature used in accounts cache are not what described in EIP158/161, therefore causing confusion and introduce bugs. Now it should be fixed. --- nimbus/core/executor/process_transaction.nim | 1 - nimbus/db/accounts_cache.nim | 79 +++++----- nimbus/evm/computation.nim | 6 +- nimbus/transaction/host_services.nim | 3 +- nimbus/utils/debug.nim | 5 + tests/test_blockchain_json.nim | 61 ++------ tests/test_config.nim | 3 +- tests/test_generalstate_json.nim | 52 +++---- tests/test_helpers.nim | 2 +- tools/common/state_clearing.nim | 5 +- tools/t8n/t8n_test.nim | 10 +- tools/t8n/testdata/00-511/alloc.json | 143 +++++++++++++++++++ tools/t8n/testdata/00-511/env.json | 8 ++ tools/t8n/testdata/00-511/exp.json | 80 +++++++++++ tools/t8n/testdata/00-511/txs.rlp | 1 + 15 files changed, 326 insertions(+), 133 deletions(-) create mode 100644 tools/t8n/testdata/00-511/alloc.json create mode 100644 tools/t8n/testdata/00-511/env.json create mode 100644 tools/t8n/testdata/00-511/exp.json create mode 100644 tools/t8n/testdata/00-511/txs.rlp diff --git a/nimbus/core/executor/process_transaction.nim b/nimbus/core/executor/process_transaction.nim index 1185163d22..f24eaf308c 100644 --- a/nimbus/core/executor/process_transaction.nim +++ b/nimbus/core/executor/process_transaction.nim @@ -11,7 +11,6 @@ {.push raises: [].} import - std/[sets], ../../common/common, ../../db/accounts_cache, ../../transaction/call_evm, diff --git a/nimbus/db/accounts_cache.nim b/nimbus/db/accounts_cache.nim index b2a36ce04c..5600349ffa 100644 --- a/nimbus/db/accounts_cache.nim +++ b/nimbus/db/accounts_cache.nim @@ -1,6 +1,5 @@ import - tables, hashes, sets, - chronicles, + std/[tables, hashes, sets], eth/[common, rlp], eth/trie/[hexary, db, trie_defs], ../constants, ../utils/utils, storage_types, ../../stateless/multi_keys, @@ -9,11 +8,9 @@ import type AccountFlag = enum - IsAlive + Alive IsNew - IsDirty - IsTouched - IsClone + Dirty CodeLoaded CodeChanged StorageChanged @@ -37,6 +34,7 @@ type savePoint: SavePoint witnessCache: Table[EthAddress, WitnessData] isDirty: bool + touched: HashSet[EthAddress] ReadOnlyStateDB* = distinct AccountsCache @@ -55,10 +53,8 @@ const emptyAcc = newAccount() resetFlags = { - IsDirty, + Dirty, IsNew, - IsTouched, - IsClone, CodeChanged, StorageChanged } @@ -154,7 +150,7 @@ proc getAccount(ac: AccountsCache, address: EthAddress, shouldCreate = true): Re try: result = RefAccount( account: rlp.decode(recordFound, Account), - flags: {IsAlive} + flags: {Alive} ) except RlpError: raiseAssert("No RlpError should occur on decoding account from trie") @@ -164,7 +160,7 @@ proc getAccount(ac: AccountsCache, address: EthAddress, shouldCreate = true): Re # it's a request for new account result = RefAccount( account: newAccount(), - flags: {IsAlive, IsNew} + flags: {Alive, IsNew} ) # cache the account @@ -173,7 +169,7 @@ proc getAccount(ac: AccountsCache, address: EthAddress, shouldCreate = true): Re proc clone(acc: RefAccount, cloneStorage: bool): RefAccount = new(result) result.account = acc.account - result.flags = acc.flags + {IsClone} + result.flags = acc.flags result.code = acc.code if cloneStorage: @@ -187,7 +183,7 @@ proc isEmpty(acc: RefAccount): bool = acc.account.nonce == 0 template exists(acc: RefAccount): bool = - IsAlive in acc.flags + Alive in acc.flags template createTrieKeyFromSlot(slot: UInt256): auto = # XXX: This is too expensive. 
Similar to `createRangeFromAddress` @@ -235,7 +231,7 @@ proc storageValue(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): UInt256 result = acc.originalStorageValue(slot, db) proc kill(acc: RefAccount) = - acc.flags.excl IsAlive + acc.flags.excl Alive acc.overlayStorage.clear() acc.originalStorage = nil acc.account = newAccount() @@ -249,8 +245,8 @@ type proc persistMode(acc: RefAccount): PersistMode = result = DoNothing - if IsAlive in acc.flags: - if IsNew in acc.flags or IsDirty in acc.flags: + if Alive in acc.flags: + if IsNew in acc.flags or Dirty in acc.flags: result = Update else: if IsNew notin acc.flags: @@ -308,12 +304,12 @@ proc makeDirty(ac: AccountsCache, address: EthAddress, cloneStorage = true): Ref result = ac.getAccount(address) if address in ac.savePoint.cache: # it's already in latest savepoint - result.flags.incl IsDirty + result.flags.incl Dirty return # put a copy into latest savepoint result = result.clone(cloneStorage) - result.flags.incl IsDirty + result.flags.incl Dirty ac.savePoint.cache[address] = result proc getCodeHash*(ac: AccountsCache, address: EthAddress): Hash256 {.inline.} = @@ -379,25 +375,30 @@ proc isEmptyAccount*(ac: AccountsCache, address: EthAddress): bool {.inline.} = let acc = ac.getAccount(address, false) doAssert not acc.isNil doAssert acc.exists() - result = acc.isEmpty() + acc.isEmpty() proc isDeadAccount*(ac: AccountsCache, address: EthAddress): bool = let acc = ac.getAccount(address, false) if acc.isNil: - result = true - return + return true if not acc.exists(): - result = true - else: - result = acc.isEmpty() + return true + acc.isEmpty() proc setBalance*(ac: AccountsCache, address: EthAddress, balance: UInt256) = let acc = ac.getAccount(address) - acc.flags.incl {IsTouched, IsAlive} + acc.flags.incl {Alive} if acc.account.balance != balance: ac.makeDirty(address).account.balance = balance proc addBalance*(ac: AccountsCache, address: EthAddress, delta: UInt256) {.inline.} = + # EIP161: We must check emptiness for the objects such that the account + # clearing (0,0,0 objects) can take effect. 
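+  # Adding zero to an already empty account (nonce 0, balance 0, no code)
+  # leaves it empty but still counts as a touch: the address is recorded in
+  # `ac.touched` so that a later `deleteAccountIfEmpty` call can clear the
+  # account, e.g. after a zero-value transfer to it.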
+ if delta == 0.u256: + let acc = ac.getAccount(address) + if acc.isEmpty: + ac.touched.incl address + return ac.setBalance(address, ac.getBalance(address) + delta) proc subBalance*(ac: AccountsCache, address: EthAddress, delta: UInt256) {.inline.} = @@ -405,7 +406,7 @@ proc subBalance*(ac: AccountsCache, address: EthAddress, delta: UInt256) {.inlin proc setNonce*(ac: AccountsCache, address: EthAddress, nonce: AccountNonce) = let acc = ac.getAccount(address) - acc.flags.incl {IsTouched, IsAlive} + acc.flags.incl {Alive} if acc.account.nonce != nonce: ac.makeDirty(address).account.nonce = nonce @@ -414,7 +415,7 @@ proc incNonce*(ac: AccountsCache, address: EthAddress) {.inline.} = proc setCode*(ac: AccountsCache, address: EthAddress, code: seq[byte]) = let acc = ac.getAccount(address) - acc.flags.incl {IsTouched, IsAlive} + acc.flags.incl {Alive} let codeHash = keccakHash(code) if acc.account.codeHash != codeHash: var acc = ac.makeDirty(address) @@ -424,7 +425,7 @@ proc setCode*(ac: AccountsCache, address: EthAddress, code: seq[byte]) = proc setStorage*(ac: AccountsCache, address: EthAddress, slot, value: UInt256) = let acc = ac.getAccount(address) - acc.flags.incl {IsTouched, IsAlive} + acc.flags.incl {Alive} let oldValue = acc.storageValue(slot, ac.db) if oldValue != value: var acc = ac.makeDirty(address) @@ -433,7 +434,7 @@ proc setStorage*(ac: AccountsCache, address: EthAddress, slot, value: UInt256) = proc clearStorage*(ac: AccountsCache, address: EthAddress) = let acc = ac.getAccount(address) - acc.flags.incl {IsTouched, IsAlive} + acc.flags.incl {Alive} if acc.account.storageRoot != emptyRlpHash: # there is no point to clone the storage since we want to remove it ac.makeDirty(address, cloneStorage = false).account.storageRoot = emptyRlpHash @@ -444,9 +445,21 @@ proc deleteAccount*(ac: AccountsCache, address: EthAddress) = let acc = ac.getAccount(address) acc.kill() -proc deleteAccountIfEmpty*(ac: AccountsCache, address: EthAddress): void = - if ac.accountExists(address) and ac.isEmptyAccount(address): - debug "state clearing", address +proc deleteAccountIfEmpty*(ac: AccountsCache, address: EthAddress) = + let acc = ac.getAccount(address, false) + if acc.isNil: + return + if not acc.isEmpty: + return + if not acc.exists: + return + + # make sure 'Touched' account + # in persistent storage also get deleted + if address in ac.touched: + acc.flags.excl IsNew + + if address in ac.touched or Dirty in acc.flags: ac.deleteAccount(address) proc persist*(ac: AccountsCache, clearCache: bool = true) = @@ -479,6 +492,8 @@ proc persist*(ac: AccountsCache, clearCache: bool = true) = for x in cleanAccounts: ac.savePoint.cache.del x + ac.touched.clear() + # EIP2929 ac.savePoint.accessList.clear() diff --git a/nimbus/evm/computation.nim b/nimbus/evm/computation.nim index 22d496723a..ea718de359 100644 --- a/nimbus/evm/computation.nim +++ b/nimbus/evm/computation.nim @@ -335,12 +335,10 @@ proc merge*(c, child: Computation) = proc execSelfDestruct*(c: Computation, beneficiary: EthAddress) {.gcsafe, raises: [CatchableError].} = c.vmState.mutateStateDB: - let - localBalance = c.getBalance(c.msg.contractAddress) - beneficiaryBalance = c.getBalance(beneficiary) + let localBalance = c.getBalance(c.msg.contractAddress) # Transfer to beneficiary - db.setBalance(beneficiary, localBalance + beneficiaryBalance) + db.addBalance(beneficiary, localBalance) # Zero the balance of the address being deleted. 
# This must come after sending to beneficiary in case the diff --git a/nimbus/transaction/host_services.nim b/nimbus/transaction/host_services.nim index fcc6b29fc6..815d9cd7fa 100644 --- a/nimbus/transaction/host_services.nim +++ b/nimbus/transaction/host_services.nim @@ -212,10 +212,9 @@ proc copyCode(host: TransactionHost, address: HostAddress, proc selfDestruct(host: TransactionHost, address, beneficiary: HostAddress) {.show.} = host.vmState.mutateStateDB: let closingBalance = db.getBalance(address) - let beneficiaryBalance = db.getBalance(beneficiary) # Transfer to beneficiary - db.setBalance(beneficiary, beneficiaryBalance + closingBalance) + db.addBalance(beneficiary, closingBalance) # Zero balance of account being deleted. # This must come after sending to the beneficiary in case the diff --git a/nimbus/utils/debug.nim b/nimbus/utils/debug.nim index c00b0b568a..78f8872794 100644 --- a/nimbus/utils/debug.nim +++ b/nimbus/utils/debug.nim @@ -66,6 +66,11 @@ proc dumpAccount(stateDB: AccountsCache, address: EthAddress): JsonNode = "storage": storage } +proc dumpAccounts*(vmState: BaseVMState): JsonNode = + result = newJObject() + for ac in vmState.stateDB.addresses: + result[ac.toHex] = dumpAccount(vmState.stateDB, ac) + proc debugAccounts*(vmState: BaseVMState): string = var accounts = newJObject() diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index 782c224fe0..e34f3bb322 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -17,7 +17,7 @@ import ../premix/parser, test_config, ../nimbus/[vm_state, vm_types, errors, constants], ../nimbus/db/accounts_cache, - ../nimbus/utils/utils, + ../nimbus/utils/[utils, debug], ../nimbus/core/[executor, validate, pow/header], ../stateless/[tree_from_witness, witness_types], ../tools/common/helpers as chp, @@ -299,67 +299,26 @@ proc runTester(tester: var Tester, com: CommonRef, testStatusIMPL: var TestStatu if tester.debugMode: tester.collectDebugData() -proc dumpAccount(stateDB: ReadOnlyStateDB, address: EthAddress, name: string): JsonNode = - result = %{ - "name": %name, - "address": %($address), - "nonce": %toHex(stateDB.getNonce(address)), - "balance": %stateDB.getBalance(address).toHex(), - "codehash": %($stateDB.getCodeHash(address)), - "storageRoot": %($stateDB.getStorageRoot(address)) - } - -proc dumpDebugData(tester: Tester, vmState: BaseVMState, accountList: JsonNode): JsonNode = - var accounts = newJObject() - var i = 0 - for ac, _ in accountList: - let account = ethAddressFromHex(ac) - accounts[$account] = dumpAccount(vmState.readOnlyStateDB, account, "acc" & $i) - inc i - - %{ - "debugData": tester.debugData, - "accounts": accounts - } - -proc accountList(fixture: JsonNode): JsonNode = - if fixture["postState"].kind == JObject: - fixture["postState"] - else: - fixture["pre"] - -proc debugDataFromAccountList(tester: Tester, fixture: JsonNode): JsonNode = - let accountList = fixture.accountList +proc debugDataFromAccountList(tester: Tester): JsonNode = let vmState = tester.vmState - if vmState.isNil: - %{"debugData": tester.debugData} - else: - dumpDebugData(tester, vmState, accountList) + result = %{"debugData": tester.debugData} + if not vmState.isNil: + result["accounts"] = vmState.dumpAccounts() proc debugDataFromPostStateHash(tester: Tester): JsonNode = - var - accounts = newJObject() - accountList = newSeq[EthAddress]() - vmState = tester.vmState - - for address in vmState.stateDB.addresses: - accountList.add address - - for i, ac in accountList: - accounts[ac.toHex] = 
dumpAccount(vmState.readOnlyStateDB, ac, "acc" & $i) - + let vmState = tester.vmState %{ "debugData": tester.debugData, "postStateHash": %($vmState.readOnlyStateDB.rootHash), "expectedStateHash": %($tester.postStateHash), - "accounts": accounts + "accounts": vmState.dumpAccounts() } -proc dumpDebugData(tester: Tester, fixture: JsonNode, fixtureName: string, fixtureIndex: int, success: bool) = +proc dumpDebugData(tester: Tester, fixtureName: string, fixtureIndex: int, success: bool) = let debugData = if tester.postStateHash != Hash256(): debugDataFromPostStateHash(tester) else: - debugDataFromAccountList(tester, fixture) + debugDataFromAccountList(tester) let status = if success: "_success" else: "_failed" writeFile("debug_" & fixtureName & "_" & $fixtureIndex & status & ".json", debugData.pretty()) @@ -423,7 +382,7 @@ proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus, debugMode = fal success = false if tester.debugMode: - tester.dumpDebugData(fixture, fixtureName, fixtureIndex, success) + tester.dumpDebugData(fixtureName, fixtureIndex, success) fixtureTested = true check success == true diff --git a/tests/test_config.nim b/tests/test_config.nim index ea12abb45d..c964e0bf0c 100644 --- a/tests/test_config.nim +++ b/tests/test_config.nim @@ -1,6 +1,5 @@ import - std/[parseopt, strutils, options], - ../nimbus/common/evmforks + std/[parseopt, strutils, options] type ConfigStatus* = enum diff --git a/tests/test_generalstate_json.nim b/tests/test_generalstate_json.nim index 7c4bb3319e..3e685cddb5 100644 --- a/tests/test_generalstate_json.nim +++ b/tests/test_generalstate_json.nim @@ -13,7 +13,7 @@ import ../nimbus/[vm_state, vm_types], ../nimbus/db/accounts_cache, ../nimbus/common/common, - ../nimbus/utils/utils, + ../nimbus/utils/[utils, debug], ../tools/common/helpers as chp, ../tools/evmstate/helpers, ../tools/common/state_clearing, @@ -33,6 +33,7 @@ type debugMode: bool trace: bool index: int + fork: string proc toBytes(x: string): seq[byte] = result = newSeq[byte](x.len) @@ -48,41 +49,15 @@ method getAncestorHash*(vmState: BaseVMState; blockNumber: BlockNumber): Hash256 else: return keccakHash(toBytes($blockNumber)) -proc dumpAccount(stateDB: ReadOnlyStateDB, address: EthAddress, name: string): JsonNode = - result = %{ - "name": %name, - "address": %($address), - "nonce": %toHex(stateDB.getNonce(address)), - "balance": %stateDB.getBalance(address).toHex(), - "codehash": %($stateDB.getCodeHash(address)), - "storageRoot": %($stateDB.getStorageRoot(address)) - } - -proc dumpDebugData(tester: Tester, vmState: BaseVMState, sender: EthAddress, gasUsed: GasInt, success: bool) = - let recipient = tester.tx.getRecipient(sender) - let miner = tester.header.coinbase - var accounts = newJObject() - - accounts[$miner] = dumpAccount(vmState.readOnlyStateDB, miner, "miner") - accounts[$sender] = dumpAccount(vmState.readOnlyStateDB, sender, "sender") - accounts[$recipient] = dumpAccount(vmState.readOnlyStateDB, recipient, "recipient") - - let accountList = [sender, miner, recipient] - var i = 0 - for ac, _ in tester.pre: - let account = ethAddressFromHex(ac) - if account notin accountList: - accounts[$account] = dumpAccount(vmState.readOnlyStateDB, account, "pre" & $i) - inc i - +proc dumpDebugData(tester: Tester, vmState: BaseVMState, gasUsed: GasInt, success: bool) = let tracingResult = if tester.trace: vmState.getTracingResult() else: %[] let debugData = %{ "gasUsed": %gasUsed, "structLogs": tracingResult, - "accounts": accounts + "accounts": vmState.dumpAccounts() } let status = if 
success: "_success" else: "_failed" - writeFile("debug_" & tester.name & "_" & $tester.index & status & ".json", debugData.pretty()) + writeFile(tester.name & "_" & tester.fork & "_" & $tester.index & status & ".json", debugData.pretty()) proc testFixtureIndexes(tester: Tester, testStatusIMPL: var TestStatus) = let @@ -116,7 +91,7 @@ proc testFixtureIndexes(tester: Tester, testStatusIMPL: var TestStatus) = check(tester.expectedLogs == actualLogsHash) if tester.debugMode: let success = tester.expectedLogs == actualLogsHash and obtainedHash == tester.expectedHash - tester.dumpDebugData(vmState, sender, gasUsed, success) + tester.dumpDebugData(vmState, gasUsed, success) let rc = vmState.processTransaction( tester.tx, sender, tester.header, fork) @@ -163,25 +138,30 @@ proc testFixture(fixtures: JsonNode, testStatusIMPL: var TestStatus, debugEcho "selected fork not available: " & conf.fork return + tester.fork = conf.fork let forkData = post[conf.fork] prepareFork(conf.fork) if conf.index.isNone: for subTest in forkData: runSubTest(subTest) + inc tester.index else: - let index = conf.index.get() - if index > forkData.len or index < 0: + tester.index = conf.index.get() + if tester.index > forkData.len or tester.index < 0: debugEcho "selected index out of range(0-$1), requested $2" % - [$forkData.len, $index] + [$forkData.len, $tester.index] return - let subTest = forkData[index] + let subTest = forkData[tester.index] runSubTest(subTest) else: for forkName, forkData in post: prepareFork(forkName) + tester.fork = forkName + tester.index = 0 for subTest in forkData: runSubTest(subTest) + inc tester.index proc generalStateJsonMain*(debugMode = false) = const @@ -193,7 +173,7 @@ proc generalStateJsonMain*(debugMode = false) = # run all test fixtures if config.legacy: suite "generalstate json tests": - jsonTest(legacyFolder , "GeneralStateTests", testFixture, skipGSTTests) + jsonTest(legacyFolder, "GeneralStateTests", testFixture, skipGSTTests) else: suite "new generalstate json tests": jsonTest(newFolder, "newGeneralStateTests", testFixture, skipNewGSTTests) diff --git a/tests/test_helpers.nim b/tests/test_helpers.nim index 0049e78694..075e4334c0 100644 --- a/tests/test_helpers.nim +++ b/tests/test_helpers.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. 
import - os, macros, json, strformat, strutils, os, tables, + std/[os, macros, json, strformat, strutils, tables], stew/byteutils, net, eth/[keys, rlp, p2p], unittest2, testutils/markdown_reports, ../nimbus/[constants, config, transaction, errors], diff --git a/tools/common/state_clearing.nim b/tools/common/state_clearing.nim index c92f6df503..4c3ae2ea69 100644 --- a/tools/common/state_clearing.nim +++ b/tools/common/state_clearing.nim @@ -30,10 +30,9 @@ proc coinbaseStateClearing*(vmState: BaseVMState, vmState.mutateStateDB: if touched: db.addBalance(miner, 0.u256) + if fork >= FkSpurious: - # EIP158/161 state clearing - if db.accountExists(miner) and db.isEmptyAccount(miner): - db.deleteAccount(miner) + db.deleteAccountIfEmpty(miner) # db.persist is an important step when using accounts_cache # it will affect the account storage's location diff --git a/tools/t8n/t8n_test.nim b/tools/t8n/t8n_test.nim index 72098a1589..8882f9b49b 100644 --- a/tools/t8n/t8n_test.nim +++ b/tools/t8n/t8n_test.nim @@ -147,7 +147,6 @@ proc runTest(appDir: string, spec: TestSpec): bool = echo "path: $1, error: $2" % [jsc.path, jsc.error] return false - return true const @@ -403,6 +402,15 @@ const ), output: T8nOutput(alloc: true, result: true), expOut: "exp.json", + ), + TestSpec( + name : "Legacy Byzantium State Clearing", + base : "testdata/00-511", + input : t8nInput( + "alloc.json", "txs.rlp", "env.json", "Byzantium", "-1" + ), + output: T8nOutput(alloc: true, result: true), + expOut: "exp.json", ) ] diff --git a/tools/t8n/testdata/00-511/alloc.json b/tools/t8n/testdata/00-511/alloc.json new file mode 100644 index 0000000000..b851914de3 --- /dev/null +++ b/tools/t8n/testdata/00-511/alloc.json @@ -0,0 +1,143 @@ +{ + "0x0000000000000000000000000000000000000000" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x0000000000000000000000000000000000000001" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x0000000000000000000000000000000000000002" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x0000000000000000000000000000000000000003" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x0000000000000000000000000000000000000004" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x0000000000000000000000000000000000000005" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x0000000000000000000000000000000000000006" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x0000000000000000000000000000000000000007" : { + "balance" : "0x01", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x10a1c1cb95c92ec31d3f22c66eef1d9f3f258c6b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x13cbb8d99c6c4e0f2728c7d72606e78a29c4e224" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x24143873e0e0815fdcbcffdbe09c979cbf9ad013" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x3535353535353535353535353535353535353535" : { + "balance" : "0x00", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x598443f1880ef585b21f1d7585bd0577402861e5" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + 
"0x77db2bebba79db42a978f896968f4afce746ea1f" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1" : { + "balance" : "0x0f4240", + "code" : "0x", + "nonce" : "0x73", + "storage" : { + } + }, + "0x90f0b1ebbba1c1936aff7aaf20a7878ff9e04b6c" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0xc305c901078781c232a2a521c2af7980f8385ee9" : { + "balance" : "0x00", + "code" : "0x600035601c52740100000000000000000000000000000000000000006020526fffffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff000000000000000000000000000000016060527402540be3fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffdabf41c00000000000000000000000002540be40060a0526330c8d1da600051141561012b5760c06004356004013511151558576004356004013560200160043560040161014037604061026061014051610160600060076305f5e0fff11558576040610240526102406060806102c0828460006004601bf15050506102c08051602082012090506000556102c060206020820352604081510160206001820306601f820103905060208203f350005b", + "nonce" : "0x01", + "storage" : { + "0x00" : "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5" + } + }, + "0xdceceaf3fc5c0a63d195d69b1a90011b7b19650d" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0xe0fc04fa2d34a66b779fd5cee748268032a146c0" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/tools/t8n/testdata/00-511/env.json b/tools/t8n/testdata/00-511/env.json new file mode 100644 index 0000000000..3cd2c4bcf4 --- /dev/null +++ b/tools/t8n/testdata/00-511/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase" : "0x3535353535353535353535353535353535353535", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x00", + "currentTimestamp" : "0x03b6", + "currentGasLimit" : "0x05f5e100", + "previousHash" : "0x0000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/tools/t8n/testdata/00-511/exp.json b/tools/t8n/testdata/00-511/exp.json new file mode 100644 index 0000000000..2591886bb8 --- /dev/null +++ b/tools/t8n/testdata/00-511/exp.json @@ -0,0 +1,80 @@ +{ + "alloc": { + "0x0000000000000000000000000000000000000000": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000001": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000005": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000006": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000007": { + "balance": "0x1" + }, + "0x10a1c1cb95c92ec31d3f22c66eef1d9f3f258c6b": { + "balance": "0xde0b6b3a7640000" + }, + "0x13cbb8d99c6c4e0f2728c7d72606e78a29c4e224": { + "balance": "0xde0b6b3a7640000" + }, + "0x24143873e0e0815fdcbcffdbe09c979cbf9ad013": { + "balance": "0xde0b6b3a7640000" + }, + "0x3535353535353535353535353535353535353535": { + "balance": "0x0" + }, + "0x598443f1880ef585b21f1d7585bd0577402861e5": { + "balance": "0xde0b6b3a7640000" + }, + "0x77db2bebba79db42a978f896968f4afce746ea1f": { + 
"balance": "0xde0b6b3a7640000" + }, + "0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e": { + "balance": "0xde0b6b3a7640000" + }, + "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1": { + "balance": "0xf4240", + "nonce": "0x73" + }, + "0x90f0b1ebbba1c1936aff7aaf20a7878ff9e04b6c": { + "balance": "0xde0b6b3a7640000" + }, + "0xc305c901078781c232a2a521c2af7980f8385ee9": { + "code": "0x600035601c52740100000000000000000000000000000000000000006020526fffffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff000000000000000000000000000000016060527402540be3fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffdabf41c00000000000000000000000002540be40060a0526330c8d1da600051141561012b5760c06004356004013511151558576004356004013560200160043560040161014037604061026061014051610160600060076305f5e0fff11558576040610240526102406060806102c0828460006004601bf15050506102c08051602082012090506000556102c060206020820352604081510160206001820306601f820103905060208203f350005b", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5" + }, + "balance": "0x0", + "nonce": "0x1" + }, + "0xdceceaf3fc5c0a63d195d69b1a90011b7b19650d": { + "balance": "0xde0b6b3a7640000" + }, + "0xe0fc04fa2d34a66b779fd5cee748268032a146c0": { + "balance": "0xde0b6b3a7640000" + } + }, + "result": { + "stateRoot": "0x26cf3e5b39723b362585cfb673b2cac335237f200a888b579bec383d50aa9d66", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x20000", + "gasUsed": "0x0" + } +} diff --git a/tools/t8n/testdata/00-511/txs.rlp b/tools/t8n/testdata/00-511/txs.rlp new file mode 100644 index 0000000000..fcfbcfb790 --- /dev/null +++ b/tools/t8n/testdata/00-511/txs.rlp @@ -0,0 +1 @@ +"0xc0" \ No newline at end of file