Skip to content

Commit

Permalink
Run coredb without journal (#2266)
Browse files Browse the repository at this point in the history
* Add persistent last state stamp feature

why:
  This allows running `CoreDb` without journal

* Start `CoreDb` without journal

* Remove journal related functions from `CoreDb`
  • Loading branch information
mjfh committed May 31, 2024
1 parent b72ebca commit bda760f
Show file tree
Hide file tree
Showing 22 changed files with 320 additions and 164 deletions.
18 changes: 11 additions & 7 deletions nimbus/core/chain/persist_blocks.nim
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ type
gas: GasInt

const
CleanUpEpoch = 30_000.u256
CleanUpEpoch = 30_000.toBlockNumber
## Regular checks for history clean up (applies to single state DB). This
## is mainly a debugging/testing feature so that the database can be held
## a bit smaller. It is not applicable to a full node.
Expand All @@ -64,11 +64,13 @@ proc getVmState(c: ChainRef, header: BlockHeader):

ok(vmState)

proc purgeOutOfJournalBlocks(db: CoreDbRef) {.inline, raises: [RlpError].} =
proc purgeOlderBlocksFromHistory(
db: CoreDbRef;
bn: BlockNumber;
) {.inline, raises: [RlpError].} =
## Remove non-reachable blocks from KVT database
var blkNum = db.getOldestJournalBlockNumber()
if 0 < blkNum:
blkNum = blkNum - 1
if 0 < bn:
var blkNum = bn - 1
while 0 < blkNum:
if not db.forgetHistory blkNum:
break
Expand Down Expand Up @@ -176,8 +178,10 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
if c.com.pruneHistory:
# There is a feature for test systems to regularly clean up older blocks
# from the database, not applicable to a full node set up.
if(fromBlock mod CleanUpEpoch) <= (toBlock - fromBlock):
c.db.purgeOutOfJournalBlocks()
let n = fromBlock div CleanUpEpoch
if 0 < n and n < (toBlock div CleanUpEpoch):
# Starts at around `2 * CleanUpEpoch`
c.db.purgeOlderBlocksFromHistory(fromBlock - CleanUpEpoch)

ok((headers.len, txs, vmState.cumulativeGasUsed))

Expand Down
22 changes: 20 additions & 2 deletions nimbus/db/aristo/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@ Contents
+ [4.7 Serialisation of the list of unused vertex IDs](#ch4x7)
+ [4.8 Backend filter record serialisation](#ch4x8)
+ [4.9 Serialisation of a list of filter IDs](#ch4x92)
+ [4.10 Serialisation record identifier identification](#ch4x10)
+ [4.10 Serialisation of a last saved state record](#ch4x10)
+ [4.11 Serialisation record identifier identification](#ch4x11)

* [5. *Patricia Trie* implementation notes](#ch5)
+ [5.1 Database descriptor representation](#ch5x1)
Expand Down Expand Up @@ -442,6 +443,22 @@ in a dedicated list (e.g. the latest filters) one can quickly access particular
entries without searching through the set of filters. In the current
implementation this list comes in ID pairs i.e. the number of entries is even.

<a name="ch4x10"></a>
### 4.10 Serialisation of a last saved state record

0 +--+--+--+--+--+ .. --+--+ .. --+
| | -- 32 bytes source state hash
32 +--+--+--+--+--+ .. --+--+ .. --+
| | -- 32 bytes target state hash
64 +--+--+--+--+--+ .. --+--+ .. --+
| | -- state number/block number
72 +--+--+--+--+--+--+--+--+
| | -- marker(8), 0x7f
+--+

where
marker(8) is the eight bit array *0111-1111*

<a name="ch4x11"></a>
### 4.11 Serialisation record identifier tags

Expand All @@ -458,7 +475,8 @@ i.e. the last byte of a serialised record.
| 0110 1011 | 0x6b | Unstructured payload | [4.6](#ch4x6) |
| 0111 1100 | 0x7c | List of vertex IDs | [4.7](#ch4x7) |
| 0111 1101 | 0x7d | Filter record | [4.8](#ch4x8) |
| 0111 1110 | 0x7e | List of vertex IDs | [4.9](#ch4x9) |
| 0111 1110 | 0x7e | List of filter IDs | [4.9](#ch4x9) |
| 0111 1111 | 0x7f | Last saved state | [4.10](#ch4x10) |

<a name="ch5"></a>
5. *Patricia Trie* implementation notes
Expand Down
168 changes: 100 additions & 68 deletions nimbus/db/aristo/aristo_api.nim
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,14 @@ type
## payload of type `AccountData`, its `storageID` field must be unset
## or equal to the `hike.root` vertex ID.

AristoApiFetchLastSavedStateFn* =
proc(db: AristoDbRef
): Result[SavedState,AristoError]
{.noRaise.}
## The function returns the state of the last saved state. This is a
## Merkle hash tag for vertex with ID 1 and a bespoke `uint64` identifier
## (may be interpreted as a block number).

AristoApiFetchPayloadFn* =
proc(db: AristoDbRef;
root: VertexID;
Expand Down Expand Up @@ -400,6 +408,7 @@ type
commit*: AristoApiCommitFn
delete*: AristoApiDeleteFn
delTree*: AristoApiDelTreeFn
fetchLastSavedState*: AristoApiFetchLastSavedStateFn
fetchPayload*: AristoApiFetchPayloadFn
findTx*: AristoApiFindTxFn
finish*: AristoApiFinishFn
Expand Down Expand Up @@ -431,46 +440,49 @@ type
## Index/name mapping for profile slots
AristoApiProfTotal = "total"

AristoApiProfCommitFn = "commit"
AristoApiProfDeleteFn = "delete"
AristoApiProfDelTreeFn = "delTree"
AristoApiProfFetchPayloadFn = "fetchPayload"
AristoApiProfFindTxFn = "findTx"
AristoApiProfFinishFn = "finish"
AristoApiProfForgetFn = "forget"
AristoApiProfForkTxFn = "forkTx"
AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathFn = "hasPath"
AristoApiProfHikeUpFn = "hikeUp"
AristoApiProfIsTopFn = "isTop"
AristoApiProfJournalGetFilterFn = "journalGetFilter"
AristoApiProfJournalGetInxFn = "journalGetInx"
AristoApiProfLevelFn = "level"
AristoApiProfNForkedFn = "nForked"
AristoApiProfMergeFn = "merge"
AristoApiProfMergePayloadFn = "mergePayload"
AristoApiProfPathAsBlobFn = "pathAsBlob"
AristoApiProfPersistFn = "persist"
AristoApiProfReCentreFn = "reCentre"
AristoApiProfRollbackFn = "rollback"
AristoApiProfSerialiseFn = "serialise"
AristoApiProfTxBeginFn = "txBegin"
AristoApiProfTxTopFn = "txTop"
AristoApiProfVidFetchFn = "vidFetch"
AristoApiProfVidDisposeFn = "vidDispose"

AristoApiProfBeGetVtxFn = "be/getVtx"
AristoApiProfBeGetKeyFn = "be/getKey"
AristoApiProfBeGetFilFn = "be/getFil"
AristoApiProfBeGetIdgFn = "be/getIfg"
AristoApiProfBeGetFqsFn = "be/getFqs"
AristoApiProfBePutVtxFn = "be/putVtx"
AristoApiProfBePutKeyFn = "be/putKey"
AristoApiProfBePutFilFn = "be/putFil"
AristoApiProfBePutIdgFn = "be/putIdg"
AristoApiProfBePutFqsFn = "be/putFqs"
AristoApiProfBePutEndFn = "be/putEnd"
AristoApiProfCommitFn = "commit"
AristoApiProfDeleteFn = "delete"
AristoApiProfDelTreeFn = "delTree"
AristoApiProfFetchLastSavedStateFn = "fetchLastSavedState"
AristoApiProfFetchPayloadFn = "fetchPayload"
AristoApiProfFindTxFn = "findTx"
AristoApiProfFinishFn = "finish"
AristoApiProfForgetFn = "forget"
AristoApiProfForkTxFn = "forkTx"
AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathFn = "hasPath"
AristoApiProfHikeUpFn = "hikeUp"
AristoApiProfIsTopFn = "isTop"
AristoApiProfJournalGetFilterFn = "journalGetFilter"
AristoApiProfJournalGetInxFn = "journalGetInx"
AristoApiProfLevelFn = "level"
AristoApiProfNForkedFn = "nForked"
AristoApiProfMergeFn = "merge"
AristoApiProfMergePayloadFn = "mergePayload"
AristoApiProfPathAsBlobFn = "pathAsBlob"
AristoApiProfPersistFn = "persist"
AristoApiProfReCentreFn = "reCentre"
AristoApiProfRollbackFn = "rollback"
AristoApiProfSerialiseFn = "serialise"
AristoApiProfTxBeginFn = "txBegin"
AristoApiProfTxTopFn = "txTop"
AristoApiProfVidFetchFn = "vidFetch"
AristoApiProfVidDisposeFn = "vidDispose"

AristoApiProfBeGetVtxFn = "be/getVtx"
AristoApiProfBeGetKeyFn = "be/getKey"
AristoApiProfBeGetFilFn = "be/getFil"
AristoApiProfBeGetIdgFn = "be/getIfg"
AristoApiProfBeGetLstFn = "be/getLst"
AristoApiProfBeGetFqsFn = "be/getFqs"
AristoApiProfBePutVtxFn = "be/putVtx"
AristoApiProfBePutKeyFn = "be/putKey"
AristoApiProfBePutFilFn = "be/putFil"
AristoApiProfBePutIdgFn = "be/putIdg"
AristoApiProfBePutLstFn = "be/putLst"
AristoApiProfBePutFqsFn = "be/putFqs"
AristoApiProfBePutEndFn = "be/putEnd"

AristoApiProfRef* = ref object of AristoApiRef
## Profiling API extension of `AristoApiObj`
Expand All @@ -486,6 +498,7 @@ when AutoValidateApiHooks:
doAssert not api.commit.isNil
doAssert not api.delete.isNil
doAssert not api.delTree.isNil
doAssert not api.fetchLastSavedState.isNil
doAssert not api.fetchPayload.isNil
doAssert not api.findTx.isNil
doAssert not api.finish.isNil
Expand Down Expand Up @@ -540,6 +553,7 @@ func init*(api: var AristoApiObj) =
api.commit = commit
api.delete = delete
api.delTree = delTree
api.fetchLastSavedState = fetchLastSavedState
api.fetchPayload = fetchPayload
api.findTx = findTx
api.finish = finish
Expand Down Expand Up @@ -574,34 +588,35 @@ func init*(T: type AristoApiRef): T =

func dup*(api: AristoApiRef): AristoApiRef =
result = AristoApiRef(
commit: api.commit,
delete: api.delete,
delTree: api.delTree,
fetchPayload: api.fetchPayload,
findTx: api.findTx,
finish: api.finish,
forget: api.forget,
forkTx: api.forkTx,
getKeyRc: api.getKeyRc,
hashify: api.hashify,
hasPath: api.hasPath,
hikeUp: api.hikeUp,
isTop: api.isTop,
journalGetFilter: api.journalGetFilter,
journalGetInx: api.journalGetInx,
level: api.level,
nForked: api.nForked,
merge: api.merge,
mergePayload: api.mergePayload,
pathAsBlob: api.pathAsBlob,
persist: api.persist,
reCentre: api.reCentre,
rollback: api.rollback,
serialise: api.serialise,
txBegin: api.txBegin,
txTop: api.txTop,
vidFetch: api.vidFetch,
vidDispose: api.vidDispose)
commit: api.commit,
delete: api.delete,
delTree: api.delTree,
fetchLastSavedState: api.fetchLastSavedState,
fetchPayload: api.fetchPayload,
findTx: api.findTx,
finish: api.finish,
forget: api.forget,
forkTx: api.forkTx,
getKeyRc: api.getKeyRc,
hashify: api.hashify,
hasPath: api.hasPath,
hikeUp: api.hikeUp,
isTop: api.isTop,
journalGetFilter: api.journalGetFilter,
journalGetInx: api.journalGetInx,
level: api.level,
nForked: api.nForked,
merge: api.merge,
mergePayload: api.mergePayload,
pathAsBlob: api.pathAsBlob,
persist: api.persist,
reCentre: api.reCentre,
rollback: api.rollback,
serialise: api.serialise,
txBegin: api.txBegin,
txTop: api.txTop,
vidFetch: api.vidFetch,
vidDispose: api.vidDispose)
when AutoValidateApiHooks:
api.validate

Expand Down Expand Up @@ -647,6 +662,11 @@ func init*(
AristoApiProfDelTreeFn.profileRunner:
result = api.delTree(a, b, c)

profApi.fetchLastSavedState =
proc(a: AristoDbRef): auto =
AristoApiProfFetchLastSavedStateFn.profileRunner:
result = api.fetchLastSavedState(a)

profApi.fetchPayload =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
AristoApiProfFetchPayloadFn.profileRunner:
Expand Down Expand Up @@ -802,6 +822,12 @@ func init*(
result = be.getIdgFn()
data.list[AristoApiProfBeGetIdgFn.ord].masked = true

beDup.getLstFn =
proc(): auto =
AristoApiProfBeGetLstFn.profileRunner:
result = be.getLstFn()
data.list[AristoApiProfBeGetLstFn.ord].masked = true

beDup.getFqsFn =
proc(): auto =
AristoApiProfBeGetFqsFn.profileRunner:
Expand Down Expand Up @@ -832,6 +858,12 @@ func init*(
be.putIdgFn(a,b)
data.list[AristoApiProfBePutIdgFn.ord].masked = true

beDup.putLstFn =
proc(a: PutHdlRef; b: SavedState) =
AristoApiProfBePutLstFn.profileRunner:
be.putLstFn(a,b)
data.list[AristoApiProfBePutLstFn.ord].masked = true

beDup.putFqsFn =
proc(a: PutHdlRef; b: openArray[(QueueID,QueueID)]) =
AristoApiProfBePutFqsFn.profileRunner:
Expand Down
Loading

0 comments on commit bda760f

Please sign in to comment.