Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Aristo cull journal related stuff #2288

Merged
merged 9 commits into from
Jun 3, 2024
Prev Previous commit
Next Next commit
remove trg field from FilterRef
why:
  Same as `kMap[$1]`
  • Loading branch information
mjfh committed Jun 3, 2024
commit f6ddb1d730a6642b17a8ff2cf575b85c30ba1e50
11 changes: 5 additions & 6 deletions nimbus/db/aristo/aristo_blobify.nim
Original file line number Diff line number Diff line change
Expand Up @@ -158,12 +158,11 @@ proc blobify*(vGen: openArray[VertexID]): Blob =

proc blobifyTo*(lSst: SavedState; data: var Blob) =
## Serialise a last saved state record
data.setLen(73)
(addr data[0]).copyMem(unsafeAddr lSst.src.data[0], 32)
(addr data[32]).copyMem(unsafeAddr lSst.trg.data[0], 32)
let w = lSst.serial.toBytesBE
(addr data[64]).copyMem(unsafeAddr w[0], 8)
data[72] = 0x7fu8
data.setLen(0)
data.add lSst.src.data
data.add lSst.trg.data
data.add lSst.serial.toBytesBE
data.add @[0x7fu8]

proc blobify*(lSst: SavedState): Blob =
## Variant of `blobify()`
Expand Down
1 change: 0 additions & 1 deletion nimbus/db/aristo/aristo_debug.nim
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,6 @@ proc ppFilter(
result &= " n/a"
return
result &= pfx & "src=" & fl.src.to(HashKey).ppKey(db)
result &= pfx & "trg=" & fl.trg.to(HashKey).ppKey(db)
result &= pfx & "vGen" & pfx1 & "[" &
fl.vGen.mapIt(it.ppVid).join(",") & "]"
result &= pfx & "sTab" & pfx1 & "{"
Expand Down
7 changes: 3 additions & 4 deletions nimbus/db/aristo/aristo_delta.nim
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,7 @@ proc deltaFwd*(
src: srcRoot,
sTab: layer.delta.sTab,
kMap: layer.delta.kMap,
vGen: layer.final.vGen.vidReorg, # Compact recycled IDs
trg: trgRoot)
vGen: layer.final.vGen.vidReorg) # Compact recycled IDs

# ------------------------------------------------------------------------------
# Public functions, apply/install filters
Expand All @@ -81,7 +80,7 @@ proc deltaMerge*(
return err((VertexID(1),rc.error))

db.roFilter = ? db.merge(filter, db.roFilter, ubeRoot)
if db.roFilter.src == db.roFilter.trg:
if db.roFilter.src == db.roFilter.kMap.getOrVoid(VertexID 1).to(Hash256):
# Under normal conditions, the root keys cannot be the same unless the
# database is empty. This changes if there is a fixed root vertex as
# used with the `snap` sync protocol boundaty proof. In that case, there
Expand Down Expand Up @@ -143,7 +142,7 @@ proc deltaPersistent*(

let lSst = SavedState(
src: db.roFilter.src,
trg: db.roFilter.trg,
trg: db.roFilter.kMap.getOrVoid(VertexID 1).to(Hash256),
serial: nxtFid)

# Store structural single trie entries
Expand Down
14 changes: 7 additions & 7 deletions nimbus/db/aristo/aristo_delta/delta_merge.nim
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,8 @@ proc merge*(
return ok(lower)

# Verify stackability
if upper.src != lower.trg:
let lowerTrg = lower.kMap.getOrVoid(VertexID(1)).to(Hash256)
if upper.src != lowerTrg:
return err((VertexID(0), FilTrgSrcMismatch))
if lower.src != beStateRoot:
return err((VertexID(0), FilStateRootMismatch))
Expand All @@ -68,8 +69,7 @@ proc merge*(
src: lower.src,
sTab: lower.sTab,
kMap: lower.kMap,
vGen: upper.vGen,
trg: upper.trg)
vGen: upper.vGen)

for (vid,vtx) in upper.sTab.pairs:
if vtx.isValid or not newFilter.sTab.hasKey vid:
Expand All @@ -96,7 +96,7 @@ proc merge*(
return err((vid,rc.error))

# Check consistency
if (newFilter.src == newFilter.trg) !=
if (newFilter.src == newFilter.kMap.getOrVoid(VertexID 1).to(Hash256)) !=
(newFilter.sTab.len == 0 and newFilter.kMap.len == 0):
return err((VertexID(0),FilSrcTrgInconsistent))

Expand All @@ -123,16 +123,16 @@ proc merge*(
return err((VertexID(0),FilNilFilterRejected))

# Verify stackability
if upper.src != lower.trg:
let lowerTrg = lower.kMap.getOrVoid(VertexID(1)).to(Hash256)
if upper.src != lowerTrg:
return err((VertexID(0), FilTrgSrcMismatch))

# There is no need to deep copy table vertices as they will not be modified.
let newFilter = FilterRef(
src: lower.src,
sTab: lower.sTab,
kMap: lower.kMap,
vGen: upper.vGen,
trg: upper.trg)
vGen: upper.vGen)

for (vid,vtx) in upper.sTab.pairs:
newFilter.sTab[vid] = vtx
Expand Down
5 changes: 2 additions & 3 deletions nimbus/db/aristo/aristo_delta/delta_reverse.nim
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@

import
std/tables,
eth/common,
results,
".."/[aristo_desc, aristo_get]

Expand All @@ -28,9 +29,7 @@ proc revFilter*(
## backend (excluding optionally installed read-only filter.)
##
# Register MPT state roots for reverting back
let rev = FilterRef(
src: filter.trg,
trg: filter.src)
let rev = FilterRef(src: filter.kMap.getOrVoid(VertexID 1).to(Hash256))

# Get vid generator state on backend
block:
Expand Down
6 changes: 4 additions & 2 deletions nimbus/db/aristo/aristo_delta/delta_siblings.nim
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@

import
results,
eth/common,
../aristo_desc,
"."/[delta_merge, delta_reverse]

Expand Down Expand Up @@ -93,9 +94,10 @@ proc update*(ctx: UpdateSiblingsRef): Result[UpdateSiblingsRef,AristoError] =
let db = ctx.db
# Update distributed filters. Note that the physical backend database
# must not have been updated, yet. So the new root key for the backend
# will be `db.roFilter.trg`.
# will be `db.roFilter.kMap[$1]`.
let trg = db.roFilter.kMap.getOrVoid(VertexID 1).to(Hash256)
for w in db.forked:
let rc = db.merge(w.roFilter, ctx.rev, db.roFilter.trg)
let rc = db.merge(w.roFilter, ctx.rev, trg)
if rc.isErr:
ctx.rollback()
return err(rc.error[1])
Expand Down
8 changes: 0 additions & 8 deletions nimbus/db/aristo/aristo_desc/desc_structural.nim
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,6 @@ type
FilterRef* = ref object
## Delta layer
src*: Hash256 ## Applicable to this state root
trg*: Hash256 ## Resulting state root (i.e. `kMap[1]`)
sTab*: Table[VertexID,VertexRef] ## Filter structural vertex table
kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping
vGen*: seq[VertexID] ## Filter unique vertex ID generator
Expand Down Expand Up @@ -133,13 +132,6 @@ type
final*: LayerFinalRef ## Stored as latest version
txUid*: uint ## Transaction identifier if positive

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func max(a, b, c: int): int =
max(max(a,b),c)

# ------------------------------------------------------------------------------
# Public helpers (misc)
# ------------------------------------------------------------------------------
Expand Down
8 changes: 2 additions & 6 deletions tests/test_aristo.nim
Original file line number Diff line number Diff line change
Expand Up @@ -96,8 +96,6 @@ proc accountsRunner(
baseDir = getTmpDir() / sample.name & "-accounts"
dbDir = if persistent: baseDir / "tmp" else: ""
isPersistent = if persistent: "persistent DB" else: "mem DB only"
doRdbOk = (cmpBackends and 0 < dbDir.len)
cmpBeInfo = if doRdbOk: "persistent" else: "memory"

defer:
try: baseDir.removeDir except CatchableError: discard
Expand Down Expand Up @@ -132,8 +130,6 @@ proc storagesRunner(
baseDir = getTmpDir() / sample.name & "-storage"
dbDir = if persistent: baseDir / "tmp" else: ""
isPersistent = if persistent: "persistent DB" else: "mem DB only"
doRdbOk = (cmpBackends and 0 < dbDir.len)
cmpBeInfo = if doRdbOk: "persistent" else: "memory"

defer:
try: baseDir.removeDir except CatchableError: discard
Expand Down Expand Up @@ -170,7 +166,7 @@ when isMainModule:

when true and false:
# Verify Problem with the database for production test
noisy.accountsRunner(persistent=false)
noisy.aristoMain()

# This one uses dumps from the external `nimbus-eth1-blob` repo
when true and false:
Expand All @@ -180,7 +176,7 @@ when isMainModule:
noisy.accountsRunner(sam, resetDb=true)

when true: # and false:
let persistent = false # or true
let persistent = false or true
noisy.showElapsed("@snap_test_list"):
for n,sam in snapTestList:
noisy.accountsRunner(sam, persistent=persistent)
Expand Down
63 changes: 1 addition & 62 deletions tests/test_aristo/test_filter.nim
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
return false
if unsafeAddr(a[]) != unsafeAddr(b[]):
if a.src != b.src or
a.trg != b.trg or
a.kMap.getOrVoid(VertexID 1) != b.kMap.getOrVoid(VertexID 1) or
a.vGen != b.vGen:
return false

Expand Down Expand Up @@ -218,60 +218,6 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =

true

proc isEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
## ..
if a.src != b.src:
noisy.say "***", "not isEq:", " a.src=", a.src.pp, " b.src=", b.src.pp
return
if a.trg != b.trg:
noisy.say "***", "not isEq:", " a.trg=", a.trg.pp, " b.trg=", b.trg.pp
return
if a.vGen != b.vGen:
noisy.say "***", "not isEq:", " a.vGen=", a.vGen.pp, " b.vGen=", b.vGen.pp
return
if a.sTab.len != b.sTab.len:
noisy.say "***", "not isEq:",
" a.sTab.len=", a.sTab.len,
" b.sTab.len=", b.sTab.len
return
if a.kMap.len != b.kMap.len:
noisy.say "***", "not isEq:",
" a.kMap.len=", a.kMap.len,
" b.kMap.len=", b.kMap.len
return
for (vid,aVtx) in a.sTab.pairs:
if b.sTab.hasKey vid:
let bVtx = b.sTab.getOrVoid vid
if aVtx != bVtx:
noisy.say "***", "not isEq:",
" vid=", vid.pp,
" aVtx=", aVtx.pp(db),
" bVtx=", bVtx.pp(db)
return
else:
noisy.say "***", "not isEq:",
" vid=", vid.pp,
" aVtx=", aVtx.pp(db),
" bVtx=n/a"
return
for (vid,aKey) in a.kMap.pairs:
if b.kMap.hasKey vid:
let bKey = b.kMap.getOrVoid vid
if aKey != bKey:
noisy.say "***", "not isEq:",
" vid=", vid.pp,
" aKey=", aKey.pp,
" bKey=", bKey.pp
return
else:
noisy.say "*** not eq:",
" vid=", vid.pp,
" aKey=", aKey.pp,
" bKey=n/a"
return

true

# ----------------------

proc checkBeOk(
Expand All @@ -292,13 +238,6 @@ proc checkBeOk(
" cache=", cache
true

# ---------------------------------

iterator payload(list: openArray[ProofTrieData]): LeafTiePayload =
for w in list:
for p in w.kvpLst.mapRootVid VertexID(1):
yield p

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
Expand Down
10 changes: 4 additions & 6 deletions tests/test_coredb.nim
Original file line number Diff line number Diff line change
Expand Up @@ -328,18 +328,15 @@ proc coreDbMain*(noisy = defined(debug)) =
noisy.persistentSyncPreLoadAndResumeRunner()

when isMainModule:
import
std/times
const
noisy = defined(debug) or true
var
sampleList: seq[CaptureSpecs]

setErrorLevel()

when true and false:
when true: # and false:
false.coreDbMain()
false.persistentSyncPreLoadAndResumeRunner()

# This one uses the readily available dump: `bulkTest0` and some huge replay
# dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
Expand All @@ -349,15 +346,16 @@ when isMainModule:
if sampleList.len == 0:
sampleList = @[memorySampleDefault]

when true: # and false:
when true and false:
import std/times
var state: (Duration, int)
for n,capture in sampleList:
noisy.profileSection("@sample #" & $n, state):
noisy.chainSyncRunner(
capture = capture,
pruneHistory = true,
#profilingOk = true,
finalDiskCleanUpOk = false,
#finalDiskCleanUpOk = false,
oldLogAlign = true
)

Expand Down