Skip to content

Commit 20830ec — "triedb/pathdb: polish code" (parent commit: 5afd54a)

File tree

1 file changed: +10 lines added, −33 lines removed

triedb/pathdb/nodes.go

+10 −33
Original file line number | New file line number | Change
@@ -47,38 +47,30 @@ func newNodeSet(nodes map[common.Hash]map[string]*trienode.Node) *nodeSet {
4747
if nodes == nil {
4848
nodes = make(map[common.Hash]map[string]*trienode.Node)
4949
}
50-
51-
// Create the new structure with separate maps
5250
s := &nodeSet{
5351
accountNodes: make(map[string]*trienode.Node),
5452
storageNodes: make(map[common.Hash]map[string]*trienode.Node),
5553
}
56-
57-
// Migrate the nodes to the appropriate maps
5854
for owner, subset := range nodes {
5955
if owner == (common.Hash{}) {
60-
maps.Copy(s.accountNodes, subset)
56+
s.accountNodes = subset
6157
} else {
62-
s.storageNodes[owner] = maps.Clone(subset)
58+
s.storageNodes[owner] = subset
6359
}
6460
}
65-
6661
s.computeSize()
6762
return s
6863
}
6964

7065
// computeSize calculates the database size of the held trie nodes.
7166
func (s *nodeSet) computeSize() {
7267
var size uint64
73-
7468
for path, n := range s.accountNodes {
7569
size += uint64(len(n.Blob) + len(path))
7670
}
77-
7871
for _, subset := range s.storageNodes {
79-
prefix := common.HashLength // owner (32 bytes) for storage trie nodes
8072
for path, n := range subset {
81-
size += uint64(prefix + len(n.Blob) + len(path))
73+
size += uint64(common.HashLength + len(n.Blob) + len(path))
8274
}
8375
}
8476
s.size = size
@@ -97,12 +89,11 @@ func (s *nodeSet) updateSize(delta int64) {
9789

9890
// node retrieves the trie node with node path and its trie identifier.
9991
func (s *nodeSet) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
92+
// Account trie node
10093
if owner == (common.Hash{}) {
101-
// Account trie node
10294
n, ok := s.accountNodes[string(path)]
10395
return n, ok
10496
}
105-
10697
// Storage trie node
10798
subset, ok := s.storageNodes[owner]
10899
if !ok {
@@ -133,11 +124,10 @@ func (s *nodeSet) merge(set *nodeSet) {
133124

134125
// Merge storage nodes
135126
for owner, subset := range set.storageNodes {
136-
prefix := common.HashLength
137127
current, exist := s.storageNodes[owner]
138128
if !exist {
139129
for path, n := range subset {
140-
delta += int64(prefix + len(n.Blob) + len(path))
130+
delta += int64(common.HashLength + len(n.Blob) + len(path))
141131
}
142132
// Perform a shallow copy of the map for the subset instead of claiming it
143133
// directly from the provided nodeset to avoid potential concurrent map
@@ -150,10 +140,10 @@ func (s *nodeSet) merge(set *nodeSet) {
150140
}
151141
for path, n := range subset {
152142
if orig, exist := current[path]; !exist {
153-
delta += int64(prefix + len(n.Blob) + len(path))
143+
delta += int64(common.HashLength + len(n.Blob) + len(path))
154144
} else {
155145
delta += int64(len(n.Blob) - len(orig.Blob))
156-
overwrite.add(prefix + len(orig.Blob) + len(path))
146+
overwrite.add(common.HashLength + len(orig.Blob) + len(path))
157147
}
158148
current[path] = n
159149
}
@@ -177,7 +167,7 @@ func (s *nodeSet) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[st
177167
if bytes.Equal(blob, n.Blob) {
178168
continue
179169
}
180-
panic(fmt.Sprintf("non-existent node (account %v) blob: %v", path, crypto.Keccak256Hash(n.Blob).Hex()))
170+
panic(fmt.Sprintf("non-existent account node (%v) blob: %v", path, crypto.Keccak256Hash(n.Blob).Hex()))
181171
}
182172
s.accountNodes[path] = n
183173
delta += int64(len(n.Blob)) - int64(len(orig.Blob))
@@ -195,7 +185,7 @@ func (s *nodeSet) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[st
195185
if bytes.Equal(blob, n.Blob) {
196186
continue
197187
}
198-
panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
188+
panic(fmt.Sprintf("non-existent storage node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
199189
}
200190
current[path] = n
201191
delta += int64(len(n.Blob)) - int64(len(orig.Blob))
@@ -220,7 +210,7 @@ type journalNodes struct {
220210

221211
// encode serializes the content of trie nodes into the provided writer.
222212
func (s *nodeSet) encode(w io.Writer) error {
223-
nodes := make([]journalNodes, 0, len(s.storageNodes)+len(s.accountNodes))
213+
nodes := make([]journalNodes, 0, len(s.storageNodes)+1)
224214

225215
// Encode account nodes
226216
if len(s.accountNodes) > 0 {
@@ -233,7 +223,6 @@ func (s *nodeSet) encode(w io.Writer) error {
233223
}
234224
nodes = append(nodes, entry)
235225
}
236-
237226
// Encode storage nodes
238227
for owner, subset := range s.storageNodes {
239228
entry := journalNodes{Owner: owner}
@@ -254,12 +243,9 @@ func (s *nodeSet) decode(r *rlp.Stream) error {
254243
if err := r.Decode(&encoded); err != nil {
255244
return fmt.Errorf("load nodes: %v", err)
256245
}
257-
258-
// Initialize the maps
259246
s.accountNodes = make(map[string]*trienode.Node)
260247
s.storageNodes = make(map[common.Hash]map[string]*trienode.Node)
261248

262-
// Decode the nodes
263249
for _, entry := range encoded {
264250
if entry.Owner == (common.Hash{}) {
265251
// Account nodes
@@ -289,19 +275,13 @@ func (s *nodeSet) decode(r *rlp.Stream) error {
289275

290276
// write flushes nodes into the provided database batch as a whole.
291277
func (s *nodeSet) write(batch ethdb.Batch, clean *fastcache.Cache) int {
292-
// Convert the separate maps back to the format expected by writeNodes
293278
nodes := make(map[common.Hash]map[string]*trienode.Node)
294-
295-
// Add account nodes
296279
if len(s.accountNodes) > 0 {
297280
nodes[common.Hash{}] = s.accountNodes
298281
}
299-
300-
// Add storage nodes
301282
for owner, subset := range s.storageNodes {
302283
nodes[owner] = subset
303284
}
304-
305285
return writeNodes(batch, nodes, clean)
306286
}
307287

@@ -315,12 +295,9 @@ func (s *nodeSet) reset() {
315295
// dbsize returns the approximate size of db write.
316296
func (s *nodeSet) dbsize() int {
317297
var m int
318-
319298
m += len(s.accountNodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
320-
321299
for _, nodes := range s.storageNodes {
322300
m += len(nodes) * (len(rawdb.TrieNodeStoragePrefix)) // database key prefix
323301
}
324-
325302
return m + int(s.size)
326303
}

Comments (0)