@@ -47,38 +47,30 @@ func newNodeSet(nodes map[common.Hash]map[string]*trienode.Node) *nodeSet {
 	if nodes == nil {
 		nodes = make(map[common.Hash]map[string]*trienode.Node)
 	}
-
-	// Create the new structure with separate maps
 	s := &nodeSet{
 		accountNodes: make(map[string]*trienode.Node),
 		storageNodes: make(map[common.Hash]map[string]*trienode.Node),
 	}
-
-	// Migrate the nodes to the appropriate maps
 	for owner, subset := range nodes {
 		if owner == (common.Hash{}) {
-			maps.Copy(s.accountNodes, subset)
+			s.accountNodes = subset
 		} else {
-			s.storageNodes[owner] = maps.Clone(subset)
+			s.storageNodes[owner] = subset
 		}
 	}
-
 	s.computeSize()
 	return s
 }
 
 // computeSize calculates the database size of the held trie nodes.
 func (s *nodeSet) computeSize() {
 	var size uint64
-
 	for path, n := range s.accountNodes {
 		size += uint64(len(n.Blob) + len(path))
 	}
-
 	for _, subset := range s.storageNodes {
-		prefix := common.HashLength // owner (32 bytes) for storage trie nodes
 		for path, n := range subset {
-			size += uint64(prefix + len(n.Blob) + len(path))
+			size += uint64(common.HashLength + len(n.Blob) + len(path))
 		}
 	}
 	s.size = size
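Note on the constructor change above: dropping maps.Copy/maps.Clone means newNodeSet now claims the supplied maps instead of duplicating them, so the set and the caller alias the same memory, presumably safe because callers hand over ownership of the maps. A minimal standalone sketch of the aliasing difference (the node type is a stand-in for *trienode.Node):

```go
package main

import "fmt"

// node is a stand-in for *trienode.Node; only the map semantics matter here.
type node struct{ blob []byte }

func main() {
	src := map[string]*node{"path1": {blob: []byte{0xde, 0xad}}}

	// New behavior: the constructor claims the map, so both names alias it.
	claimed := src // analogous to s.accountNodes = subset

	// Old behavior: maps.Clone produced an independent shallow copy.
	cloned := make(map[string]*node, len(src))
	for k, v := range src {
		cloned[k] = v
	}

	// A later write through the original map is visible to the claimed
	// alias but not to the clone.
	src["path2"] = &node{blob: []byte{0xbe, 0xef}}
	fmt.Println(len(claimed), len(cloned)) // prints: 2 1
}
```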
@@ -97,12 +89,11 @@ func (s *nodeSet) updateSize(delta int64) {
 
 // node retrieves the trie node with node path and its trie identifier.
 func (s *nodeSet) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
+	// Account trie node
	if owner == (common.Hash{}) {
-		// Account trie node
 		n, ok := s.accountNodes[string(path)]
 		return n, ok
 	}
-
 	// Storage trie node
 	subset, ok := s.storageNodes[owner]
 	if !ok {
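For reference, the lookup contract the relocated comment describes: the zero owner hash addresses the account trie, anything else selects a per-owner storage map. A simplified sketch, with string keys standing in for common.Hash owners and []byte paths:

```go
package main

import "fmt"

type node struct{ blob []byte }

// zeroOwner stands in for common.Hash{}: the empty owner selects the
// account trie instead of a storage trie.
const zeroOwner = ""

// lookup mirrors the two-tier scheme in nodeSet.node.
func lookup(accounts map[string]*node, storages map[string]map[string]*node, owner, path string) (*node, bool) {
	// Account trie node
	if owner == zeroOwner {
		n, ok := accounts[path]
		return n, ok
	}
	// Storage trie node
	subset, ok := storages[owner]
	if !ok {
		return nil, false
	}
	n, ok := subset[path]
	return n, ok
}

func main() {
	accounts := map[string]*node{"ab": {blob: []byte{0x01}}}
	storages := map[string]map[string]*node{
		"owner-1": {"cd": {blob: []byte{0x02}}},
	}
	_, ok := lookup(accounts, storages, zeroOwner, "ab")
	fmt.Println(ok) // true: resolved in the account-node map
	_, ok = lookup(accounts, storages, "owner-1", "cd")
	fmt.Println(ok) // true: resolved via the per-owner storage map
}
```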
@@ -133,11 +124,10 @@ func (s *nodeSet) merge(set *nodeSet) {
 
 	// Merge storage nodes
 	for owner, subset := range set.storageNodes {
-		prefix := common.HashLength
 		current, exist := s.storageNodes[owner]
 		if !exist {
 			for path, n := range subset {
-				delta += int64(prefix + len(n.Blob) + len(path))
+				delta += int64(common.HashLength + len(n.Blob) + len(path))
 			}
 			// Perform a shallow copy of the map for the subset instead of claiming it
 			// directly from the provided nodeset to avoid potential concurrent map
@@ -150,10 +140,10 @@ func (s *nodeSet) merge(set *nodeSet) {
 		}
 		for path, n := range subset {
 			if orig, exist := current[path]; !exist {
-				delta += int64(prefix + len(n.Blob) + len(path))
+				delta += int64(common.HashLength + len(n.Blob) + len(path))
 			} else {
 				delta += int64(len(n.Blob) - len(orig.Blob))
-				overwrite.add(prefix + len(orig.Blob) + len(path))
+				overwrite.add(common.HashLength + len(orig.Blob) + len(path))
 			}
 			current[path] = n
 		}
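The inlined `common.HashLength + len(n.Blob) + len(path)` expression is the same accounting rule computeSize applies: every node is charged its path and blob bytes, and storage nodes pay 32 extra bytes because their database key carries the owner hash. A small worked sketch (hashLength mirrors common.HashLength so the example compiles without go-ethereum):

```go
package main

import "fmt"

// hashLength mirrors common.HashLength (32 bytes).
const hashLength = 32

// sizeOf applies the accounting rule shared by computeSize and merge:
// path+blob bytes for every node, plus the owner hash for storage nodes.
func sizeOf(path, blob []byte, storage bool) int {
	size := len(path) + len(blob)
	if storage {
		size += hashLength
	}
	return size
}

func main() {
	path := []byte{0x01, 0x02} // 2-byte trie path
	blob := make([]byte, 100)  // 100-byte encoded node

	fmt.Println(sizeOf(path, blob, false)) // 102: account node
	fmt.Println(sizeOf(path, blob, true))  // 134: storage node, +32 for owner
}
```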
@@ -177,7 +167,7 @@ func (s *nodeSet) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[st
 			if bytes.Equal(blob, n.Blob) {
 				continue
 			}
-			panic(fmt.Sprintf("non-existent node (account %v) blob: %v", path, crypto.Keccak256Hash(n.Blob).Hex()))
+			panic(fmt.Sprintf("non-existent account node (%v) blob: %v", path, crypto.Keccak256Hash(n.Blob).Hex()))
 		}
 		s.accountNodes[path] = n
 		delta += int64(len(n.Blob)) - int64(len(orig.Blob))
@@ -195,7 +185,7 @@ func (s *nodeSet) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[st
 			if bytes.Equal(blob, n.Blob) {
 				continue
 			}
-			panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
+			panic(fmt.Sprintf("non-existent storage node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
 		}
 		current[path] = n
 		delta += int64(len(n.Blob)) - int64(len(orig.Blob))
@@ -220,7 +210,7 @@ type journalNodes struct {
 
 // encode serializes the content of trie nodes into the provided writer.
 func (s *nodeSet) encode(w io.Writer) error {
-	nodes := make([]journalNodes, 0, len(s.storageNodes)+len(s.accountNodes))
+	nodes := make([]journalNodes, 0, len(s.storageNodes)+1)
 
 	// Encode account nodes
 	if len(s.accountNodes) > 0 {
@@ -233,7 +223,6 @@ func (s *nodeSet) encode(w io.Writer) error {
 		}
 		nodes = append(nodes, entry)
 	}
-
 	// Encode storage nodes
 	for owner, subset := range s.storageNodes {
 		entry := journalNodes{Owner: owner}
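The capacity fix in encode follows from the journal layout visible in these hunks: all account nodes are folded into a single journalNodes entry under the zero owner hash, so the slice needs at most one slot for the account trie plus one per storage trie, not one per account node. A toy illustration with assumed counts:

```go
package main

import "fmt"

func main() {
	// Assumed counts, purely for illustration.
	storageTries, accountNodes := 3, 50

	// Old hint: one slot per account node, although encode emits all
	// account nodes as a single journalNodes entry (zero owner hash).
	oldCap := storageTries + accountNodes

	// New hint: one slot per storage trie plus one for the account trie.
	newCap := storageTries + 1

	fmt.Println(oldCap, newCap) // prints: 53 4
}
```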
@@ -254,12 +243,9 @@ func (s *nodeSet) decode(r *rlp.Stream) error {
 	if err := r.Decode(&encoded); err != nil {
 		return fmt.Errorf("load nodes: %v", err)
 	}
-
-	// Initialize the maps
 	s.accountNodes = make(map[string]*trienode.Node)
 	s.storageNodes = make(map[common.Hash]map[string]*trienode.Node)
 
-	// Decode the nodes
 	for _, entry := range encoded {
 		if entry.Owner == (common.Hash{}) {
 			// Account nodes
@@ -289,19 +275,13 @@ func (s *nodeSet) decode(r *rlp.Stream) error {
 
 // write flushes nodes into the provided database batch as a whole.
 func (s *nodeSet) write(batch ethdb.Batch, clean *fastcache.Cache) int {
-	// Convert the separate maps back to the format expected by writeNodes
 	nodes := make(map[common.Hash]map[string]*trienode.Node)
-
-	// Add account nodes
 	if len(s.accountNodes) > 0 {
 		nodes[common.Hash{}] = s.accountNodes
 	}
-
-	// Add storage nodes
 	for owner, subset := range s.storageNodes {
 		nodes[owner] = subset
 	}
-
 	return writeNodes(batch, nodes, clean)
 }
 
@@ -315,12 +295,9 @@ func (s *nodeSet) reset() {
 // dbsize returns the approximate size of db write.
 func (s *nodeSet) dbsize() int {
 	var m int
-
 	m += len(s.accountNodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
-
 	for _, nodes := range s.storageNodes {
 		m += len(nodes) * (len(rawdb.TrieNodeStoragePrefix)) // database key prefix
 	}
-
 	return m + int(s.size)
 }
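dbsize adds only the per-node database key prefix on top of s.size, since path, blob, and owner bytes are already counted there. A standalone estimate, assuming the one-byte rawdb prefixes ("A" for account, "O" for storage trie nodes):

```go
package main

import "fmt"

// dbsizeEstimate mirrors the arithmetic in dbsize: s.size already
// covers path, blob, and owner bytes, so only the per-node database
// key prefix is added. The one-byte prefix lengths are an assumption
// matching rawdb.TrieNodeAccountPrefix ("A") and
// rawdb.TrieNodeStoragePrefix ("O").
func dbsizeEstimate(accountNodes, storageNodes int, size uint64) int {
	m := accountNodes*1 + storageNodes*1
	return m + int(size)
}

func main() {
	// 10 account nodes, 40 storage nodes, 4096 bytes of tracked size.
	fmt.Println(dbsizeEstimate(10, 40, 4096)) // prints: 4146
}
```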