 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"math/big"
 	"os"
 	"path"
@@ -21,7 +22,7 @@ import (
 	objectsdk "github.com/nspcc-dev/neofs-sdk-go/object"
 	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 	"github.com/nspcc-dev/neofs-sdk-go/version"
-	"golang.org/x/sync/errgroup"
+	"go.uber.org/zap"
 )

 type containerStorage struct {
@@ -50,11 +51,46 @@ func (s *containerStorage) drop() error {
 	return nil
 }

-func (s *containerStorage) putMPTIndexes(ee []objEvent) {
+type eventWithMptKVs struct {
+	ev            objEvent
+	additionalKVs map[string][]byte
+}
+
+func (s *containerStorage) putObjects(ctx context.Context, l *zap.Logger, bInd uint32, ee []objEvent, net NeoFSNetwork) {
 	s.m.Lock()
 	defer s.m.Unlock()

-	for _, e := range ee {
+	// raw indexes are responsible for object validation and only after
+	// object is taken as a valid one, it goes to the slower-on-read MPT
+	// storage via objCh
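+	// (the channel is buffered for every event, so sends below never block on the MPT consumer)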
+	objCh := make(chan eventWithMptKVs, len(ee))
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		err := s.putRawIndexes(ctx, l, ee, net, objCh)
+		if err != nil {
+			l.Error("failed to put raw indexes", zap.Error(err))
+		}
+	}()
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		err := s.putMPTIndexes(bInd, objCh)
+		if err != nil {
+			l.Error("failed to put mpt indexes", zap.Error(err))
+		}
+	}()
+	wg.Wait()
+}
+
+// lock should be taken.
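+// putMPTIndexes drains ch until the producer closes it and commits all collected pairs as a single MPT batch.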
+func (s *containerStorage) putMPTIndexes(bInd uint32, ch <-chan eventWithMptKVs) error {
+	for evWithKeys := range ch {
+		maps.Copy(s.mptOpsBatch, evWithKeys.additionalKVs)
+
+		e := evWithKeys.ev
 		commsuffix := e.oID[:]

 		// batching that is implemented for MPT ignores key's first byte
@@ -77,61 +113,124 @@ func (s *containerStorage) putMPTIndexes(ee []objEvent) {
 			s.mptOpsBatch[string(append([]byte{0, typeIndex}, commsuffix...))] = []byte{byte(e.typ)}
 		}
 	}
+
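+	// persist the current state root before the new batch changes it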
+	root := s.mpt.StateRoot()
+	s.mpt.Store.Put([]byte{rootKey}, root[:])
+
+	_, err := s.mpt.PutBatch(mpt.MapToMPTBatch(s.mptOpsBatch))
+	if err != nil {
+		return fmt.Errorf("put batch to MPT storage: %w", err)
+	}
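+	// s.mptOpsBatch is reused, so empty it once the batch has been committed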
+	clear(s.mptOpsBatch)
+
+	s.mpt.Flush(bInd)
+
+	return nil
 }

-func (s *containerStorage) putRawIndexes(ctx context.Context, ee []objEvent, net NeoFSNetwork) error {
-	var wg errgroup.Group
-	wg.SetLimit(10)
-	objects := make([]objectsdk.Object, len(ee))
+// lock should be taken.
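+// putRawIndexes validates incoming events, forwards accepted ones to res and closes res when done.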
+func (s *containerStorage) putRawIndexes(ctx context.Context, l *zap.Logger, ee []objEvent, net NeoFSNetwork, res chan<- eventWithMptKVs) (finalErr error) {
+	batch := make(map[string][]byte)
+	defer func() {
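+		// on exit: unblock the MPT consumer and, if nothing failed, flush the accumulated change set in one write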
+		close(res)

-	for i, e := range ee {
-		wg.Go(func() error {
-			h, err := net.Head(ctx, e.cID, e.oID)
+		if finalErr == nil && len(batch) > 0 {
+			err := s.db.PutChangeSet(batch, nil)
 			if err != nil {
-				// TODO define behavior with status (non-network) errors; maybe it is near #3140
-				return fmt.Errorf("HEAD object: %w", err)
+				finalErr = fmt.Errorf("put change set to DB: %w", err)
 			}
+		}
+	}()

-			objects[i] = h
-			return nil
-		})
-	}
+	for _, e := range ee {
+		err := isOpAllowed(s.db, e)
+		if err != nil {
+			l.Warn("skip object", zap.Stringer("oid", e.oID), zap.String("reason", err.Error()))
+			continue
+		}

-	err := wg.Wait()
-	if err != nil {
-		return err
-	}
+		evWithMpt := eventWithMptKVs{ev: e}

-	s.m.Lock()
-	defer s.m.Unlock()
-	batch := make(map[string][]byte)
+		h, err := net.Head(ctx, e.cID, e.oID)
+		if err != nil {
+			// TODO define behavior with status (non-network) errors; maybe it is near #3140
+			return fmt.Errorf("HEAD %s object: %w", e.oID, err)
+		}

-	for i, e := range ee {
 		commsuffix := e.oID[:]

 		batch[string(append([]byte{oidIndex}, commsuffix...))] = []byte{}
 		if len(e.deletedObjects) > 0 {
 			batch[string(append([]byte{deletedIndex}, commsuffix...))] = e.deletedObjects
-			err = deleteObjectsOps(batch, s.mptOpsBatch, s.db, e.deletedObjects)
+			evWithMpt.additionalKVs, err = deleteObjectsOps(batch, s.db, e.deletedObjects)
 			if err != nil {
-				return fmt.Errorf("cleaning operations for %s object: %w", e.oID, err)
+				l.Error("cleaning deleted object", zap.Stringer("oid", e.oID), zap.Error(err))
+				continue
 			}
 		}
 		if len(e.lockedObjects) > 0 {
 			batch[string(append([]byte{lockedIndex}, commsuffix...))] = e.lockedObjects
+
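+			// reverse index: for every locked object remember the object that locks it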
+			for locked := range slices.Chunk(e.lockedObjects, oid.Size) {
+				batch[string(append([]byte{lockedByIndex}, locked...))] = commsuffix
+			}
 		}

-		err = object.VerifyHeaderForMetadata(objects[i])
+		err = object.VerifyHeaderForMetadata(h)
 		if err != nil {
-			return fmt.Errorf("invalid %s header: %w", e.oID, err)
+			l.Error("header verification", zap.Stringer("oid", e.oID), zap.Error(err))
+			continue
 		}

-		fillObjectIndex(batch, objects[i])
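+		// the event passed every check, hand it over to the MPT routine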
+		res <- evWithMpt
+
+		fillObjectIndex(batch, h)
 	}

-	err = s.db.PutChangeSet(batch, nil)
-	if err != nil {
-		return fmt.Errorf("put change set to DB: %w", err)
+	return finalErr
+}
+
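+// isOpAllowed checks that objects to be deleted exist and are not locked, and that objects to be locked exist.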
+func isOpAllowed(db storage.Store, e objEvent) error {
+	if len(e.deletedObjects) == 0 && len(e.lockedObjects) == 0 {
+		return nil
+	}
+
+	key := make([]byte, 1+oid.Size)
+
+	for obj := range slices.Chunk(e.deletedObjects, oid.Size) {
+		copy(key[1:], obj)
+
+		// delete object that does not exist
+		key[0] = oidIndex
+		_, err := db.Get(key)
+		if err != nil {
+			if errors.Is(err, storage.ErrKeyNotFound) {
+				return fmt.Errorf("%s object-to-delete is missing", oid.ID(obj))
+			}
+			return fmt.Errorf("%s object-to-delete's presence check: %w", oid.ID(obj), err)
+		}
+
+		// delete object that is locked
+		key[0] = lockedByIndex
+		v, err := db.Get(key)
+		if err != nil {
+			if errors.Is(err, storage.ErrKeyNotFound) {
+				continue
+			}
+			return fmt.Errorf("%s object-to-delete's lock status check: %w", oid.ID(obj), err)
+		}
+		return fmt.Errorf("%s object-to-delete is locked by %s", oid.ID(obj), oid.ID(v))
+	}
+
+	for obj := range slices.Chunk(e.lockedObjects, oid.Size) {
+		copy(key[1:], obj)
+
+		// lock object that does not exist
+		key[0] = oidIndex
+		_, err := db.Get(key)
+		if err != nil {
+			return fmt.Errorf("%s object-to-lock's presence check: %w", oid.ID(obj), err)
+		}
 	}

 	return nil
@@ -192,8 +291,9 @@ func fillObjectIndex(batch map[string][]byte, h objectsdk.Object) {
 	}
 }

-func deleteObjectsOps(dbKV, mptKV map[string][]byte, s storage.Store, objects []byte) error {
+func deleteObjectsOps(dbKV map[string][]byte, s storage.Store, objects []byte) (map[string][]byte, error) {
 	rng := storage.SeekRange{}
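+	// MPT removal pairs are collected separately and returned to the caller, which applies them under the common lock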
+	mptKV := make(map[string][]byte)

 	// nil value means "delete" operation
@@ -271,11 +371,11 @@ func deleteObjectsOps(dbKV, mptKV map[string][]byte, s storage.Store, objects []
 			return true
 		})
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}

-	return nil
+	return mptKV, nil
 }

 // lastObjectKey returns the least possible key in sorted DB list that