
Commit 9376260

gregns1 and torcolvin authored

[3.2.5 backport] CBG-4620: skip assigning nil body attachments to the _attachments property if _attachments is used as a JSON value (#7511)

Co-authored-by: Tor Colvin <[email protected]>

1 parent 19065ee · commit 9376260
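
Note: the guard added by this commit addresses a false positive in the raw-bytes pre-check. A minimal standalone sketch of that false positive follows; it is not Sync Gateway code, and the BodyAttachments constant and the plain map[string]interface{} body are local stand-ins for the real types. A document that only uses "_attachments" as a JSON value still matches the byte scan, but after unmarshalling there is no top-level "_attachments" key, which is exactly what the new body[BodyAttachments] != nil check verifies.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

// BodyAttachments stands in for Sync Gateway's constant for the "_attachments" property.
const BodyAttachments = "_attachments"

func main() {
    // "_attachments" appears only as a JSON value, not as a top-level key.
    bodyBytes := []byte(`{"data": "_attachments"}`)

    // The cheap pre-check scans the raw bytes, so it reports true here...
    fmt.Println(bytes.Contains(bodyBytes, []byte(BodyAttachments))) // true

    // ...but once the body is unmarshalled there is no top-level "_attachments"
    // key, which is what the added body[BodyAttachments] != nil guard checks.
    var body map[string]interface{}
    if err := json.Unmarshal(bodyBytes, &body); err != nil {
        panic(err)
    }
    fmt.Println(body[BodyAttachments] != nil) // false
}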

File tree

3 files changed: +225 −75 lines changed

db/blip_handler.go

Lines changed: 79 additions & 75 deletions
@@ -1123,96 +1123,100 @@ func (bh *blipHandler) processRev(rq *blip.Message, stats *processRevStats) (err
     // Pull out attachments
     if injectedAttachmentsForDelta || bytes.Contains(bodyBytes, []byte(BodyAttachments)) {
         body := newDoc.Body(bh.loggingCtx)
-
-        var currentBucketDoc *Document
-
-        // Look at attachments with revpos > the last common ancestor's
-        minRevpos := 1
-        if len(history) > 0 {
-            currentDoc, rawDoc, err := bh.collection.GetDocumentWithRaw(bh.loggingCtx, docID, DocUnmarshalSync)
-            // If we're able to obtain current doc data then we should use the common ancestor generation++ for min revpos
-            // as we will already have any attachments on the common ancestor so don't need to ask for them.
-            // Otherwise we'll have to go as far back as we can in the doc history and choose the last entry in there.
-            if err == nil {
-                commonAncestor := currentDoc.History.findAncestorFromSet(currentDoc.CurrentRev, history)
-                minRevpos, _ = ParseRevID(bh.loggingCtx, commonAncestor)
-                minRevpos++
-                rawBucketDoc = rawDoc
-                currentBucketDoc = currentDoc
-            } else {
-                minRevpos, _ = ParseRevID(bh.loggingCtx, history[len(history)-1])
-            }
-        }
-
-        // currentDigests is a map from attachment name to the current bucket doc digest,
-        // for any attachments on the incoming document that are also on the current bucket doc
-        var currentDigests map[string]string
-
-        // Do we have a previous doc? If not don't need to do this check
-        if currentBucketDoc != nil {
-            bodyAtts := GetBodyAttachments(body)
-            currentDigests = make(map[string]string, len(bodyAtts))
-            for name, value := range bodyAtts {
-                // Check if we have this attachment name already, if we do, continue check
-                currentAttachment, ok := currentBucketDoc.Attachments[name]
-                if !ok {
-                    // If we don't have this attachment already, ensure incoming revpos is greater than minRevPos, otherwise
-                    // update to ensure it's fetched and uploaded
-                    bodyAtts[name].(map[string]interface{})["revpos"], _ = ParseRevID(bh.loggingCtx, revID)
-                    continue
-                }
-
-                currentAttachmentMeta, ok := currentAttachment.(map[string]interface{})
-                if !ok {
-                    return base.HTTPErrorf(http.StatusInternalServerError, "Current attachment data is invalid")
-                }
-
-                currentAttachmentDigest, ok := currentAttachmentMeta["digest"].(string)
-                if !ok {
-                    return base.HTTPErrorf(http.StatusInternalServerError, "Current attachment data is invalid")
-                }
-                currentDigests[name] = currentAttachmentDigest
-
-                incomingAttachmentMeta, ok := value.(map[string]interface{})
-                if !ok {
-                    return base.HTTPErrorf(http.StatusBadRequest, "Invalid attachment")
-                }
-
-                // If this attachment has data then we're fine, this isn't a stub attachment and therefore doesn't
-                // need the check.
-                if incomingAttachmentMeta["data"] != nil {
-                    continue
-                }
-
-                incomingAttachmentDigest, ok := incomingAttachmentMeta["digest"].(string)
-                if !ok {
-                    return base.HTTPErrorf(http.StatusBadRequest, "Invalid attachment")
-                }
-
-                incomingAttachmentRevpos, ok := base.ToInt64(incomingAttachmentMeta["revpos"])
-                if !ok {
-                    return base.HTTPErrorf(http.StatusBadRequest, "Invalid attachment")
-                }
-
-                // Compare the revpos and attachment digest. If incoming revpos is less than or equal to minRevPos and
-                // digest is different we need to override the revpos and set it to the current revision to ensure
-                // the attachment is requested and stored
-                if int(incomingAttachmentRevpos) <= minRevpos && currentAttachmentDigest != incomingAttachmentDigest {
-                    bodyAtts[name].(map[string]interface{})["revpos"], _ = ParseRevID(bh.loggingCtx, revID)
-                }
-            }
-
-            body[BodyAttachments] = bodyAtts
-        }
-
-        if err := bh.downloadOrVerifyAttachments(rq.Sender, body, minRevpos, docID, currentDigests); err != nil {
-            base.ErrorfCtx(bh.loggingCtx, "Error during downloadOrVerifyAttachments for doc %s/%s: %v", base.UD(docID), revID, err)
-            return err
-        }
-
-        newDoc.DocAttachments = GetBodyAttachments(body)
-        delete(body, BodyAttachments)
-        newDoc.UpdateBody(body)
+
+        // The bytes.Contains([]byte(BodyAttachments)) check will pass even if _attachments is not a toplevel key but rather a nested key or subkey. That check is an optimization to avoid having to unmarshal the document if there are no attachments. Therefore, check again that the unmarshalled body contains BodyAttachments.
+        if body[BodyAttachments] != nil {
+
+            var currentBucketDoc *Document
+
+            // Look at attachments with revpos > the last common ancestor's
+            minRevpos := 1
+            if len(history) > 0 {
+                currentDoc, rawDoc, err := bh.collection.GetDocumentWithRaw(bh.loggingCtx, docID, DocUnmarshalSync)
+                // If we're able to obtain current doc data then we should use the common ancestor generation++ for min revpos
+                // as we will already have any attachments on the common ancestor so don't need to ask for them.
+                // Otherwise we'll have to go as far back as we can in the doc history and choose the last entry in there.
+                if err == nil {
+                    commonAncestor := currentDoc.History.findAncestorFromSet(currentDoc.CurrentRev, history)
+                    minRevpos, _ = ParseRevID(bh.loggingCtx, commonAncestor)
+                    minRevpos++
+                    rawBucketDoc = rawDoc
+                    currentBucketDoc = currentDoc
+                } else {
+                    minRevpos, _ = ParseRevID(bh.loggingCtx, history[len(history)-1])
+                }
+            }
+
+            // currentDigests is a map from attachment name to the current bucket doc digest,
+            // for any attachments on the incoming document that are also on the current bucket doc
+            var currentDigests map[string]string
+
+            // Do we have a previous doc? If not don't need to do this check
+            if currentBucketDoc != nil {
+                bodyAtts := GetBodyAttachments(body)
+                currentDigests = make(map[string]string, len(bodyAtts))
+                for name, value := range bodyAtts {
+                    // Check if we have this attachment name already, if we do, continue check
+                    currentAttachment, ok := currentBucketDoc.Attachments[name]
+                    if !ok {
+                        // If we don't have this attachment already, ensure incoming revpos is greater than minRevPos, otherwise
+                        // update to ensure it's fetched and uploaded
+                        bodyAtts[name].(map[string]interface{})["revpos"], _ = ParseRevID(bh.loggingCtx, revID)
+                        continue
+                    }
+
+                    currentAttachmentMeta, ok := currentAttachment.(map[string]interface{})
+                    if !ok {
+                        return base.HTTPErrorf(http.StatusInternalServerError, "Current attachment data is invalid")
+                    }
+
+                    currentAttachmentDigest, ok := currentAttachmentMeta["digest"].(string)
+                    if !ok {
+                        return base.HTTPErrorf(http.StatusInternalServerError, "Current attachment data is invalid")
+                    }
+                    currentDigests[name] = currentAttachmentDigest
+
+                    incomingAttachmentMeta, ok := value.(map[string]interface{})
+                    if !ok {
+                        return base.HTTPErrorf(http.StatusBadRequest, "Invalid attachment")
+                    }
+
+                    // If this attachment has data then we're fine, this isn't a stub attachment and therefore doesn't
+                    // need the check.
+                    if incomingAttachmentMeta["data"] != nil {
+                        continue
+                    }
+
+                    incomingAttachmentDigest, ok := incomingAttachmentMeta["digest"].(string)
+                    if !ok {
+                        return base.HTTPErrorf(http.StatusBadRequest, "Invalid attachment")
+                    }
+
+                    incomingAttachmentRevpos, ok := base.ToInt64(incomingAttachmentMeta["revpos"])
+                    if !ok {
+                        return base.HTTPErrorf(http.StatusBadRequest, "Invalid attachment")
+                    }
+
+                    // Compare the revpos and attachment digest. If incoming revpos is less than or equal to minRevPos and
+                    // digest is different we need to override the revpos and set it to the current revision to ensure
+                    // the attachment is requested and stored
+                    if int(incomingAttachmentRevpos) <= minRevpos && currentAttachmentDigest != incomingAttachmentDigest {
+                        bodyAtts[name].(map[string]interface{})["revpos"], _ = ParseRevID(bh.loggingCtx, revID)
+                    }
+                }
+
+                body[BodyAttachments] = bodyAtts
+            }
+
+            if err := bh.downloadOrVerifyAttachments(rq.Sender, body, minRevpos, docID, currentDigests); err != nil {
+                base.ErrorfCtx(bh.loggingCtx, "Error during downloadOrVerifyAttachments for doc %s/%s: %v", base.UD(docID), revID, err)
+                return err
+            }
+
+            newDoc.DocAttachments = GetBodyAttachments(body)
+            delete(body, BodyAttachments)
+            newDoc.UpdateBody(body)
+        }
     }
 
     if rawBucketDoc == nil && bh.collectionCtx.checkPendingInsertion(docID) {
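
Note: the "assigning nil body attachments" named in the commit title comes from the removed write-back above. When a current bucket doc exists, the old code always wrote the result of GetBodyAttachments back into the body. The sketch below is a rough, standalone illustration of that effect; getBodyAttachments here is a local stand-in written for this note, under the assumption that the real db.GetBodyAttachments helper returns nil when there is no top-level attachment map. Under that assumption, the unconditional write-back turns a missing attachment map into an explicit "_attachments": null property.

package main

import (
    "encoding/json"
    "fmt"
)

// getBodyAttachments approximates the GetBodyAttachments helper used in the diff:
// it returns the top-level "_attachments" map, or nil if it is absent or not a map.
// (Assumption for illustration only; the real helper lives in Sync Gateway's db package.)
func getBodyAttachments(body map[string]interface{}) map[string]interface{} {
    atts, _ := body["_attachments"].(map[string]interface{})
    return atts
}

func main() {
    // A pushed revision onto a pre-existing doc where "_attachments" only appears as a nested value.
    body := map[string]interface{}{
        "parent": map[string]interface{}{"_attachments": "data"},
    }

    // Old code path: the helper result was written back unconditionally, so a
    // missing attachment map became a literal "_attachments": null property.
    body["_attachments"] = getBodyAttachments(body)

    out, _ := json.Marshal(body)
    fmt.Println(string(out)) // {"_attachments":null,"parent":{"_attachments":"data"}}
}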

rest/blip_api_attachment_test.go

Lines changed: 49 additions & 0 deletions
@@ -700,3 +700,52 @@ func TestBlipLegacyAttachDocUpdate(t *testing.T) {
         }
     })
 }
+
+func TestPushDocWithNonRootAttachmentProperty(t *testing.T) {
+    base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll)
+    rtConfig := &RestTesterConfig{
+        GuestEnabled: true,
+    }
+
+    btcRunner := NewBlipTesterClientRunner(t)
+
+    doc1ID := t.Name() + "doc1"
+    doc2ID := t.Name() + "doc2"
+    doc3ID := t.Name() + "doc3"
+    doc4ID := t.Name() + "doc4"
+
+    btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) {
+        rt := NewRestTester(t, rtConfig)
+        defer rt.Close()
+
+        opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols}
+        btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts)
+        defer btc.Close()
+
+        testcases := []struct {
+            initialBody []byte
+            bodyUpdate  []byte
+            docID       string
+        }{
+            {docID: doc1ID, initialBody: []byte(`{"data": "_attachments"}`), bodyUpdate: []byte(`{"data1": "_attachments"}`)},
+            {docID: doc2ID, initialBody: []byte(`{"data": {"textfield": "_attachments"}}`), bodyUpdate: []byte(`{"data": {"textfield": "_attachments"}}`)},
+            {docID: doc3ID, initialBody: []byte(`{"data": {"data": {"textfield": "_attachments"}}}`), bodyUpdate: []byte(`{"data1": {"data": {"textfield": "_attachments"}}}`)},
+            {docID: doc4ID, initialBody: []byte(`{"parent": { "_attachments": "data" }}`), bodyUpdate: []byte(`{"parent": { "_attachments": "data1" }}`)},
+        }
+        for _, tc := range testcases {
+            // add rev with _attachments property as value in json
+            // pushing initial rev with _attachments in value on the json will work fine as there is different code path
+            // for when the doc is new to SGW and when you are pushing new data onto pre-existing doc as SGW will scan
+            // parent doc for attachment keys too, this is where the issue arose of assigning nil to _attachments key in the body
+            docVersion, err := btcRunner.PushRev(btc.id, tc.docID, EmptyDocVersion(), tc.initialBody)
+            require.NoError(t, err)
+            require.NoError(t, rt.WaitForVersion(tc.docID, docVersion))
+
+            // add rev2 for each doc and wait to be replicated to SGW
+            docVersion, err = btcRunner.PushRev(btc.id, tc.docID, docVersion, tc.bodyUpdate)
+            require.NoError(t, err)
+            require.NoError(t, rt.WaitForVersion(tc.docID, docVersion))
+        }
+    })
+
+}
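
Note: what the four fixtures above have in common is that every body trips the raw-bytes "_attachments" pre-check while none of them carries a real top-level attachment map, so only the second push (onto the now pre-existing doc) exercised the buggy write-back. A small standalone sketch over the same JSON bodies, in plain Go without the test harness, makes the distinction explicit:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

func main() {
    bodies := [][]byte{
        []byte(`{"data": "_attachments"}`),
        []byte(`{"data": {"textfield": "_attachments"}}`),
        []byte(`{"data": {"data": {"textfield": "_attachments"}}}`),
        []byte(`{"parent": { "_attachments": "data" }}`),
    }
    for i, raw := range bodies {
        var body map[string]interface{}
        if err := json.Unmarshal(raw, &body); err != nil {
            panic(err)
        }
        // Does the raw body merely contain the substring, and does it actually
        // have a top-level "_attachments" map after unmarshalling?
        _, topLevel := body["_attachments"].(map[string]interface{})
        fmt.Printf("doc%d: substring match=%v, top-level attachment map=%v\n",
            i+1, bytes.Contains(raw, []byte("_attachments")), topLevel)
    }
    // All four cases print: substring match=true, top-level attachment map=false
}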

rest/blip_api_delta_sync_test.go

Lines changed: 97 additions & 0 deletions
@@ -90,6 +90,103 @@ func TestBlipDeltaSyncPushAttachment(t *testing.T) {
     })
 }
 
+// TestDeltaWithAttachmentJsonProperty tests pushing a delta when _attachments is present in either delta or existing doc
+func TestDeltaWithAttachmentJsonProperty(t *testing.T) {
+    base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll)
+
+    if !base.IsEnterpriseEdition() {
+        t.Skip("Delta test requires EE")
+    }
+    rtConfig := &RestTesterConfig{
+        DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{
+            DeltaSync: &DeltaSyncConfig{
+                Enabled: base.Ptr(true),
+            },
+        }},
+        GuestEnabled: true,
+    }
+
+    doc1ID := t.Name() + "_doc1"
+    doc2ID := t.Name() + "_doc2"
+    doc3ID := t.Name() + "_doc3"
+    doc4ID := t.Name() + "_doc4"
+
+    btcRunner := NewBlipTesterClientRunner(t)
+    btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) {
+        rt := NewRestTester(t, rtConfig)
+        defer rt.Close()
+
+        opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols}
+        btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts)
+        defer btc.Close()
+
+        collection, ctx := rt.GetSingleTestDatabaseCollection()
+
+        attData := base64.StdEncoding.EncodeToString([]byte("attach"))
+
+        testcases := []struct {
+            initialBody   []byte
+            bodyUpdate    []byte
+            expBody       []byte
+            docID         string
+            hasAttachment bool
+        }{
+            // test case: pushing delta with key update onto doc with _attachment in json value
+            {
+                docID:         doc1ID,
+                initialBody:   []byte(`{"data":"_attachments"}`),
+                bodyUpdate:    []byte(`{"data1":"_attachments"}`),
+                hasAttachment: false,
+            },
+            {
+                // test case: pushing delta with key update onto doc with _attachment in json value and attachment defined
+                docID:         doc2ID,
+                initialBody:   []byte(`{"key":"_attachments","_attachments":{"myAttachment":{"data":"` + attData + `"}}}`),
+                bodyUpdate:    []byte(`{"key1":"_attachments","_attachments":{"myAttachment":{"data":"` + attData + `"}}}`),
+                hasAttachment: true,
+            },
+            {
+                // test case: pushing delta with attachment defined onto doc with _attachment in json value
+                docID:         doc3ID,
+                initialBody:   []byte(`{"key":"_attachments"}`),
+                bodyUpdate:    []byte(`{"key":"_attachments","_attachments":{"myAttachment":{"data":"` + attData + `"}}}`),
+                hasAttachment: true,
+            },
+            {
+                // test case: pushing delta with _attachment json value onto doc with attachment defined
+                docID:         doc4ID,
+                initialBody:   []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"` + attData + `"}}}`),
+                bodyUpdate:    []byte(`{"key":"_attachments","_attachments":{"myAttachment":{"data":"` + attData + `"}}}`),
+                hasAttachment: true,
+            },
+        }
+        for _, tc := range testcases {
+
+            // Push first rev
+            version, err := btcRunner.PushRev(btc.id, tc.docID, EmptyDocVersion(), tc.initialBody)
+            require.NoError(t, err)
+            require.NoError(t, rt.WaitForVersion(tc.docID, version))
+
+            btc.ClientDeltas = true
+
+            // Push second rev
+            version, err = btcRunner.PushRev(btc.id, tc.docID, version, tc.bodyUpdate)
+            require.NoError(t, err)
+            require.NoError(t, rt.WaitForVersion(tc.docID, version))
+
+            if tc.hasAttachment {
+                syncData, err := collection.GetDocSyncData(ctx, tc.docID)
+                require.NoError(t, err)
+                assert.Len(t, syncData.Attachments, 1)
+                _, found := syncData.Attachments["myAttachment"]
+                assert.True(t, found)
+            }
+
+            btc.ClientDeltas = false
+        }
+    })
+}
+
 // Test pushing and pulling new attachments through delta sync
 // 1. Create test client that have deltas enabled
 // 2. Start continuous push and pull replication in client
0 commit comments