Skip to content

Commit 2ff7889

Browse files
add consumer code to account for the bad scenario, allowing consumers to transition out of bad blobs
1 parent 400e645 commit 2ff7889

File tree

1 file changed

+16
-3
lines changed

1 file changed

+16
-3
lines changed

hollow/src/main/java/com/netflix/hollow/core/read/engine/map/HollowMapDeltaApplicator.java

+16-3
Original file line numberDiff line numberDiff line change
@@ -61,9 +61,14 @@ public void applyDelta() {
6161
target.bitsPerMapPointer = delta.bitsPerMapPointer;
6262
target.bitsPerMapSizeValue = delta.bitsPerMapSizeValue;
6363
target.bitsPerKeyElement = delta.bitsPerKeyElement;
64-
target.bitsPerValueElement = delta.bitsPerValueElement;
64+
// Prior to Jan 2025, the producer was able to generate blobs where it reserved 0 bits for the
65+
// Map value element when all keys stored records referencing ordinal 0. In this case, when reading
66+
// the value of the last bucket, the code would trigger ArrayIndexOutOfBoundsException since there
67+
// is no space reserved for the value element. This is a workaround to avoid the exception when
68+
// transitioning from one of these bad blobs.
69+
target.bitsPerValueElement = delta.bitsPerValueElement == 0 ? 1 : delta.bitsPerValueElement;
6570
target.bitsPerFixedLengthMapPortion = delta.bitsPerFixedLengthMapPortion;
66-
target.bitsPerMapEntry = delta.bitsPerMapEntry;
71+
target.bitsPerMapEntry = target.bitsPerKeyElement + target.bitsPerValueElement;
6772
target.emptyBucketKeyValue = delta.emptyBucketKeyValue;
6873
target.totalNumberOfBuckets = delta.totalNumberOfBuckets;
6974

@@ -143,7 +148,15 @@ private void mergeOrdinal(int ordinal) {
143148
if(!removeData) {
144149
for(long bucketIdx=currentFromStateStartBucket; bucketIdx<fromDataEndBucket; bucketIdx++) {
145150
long bucketKey = from.entryData.getElementValue(bucketIdx * from.bitsPerMapEntry, from.bitsPerKeyElement);
146-
long bucketValue = from.entryData.getElementValue(bucketIdx * from.bitsPerMapEntry + from.bitsPerKeyElement, from.bitsPerValueElement);
151+
// Prior to Jan 2025, the producer was able to generate blobs where it reserved 0 bits for the
152+
// Map value element when all keys stored records referencing ordinal 0. In this case, when reading
153+
// the value of the last bucket, the code would trigger ArrayIndexOutOfBoundsException since there
154+
// is no space reserved for the value element. This is a workaround to avoid the exception when
155+
// transitioning from one of these bad blobs.
156+
long bucketValue =
157+
from.bitsPerValueElement == 0
158+
? 0
159+
: from.entryData.getElementValue(bucketIdx * from.bitsPerMapEntry + from.bitsPerKeyElement, from.bitsPerValueElement);
147160
if(bucketKey == from.emptyBucketKeyValue)
148161
bucketKey = target.emptyBucketKeyValue;
149162
long currentWriteStartBucketBit = currentWriteStartBucket * target.bitsPerMapEntry;

0 commit comments

Comments
 (0)