Commit 9e7ecf8

HBASE-29288 Avoid adding new blocks during prefetch if usage is greater than accept factor (apache#6965)

Signed-off-by: Tak Lon (Stephen) Wu <[email protected]>

1 parent 0b81dd2 commit 9e7ecf8

7 files changed: +153 −37 lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java

Lines changed: 1 addition & 0 deletions

@@ -285,6 +285,7 @@ public static HFileBlock getBlockForCaching(CacheConfig cacheConf, HFileBlock bl
       .withPrevBlockOffset(block.getPrevBlockOffset()).withByteBuff(buff)
       .withFillHeader(FILL_HEADER).withOffset(block.getOffset()).withNextBlockOnDiskSize(-1)
       .withOnDiskDataSizeWithHeader(block.getOnDiskDataSizeWithHeader() + numBytes)
+      .withNextBlockOnDiskSize(block.getNextBlockOnDiskSize())
       .withHFileContext(cloneContext(block.getHFileContext()))
       .withByteBuffAllocator(cacheConf.getByteBuffAllocator()).withShared(!buff.hasArray()).build();
   }
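Note that the chain above already passes withNextBlockOnDiskSize(-1) as a placeholder a couple of lines earlier; the added line overrides it with the real value from the source block, since in a fluent builder the last call to a setter wins. A minimal, hypothetical sketch of that behavior (not the actual HFileBlock builder):

// Hypothetical fluent builder: each setter overwrites the backing field,
// so an early -1 placeholder is harmless once a later call supplies the real value.
class BlockBuilder {
  private int nextBlockOnDiskSize = -1;

  BlockBuilder withNextBlockOnDiskSize(int size) {
    this.nextBlockOnDiskSize = size; // plain overwrite, no accumulation
    return this;
  }

  int build() {
    return nextBlockOnDiskSize;
  }

  public static void main(String[] args) {
    int size = new BlockBuilder().withNextBlockOnDiskSize(-1)
      .withNextBlockOnDiskSize(4096).build();
    System.out.println(size); // prints 4096, not -1
  }
}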

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java

Lines changed: 31 additions & 13 deletions

@@ -287,8 +287,8 @@ protected enum CacheState {
     "hbase.bucketcache.persistent.file.integrity.check.algorithm";
   private static final String DEFAULT_FILE_VERIFY_ALGORITHM = "MD5";
 
-  public static final String QUEUE_ADDITION_WAIT_TIME = "hbase.bucketcache.queue.addition.waittime";
-  static final long DEFAULT_QUEUE_ADDITION_WAIT_TIME = 0;
+  static final String QUEUE_ADDITION_WAIT_TIME = "hbase.bucketcache.queue.addition.waittime";
+  private static final long DEFAULT_QUEUE_ADDITION_WAIT_TIME = 0;
   private long queueAdditionWaitTime;
   /**
    * Use {@link java.security.MessageDigest} class's encryption algorithms to check persistent file
@@ -586,7 +586,7 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach
     LOG.trace("Caching key={}, item={}", cacheKey, cachedItem);
     // Stuff the entry into the RAM cache so it can get drained to the persistent store
     RAMQueueEntry re = new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(),
-      inMemory, isCachePersistent() && ioEngine instanceof FileIOEngine);
+      inMemory, isCachePersistent() && ioEngine instanceof FileIOEngine, wait);
     /**
      * Don't use ramCache.put(cacheKey, re) here. because there may be a existing entry with same
      * key in ramCache, the heap size of bucket cache need to update if replacing entry from
@@ -1390,8 +1390,8 @@ void doDrain(final List<RAMQueueEntry> entries, ByteBuffer metaBuff) throws Inte
         // transferred with our current IOEngines. Should take care, when we have new kinds of
         // IOEngine in the future.
         metaBuff.clear();
-        BucketEntry bucketEntry =
-          re.writeToCache(ioEngine, bucketAllocator, realCacheSize, this::createRecycler, metaBuff);
+        BucketEntry bucketEntry = re.writeToCache(ioEngine, bucketAllocator, realCacheSize,
+          this::createRecycler, metaBuff, acceptableSize());
         // Successfully added. Up index and add bucketEntry. Clear io exceptions.
         bucketEntries[index] = bucketEntry;
         if (ioErrorStartTime > 0) {
@@ -1412,8 +1412,11 @@ void doDrain(final List<RAMQueueEntry> entries, ByteBuffer metaBuff) throws Inte
         index++;
       } catch (CacheFullException cfe) {
         // Cache full when we tried to add. Try freeing space and then retrying (don't up index)
-        if (!freeInProgress) {
+        if (!freeInProgress && !re.isPrefetch()) {
           freeSpace("Full!");
+        } else if (re.isPrefetch()) {
+          bucketEntries[index] = null;
+          index++;
         } else {
           Thread.sleep(50);
         }
@@ -1467,13 +1470,13 @@ void doDrain(final List<RAMQueueEntry> entries, ByteBuffer metaBuff) throws Inte
             return null;
           });
         }
+        long used = bucketAllocator.getUsedSize();
+        if (!entries.get(i).isPrefetch() && used > acceptableSize()) {
+          LOG.debug("Calling freeSpace for block: {}", entries.get(i).getKey());
+          freeSpace("Used=" + used + " > acceptable=" + acceptableSize());
+        }
       }
 
-      long used = bucketAllocator.getUsedSize();
-      if (used > acceptableSize()) {
-        freeSpace("Used=" + used + " > acceptable=" + acceptableSize());
-      }
-      return;
     }
 
     /**
@@ -1955,13 +1958,16 @@ static class RAMQueueEntry {
     private boolean inMemory;
     private boolean isCachePersistent;
 
+    private boolean isPrefetch;
+
     RAMQueueEntry(BlockCacheKey bck, Cacheable data, long accessCounter, boolean inMemory,
-      boolean isCachePersistent) {
+      boolean isCachePersistent, boolean isPrefetch) {
       this.key = bck;
       this.data = data;
      this.accessCounter = accessCounter;
       this.inMemory = inMemory;
       this.isCachePersistent = isCachePersistent;
+      this.isPrefetch = isPrefetch;
     }
 
     public Cacheable getData() {
@@ -1972,6 +1978,10 @@ public BlockCacheKey getKey() {
       return key;
     }
 
+    public boolean isPrefetch() {
+      return isPrefetch;
+    }
+
     public void access(long accessCounter) {
       this.accessCounter = accessCounter;
     }
@@ -1985,7 +1995,7 @@ private ByteBuffAllocator getByteBuffAllocator() {
 
     public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator alloc,
       final LongAdder realCacheSize, Function<BucketEntry, Recycler> createRecycler,
-      ByteBuffer metaBuff) throws IOException {
+      ByteBuffer metaBuff, final Long acceptableSize) throws IOException {
       int len = data.getSerializedLength();
       // This cacheable thing can't be serialized
       if (len == 0) {
@@ -1996,6 +2006,14 @@ public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator a
         // recovery
       }
       long offset = alloc.allocateBlock(len);
+      // In the case of prefetch, we want to avoid freeSpace runs when the cache is full.
+      // This makes the cache allocation more predictable, and is particularly important
+      // when persistent cache is enabled, as it won't trigger evictions of the recovered blocks,
+      // which are likely the most accessed and relevant blocks in the cache.
+      if (isPrefetch() && alloc.getUsedSize() > acceptableSize) {
+        alloc.freeBlock(offset, len);
+        return null;
+      }
       boolean succ = false;
       BucketEntry bucketEntry = null;
       try {
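Taken together, the BucketCache changes thread a prefetch flag from cacheBlockWithWaitInternal() through RAMQueueEntry into writeToCache(), where the new guard releases the just-made allocation instead of letting a prefetch push the cache past its accept factor. A condensed, self-contained sketch of that guard, using simplified stand-in names (Allocator here is not the real BucketAllocator API):

// Standalone sketch of the commit's core idea: a prefetch write backs out of
// its allocation rather than forcing freeSpace()/evictions when the cache is
// already above the acceptable watermark.
final class PrefetchAwareWriter {
  interface Allocator {
    long allocateBlock(int len);
    void freeBlock(long offset, int len);
    long getUsedSize();
  }

  /** Returns the written block's offset, or -1 when a prefetch write is skipped. */
  static long writeToCache(Allocator alloc, int len, boolean isPrefetch, long acceptableSize) {
    long offset = alloc.allocateBlock(len);
    if (isPrefetch && alloc.getUsedSize() > acceptableSize) {
      alloc.freeBlock(offset, len); // give the space back; skip this block
      return -1;
    }
    // a real implementation would serialize the block to the IOEngine here
    return offset;
  }
}

The doDrain() changes have the same flavor: a CacheFullException on a prefetch entry now just drops the entry and advances the index instead of calling freeSpace(), and the post-drain watermark check runs only for non-prefetch entries.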

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java

Lines changed: 12 additions & 3 deletions

@@ -22,6 +22,8 @@
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -30,6 +32,7 @@
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
@@ -113,11 +116,17 @@ public void testPrefetchPersistence() throws Exception {
 
     // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files
     // should exist.
-
     HRegionServer regionServingRS = cluster.getRegionServer(0);
-
     Admin admin = TEST_UTIL.getAdmin();
-    List<String> cachedFilesList = admin.getCachedFilesList(regionServingRS.getServerName());
+    List<String> cachedFilesList = new ArrayList<>();
+    Waiter.waitFor(conf, 5000, () -> {
+      try {
+        cachedFilesList.addAll(admin.getCachedFilesList(regionServingRS.getServerName()));
+      } catch (IOException e) {
+        // let the test try again
+      }
+      return cachedFilesList.size() > 0;
+    });
     assertEquals(1, cachedFilesList.size());
     for (HStoreFile h : regionServingRS.getRegions().get(0).getStores().get(0).getStorefiles()) {
       assertTrue(cachedFilesList.contains(h.getPath().getName()));
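The rewritten setup replaces a single, racy getCachedFilesList() call with a poll-until-populated loop, tolerating the persister needing a moment beyond its 1000ms interval. The general shape of such a helper, sketched independently of HBase's Waiter utility:

// Generic poll-until helper (a sketch, not the Waiter API): retry a condition
// at a fixed interval until it holds or the deadline passes.
import java.util.concurrent.Callable;

final class PollUntil {
  static boolean waitFor(long timeoutMs, long intervalMs, Callable<Boolean> condition)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (condition.call()) {
        return true;
      }
      Thread.sleep(intervalMs);
    }
    return false;
  }
}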

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java

Lines changed: 4 additions & 4 deletions

@@ -748,8 +748,8 @@ public void testRAMCache() {
       HFileBlock.FILL_HEADER, -1, 52, -1, meta, ByteBuffAllocator.HEAP);
     HFileBlock blk2 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
       HFileBlock.FILL_HEADER, -1, -1, -1, meta, ByteBuffAllocator.HEAP);
-    RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false, false);
-    RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false, false);
+    RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false, false, false);
+    RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false, false, false);
 
     assertFalse(cache.containsKey(key1));
     assertNull(cache.putIfAbsent(key1, re1));
@@ -796,12 +796,12 @@ public void testFreeBlockWhenIOEngineWriteFailure() throws IOException {
     BucketAllocator allocator = new BucketAllocator(availableSpace, null);
 
     BlockCacheKey key = new BlockCacheKey("dummy", 1L);
-    RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true, false);
+    RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true, false, false);
 
     Assert.assertEquals(0, allocator.getUsedSize());
     try {
       re.writeToCache(ioEngine, allocator, null, null,
-        ByteBuffer.allocate(HFileBlock.BLOCK_METADATA_SPACE));
+        ByteBuffer.allocate(HFileBlock.BLOCK_METADATA_SPACE), Long.MAX_VALUE);
       Assert.fail();
     } catch (Exception e) {
     }
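Both updated call sites construct entries with isPrefetch=false and pass Long.MAX_VALUE as the new acceptableSize argument, so the prefetch guard can never fire and the tests keep their old semantics. Restating the guard's condition in isolation (a hypothetical helper mirroring the writeToCache() check):

// The guard only trips when BOTH hold: the entry is a prefetch AND usage is
// past the watermark; Long.MAX_VALUE makes the second condition unreachable.
final class PrefetchGuard {
  static boolean shouldSkip(boolean isPrefetch, long usedSize, long acceptableSize) {
    return isPrefetch && usedSize > acceptableSize;
  }

  public static void main(String[] args) {
    System.out.println(shouldSkip(false, 100L, Long.MAX_VALUE)); // false
    System.out.println(shouldSkip(true, 100L, Long.MAX_VALUE));  // false
    System.out.println(shouldSkip(true, 100L, 50L));             // true: skip the write
  }
}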

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java

Lines changed: 2 additions & 2 deletions

@@ -138,7 +138,7 @@ public void testIOE() throws IOException, InterruptedException {
     RAMQueueEntry rqe = q.remove();
     RAMQueueEntry spiedRqe = Mockito.spy(rqe);
     Mockito.doThrow(new IOException("Mocked!")).when(spiedRqe).writeToCache(Mockito.any(),
-      Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
+      Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
     this.q.add(spiedRqe);
     doDrainOfOneEntry(bc, wt, q);
     assertTrue(bc.blocksByHFile.isEmpty());
@@ -158,7 +158,7 @@ public void testCacheFullException() throws IOException, InterruptedException {
     final CacheFullException cfe = new CacheFullException(0, 0);
     BucketEntry mockedBucketEntry = Mockito.mock(BucketEntry.class);
     Mockito.doThrow(cfe).doReturn(mockedBucketEntry).when(spiedRqe).writeToCache(Mockito.any(),
-      Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
+      Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
     this.q.add(spiedRqe);
     doDrainOfOneEntry(bc, wt, q);
   }

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java renamed to hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchWithBucketCache.java

Lines changed: 102 additions & 14 deletions

@@ -15,12 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.io.hfile;
+package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.hfile.BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY;
+import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.QUEUE_ADDITION_WAIT_TIME;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -30,6 +32,7 @@
 import java.io.IOException;
 import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
@@ -48,8 +51,19 @@
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
-import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
-import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor;
+import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -202,40 +216,48 @@ public void testPrefetchInterruptOnCapacity() throws Exception {
     conf.setLong(BUCKET_CACHE_SIZE_KEY, 1);
     conf.set(BUCKET_CACHE_BUCKETS_KEY, "3072");
     conf.setDouble("hbase.bucketcache.acceptfactor", 0.98);
-    conf.setDouble("hbase.bucketcache.minfactor", 0.95);
-    conf.setDouble("hbase.bucketcache.extrafreefactor", 0.01);
+    conf.setDouble("hbase.bucketcache.minfactor", 0.98);
+    conf.setDouble("hbase.bucketcache.extrafreefactor", 0.0);
+    conf.setLong(QUEUE_ADDITION_WAIT_TIME, 100);
     blockCache = BlockCacheFactory.createBlockCache(conf);
     cacheConf = new CacheConfig(conf, blockCache);
     Path storeFile = writeStoreFile("testPrefetchInterruptOnCapacity", 10000);
     // Prefetches the file blocks
     LOG.debug("First read should prefetch the blocks.");
     createReaderAndWaitForPrefetchInterruption(storeFile);
+    Waiter.waitFor(conf, (PrefetchExecutor.getPrefetchDelay() + 1000),
+      () -> PrefetchExecutor.isCompleted(storeFile));
     BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
-    long evictionsFirstPrefetch = bc.getStats().getEvictionCount();
-    LOG.debug("evictions after first prefetch: {}", bc.getStats().getEvictionCount());
+    long evictedFirstPrefetch = bc.getStats().getEvictedCount();
     HFile.Reader reader = createReaderAndWaitForPrefetchInterruption(storeFile);
-    LOG.debug("evictions after second prefetch: {}", bc.getStats().getEvictionCount());
-    assertTrue((bc.getStats().getEvictionCount() - evictionsFirstPrefetch) < 10);
+    assertEquals(evictedFirstPrefetch, bc.getStats().getEvictedCount());
     HFileScanner scanner = reader.getScanner(conf, true, true);
     scanner.seekTo();
     while (scanner.next()) {
       // do a full scan to force some evictions
       LOG.trace("Iterating the full scan to evict some blocks");
     }
     scanner.close();
-    LOG.debug("evictions after scanner: {}", bc.getStats().getEvictionCount());
+    Waiter.waitFor(conf, 5000, () -> {
+      for (BlockingQueue<BucketCache.RAMQueueEntry> queue : bc.writerQueues) {
+        if (!queue.isEmpty()) {
+          return false;
+        }
+      }
+      return true;
+    });
     // The scanner should have triggered at least 3x evictions from the prefetch,
     // as we try to cache each block without interruption.
-    assertTrue(bc.getStats().getEvictionCount() > evictionsFirstPrefetch);
+    assertTrue(bc.getStats().getEvictedCount() > evictedFirstPrefetch);
   }
 
   @Test
   public void testPrefetchDoesntInterruptInMemoryOnCapacity() throws Exception {
     conf.setLong(BUCKET_CACHE_SIZE_KEY, 1);
     conf.set(BUCKET_CACHE_BUCKETS_KEY, "3072");
     conf.setDouble("hbase.bucketcache.acceptfactor", 0.98);
-    conf.setDouble("hbase.bucketcache.minfactor", 0.95);
-    conf.setDouble("hbase.bucketcache.extrafreefactor", 0.01);
+    conf.setDouble("hbase.bucketcache.minfactor", 0.98);
+    conf.setDouble("hbase.bucketcache.extrafreefactor", 0.0);
     blockCache = BlockCacheFactory.createBlockCache(conf);
     ColumnFamilyDescriptor family =
       ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).setInMemory(true).build();
@@ -245,7 +267,73 @@ public void testPrefetchDoesntInterruptInMemoryOnCapacity() throws Exception {
     LOG.debug("First read should prefetch the blocks.");
     createReaderAndWaitForPrefetchInterruption(storeFile);
     BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
-    assertTrue(bc.getStats().getEvictedCount() > 200);
+    Waiter.waitFor(conf, 1000, () -> PrefetchExecutor.isCompleted(storeFile));
+    long evictions = bc.getStats().getEvictedCount();
+    LOG.debug("Total evicted at this point: {}", evictions);
+    // creates another reader, now that cache is full, no block would fit and prefetch should not
+    // trigger any new evictions
+    createReaderAndWaitForPrefetchInterruption(storeFile);
+    assertEquals(evictions, bc.getStats().getEvictedCount());
+  }
+
+  @Test
+  public void testPrefetchRunNoEvictions() throws Exception {
+    conf.setLong(BUCKET_CACHE_SIZE_KEY, 1);
+    conf.set(BUCKET_CACHE_BUCKETS_KEY, "3072");
+    conf.setDouble("hbase.bucketcache.acceptfactor", 0.98);
+    conf.setDouble("hbase.bucketcache.minfactor", 0.98);
+    conf.setDouble("hbase.bucketcache.extrafreefactor", 0.0);
+    conf.setLong(QUEUE_ADDITION_WAIT_TIME, 100);
+    blockCache = BlockCacheFactory.createBlockCache(conf);
+    cacheConf = new CacheConfig(conf, blockCache);
+    Path storeFile = writeStoreFile("testPrefetchRunNoEvictions", 10000);
+    // Prefetches the file blocks
+    createReaderAndWaitForPrefetchInterruption(storeFile);
+    Waiter.waitFor(conf, (PrefetchExecutor.getPrefetchDelay() + 1000),
+      () -> PrefetchExecutor.isCompleted(storeFile));
+    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+    // Wait until all cache writer queues are empty
+    Waiter.waitFor(conf, 5000, () -> {
+      for (BlockingQueue<BucketCache.RAMQueueEntry> queue : bc.writerQueues) {
+        if (!queue.isEmpty()) {
+          return false;
+        }
+      }
+      return true;
+    });
+    // With the wait time configuration, prefetch should trigger no evictions once it reaches
+    // cache capacity
+    assertEquals(0, bc.getStats().getEvictedCount());
+  }
+
+  @Test
+  public void testPrefetchRunTriggersEvictions() throws Exception {
+    conf.setLong(BUCKET_CACHE_SIZE_KEY, 1);
+    conf.set(BUCKET_CACHE_BUCKETS_KEY, "3072");
+    conf.setDouble("hbase.bucketcache.acceptfactor", 0.98);
+    conf.setDouble("hbase.bucketcache.minfactor", 0.98);
+    conf.setDouble("hbase.bucketcache.extrafreefactor", 0.0);
+    conf.setLong(QUEUE_ADDITION_WAIT_TIME, 0);
+    blockCache = BlockCacheFactory.createBlockCache(conf);
+    cacheConf = new CacheConfig(conf, blockCache);
+    Path storeFile = writeStoreFile("testPrefetchInterruptOnCapacity", 10000);
+    // Prefetches the file blocks
+    createReaderAndWaitForPrefetchInterruption(storeFile);
+    Waiter.waitFor(conf, (PrefetchExecutor.getPrefetchDelay() + 1000),
+      () -> PrefetchExecutor.isCompleted(storeFile));
+    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+    // Wait until all cache writer queues are empty
+    Waiter.waitFor(conf, 5000, () -> {
+      for (BlockingQueue<BucketCache.RAMQueueEntry> queue : bc.writerQueues) {
+        if (!queue.isEmpty()) {
+          return false;
+        }
+      }
+      return true;
+    });
+    // With no wait time configured, prefetch writes keep pressing a full cache and should
+    // trigger evictions once it reaches capacity
+    assertNotEquals(0, bc.getStats().getEvictedCount());
   }
 
   @Test
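For scale, these tests run a 1 MB bucket cache with an accept factor of 0.98, so the watermark the new guard compares against is roughly 1,027,604 bytes, assuming acceptableSize is computed as capacity times the accept factor (the usual accept-factor semantics; the exact formula lives in BucketCache and is not shown in this diff). A quick check of that arithmetic:

// Back-of-the-envelope watermark for the test configuration (assumed formula:
// acceptableSize = capacity * acceptFactor).
public class AcceptFactorMath {
  public static void main(String[] args) {
    long capacityBytes = 1L * 1024 * 1024; // BUCKET_CACHE_SIZE_KEY = 1 (MB)
    double acceptFactor = 0.98;            // hbase.bucketcache.acceptfactor
    long acceptableSize = (long) (capacityBytes * acceptFactor);
    System.out.println("acceptableSize = " + acceptableSize + " bytes"); // 1027604
  }
}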

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java

Lines changed: 1 addition & 1 deletion

@@ -90,7 +90,7 @@ public void testAtomicRAMCache() throws Exception {
     MockHFileBlock blk = new MockHFileBlock(BlockType.DATA, size, size, -1,
       ByteBuffer.wrap(byteArr, 0, size), HFileBlock.FILL_HEADER, -1, 52, -1,
       new HFileContextBuilder().build(), ByteBuffAllocator.HEAP);
-    RAMQueueEntry re = new RAMQueueEntry(key, blk, 1, false, false);
+    RAMQueueEntry re = new RAMQueueEntry(key, blk, 1, false, false, false);
 
     Assert.assertNull(cache.putIfAbsent(key, re));
     Assert.assertEquals(cache.putIfAbsent(key, re), re);
