
Commit 67cf00e

Merge pull request #22116 from LinHu2016/defrag-offheap-1.5
Decouple spine from array reserved regions
2 parents 34ccda4 + 240e42d

21 files changed (+597 -240 lines)

runtime/gc_base/IndexableObjectAllocationModel.cpp (23 additions & 21 deletions)
@@ -27,7 +27,7 @@
 #include "MemorySpace.hpp"
 #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) || defined(J9VM_GC_ENABLE_DOUBLE_MAP)
 #include "ArrayletLeafIterator.hpp"
-#include "HeapRegionManager.hpp"
+#include "HeapRegionManagerVLHGC.hpp"
 #include "HeapRegionDescriptorVLHGC.hpp"
 #include "Heap.hpp"
 #include "SparseVirtualMemory.hpp"
@@ -334,54 +334,49 @@ MM_IndexableObjectAllocationModel::layoutDiscontiguousArraylet(MM_EnvironmentBas
 
 #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
 MMINLINE J9IndexableObject *
-MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_EnvironmentBase *env, J9IndexableObject *spine)
+MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_EnvironmentBase *envBase, J9IndexableObject *spine)
 {
 	Assert_MM_true(_numberOfArraylets == _allocateDescription.getNumArraylets());
 
-	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
+	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(envBase);
 	GC_ArrayObjectModel *indexableObjectModel = &extensions->indexableObjectModel;
-	const UDATA arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
+	const uintptr_t regionSize = extensions->heapRegionManager->getRegionSize();
+
 	uintptr_t byteAmount = 0;
-	UDATA arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, _dataSize) / arrayletLeafSize;
+	UDATA arrayReservedRegionCount = MM_Math::roundToCeiling(regionSize, _dataSize) / regionSize;
 
 	/* Determine how many bytes to allocate outside of the spine (in arraylet leaves). */
 	Assert_MM_true(_allocateDescription.getBytesRequested() >= _allocateDescription.getContiguousBytes());
 	uintptr_t bytesRemaining = _allocateDescription.getBytesRequested() - _allocateDescription.getContiguousBytes();
 
 	/* Allocate leaf for each arraylet and attach it to its leaf pointer in the spine. */
 	uintptr_t arrayoidIndex = 0;
-	Trc_MM_getSparseAddressAndDecommitLeaves_Entry(env->getLanguageVMThread(), spine, (void *)bytesRemaining, arrayletLeafCount, (void *)arrayletLeafSize);
+	Trc_MM_getSparseAddressAndDecommitLeaves_Entry(envBase->getLanguageVMThread(), spine, (void *)bytesRemaining, arrayReservedRegionCount, (void *)regionSize);
 	while (0 < bytesRemaining) {
 		/* Allocate the next arraylet leaf - leaves are allocated solely for the purpose of
 		   decommitting the memory later on in this function. */
-		void *leaf = env->_objectAllocationInterface->allocateArrayletLeaf(
-				env, &_allocateDescription, _allocateDescription.getMemorySpace(), true);
+		void *leaf = envBase->_objectAllocationInterface->allocateArrayletLeaf(
+				envBase, &_allocateDescription, _allocateDescription.getMemorySpace(), true);
 
 		/* If leaf allocation failed set the result to NULL and return. */
 		if (NULL == leaf) {
-			/* Spine and preceding arraylets are now floating garbage. */
-			Trc_MM_allocateAndConnectNonContiguousArraylet_leafFailure(env->getLanguageVMThread());
+			Trc_MM_allocateAndConnectNonContiguousArraylet_leafFailure(envBase->getLanguageVMThread());
			_allocateDescription.setSpine(NULL);
 			spine = NULL;
 			break;
 		}
 
-		if (0 == arrayoidIndex) {
-			MM_HeapRegionDescriptorVLHGC *firstLeafRegionDescriptor = (MM_HeapRegionDescriptorVLHGC *)extensions->getHeap()->getHeapRegionManager()->tableDescriptorForAddress(leaf);
-			firstLeafRegionDescriptor->_sparseHeapAllocation = true;
-		}
-
 		/* Disable region for reads and writes, since that'll be done through the contiguous double mapped region */
-		void *highAddress = (void *)((uintptr_t)leaf + arrayletLeafSize);
-		bool ret = extensions->heap->decommitMemory(leaf, arrayletLeafSize, leaf, highAddress);
+		void *highAddress = (void *)((uintptr_t)leaf + regionSize);
+		bool ret = extensions->heap->decommitMemory(leaf, regionSize, leaf, highAddress);
 		if (!ret) {
-			Trc_MM_VirtualMemory_decommitMemory_failure(leaf, arrayletLeafSize);
+			Trc_MM_VirtualMemory_decommitMemory_failure(leaf, regionSize);
 		}
 
 		/* Refresh the spine -- it might move if we GC while allocating the leaf */
 		spine = _allocateDescription.getSpine();
 
-		bytesRemaining -= OMR_MIN(bytesRemaining, arrayletLeafSize);
+		bytesRemaining -= OMR_MIN(bytesRemaining, regionSize);
 		arrayoidIndex += 1;
 	}
 
@@ -390,10 +385,11 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm
 		Assert_MM_true(_layout == GC_ArrayletObjectModel::InlineContiguous);
 		Assert_MM_true(indexableObjectModel->isVirtualLargeObjectHeapEnabled());
 		/* Number of arraylet leaves in the iterator must match the number of leaves calculated */
-		Assert_MM_true(arrayletLeafCount == arrayoidIndex);
+		Assert_MM_true(arrayReservedRegionCount == arrayoidIndex);
 
 		byteAmount = _dataSize;
 		void *virtualLargeObjectHeapAddress = extensions->largeObjectVirtualMemory->allocateSparseFreeEntryAndMapToHeapObject(spine, byteAmount);
+
 		if (NULL != virtualLargeObjectHeapAddress) {
 			indexableObjectModel->setDataAddrForContiguous((J9IndexableObject *)spine, virtualLargeObjectHeapAddress);
 		} else {
@@ -402,7 +398,13 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm
 		}
 	}
 
-	Trc_MM_getSparseAddressAndDecommitLeaves_Exit(env->getLanguageVMThread(), spine, (void *)bytesRemaining);
+	if (NULL == spine) {
+		/* Failed to reserve regions or to allocate a sparse free entry; clean up the reserved regions. */
+		if (0 < arrayoidIndex) {
+			((MM_HeapRegionManagerVLHGC *)extensions->heapRegionManager)->recycleReservedRegionsForVirtualLargeObjectHeap(envBase, arrayoidIndex);
+		}
+	}
+	Trc_MM_getSparseAddressAndDecommitLeaves_Exit(envBase->getLanguageVMThread(), spine, (void *)bytesRemaining);
 
 	return spine;
 }
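
A note on the arithmetic above: arrayReservedRegionCount rounds the array's data size up to a whole number of heap regions, where the old code counted arraylet leaves of _arrayletLeafSize. A minimal standalone sketch of that computation (plain C++; the region and data sizes are invented, and roundToCeiling mirrors the assumed MM_Math::roundToCeiling semantics of rounding up to a multiple of the granularity):

#include <cassert>
#include <cstdint>

/* Assumed semantics of MM_Math::roundToCeiling(granularity, value):
 * round value up to the nearest multiple of granularity. */
static uintptr_t roundToCeiling(uintptr_t granularity, uintptr_t value)
{
	return ((value + granularity - 1) / granularity) * granularity;
}

int main()
{
	const uintptr_t regionSize = 512 * 1024;        /* hypothetical 512 KiB region size */
	const uintptr_t dataSize = 3 * 1024 * 1024 + 1; /* hypothetical off-heap array payload */

	/* Same shape as the patch: one reserved region per regionSize chunk of data. */
	uintptr_t arrayReservedRegionCount = roundToCeiling(regionSize, dataSize) / regionSize;
	assert(7 == arrayReservedRegionCount); /* 3 MiB + 1 byte needs 7 regions of 512 KiB */
	return 0;
}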

runtime/gc_base/RootScanner.cpp (16 additions & 10 deletions)
@@ -41,6 +41,7 @@
 #if defined(J9VM_GC_FINALIZATION)
 #include "FinalizeListManager.hpp"
 #endif /* J9VM_GC_FINALIZATION*/
+#include "HashTableIterator.hpp"
 #include "Heap.hpp"
 #include "HeapRegionDescriptor.hpp"
 #include "HeapRegionIterator.hpp"
@@ -60,6 +61,10 @@
 #include "ParallelDispatcher.hpp"
 #include "PointerArrayIterator.hpp"
 #include "SlotObject.hpp"
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+#include "SparseVirtualMemory.hpp"
+#include "SparseAddressOrderedFixedSizeDataPool.hpp"
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 #include "StringTable.hpp"
 #include "StringTableIncrementalIterator.hpp"
 #include "Task.hpp"
@@ -241,7 +246,7 @@ MM_RootScanner::doStringTableSlot(J9Object **slotPtr, GC_StringTableIterator *st
 
 #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
 void
-MM_RootScanner::doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation)
+MM_RootScanner::doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, GC_HashTableIterator *sparseDataEntryIterator)
 {
 	/* No need to call doSlot() here since there's nothing to update */
 }
@@ -947,16 +952,16 @@ void
 MM_RootScanner::scanObjectsInVirtualLargeObjectHeap(MM_EnvironmentBase *env)
 {
 	if (_singleThread || J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
-		GC_HeapRegionIteratorVLHGC regionIterator(_extensions->heap->getHeapRegionManager());
-		MM_HeapRegionDescriptorVLHGC *region = NULL;
 		reportScanningStarted(RootScannerEntity_virtualLargeObjectHeapObjects);
-		while (NULL != (region = regionIterator.nextRegion())) {
-			if (region->isArrayletLeaf()) {
-				if (region->_sparseHeapAllocation) {
-					J9Object *spineObject = (J9Object *)region->_allocateData.getSpine();
-					Assert_MM_true(NULL != spineObject);
-					doObjectInVirtualLargeObjectHeap(spineObject, &region->_sparseHeapAllocation);
-				}
+		MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
+		J9HashTable *table = largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable();
+		if (NULL != table) {
+			MM_SparseDataTableEntry *sparseDataEntry = NULL;
+			GC_HashTableIterator iterator(table);
+			while (NULL != (sparseDataEntry = (MM_SparseDataTableEntry *)iterator.nextSlot())) {
+				J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr;
+				Assert_MM_true(NULL != spineObject);
+				doObjectInVirtualLargeObjectHeap(spineObject, &iterator);
 			}
 		}
 		reportScanningEnded(RootScannerEntity_virtualLargeObjectHeapObjects);
@@ -971,6 +976,7 @@ MM_RootScanner::scanDoubleMappedObjects(MM_EnvironmentBase *env)
 	if (_singleThread || J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
 		GC_HeapRegionIteratorVLHGC regionIterator(_extensions->heap->getHeapRegionManager());
 		MM_HeapRegionDescriptorVLHGC *region = NULL;
+
 		reportScanningStarted(RootScannerEntity_DoubleMappedObjects);
 		while (NULL != (region = regionIterator.nextRegion())) {
 			if (region->isArrayletLeaf()) {
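
The rewritten scanObjectsInVirtualLargeObjectHeap() above no longer walks every arraylet-leaf region looking for the _sparseHeapAllocation flag; it walks the sparse-data table instead, visiting exactly one entry per off-heap allocation. A self-contained sketch of that shape (standard C++ containers standing in for J9HashTable and GC_HashTableIterator; every name here is invented for illustration):

#include <cassert>
#include <cstdint>
#include <unordered_map>

struct Object { uintptr_t dummy; };

struct SparseDataTableEntry {
	Object *proxyObjPtr;  /* spine object kept alive by the sparse mapping */
	void *dataAddr;       /* off-heap data address */
	uintptr_t size;
};

static void doObjectInVirtualLargeObjectHeap(Object *spine)
{
	assert(nullptr != spine); /* mirrors the Assert_MM_true in the patch */
}

int main()
{
	std::unordered_map<void *, SparseDataTableEntry> objectToSparseDataTable;
	Object spine{0};
	objectToSparseDataTable[&spine] = SparseDataTableEntry{&spine, nullptr, 0};

	/* One visit per table entry, analogous to GC_HashTableIterator::nextSlot(). */
	for (auto &pair : objectToSparseDataTable) {
		doObjectInVirtualLargeObjectHeap(pair.second.proxyObjPtr);
	}
	return 0;
}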

runtime/gc_base/RootScanner.hpp (2 additions & 1 deletion)
@@ -46,6 +46,7 @@
 #include "Task.hpp"
 #include "VMClassSlotIterator.hpp"
 
+class GC_HashTableIterator;
 class GC_SlotObject;
 class MM_MemoryPool;
 class MM_CollectorLanguageInterfaceImpl;
@@ -570,7 +571,7 @@ class MM_RootScanner : public MM_BaseVirtual
 	 *
 	 * @param objectPtr[in] indexable object's spine
 	 */
-	virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation);
+	virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, GC_HashTableIterator *sparseDataEntryIterator);
 #endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 
 #if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
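
Replacing the old bool *sparseHeapAllocation parameter with a GC_HashTableIterator * means an overriding scanner can act on the table entry itself, for example dropping the entry for a spine object it finds to be dead. This commit only changes the hook's signature; the following self-contained sketch (invented types, not the OMR hash-table API) illustrates why handing the iterator to the visitor is useful, assuming the iterator supports removing the last-returned slot:

#include <cassert>
#include <unordered_map>

struct Entry { bool spineIsLive; };

class TableIterator {
public:
	explicit TableIterator(std::unordered_map<int, Entry> &t) : _table(t), _it(t.begin()) {}
	Entry *nextSlot()
	{
		if (_it == _table.end()) return nullptr;
		_current = _it++; /* remember the slot we hand out, advance past it */
		return &_current->second;
	}
	void removeSlot() { _table.erase(_current); } /* erase the last-returned entry */
private:
	std::unordered_map<int, Entry> &_table;
	std::unordered_map<int, Entry>::iterator _it, _current;
};

int main()
{
	std::unordered_map<int, Entry> table{{1, {true}}, {2, {false}}, {3, {true}}};
	TableIterator iterator(table);
	while (Entry *entry = iterator.nextSlot()) {
		if (!entry->spineIsLive) {
			iterator.removeSlot(); /* safe: the iterator already advanced past it */
		}
	}
	assert(2 == table.size());
	return 0;
}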

runtime/gc_vlhgc/AllocationContextBalanced.cpp (78 additions & 19 deletions)
@@ -183,6 +183,7 @@ MM_AllocationContextBalanced::allocateTLH(MM_EnvironmentBase *env, MM_AllocateDe
 		result = lockedReplenishAndAllocate(env, objectAllocationInterface, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_TLH);
 	}
 	unlockCommon();
+
 	/* if that still fails, try to invoke the collector */
 	if (shouldCollectOnFailure && (NULL == result)) {
 		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, objectAllocationInterface, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_TLH);
@@ -256,6 +257,7 @@ MM_AllocationContextBalanced::allocateObject(MM_EnvironmentBase *env, MM_Allocat
 		result = lockedReplenishAndAllocate(env, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT);
 	}
 	unlockCommon();
+
 	/* if that still fails, try to invoke the collector */
 	if (shouldCollectOnFailure && (NULL == result)) {
 		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT);
@@ -321,6 +323,7 @@ MM_AllocationContextBalanced::allocateArrayletLeaf(MM_EnvironmentBase *env, MM_A
 	lockCommon();
 	result = lockedReplenishAndAllocate(env, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_LEAF);
 	unlockCommon();
+
 	/* if that fails, try to invoke the collector */
 	if (shouldCollectOnFailure && (NULL == result)) {
 		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_LEAF);
@@ -355,30 +358,56 @@ MM_AllocationContextBalanced::lockedAllocateArrayletLeaf(MM_EnvironmentBase *env
 	/* look up the spine region since we need to add this region to its leaf list */
 	MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC *)_heapRegionManager->tableDescriptorForAddress(spine);
 	/* the leaf requires a pointer back to the spine object so that it can verify its liveness elsewhere in the collector */
-	leafAllocateData->setSpine(spine);
-	freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
-	/* add the leaf to the spine region's leaf list */
-	/* We own the lock on the spine region's context when this call is made so we can safely manipulate this list.
-	 * An exceptional scenario: A thread allocates a spine (and possibly a few arraylets), but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
-	 * that age out regions to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
-	 * we already hold is not sufficient any more. We need to additionally acquire common context' common lock, since multiple spines from different ACs could have come into this state,
-	 * and worse multiple spines originally allocated from different ACs may end up in a single common context region.
-	 */
+	if (!MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) {
+		leafAllocateData->setSpine(spine);
+		/* add the leaf to the spine region's leaf list */
+		/* We own the lock on the spine region's context when this call is made so we can safely manipulate this list.
+		 * An exceptional scenario: A thread allocates a spine (and possibly a few arraylets), but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
+		 * that age out regions to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
+		 * we already hold is not sufficient any more. We need to additionally acquire common context' common lock, since multiple spines from different ACs could have come into this state,
+		 * and worse multiple spines originally allocated from different ACs may end up in a single common context region.
+		 */
+
+		MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext;
+		if (this != spineContext) {
+			Assert_MM_true(env->getCommonAllocationContext() == spineContext);
+			/* The common allocation context is always an instance of AllocationContextBalanced */
+			((MM_AllocationContextBalanced *)spineContext)->lockCommon();
+		}
 
-	MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext;
-	if (this != spineContext) {
-		Assert_MM_true(env->getCommonAllocationContext() == spineContext);
-		/* The common allocation context is always an instance of AllocationContextBalanced */
-		((MM_AllocationContextBalanced *)spineContext)->lockCommon();
+		leafAllocateData->addToArrayletLeafList(env, spineRegion);
+
+		if (this != spineContext) {
+			/* The common allocation context is always an instance of AllocationContextBalanced */
+			((MM_AllocationContextBalanced *)spineContext)->unlockCommon();
+		}
 	}
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+	else {
+		/*
+		 * For now, only a single list of reserved regions owned by the common AC is maintained.
+		 * This is a sub-optimal approach, since due to different order of how regions are reserved and released
+		 * (objects don't die in order they are allocated) it may lead to imbalance of how many regions are committed in each AC.
+		 * In future, allocations should remember (somewhere in Off-heap meta structures) how many regions came from each AC
+		 * and release exact same number back to each AC.
+		 */
+		MM_AllocationContextTarok *commonContext = (MM_AllocationContextTarok *)env->getCommonAllocationContext();
+		if (this != commonContext) {
+			/* The common allocation context is always an instance of AllocationContextBalanced */
+			((MM_AllocationContextBalanced *)commonContext)->lockCommon();
+		}
 
-	leafAllocateData->addToArrayletLeafList(spineRegion);
-
-	if (this != spineContext) {
-		/* The common allocation context is always an instance of AllocationContextBalanced */
-		((MM_AllocationContextBalanced *)spineContext)->unlockCommon();
+		leafAllocateData->pushRegionToArrayReservedRegionList(env, ((MM_AllocationContextBalanced *)commonContext)->getArrayReservedRegionListAddress());
+		((MM_AllocationContextBalanced *)commonContext)->incrementArrayReservedRegionCount();
+
+		if (this != commonContext) {
+			/* The common allocation context is always an instance of AllocationContextBalanced */
+			((MM_AllocationContextBalanced *)commonContext)->unlockCommon();
+		}
 	}
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 
+	freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
 	/* store the base address of the leaf for the memset and the return */
 	return freeRegionForArrayletLeaf->getLowAddress();
 }
@@ -1073,3 +1102,33 @@ MM_AllocationContextBalanced::setNumaAffinityForThread(MM_EnvironmentBase *env)
 	return success;
 }
 
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+void
+MM_AllocationContextBalanced::recycleReservedRegionsForVirtualLargeObjectHeap(MM_EnvironmentVLHGC *env, uintptr_t reservedRegionCount, bool needLock)
+{
+	MM_HeapRegionDescriptorVLHGC **head = getArrayReservedRegionListAddress();
+	MM_HeapRegionDescriptorVLHGC *region = NULL;
+
+	if (needLock) {
+		lockCommon();
+	}
+
+	while ((reservedRegionCount > 0) && (NULL != (region = *head))) {
+		region->_allocateData.popRegionFromArrayReservedRegionList(env, head);
+		decrementArrayReservedRegionCount();
+
+		/* Restore/recommit the reserved region that was previously decommitted. */
+		MM_GCExtensions::getExtensions(env)->heap->commitMemory(region->getLowAddress(), _heapRegionManager->getRegionSize());
+
+		region->getSubSpace()->recycleRegion(env, region);
+		reservedRegionCount -= 1;
+	}
+
+	if (needLock) {
+		unlockCommon();
+	}
+
+	Assert_MM_true(0 == reservedRegionCount);
+}
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
+
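
Together with the _arrayReservedRegionList and _arrayReservedRegionCount fields added in AllocationContextBalanced.hpp below, the new bookkeeping amounts to a lock-protected intrusive list of reserved regions plus a count, drained by recycleReservedRegionsForVirtualLargeObjectHeap(). A minimal standalone model of that push/recycle protocol (a std::mutex stands in for lockCommon()/unlockCommon(); all names are invented, and the recommit/recycle step is reduced to a comment):

#include <cassert>
#include <cstdint>
#include <mutex>

struct Region { Region *nextReserved = nullptr; };

struct CommonContext {
	std::mutex commonLock; /* stands in for lockCommon()/unlockCommon() */
	Region *arrayReservedRegionList = nullptr;
	uintptr_t arrayReservedRegionCount = 0;

	void pushReserved(Region *region)
	{
		std::lock_guard<std::mutex> guard(commonLock);
		region->nextReserved = arrayReservedRegionList; /* push onto intrusive list */
		arrayReservedRegionList = region;
		arrayReservedRegionCount += 1;
	}

	/* analogous to recycleReservedRegionsForVirtualLargeObjectHeap() */
	void recycleReserved(uintptr_t reservedRegionCount)
	{
		std::lock_guard<std::mutex> guard(commonLock);
		Region *region = nullptr;
		while ((reservedRegionCount > 0) && (nullptr != (region = arrayReservedRegionList))) {
			arrayReservedRegionList = region->nextReserved; /* pop */
			region->nextReserved = nullptr;
			arrayReservedRegionCount -= 1;
			/* the real code recommits the region's memory and recycles it here */
			reservedRegionCount -= 1;
		}
		assert(0 == reservedRegionCount); /* mirrors the Assert_MM_true */
	}
};

int main()
{
	CommonContext common;
	Region regions[3];
	for (Region &r : regions) common.pushReserved(&r);
	common.recycleReserved(2);
	assert(1 == common.arrayReservedRegionCount);
	return 0;
}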

runtime/gc_vlhgc/AllocationContextBalanced.hpp (36 additions & 0 deletions)
@@ -71,6 +71,10 @@ class MM_AllocationContextBalanced : public MM_AllocationContextTarok
 	UDATA *_freeProcessorNodes; /**< The array listing all the NUMA node numbers which account for the nodes with processors but no memory plus an empty slot for each context to use (element 0 is used by this context) - this is used when setting affinity */
 	UDATA _freeProcessorNodeCount; /**< The length, in elements, of the _freeProcessorNodes array (always at least 1 after startup) */
 
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+	MM_HeapRegionDescriptorVLHGC *_arrayReservedRegionList; /**< Off-heap case only; regions are added and removed via region->_allocateData.pushRegionToArrayReservedRegionList()/popRegionFromArrayReservedRegionList() */
+	uintptr_t _arrayReservedRegionCount; /**< Count of regions currently on _arrayReservedRegionList */
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 	/* Methods */
 public:
 	static MM_AllocationContextBalanced *newInstance(MM_EnvironmentBase *env, MM_MemorySubSpaceTarok *subspace, UDATA numaNode, UDATA allocationContextNumber);
@@ -270,6 +274,34 @@ class MM_AllocationContextBalanced : public MM_AllocationContextTarok
 	 */
 	virtual bool setNumaAffinityForThread(MM_EnvironmentBase *env);
 
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+	MM_HeapRegionDescriptorVLHGC **getArrayReservedRegionListAddress()
+	{
+		return &_arrayReservedRegionList;
+	}
+
+	uintptr_t getArrayReservedRegionCount()
+	{
+		return _arrayReservedRegionCount;
+	}
+
+	void setArrayReservedRegionCount(uintptr_t reservedRegionCount)
+	{
+		_arrayReservedRegionCount = reservedRegionCount;
+	}
+
+	void incrementArrayReservedRegionCount()
+	{
+		_arrayReservedRegionCount += 1;
+	}
+
+	void decrementArrayReservedRegionCount()
+	{
+		_arrayReservedRegionCount -= 1;
+	}
+
+	void recycleReservedRegionsForVirtualLargeObjectHeap(MM_EnvironmentVLHGC *env, uintptr_t reservedRegionCount, bool needLock = false);
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 
 protected:
 	virtual void tearDown(MM_EnvironmentBase *env);
@@ -293,6 +325,10 @@ class MM_AllocationContextBalanced : public MM_AllocationContextTarok
 		, _heapRegionManager(NULL)
 		, _freeProcessorNodes(NULL)
 		, _freeProcessorNodeCount(0)
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+		, _arrayReservedRegionList(NULL)
+		, _arrayReservedRegionCount(0)
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 	{
 		_typeId = __FUNCTION__;
 	}