From 710666fec83339036591239a686b9dc7914aea7b Mon Sep 17 00:00:00 2001
From: lhu
Date: Mon, 5 May 2025 13:52:56 -0400
Subject: [PATCH 1/3] Defragment reserved regions for off-heap, step 1

In the off-heap enabled case, a large array (larger than the region
size) is allocated on the sparse heap, but we still reserve arraylet
leaf regions to prevent over-use of the heap. Currently a whole region
has to be reserved even for the remaining bytes (the remainder of the
array size divided by the region size), which can fragment the heap.

First step:
1. Decouple the spine object from its leaf regions for the off-heap
   enabled case.
2. Find spine objects via the SparseDataTable when recycling leaf
   regions.

Signed-off-by: lhu
---
 .../gc_vlhgc/AllocationContextBalanced.cpp    | 42 ++++-----
 runtime/gc_vlhgc/CopyForwardScheme.cpp        | 75 ++++++++++------
 runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp | 86 +++++++++++++------
 3 files changed, 130 insertions(+), 73 deletions(-)

diff --git a/runtime/gc_vlhgc/AllocationContextBalanced.cpp b/runtime/gc_vlhgc/AllocationContextBalanced.cpp
index ad22ca24522..e7733746bc6 100644
--- a/runtime/gc_vlhgc/AllocationContextBalanced.cpp
+++ b/runtime/gc_vlhgc/AllocationContextBalanced.cpp
@@ -355,30 +355,32 @@ MM_AllocationContextBalanced::lockedAllocateArrayletLeaf(MM_EnvironmentBase *env
 	/* look up the spine region since we need to add this region to its leaf list */
 	MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC *)_heapRegionManager->tableDescriptorForAddress(spine);
 	/* the leaf requires a pointer back to the spine object so that it can verify its liveness elsewhere in the collector */
-	leafAllocateData->setSpine(spine);
-	freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
-	/* add the leaf to the spine region's leaf list */
-	/* We own the lock on the spine region's context when this call is made so we can safely manipulate this list.
-	 * An exceptional scenario: A thread allocates a spine (and possibly a few arraylets), but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
-	 * that age out regions to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
-	 * we already hold is not sufficient any more. We need to additionally acquire common context' common lock, since multiple spines from different ACs could have come into this state,
-	 * and worse multiple spines originally allocated from different ACs may end up in a single common context region.
-	 */
+	if (!MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) {
+		leafAllocateData->setSpine(spine);
+		/* add the leaf to the spine region's leaf list */
+		/* We own the lock on the spine region's context when this call is made so we can safely manipulate this list.
+		 * An exceptional scenario: A thread allocates a spine (and possibly a few arraylets), but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
+		 * that age out regions to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
+		 * we already hold is not sufficient any more. We need to additionally acquire common context' common lock, since multiple spines from different ACs could have come into this state,
+		 * and worse multiple spines originally allocated from different ACs may end up in a single common context region.
+ */ - MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext; - if (this != spineContext) { - Assert_MM_true(env->getCommonAllocationContext() == spineContext); - /* The common allocation context is always an instance of AllocationContextBalanced */ - ((MM_AllocationContextBalanced *)spineContext)->lockCommon(); - } + MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext; + if (this != spineContext) { + Assert_MM_true(env->getCommonAllocationContext() == spineContext); + /* The common allocation context is always an instance of AllocationContextBalanced */ + ((MM_AllocationContextBalanced *)spineContext)->lockCommon(); + } - leafAllocateData->addToArrayletLeafList(spineRegion); - - if (this != spineContext) { - /* The common allocation context is always an instance of AllocationContextBalanced */ - ((MM_AllocationContextBalanced *)spineContext)->unlockCommon(); + leafAllocateData->addToArrayletLeafList(spineRegion); + + if (this != spineContext) { + /* The common allocation context is always an instance of AllocationContextBalanced */ + ((MM_AllocationContextBalanced *)spineContext)->unlockCommon(); + } } + freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation()); /* store the base address of the leaf for the memset and the return */ return freeRegionForArrayletLeaf->getLowAddress(); } diff --git a/runtime/gc_vlhgc/CopyForwardScheme.cpp b/runtime/gc_vlhgc/CopyForwardScheme.cpp index a3b302a1b36..23c9ed3c8cf 100644 --- a/runtime/gc_vlhgc/CopyForwardScheme.cpp +++ b/runtime/gc_vlhgc/CopyForwardScheme.cpp @@ -97,6 +97,7 @@ #include "SlotObject.hpp" #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) #include "SparseVirtualMemory.hpp" +#include "SparseAddressOrderedFixedSizeDataPool.hpp" #endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ #include "StackSlotValidator.hpp" #include "SublistFragment.hpp" @@ -433,32 +434,56 @@ MM_CopyForwardScheme::updateLeafRegions(MM_EnvironmentVLHGC *env) GC_HeapRegionIteratorVLHGC regionIterator(_regionManager); MM_HeapRegionDescriptorVLHGC *region = NULL; - while (NULL != (region = regionIterator.nextRegion())) { - if (region->isArrayletLeaf()) { - J9Object *spineObject = (J9Object *)region->_allocateData.getSpine(); - Assert_MM_true(NULL != spineObject); - - J9Object *updatedSpineObject = updateForwardedPointer(spineObject); - if (updatedSpineObject != spineObject) { - MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(spineObject); - MM_HeapRegionDescriptorVLHGC *updatedSpineRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(updatedSpineObject); - - Assert_MM_true(spineRegion->_markData._shouldMark); - Assert_MM_true(spineRegion != updatedSpineRegion); - Assert_MM_true(updatedSpineRegion->containsObjects()); - - /* we need to move the leaf to another region's leaf list since its spine has moved */ - region->_allocateData.removeFromArrayletLeafList(env); - region->_allocateData.addToArrayletLeafList(updatedSpineRegion); - region->_allocateData.setSpine((J9IndexableObject *)updatedSpineObject); - } else if (!isLiveObject(spineObject)) { - Assert_MM_true(isObjectInEvacuateMemory(spineObject)); - /* the spine is in evacuate space so the arraylet is dead => recycle the leaf */ - /* remove arraylet leaf from list */ - region->_allocateData.removeFromArrayletLeafList(env); - /* recycle */ - region->_allocateData.setSpine(NULL); + if 
(MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) { + const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize; + MM_SparseVirtualMemory *largeObjectVirtualMemory = MM_GCExtensions::getExtensions(env)->largeObjectVirtualMemory; + uintptr_t arrayletLeafCount = 0; + J9HashTableState walkState; + + MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState); + while (NULL != sparseDataEntry) { + J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr; + if (!isLiveObject(spineObject)) { + uintptr_t dataSize = sparseDataEntry->_size; + arrayletLeafCount += MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize; + } + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } + while ((arrayletLeafCount > 0) && (NULL != (region = regionIterator.nextRegion()))) { + if (region->isArrayletLeaf()) { region->getSubSpace()->recycleRegion(env, region); + arrayletLeafCount -= 1; + } + } + Assert_MM_true(0 == arrayletLeafCount); + } else { + while (NULL != (region = regionIterator.nextRegion())) { + if (region->isArrayletLeaf()) { + J9Object *spineObject = (J9Object *)region->_allocateData.getSpine(); + Assert_MM_true(NULL != spineObject); + + J9Object *updatedSpineObject = updateForwardedPointer(spineObject); + if (updatedSpineObject != spineObject) { + MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(spineObject); + MM_HeapRegionDescriptorVLHGC *updatedSpineRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(updatedSpineObject); + + Assert_MM_true(spineRegion->_markData._shouldMark); + Assert_MM_true(spineRegion != updatedSpineRegion); + Assert_MM_true(updatedSpineRegion->containsObjects()); + + /* we need to move the leaf to another region's leaf list since its spine has moved */ + region->_allocateData.removeFromArrayletLeafList(env); + region->_allocateData.addToArrayletLeafList(updatedSpineRegion); + region->_allocateData.setSpine((J9IndexableObject *)updatedSpineObject); + } else if (!isLiveObject(spineObject)) { + Assert_MM_true(isObjectInEvacuateMemory(spineObject)); + /* the spine is in evacuate space so the arraylet is dead => recycle the leaf */ + /* remove arraylet leaf from list */ + region->_allocateData.removeFromArrayletLeafList(env); + /* recycle */ + region->_allocateData.setSpine(NULL); + region->getSubSpace()->recycleRegion(env, region); + } } } } diff --git a/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp b/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp index d8760e7b4f5..0c444f1f7d6 100644 --- a/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp +++ b/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp @@ -56,6 +56,8 @@ #include "ParallelDispatcher.hpp" #include "ParallelSweepChunk.hpp" #include "ParallelTask.hpp" +#include "SparseVirtualMemory.hpp" +#include "SparseAddressOrderedFixedSizeDataPool.hpp" #include "SweepHeapSectioningVLHGC.hpp" #include "SweepPoolManagerVLHGC.hpp" #include "SweepPoolManagerAddressOrderedList.hpp" @@ -1006,37 +1008,65 @@ MM_ParallelSweepSchemeVLHGC::recycleFreeRegions(MM_EnvironmentVLHGC *env) GC_HeapRegionIteratorVLHGC regionIterator(_regionManager); MM_HeapRegionDescriptorVLHGC *region = NULL; - while(NULL != (region = regionIterator.nextRegion())) { - /* Region must be marked for sweep */ - if (!region->_sweepData._alreadySwept && region->hasValidMarkMap()) { - 
MM_MemoryPool *regionPool = region->getMemoryPool(); - Assert_MM_true(NULL != regionPool); - MM_HeapRegionDescriptorVLHGC *walkRegion = region; - MM_HeapRegionDescriptorVLHGC *next = walkRegion->_allocateData.getNextArrayletLeafRegion(); - /* Try to walk list from this head */ - while (NULL != (walkRegion = next)) { - Assert_MM_true(walkRegion->isArrayletLeaf()); - J9Object *spineObject = (J9Object *)walkRegion->_allocateData.getSpine(); - next = walkRegion->_allocateData.getNextArrayletLeafRegion(); - Assert_MM_true( region->isAddressInRegion(spineObject) ); - if (!_cycleState._markMap->isBitSet(spineObject)) { - /* Arraylet is dead */ - - /* remove arraylet leaf from list */ - walkRegion->_allocateData.removeFromArrayletLeafList(env); - - /* recycle */ - walkRegion->_allocateData.setSpine(NULL); - - walkRegion->getSubSpace()->recycleRegion(env, walkRegion); - } + if (MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) { + const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize; + MM_SparseVirtualMemory *largeObjectVirtualMemory = MM_GCExtensions::getExtensions(env)->largeObjectVirtualMemory; + uintptr_t arrayletLeafCount = 0; + J9HashTableState walkState; + + MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState); + while (NULL != sparseDataEntry) { + J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr; + + if (!_cycleState._markMap->isBitSet(spineObject)) { + /* Arraylet is dead */ + uintptr_t dataSize = sparseDataEntry->_size; + arrayletLeafCount += MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize; } + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } - /* recycle if empty */ - if (region->getSize() == regionPool->getActualFreeMemorySize()) { - Assert_MM_true(NULL == region->_allocateData.getSpine()); - Assert_MM_true(NULL == region->_allocateData.getNextArrayletLeafRegion()); + while ((arrayletLeafCount > 0) && (NULL != (region = regionIterator.nextRegion()))) { + if (region->isArrayletLeaf()) { region->getSubSpace()->recycleRegion(env, region); + arrayletLeafCount -= 1; + } + } + Assert_MM_true(0 == arrayletLeafCount); + + } else { + while(NULL != (region = regionIterator.nextRegion())) { + /* Region must be marked for sweep */ + if (!region->_sweepData._alreadySwept && region->hasValidMarkMap()) { + MM_MemoryPool *regionPool = region->getMemoryPool(); + Assert_MM_true(NULL != regionPool); + MM_HeapRegionDescriptorVLHGC *walkRegion = region; + MM_HeapRegionDescriptorVLHGC *next = walkRegion->_allocateData.getNextArrayletLeafRegion(); + /* Try to walk list from this head */ + while (NULL != (walkRegion = next)) { + Assert_MM_true(walkRegion->isArrayletLeaf()); + J9Object *spineObject = (J9Object *)walkRegion->_allocateData.getSpine(); + next = walkRegion->_allocateData.getNextArrayletLeafRegion(); + Assert_MM_true( region->isAddressInRegion(spineObject) ); + if (!_cycleState._markMap->isBitSet(spineObject)) { + /* Arraylet is dead */ + + /* remove arraylet leaf from list */ + walkRegion->_allocateData.removeFromArrayletLeafList(env); + + /* recycle */ + walkRegion->_allocateData.setSpine(NULL); + + walkRegion->getSubSpace()->recycleRegion(env, walkRegion); + } + } + + /* recycle if empty */ + if (region->getSize() == regionPool->getActualFreeMemorySize()) { + Assert_MM_true(NULL == region->_allocateData.getSpine()); + Assert_MM_true(NULL == 
region->_allocateData.getNextArrayletLeafRegion()); + region->getSubSpace()->recycleRegion(env, region); + } } } } From cdb05bfb470e9f3639129078c67a1e9fe33794bf Mon Sep 17 00:00:00 2001 From: lhu Date: Mon, 12 May 2025 10:10:56 -0400 Subject: [PATCH 2/3] Update Signed-off-by: lhu --- .../IndexableObjectAllocationModel.cpp | 5 - runtime/gc_base/RootScanner.cpp | 30 ++-- runtime/gc_base/RootScanner.hpp | 2 +- runtime/gc_vlhgc/CollectionSetDelegate.cpp | 75 +++++++- runtime/gc_vlhgc/CopyForwardScheme.cpp | 65 +++++-- runtime/gc_vlhgc/GlobalMarkingScheme.cpp | 4 +- runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp | 11 +- ...ProjectedSurvivalCollectionSetDelegate.cpp | 71 +++++++- runtime/gc_vlhgc/RegionValidator.cpp | 2 +- runtime/gc_vlhgc/SchedulingDelegate.cpp | 23 ++- runtime/gc_vlhgc/WriteOnceCompactor.cpp | 162 +++++++++--------- 11 files changed, 322 insertions(+), 128 deletions(-) diff --git a/runtime/gc_base/IndexableObjectAllocationModel.cpp b/runtime/gc_base/IndexableObjectAllocationModel.cpp index 52512c8839c..3d928c0eece 100644 --- a/runtime/gc_base/IndexableObjectAllocationModel.cpp +++ b/runtime/gc_base/IndexableObjectAllocationModel.cpp @@ -366,11 +366,6 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm break; } - if (0 == arrayoidIndex) { - MM_HeapRegionDescriptorVLHGC *firstLeafRegionDescriptor = (MM_HeapRegionDescriptorVLHGC *)extensions->getHeap()->getHeapRegionManager()->tableDescriptorForAddress(leaf); - firstLeafRegionDescriptor->_sparseHeapAllocation = true; - } - /* Disable region for reads and writes, since that'll be done through the contiguous double mapped region */ void *highAddress = (void *)((uintptr_t)leaf + arrayletLeafSize); bool ret = extensions->heap->decommitMemory(leaf, arrayletLeafSize, leaf, highAddress); diff --git a/runtime/gc_base/RootScanner.cpp b/runtime/gc_base/RootScanner.cpp index d8007f8dd32..42140f52b56 100644 --- a/runtime/gc_base/RootScanner.cpp +++ b/runtime/gc_base/RootScanner.cpp @@ -60,6 +60,10 @@ #include "ParallelDispatcher.hpp" #include "PointerArrayIterator.hpp" #include "SlotObject.hpp" +#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) +#include "SparseVirtualMemory.hpp" +#include "SparseAddressOrderedFixedSizeDataPool.hpp" +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ #include "StringTable.hpp" #include "StringTableIncrementalIterator.hpp" #include "Task.hpp" @@ -238,7 +242,7 @@ MM_RootScanner::doStringTableSlot(J9Object **slotPtr, GC_StringTableIterator *st #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) void -MM_RootScanner::doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation) +MM_RootScanner::doObjectInVirtualLargeObjectHeap(J9Object *objectPtr) { /* No need to call doSlot() here since there's nothing to update */ } @@ -944,17 +948,21 @@ void MM_RootScanner::scanObjectsInVirtualLargeObjectHeap(MM_EnvironmentBase *env) { if (_singleThread || J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) { - GC_HeapRegionIteratorVLHGC regionIterator(_extensions->heap->getHeapRegionManager()); - MM_HeapRegionDescriptorVLHGC *region = NULL; + PORT_ACCESS_FROM_ENVIRONMENT(env); + j9tty_printf(PORTLIB, "scanObjectsInVirtualLargeObjectHeap _singleThread=%zu, env=%p\n", _singleThread, env); reportScanningStarted(RootScannerEntity_virtualLargeObjectHeapObjects); - while (NULL != (region = regionIterator.nextRegion())) { - if (region->isArrayletLeaf()) { - if (region->_sparseHeapAllocation) { - J9Object *spineObject = (J9Object *)region->_allocateData.getSpine(); - Assert_MM_true(NULL != spineObject); - 
doObjectInVirtualLargeObjectHeap(spineObject, &region->_sparseHeapAllocation);
-				}
-			}
+
+		MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
+		J9HashTableState walkState;
+
+		MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState);
+		j9tty_printf(PORTLIB, "hashTableStartDo sparseDataEntry=%p, env=%p\n", sparseDataEntry, env);
+		while (NULL != sparseDataEntry) {
+			J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr;
+			Assert_MM_true(NULL != spineObject);
+			doObjectInVirtualLargeObjectHeap(spineObject);
+			j9tty_printf(PORTLIB, "hashTableNextDo &walkState=%p, spineObject=%p, size=%zu, env=%p\n", &walkState, spineObject, sparseDataEntry->_size, env);
+			sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState);
 		}
 		reportScanningEnded(RootScannerEntity_virtualLargeObjectHeapObjects);
 	}
diff --git a/runtime/gc_base/RootScanner.hpp b/runtime/gc_base/RootScanner.hpp
index 711654325fb..5f44e1b1f88 100644
--- a/runtime/gc_base/RootScanner.hpp
+++ b/runtime/gc_base/RootScanner.hpp
@@ -570,7 +570,7 @@ class MM_RootScanner : public MM_BaseVirtual
 	 *
 	 * @param objectPtr[in] indexable object's spine
 	 */
-	virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation);
+	virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr);
 #endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 
 #if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
diff --git a/runtime/gc_vlhgc/CollectionSetDelegate.cpp b/runtime/gc_vlhgc/CollectionSetDelegate.cpp
index 5f747db1c88..43328077d77 100644
--- a/runtime/gc_vlhgc/CollectionSetDelegate.cpp
+++ b/runtime/gc_vlhgc/CollectionSetDelegate.cpp
@@ -48,6 +48,10 @@
 #include "MarkMap.hpp"
 #include "MemoryPool.hpp"
 #include "RegionValidator.hpp"
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+#include "SparseVirtualMemory.hpp"
+#include "SparseAddressOrderedFixedSizeDataPool.hpp"
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 
 MM_CollectionSetDelegate::MM_CollectionSetDelegate(MM_EnvironmentBase *env, MM_HeapRegionManager *manager)
 	: MM_BaseNonVirtual()
@@ -539,7 +543,7 @@ MM_CollectionSetDelegate::rateOfReturnCalculationBeforeSweep(MM_EnvironmentVLHGC
 			if(!region->getRememberedSetCardList()->isAccurate()) {
 				stats->_reclaimStats._regionCountOverflow += 1;
 			}
-		} else if(region->isArrayletLeaf()) {
+		} else if(region->isArrayletLeaf() && !_extensions->isVirtualLargeObjectHeapEnabled) {
 			MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)region->_allocateData.getSpine());
 			Assert_MM_true(parentRegion->containsObjects());
 			SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)];
@@ -556,6 +560,41 @@ MM_CollectionSetDelegate::rateOfReturnCalculationBeforeSweep(MM_EnvironmentVLHGC
 			}
 		}
 	}
+
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+	if (_extensions->isVirtualLargeObjectHeapEnabled) {
+		const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
+		MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
+		uintptr_t arrayletLeafCount = 0;
+		J9HashTableState walkState;
+
+		MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState);
+		while (NULL != sparseDataEntry) {
+			J9Object *spineObject = 
(J9Object *)sparseDataEntry->_proxyObjPtr; + uintptr_t dataSize = sparseDataEntry->_size; + /* TODO: how fraction is counting here? */ +// arrayletLeafCount = MM_Math::roundToFloor(arrayletLeafSize, dataSize) / arrayletLeafSize; + arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize; + MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)spineObject); + Assert_MM_true(parentRegion->containsObjects()); + SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)]; + + stats->_reclaimStats._regionCountBefore += arrayletLeafCount; + stats->_reclaimStats._regionCountArrayletLeafBefore += arrayletLeafCount; + + if(!parentRegion->_sweepData._alreadySwept) { + stats->_reclaimStats._reclaimableRegionCountBefore += arrayletLeafCount; + stats->_reclaimStats._reclaimableRegionCountArrayletLeafBefore += arrayletLeafCount; + } + if(!parentRegion->getRememberedSetCardList()->isAccurate()) { + stats->_reclaimStats._regionCountArrayletLeafOverflow += arrayletLeafCount; + } + + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } + + } +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ } } @@ -580,7 +619,7 @@ MM_CollectionSetDelegate::rateOfReturnCalculationAfterSweep(MM_EnvironmentVLHGC stats->_reclaimStats._regionBytesFreeAfter += memoryPool->getActualFreeMemorySize(); stats->_reclaimStats._regionDarkMatterAfter += memoryPool->getDarkMatterBytes(); } - } else if(region->isArrayletLeaf()) { + } else if(region->isArrayletLeaf() && !_extensions->isVirtualLargeObjectHeapEnabled) { MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)region->_allocateData.getSpine()); Assert_MM_true(parentRegion->containsObjects()); SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)]; @@ -595,6 +634,38 @@ MM_CollectionSetDelegate::rateOfReturnCalculationAfterSweep(MM_EnvironmentVLHGC } } +#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) + if (_extensions->isVirtualLargeObjectHeapEnabled) { + const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize; + MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory; + uintptr_t arrayletLeafCount = 0; + J9HashTableState walkState; + + MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState); + while (NULL != sparseDataEntry) { + J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr; + uintptr_t dataSize = sparseDataEntry->_size; + /* TODO: how fraction is counting here? 
*/ +// arrayletLeafCount = MM_Math::roundToFloor(arrayletLeafSize, dataSize) / arrayletLeafSize; + arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize; + MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)spineObject); + Assert_MM_true(parentRegion->containsObjects()); + SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)]; + + stats->_reclaimStats._regionCountAfter += arrayletLeafCount; + stats->_reclaimStats._regionCountArrayletLeafAfter += arrayletLeafCount; + + if(!parentRegion->_sweepData._alreadySwept) { + stats->_reclaimStats._reclaimableRegionCountAfter += arrayletLeafCount; + stats->_reclaimStats._reclaimableRegionCountArrayletLeafAfter += arrayletLeafCount; + } + + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } + + } +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ + /* We now have an expected change as a result of tracing and sweeping (parts of) the heap. Calculate the rate-of-return (ROR) on * tracing for age groups where work was done. * Use a weighted running average to calculate the ROR, where the weight is the % of regions in an age group that we are examining. diff --git a/runtime/gc_vlhgc/CopyForwardScheme.cpp b/runtime/gc_vlhgc/CopyForwardScheme.cpp index 23c9ed3c8cf..c21d63509aa 100644 --- a/runtime/gc_vlhgc/CopyForwardScheme.cpp +++ b/runtime/gc_vlhgc/CopyForwardScheme.cpp @@ -305,7 +305,7 @@ MM_CopyForwardScheme::initialize(MM_EnvironmentVLHGC *env) _maxCacheSize = _extensions->scavengerScanCacheMaximumSize; /* Cached pointer to the inter region remembered set */ - _interRegionRememberedSet = MM_GCExtensions::getExtensions(env)->interRegionRememberedSet; + _interRegionRememberedSet = _extensions->interRegionRememberedSet; _cacheLineAlignment = CACHE_LINE_SIZE; @@ -412,7 +412,7 @@ MM_CopyForwardScheme::raiseAbortFlag(MM_EnvironmentVLHGC *env) Trc_MM_CopyForwardScheme_abortFlagRaised(env->getLanguageVMThread()); PORT_ACCESS_FROM_ENVIRONMENT(env); - TRIGGER_J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT(MM_GCExtensions::getExtensions(env)->privateHookInterface, env->getOmrVMThread(), j9time_hires_clock(), J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT); + TRIGGER_J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT(_extensions->privateHookInterface, env->getOmrVMThread(), j9time_hires_clock(), J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT); } } } @@ -434,9 +434,9 @@ MM_CopyForwardScheme::updateLeafRegions(MM_EnvironmentVLHGC *env) GC_HeapRegionIteratorVLHGC regionIterator(_regionManager); MM_HeapRegionDescriptorVLHGC *region = NULL; - if (MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) { + if (_extensions->isVirtualLargeObjectHeapEnabled) { const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize; - MM_SparseVirtualMemory *largeObjectVirtualMemory = MM_GCExtensions::getExtensions(env)->largeObjectVirtualMemory; + MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory; uintptr_t arrayletLeafCount = 0; J9HashTableState walkState; @@ -3944,7 +3944,7 @@ class MM_CopyForwardSchemeRootScanner : public MM_RootScanner } } else { /* double check that there really was no work to do */ - Assert_MM_true(!MM_GCExtensions::getExtensions(env)->finalizeListManager->isFinalizableObjectProcessingRequired()); + Assert_MM_true(!_extensions->finalizeListManager->isFinalizableObjectProcessingRequired()); } 
reportScanningEnded(RootScannerEntity_FinalizableObjects); } @@ -4156,7 +4156,7 @@ class MM_CopyForwardSchemeRootClearer : public MM_RootScanner } #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) - virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation) { + virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr) { MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env); env->_copyForwardStats._offHeapRegionCandidates += 1; @@ -4172,7 +4172,6 @@ class MM_CopyForwardSchemeRootClearer : public MM_RootScanner env->_copyForwardStats._offHeapRegionsCleared += 1; void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)objectPtr); _extensions->largeObjectVirtualMemory->freeSparseRegionAndUnmapFromHeapObject(_env, dataAddr, objectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)objectPtr)); - *sparseHeapAllocation = false; } else { void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)fwdOjectPtr); if (NULL != dataAddr) { @@ -4755,17 +4754,19 @@ MM_CopyForwardScheme::verifyCopyForwardResult(MM_EnvironmentVLHGC *env) while (NULL != (region = regionIterator.nextRegion())) { if (region->isArrayletLeaf()) { - J9Object *spineObject = (J9Object *)region->_allocateData.getSpine(); - Assert_MM_true(NULL != spineObject); - /* the spine must be marked if it was copied as a live object or if we aborted the copy-forward */ - /* otherwise, it must not be forwarded (since that would imply that the spine survived but the pointer wasn't updated) */ - if (!_markMap->isBitSet(spineObject)) { - MM_ForwardedHeader forwardedSpine(spineObject, _extensions->compressObjectReferences()); - if (forwardedSpine.isForwardedPointer()) { - PORT_ACCESS_FROM_ENVIRONMENT(env); - j9tty_printf(PORTLIB, "Spine pointer is not marked and is forwarded (leaf region's pointer to spine not updated)! Region %p Spine %p (should be %p)\n", region, spineObject, forwardedSpine.getForwardedObject()); - verifyDumpObjectDetails(env, "spineObject", spineObject); - Assert_MM_unreachable(); + if (!_extensions->isVirtualLargeObjectHeapEnabled) { + J9Object *spineObject = (J9Object *)region->_allocateData.getSpine(); + Assert_MM_true(NULL != spineObject); + /* the spine must be marked if it was copied as a live object or if we aborted the copy-forward */ + /* otherwise, it must not be forwarded (since that would imply that the spine survived but the pointer wasn't updated) */ + if (!_markMap->isBitSet(spineObject)) { + MM_ForwardedHeader forwardedSpine(spineObject, _extensions->compressObjectReferences()); + if (forwardedSpine.isForwardedPointer()) { + PORT_ACCESS_FROM_ENVIRONMENT(env); + j9tty_printf(PORTLIB, "Spine pointer is not marked and is forwarded (leaf region's pointer to spine not updated)! 
Region %p Spine %p (should be %p)\n", region, spineObject, forwardedSpine.getForwardedObject()); + verifyDumpObjectDetails(env, "spineObject", spineObject); + Assert_MM_unreachable(); + } } } } else { @@ -4793,6 +4794,32 @@ MM_CopyForwardScheme::verifyCopyForwardResult(MM_EnvironmentVLHGC *env) } } +#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) + if (_extensions->isVirtualLargeObjectHeapEnabled) { + MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory; + J9HashTableState walkState; + + MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState); + while (NULL != sparseDataEntry) { + J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr; + Assert_MM_true(NULL != spineObject); + /* the spine must be marked if it was copied as a live object or if we aborted the copy-forward */ + /* otherwise, it must not be forwarded (since that would imply that the spine survived but the pointer wasn't updated) */ + if (!_markMap->isBitSet(spineObject)) { + MM_ForwardedHeader forwardedSpine(spineObject, _extensions->compressObjectReferences()); + if (forwardedSpine.isForwardedPointer()) { + PORT_ACCESS_FROM_ENVIRONMENT(env); + j9tty_printf(PORTLIB, "Spine pointer is not marked and is forwarded (leaf region's pointer to spine not updated)! Region %p Spine %p (should be %p)\n", region, spineObject, forwardedSpine.getForwardedObject()); + verifyDumpObjectDetails(env, "spineObject", spineObject); + Assert_MM_unreachable(); + } + } + + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } + } +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ + MM_CopyForwardVerifyScanner scanner(env, this); scanner.scanAllSlots(env); @@ -5209,7 +5236,7 @@ MM_CopyForwardScheme:: cleanOverflowInRange(MM_EnvironmentVLHGC *env, uintptr_t { /* At this point, no copying should happen, so that reservingContext is irrelevant */ MM_AllocationContextTarok *reservingContext = _commonContext; - MM_HeapMapIterator objectIterator = MM_HeapMapIterator(MM_GCExtensions::getExtensions(env), env->_cycleState->_markMap, lowAddress, highAddress); + MM_HeapMapIterator objectIterator = MM_HeapMapIterator(_extensions, env->_cycleState->_markMap, lowAddress, highAddress); J9Object *object = NULL; while (NULL != (object = objectIterator.nextObject())) { diff --git a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp index 5e79bf0e955..fab455a8058 100644 --- a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp +++ b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp @@ -1412,7 +1412,7 @@ class MM_GlobalMarkingSchemeRootClearer : public MM_RootScanner } #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) - virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation) { + virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr) { MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env); env->_markVLHGCStats._offHeapRegionCandidates += 1; if (!_markingScheme->isMarked(objectPtr)) { @@ -1420,8 +1420,6 @@ class MM_GlobalMarkingSchemeRootClearer : public MM_RootScanner void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)objectPtr); if (NULL != dataAddr) { _extensions->largeObjectVirtualMemory->freeSparseRegionAndUnmapFromHeapObject(_env, dataAddr, objectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)objectPtr)); - - *sparseHeapAllocation = 
false;
 			}
 		}
 	}
diff --git a/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp b/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp
index 0c444f1f7d6..ec7bfdcdbfa 100644
--- a/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp
+++ b/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp
@@ -59,8 +59,10 @@
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
 #include "SparseVirtualMemory.hpp"
 #include "SparseAddressOrderedFixedSizeDataPool.hpp"
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 #include "SweepHeapSectioningVLHGC.hpp"
 #include "SweepPoolManagerVLHGC.hpp"
 #include "SweepPoolManagerAddressOrderedList.hpp"
 #include "SweepPoolState.hpp"
 
@@ -1008,9 +1010,10 @@ MM_ParallelSweepSchemeVLHGC::recycleFreeRegions(MM_EnvironmentVLHGC *env)
 	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
 	MM_HeapRegionDescriptorVLHGC *region = NULL;
 
-	if (MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) {
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+	if (_extensions->isVirtualLargeObjectHeapEnabled) {
 		const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
-		MM_SparseVirtualMemory *largeObjectVirtualMemory = MM_GCExtensions::getExtensions(env)->largeObjectVirtualMemory;
+		MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
 		uintptr_t arrayletLeafCount = 0;
 		J9HashTableState walkState;
@@ -1034,7 +1037,9 @@ MM_ParallelSweepSchemeVLHGC::recycleFreeRegions(MM_EnvironmentVLHGC *env)
 		}
 		Assert_MM_true(0 == arrayletLeafCount);
 
-	} else {
+	} else
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
+	{
 		while(NULL != (region = regionIterator.nextRegion())) {
 			/* Region must be marked for sweep */
 			if (!region->_sweepData._alreadySwept && region->hasValidMarkMap()) {
diff --git a/runtime/gc_vlhgc/ProjectedSurvivalCollectionSetDelegate.cpp b/runtime/gc_vlhgc/ProjectedSurvivalCollectionSetDelegate.cpp
index ae8bb43c9b9..e1d06b3ce14 100644
--- a/runtime/gc_vlhgc/ProjectedSurvivalCollectionSetDelegate.cpp
+++ b/runtime/gc_vlhgc/ProjectedSurvivalCollectionSetDelegate.cpp
@@ -48,6 +48,10 @@
 #include "MarkMap.hpp"
 #include "MemoryPool.hpp"
 #include "RegionValidator.hpp"
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+#include "SparseVirtualMemory.hpp"
+#include "SparseAddressOrderedFixedSizeDataPool.hpp"
+#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
 
 MM_ProjectedSurvivalCollectionSetDelegate::MM_ProjectedSurvivalCollectionSetDelegate(MM_EnvironmentBase *env, MM_HeapRegionManager *manager)
 	: MM_BaseNonVirtual()
@@ -540,7 +544,7 @@ MM_ProjectedSurvivalCollectionSetDelegate::rateOfReturnCalculationBeforeSweep(MM
 			if(!region->getRememberedSetCardList()->isAccurate()) {
 				stats->_reclaimStats._regionCountOverflow += 1;
 			}
-		} else if(region->isArrayletLeaf()) {
+		} else if(region->isArrayletLeaf() && !_extensions->isVirtualLargeObjectHeapEnabled) {
 			MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)region->_allocateData.getSpine());
 			Assert_MM_true(parentRegion->containsObjects());
 			SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)];
@@ -557,6 +561,39 @@ MM_ProjectedSurvivalCollectionSetDelegate::rateOfReturnCalculationBeforeSweep(MM
 			}
 		}
 	}
+#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
+	if (_extensions->isVirtualLargeObjectHeapEnabled) {
+		const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
+		MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
+		uintptr_t arrayletLeafCount = 0;
+		
J9HashTableState walkState; + + MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState); + while (NULL != sparseDataEntry) { + J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr; + uintptr_t dataSize = sparseDataEntry->_size; + /* TODO: how fraction is counting here? */ +// arrayletLeafCount = MM_Math::roundToFloor(arrayletLeafSize, dataSize) / arrayletLeafSize; + arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize; + MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)spineObject); + Assert_MM_true(parentRegion->containsObjects()); + SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)]; + + stats->_reclaimStats._regionCountBefore += arrayletLeafCount; + stats->_reclaimStats._regionCountArrayletLeafBefore += arrayletLeafCount; + + if(!parentRegion->_sweepData._alreadySwept) { + stats->_reclaimStats._reclaimableRegionCountBefore += arrayletLeafCount; + stats->_reclaimStats._reclaimableRegionCountArrayletLeafBefore += arrayletLeafCount; + } + if(!parentRegion->getRememberedSetCardList()->isAccurate()) { + stats->_reclaimStats._regionCountArrayletLeafOverflow += arrayletLeafCount; + } + + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } + } +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ } } @@ -581,7 +618,7 @@ MM_ProjectedSurvivalCollectionSetDelegate::rateOfReturnCalculationAfterSweep(MM_ stats->_reclaimStats._regionBytesFreeAfter += memoryPool->getActualFreeMemorySize(); stats->_reclaimStats._regionDarkMatterAfter += memoryPool->getDarkMatterBytes(); } - } else if(region->isArrayletLeaf()) { + } else if(region->isArrayletLeaf() && !_extensions->isVirtualLargeObjectHeapEnabled) { MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)region->_allocateData.getSpine()); Assert_MM_true(parentRegion->containsObjects()); SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)]; @@ -595,6 +632,36 @@ MM_ProjectedSurvivalCollectionSetDelegate::rateOfReturnCalculationAfterSweep(MM_ } } } +#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) + if (_extensions->isVirtualLargeObjectHeapEnabled) { + const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize; + MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory; + uintptr_t arrayletLeafCount = 0; + J9HashTableState walkState; + + MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState); + while (NULL != sparseDataEntry) { + J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr; + uintptr_t dataSize = sparseDataEntry->_size; + /* TODO: how fraction is counting here? 
*/ +// arrayletLeafCount = MM_Math::roundToFloor(arrayletLeafSize, dataSize) / arrayletLeafSize; + arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize; + MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)spineObject); + Assert_MM_true(parentRegion->containsObjects()); + SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)]; + + stats->_reclaimStats._regionCountAfter += arrayletLeafCount; + stats->_reclaimStats._regionCountArrayletLeafAfter += arrayletLeafCount; + + if(!parentRegion->_sweepData._alreadySwept) { + stats->_reclaimStats._reclaimableRegionCountAfter += arrayletLeafCount; + stats->_reclaimStats._reclaimableRegionCountArrayletLeafAfter += arrayletLeafCount; + } + + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } + } +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ /* We now have an expected change as a result of tracing and sweeping (parts of) the heap. Calculate the rate-of-return (ROR) on * tracing for age groups where work was done. diff --git a/runtime/gc_vlhgc/RegionValidator.cpp b/runtime/gc_vlhgc/RegionValidator.cpp index d7c249334d3..1538e263283 100644 --- a/runtime/gc_vlhgc/RegionValidator.cpp +++ b/runtime/gc_vlhgc/RegionValidator.cpp @@ -113,7 +113,7 @@ MM_RegionValidator::validate(MM_EnvironmentBase *env) result = false; } } - } else if (_region->isArrayletLeaf()) { + } else if (_region->isArrayletLeaf() && !MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) { /* Do a quick check to ensure that arraylets look reasonable before the collection to help debug problems like CMVC 174687 */ if (NULL == _region->_allocateData.getSpine()) { reportRegion(env, "NULL spine object"); diff --git a/runtime/gc_vlhgc/SchedulingDelegate.cpp b/runtime/gc_vlhgc/SchedulingDelegate.cpp index 9a6c4511ba4..20825ed5308 100644 --- a/runtime/gc_vlhgc/SchedulingDelegate.cpp +++ b/runtime/gc_vlhgc/SchedulingDelegate.cpp @@ -46,6 +46,10 @@ #include "HeapRegionManager.hpp" #include "IncrementalGenerationalGC.hpp" #include "MemoryPoolAddressOrderedList.hpp" +#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) +#include "SparseVirtualMemory.hpp" +#include "SparseAddressOrderedFixedSizeDataPool.hpp" +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ /* NOTE: old logic for determining incremental thresholds has been deleted. 
Please * see CVS history, version 1.14, if you need to find this logic @@ -629,12 +633,29 @@ MM_SchedulingDelegate::updateLiveBytesAfterPartialCollect() _liveSetBytesAfterPartialCollect += region->getSize(); _liveSetBytesAfterPartialCollect -= memoryPool->getActualFreeMemorySize(); _liveSetBytesAfterPartialCollect -= memoryPool->getDarkMatterBytes(); - } else if (region->isArrayletLeaf()) { + } else if (region->isArrayletLeaf() && !_extensions->isVirtualLargeObjectHeapEnabled) { if (_extensions->objectModel.isObjectArray(region->_allocateData.getSpine())) { _liveSetBytesAfterPartialCollect += region->getSize(); } } } +#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) + if (_extensions->isVirtualLargeObjectHeapEnabled) { + MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory; + J9HashTableState walkState; + + MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState); + while (NULL != sparseDataEntry) { + J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr; + + if (_extensions->objectModel.isObjectArray(spineObject)) { + _liveSetBytesAfterPartialCollect += sparseDataEntry->_size; + } + + sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); + } + } +#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */ } double diff --git a/runtime/gc_vlhgc/WriteOnceCompactor.cpp b/runtime/gc_vlhgc/WriteOnceCompactor.cpp index 36c0763faea..d0be58ee638 100644 --- a/runtime/gc_vlhgc/WriteOnceCompactor.cpp +++ b/runtime/gc_vlhgc/WriteOnceCompactor.cpp @@ -1613,38 +1613,40 @@ MM_WriteOnceCompactor::flushRememberedSetIntoCardTable(MM_EnvironmentVLHGC *env) void MM_WriteOnceCompactor::tagArrayletLeafRegionsForFixup(MM_EnvironmentVLHGC *env) { - GC_HeapRegionIteratorVLHGC regionIterator(_regionManager); - MM_HeapRegionDescriptorVLHGC *region = NULL; - while (NULL != (region = regionIterator.nextRegion())) { - Assert_MM_false(region->_compactData._shouldFixup); - if (region->isArrayletLeaf()) { - Assert_MM_false(region->_compactData._shouldCompact); - J9IndexableObject *spineObject = region->_allocateData.getSpine(); - Assert_MM_true(NULL != spineObject); - if (_extensions->objectModel.isObjectArray(spineObject)) { - MM_HeapRegionDescriptorVLHGC* spineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(spineObject); - if (MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION != env->_cycleState->_collectionType) { - /* global collection - must fix up */ - region->_compactData._shouldFixup = true; - } else if (spineRegion->_compactData._shouldCompact) { - /* the spine object is moving, so this leaf will need to be fixed up */ - region->_compactData._shouldFixup = true; - } else { - Card * spineCard = _extensions->cardTable->heapAddrToCardAddr(env, spineObject); - switch (*spineCard) { - case CARD_CLEAN: - case CARD_GMP_MUST_SCAN: - /* clean card - no fixup required */ - break; - case CARD_REMEMBERED: - case CARD_REMEMBERED_AND_GMP_SCAN: - case CARD_DIRTY: - case CARD_PGC_MUST_SCAN: - /* the spine object is in a dirty card, so this leaf will need to be fixed up */ + if (!_extensions->isVirtualLargeObjectHeapEnabled) { + GC_HeapRegionIteratorVLHGC regionIterator(_regionManager); + MM_HeapRegionDescriptorVLHGC *region = NULL; + while (NULL != (region = regionIterator.nextRegion())) { + Assert_MM_false(region->_compactData._shouldFixup); + if (region->isArrayletLeaf()) { + 
Assert_MM_false(region->_compactData._shouldCompact); + J9IndexableObject *spineObject = region->_allocateData.getSpine(); + Assert_MM_true(NULL != spineObject); + if (_extensions->objectModel.isObjectArray(spineObject)) { + MM_HeapRegionDescriptorVLHGC* spineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(spineObject); + if (MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION != env->_cycleState->_collectionType) { + /* global collection - must fix up */ + region->_compactData._shouldFixup = true; + } else if (spineRegion->_compactData._shouldCompact) { + /* the spine object is moving, so this leaf will need to be fixed up */ region->_compactData._shouldFixup = true; - break; - default: - Assert_MM_unreachable(); + } else { + Card * spineCard = _extensions->cardTable->heapAddrToCardAddr(env, spineObject); + switch (*spineCard) { + case CARD_CLEAN: + case CARD_GMP_MUST_SCAN: + /* clean card - no fixup required */ + break; + case CARD_REMEMBERED: + case CARD_REMEMBERED_AND_GMP_SCAN: + case CARD_DIRTY: + case CARD_PGC_MUST_SCAN: + /* the spine object is in a dirty card, so this leaf will need to be fixed up */ + region->_compactData._shouldFixup = true; + break; + default: + Assert_MM_unreachable(); + } } } } @@ -1720,7 +1722,7 @@ class MM_WriteOnceCompactFixupRoots : public MM_RootScanner { } #if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) - virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation) { + virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr) { J9IndexableObject *fwdOjectPtr = (J9IndexableObject *)_compactScheme->getForwardingPtr(objectPtr); if ((J9IndexableObject *)objectPtr != fwdOjectPtr) { void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous(fwdOjectPtr); @@ -2022,30 +2024,32 @@ MM_WriteOnceCompactor::recycleFreeRegionsAndFixFreeLists(MM_EnvironmentVLHGC *en void MM_WriteOnceCompactor::fixupArrayletLeafRegionSpinePointers(MM_EnvironmentVLHGC *env) { - GC_HeapRegionIteratorVLHGC regionIterator(_regionManager); - MM_HeapRegionDescriptorVLHGC *region = NULL; + if (!_extensions->isVirtualLargeObjectHeapEnabled) { + GC_HeapRegionIteratorVLHGC regionIterator(_regionManager); + MM_HeapRegionDescriptorVLHGC *region = NULL; + + while (NULL != (region = regionIterator.nextRegion())) { + J9IndexableObject *spine = region->_allocateData.getSpine(); + + if (NULL != spine) { + Assert_MM_true(region->isArrayletLeaf()); + /* see if this spine has moved */ + J9IndexableObject *newSpine = (J9IndexableObject *)getForwardingPtr((J9Object *)spine); + if (newSpine != spine) { + MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(spine); + MM_HeapRegionDescriptorVLHGC *newSpineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(newSpine); - while (NULL != (region = regionIterator.nextRegion())) { - J9IndexableObject *spine = region->_allocateData.getSpine(); - - if (NULL != spine) { - Assert_MM_true(region->isArrayletLeaf()); - /* see if this spine has moved */ - J9IndexableObject *newSpine = (J9IndexableObject *)getForwardingPtr((J9Object *)spine); - if (newSpine != spine) { - MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(spine); - MM_HeapRegionDescriptorVLHGC *newSpineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(newSpine); - - /* Note that the previous spine region may be recycled while we are fixing 
up this region (recycling is done in parallel with - * this method) so we can't assert anything about the state of the previous region. - */ - Assert_MM_true( newSpineRegion->containsObjects() ); - if (spineRegion != newSpineRegion) { - /* we need to move the leaf to another region's leaf list since its spine has moved */ - region->_allocateData.removeFromArrayletLeafList(env); - region->_allocateData.addToArrayletLeafList(newSpineRegion); + /* Note that the previous spine region may be recycled while we are fixing up this region (recycling is done in parallel with + * this method) so we can't assert anything about the state of the previous region. + */ + Assert_MM_true( newSpineRegion->containsObjects() ); + if (spineRegion != newSpineRegion) { + /* we need to move the leaf to another region's leaf list since its spine has moved */ + region->_allocateData.removeFromArrayletLeafList(env); + region->_allocateData.addToArrayletLeafList(newSpineRegion); + } + region->_allocateData.setSpine(newSpine); } - region->_allocateData.setSpine(newSpine); } } } @@ -2063,37 +2067,35 @@ MM_WriteOnceCompactor::fixupArrayletLeafRegionContentsAndObjectLists(MM_Environm /* For off-heap/non-adjacent arrays, the fix up is done when any other * contiguous/adjacent array is fixed up. */ - if (!_extensions->isVirtualLargeObjectHeapEnabled) { - Assert_MM_true(region->isArrayletLeaf()); - J9Object* spineObject = (J9Object*)region->_allocateData.getSpine(); - Assert_MM_true(NULL != spineObject); + Assert_MM_true(region->isArrayletLeaf()); + J9Object* spineObject = (J9Object*)region->_allocateData.getSpine(); + Assert_MM_true(NULL != spineObject); - /* spine objects get fixed up later in fixupArrayletLeafRegionSpinePointers(), after a sync point */ - spineObject = getForwardingPtr(spineObject); - - fj9object_t* slotPointer = (fj9object_t*)region->getLowAddress(); - fj9object_t* endOfLeaf = (fj9object_t*)region->getHighAddress(); - while (slotPointer < endOfLeaf) { - /* TODO: 4096 elements is an arbitrary number */ - fj9object_t* endPointer = GC_SlotObject::addToSlotAddress(slotPointer, 4096, compressed); - if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) { - while (slotPointer < endPointer) { - GC_SlotObject slotObject(_javaVM->omrVM, slotPointer); - J9Object *pointer = slotObject.readReferenceFromSlot(); - if (NULL != pointer) { - J9Object *forwardedPtr = getForwardingPtr(pointer); - slotObject.writeReferenceToSlot(forwardedPtr); - _interRegionRememberedSet->rememberReferenceForCompact(env, spineObject, forwardedPtr); - } - slotPointer = GC_SlotObject::addToSlotAddress(slotPointer, 1, compressed); + /* spine objects get fixed up later in fixupArrayletLeafRegionSpinePointers(), after a sync point */ + spineObject = getForwardingPtr(spineObject); + + fj9object_t* slotPointer = (fj9object_t*)region->getLowAddress(); + fj9object_t* endOfLeaf = (fj9object_t*)region->getHighAddress(); + while (slotPointer < endOfLeaf) { + /* TODO: 4096 elements is an arbitrary number */ + fj9object_t* endPointer = GC_SlotObject::addToSlotAddress(slotPointer, 4096, compressed); + if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) { + while (slotPointer < endPointer) { + GC_SlotObject slotObject(_javaVM->omrVM, slotPointer); + J9Object *pointer = slotObject.readReferenceFromSlot(); + if (NULL != pointer) { + J9Object *forwardedPtr = getForwardingPtr(pointer); + slotObject.writeReferenceToSlot(forwardedPtr); + _interRegionRememberedSet->rememberReferenceForCompact(env, spineObject, forwardedPtr); } + slotPointer = GC_SlotObject::addToSlotAddress(slotPointer, 
1, compressed); } - slotPointer = endPointer; } - - /* prove we didn't miss anything at the end */ - Assert_MM_true(slotPointer == endOfLeaf); + slotPointer = endPointer; } + + /* prove we didn't miss anything at the end */ + Assert_MM_true(slotPointer == endOfLeaf); } else if (region->_compactData._shouldCompact) { if (!region->getUnfinalizedObjectList()->wasEmpty()) { if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) { From 7a7d57298707cea0165b1bc1d6c4696901ad43a3 Mon Sep 17 00:00:00 2001 From: lhu Date: Wed, 14 May 2025 16:25:58 -0400 Subject: [PATCH 3/3] Debug Signed-off-by: lhu --- runtime/gc_base/IndexableObjectAllocationModel.cpp | 5 +++++ runtime/gc_vlhgc/CopyForwardScheme.cpp | 8 ++++++++ runtime/gc_vlhgc/GlobalMarkingScheme.cpp | 4 ++++ runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp | 14 ++++++++++++-- runtime/gc_vlhgc/WriteOnceCompactor.cpp | 4 ++++ 5 files changed, 33 insertions(+), 2 deletions(-) diff --git a/runtime/gc_base/IndexableObjectAllocationModel.cpp b/runtime/gc_base/IndexableObjectAllocationModel.cpp index 3d928c0eece..c44d210de23 100644 --- a/runtime/gc_base/IndexableObjectAllocationModel.cpp +++ b/runtime/gc_base/IndexableObjectAllocationModel.cpp @@ -389,6 +389,11 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm byteAmount = _dataSize; void *virtualLargeObjectHeapAddress = extensions->largeObjectVirtualMemory->allocateSparseFreeEntryAndMapToHeapObject(spine, byteAmount); + + PORT_ACCESS_FROM_ENVIRONMENT(env); + j9tty_printf(PORTLIB, "allocateSparseFreeEntryAndMapToHeapObject spine=%p, byteAmount=%zu\n", spine, byteAmount); + + if (NULL != virtualLargeObjectHeapAddress) { indexableObjectModel->setDataAddrForContiguous((J9IndexableObject *)spine, virtualLargeObjectHeapAddress); } else { diff --git a/runtime/gc_vlhgc/CopyForwardScheme.cpp b/runtime/gc_vlhgc/CopyForwardScheme.cpp index c21d63509aa..2a522fd1437 100644 --- a/runtime/gc_vlhgc/CopyForwardScheme.cpp +++ b/runtime/gc_vlhgc/CopyForwardScheme.cpp @@ -4172,6 +4172,10 @@ class MM_CopyForwardSchemeRootClearer : public MM_RootScanner env->_copyForwardStats._offHeapRegionsCleared += 1; void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)objectPtr); _extensions->largeObjectVirtualMemory->freeSparseRegionAndUnmapFromHeapObject(_env, dataAddr, objectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)objectPtr)); + + PORT_ACCESS_FROM_ENVIRONMENT(env); + j9tty_printf(PORTLIB, "doObjectInVirtualLargeObjectHeap-copyf freeSparseRegionAndUnmapFromHeapObject objectPtr=%p, byteAmount=%zu\n", objectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)objectPtr)); + } else { void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)fwdOjectPtr); if (NULL != dataAddr) { @@ -4184,6 +4188,10 @@ class MM_CopyForwardSchemeRootClearer : public MM_RootScanner _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)fwdOjectPtr), fwdOjectPtr ); + + PORT_ACCESS_FROM_ENVIRONMENT(env); + j9tty_printf(PORTLIB, "doObjectInVirtualLargeObjectHeap-copyf updateSparseDataEntryAfterObjectHasMoved objectPtr=%p, fwdOjectPtr=%p, byteAmount=%zu\n", objectPtr, fwdOjectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)fwdOjectPtr)); + } } } diff --git a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp index fab455a8058..e9a7c0cc5a0 100644 --- a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp +++ 
b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp @@ -1420,6 +1420,10 @@ class MM_GlobalMarkingSchemeRootClearer : public MM_RootScanner void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)objectPtr); if (NULL != dataAddr) { _extensions->largeObjectVirtualMemory->freeSparseRegionAndUnmapFromHeapObject(_env, dataAddr, objectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)objectPtr)); + + PORT_ACCESS_FROM_ENVIRONMENT(env); + j9tty_printf(PORTLIB, "doObjectInVirtualLargeObjectHeap-global freeSparseRegionAndUnmapFromHeapObject objectPtr=%p, byteAmount=%zu\n", objectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)objectPtr)); + } } } diff --git a/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp b/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp index ec7bfdcdbfa..c57df83006b 100644 --- a/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp +++ b/runtime/gc_vlhgc/ParallelSweepSchemeVLHGC.cpp @@ -1029,10 +1029,20 @@ MM_ParallelSweepSchemeVLHGC::recycleFreeRegions(MM_EnvironmentVLHGC *env) sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState); } - while ((arrayletLeafCount > 0) && (NULL != (region = regionIterator.nextRegion()))) { - if (region->isArrayletLeaf()) { + while (NULL != (region = regionIterator.nextRegion())) { + if (region->isArrayletLeaf() && (arrayletLeafCount > 0)) { region->getSubSpace()->recycleRegion(env, region); arrayletLeafCount -= 1; + } else if (!region->_sweepData._alreadySwept && region->hasValidMarkMap()) { + MM_MemoryPool *regionPool = region->getMemoryPool(); + Assert_MM_true(NULL != regionPool); + /* recycle if empty */ + if (region->getSize() == regionPool->getActualFreeMemorySize()) { + Assert_MM_true(NULL == region->_allocateData.getSpine()); + Assert_MM_true(NULL == region->_allocateData.getNextArrayletLeafRegion()); + region->getSubSpace()->recycleRegion(env, region); + } + } } Assert_MM_true(0 == arrayletLeafCount); diff --git a/runtime/gc_vlhgc/WriteOnceCompactor.cpp b/runtime/gc_vlhgc/WriteOnceCompactor.cpp index d0be58ee638..ea5cda93ca0 100644 --- a/runtime/gc_vlhgc/WriteOnceCompactor.cpp +++ b/runtime/gc_vlhgc/WriteOnceCompactor.cpp @@ -1731,6 +1731,10 @@ class MM_WriteOnceCompactFixupRoots : public MM_RootScanner { * failure (reason why this GC cycle is happening). */ _extensions->largeObjectVirtualMemory->updateSparseDataEntryAfterObjectHasMoved(dataAddr, objectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)fwdOjectPtr), fwdOjectPtr); + + PORT_ACCESS_FROM_ENVIRONMENT(_env); + j9tty_printf(PORTLIB, "doObjectInVirtualLargeObjectHeap-Compactor updateSparseDataEntryAfterObjectHasMoved objectPtr=%p, fwdOjectPtr=%p, byteAmount=%zu\n", objectPtr, fwdOjectPtr, _extensions->indexableObjectModel.getDataSizeInBytes((J9IndexableObject *)fwdOjectPtr)); + } } }
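
Note on the leaf accounting used throughout this series: every site that sizes the reservation for an off-heap array computes MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize, i.e. a ceiling division, so a trailing remainder still costs one full leaf region. A minimal standalone sketch of that arithmetic (illustrative names only, not the J9 types):

#include <cassert>
#include <cstdint>

/* ceil(dataSize / leafSize): how many region-sized leaves one off-heap array reserves */
static uintptr_t reservedLeafCount(uintptr_t dataSizeInBytes, uintptr_t arrayletLeafSize)
{
	return (dataSizeInBytes + arrayletLeafSize - 1) / arrayletLeafSize;
}

int main()
{
	const uintptr_t leafSize = 2u * 1024 * 1024; /* e.g. a 2MB region/leaf size */
	assert(5 == reservedLeafCount(10u * 1024 * 1024, leafSize));     /* exact multiple: five leaves */
	assert(6 == reservedLeafCount(10u * 1024 * 1024 + 1, leafSize)); /* one extra byte still reserves a whole leaf */
	return 0;
}

The commented-out roundToFloor variant next to the TODOs would drop the fractional leaf instead; the series currently counts it, which matches how regions are actually reserved.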
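
Note on the recycling shape shared by CopyForwardScheme::updateLeafRegions and MM_ParallelSweepSchemeVLHGC::recycleFreeRegions once spines are decoupled from leaves: phase one walks the object-to-sparse-data table and totals the leaves owed by dead spines, phase two recycles that many arraylet leaf regions and asserts the budget is exhausted. Because leaf regions no longer carry spine back-pointers, any leaf can satisfy the budget, which is what allows contiguous regions to be handed back instead of whichever leaves a particular spine happened to own. A simplified sketch with stand-in types (none of these names are the real J9/OMR APIs):

#include <cassert>
#include <cstdint>
#include <vector>

struct SparseEntry { uintptr_t dataSize; bool spineIsLive; }; /* stand-in for MM_SparseDataTableEntry plus a mark-map query */
struct Region { bool isArrayletLeaf; bool recycled; };        /* stand-in for MM_HeapRegionDescriptorVLHGC */

static void recycleDeadLeafReservations(const std::vector<SparseEntry> &sparseTable, std::vector<Region> &regions, uintptr_t leafSize)
{
	/* phase 1: hash-table walk -- count leaves reserved by dead off-heap spines */
	uintptr_t leafBudget = 0;
	for (const SparseEntry &entry : sparseTable) {
		if (!entry.spineIsLive) {
			leafBudget += (entry.dataSize + leafSize - 1) / leafSize;
		}
	}
	/* phase 2: region walk -- recycle exactly that many leaf regions; their identity no longer matters */
	for (Region &region : regions) {
		if (0 == leafBudget) {
			break;
		}
		if (region.isArrayletLeaf && !region.recycled) {
			region.recycled = true; /* region->getSubSpace()->recycleRegion(env, region) in the patch */
			leafBudget -= 1;
		}
	}
	assert(0 == leafBudget); /* mirrors Assert_MM_true(0 == arrayletLeafCount) */
}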
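
Note on the root-scanner change: scanObjectsInVirtualLargeObjectHeap previously iterated every heap region looking for arraylet leaves with _sparseHeapAllocation set; it now walks the sparse data pool's hash table with hashTableStartDo()/hashTableNextDo() and visits each spine exactly once, so the cost scales with the number of off-heap objects rather than the number of heap regions, and doObjectInVirtualLargeObjectHeap no longer needs the bool *sparseHeapAllocation out-parameter. A toy sketch of that shape, substituting a standard container for the J9HashTable (illustrative only):

#include <cstdint>
#include <cstdio>
#include <unordered_map>

struct Object;                         /* opaque stand-in for J9Object */
struct SparseData { uintptr_t size; }; /* stand-in for the MM_SparseDataTableEntry payload */

template <typename Visitor>
static void scanObjectsInVirtualLargeObjectHeap(const std::unordered_map<Object *, SparseData> &objectToSparseData, Visitor visit)
{
	/* one visit per off-heap spine, independent of how many reserved leaf regions back it */
	for (const auto &entry : objectToSparseData) {
		visit(entry.first); /* stands in for doObjectInVirtualLargeObjectHeap(spineObject) */
	}
}

int main()
{
	std::unordered_map<Object *, SparseData> table; /* empty demo table */
	scanObjectsInVirtualLargeObjectHeap(table, [](Object *spine) { std::printf("visit spine %p\n", (void *)spine); });
	return 0;
}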
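
Note on the clearing/fixup path: with the per-region flag gone, each root clearer makes a three-way decision per sparse-table entry after marking or copying: a dead spine releases its off-heap region (freeSparseRegionAndUnmapFromHeapObject), a moved spine retargets the entry at the forwarded object (updateSparseDataEntryAfterObjectHasMoved), and a live, unmoved spine needs nothing. A compact sketch of that decision under stand-in types (hypothetical names, not the real calls):

#include <cstdint>
#include <cstdio>

struct Object { bool live; Object *forwarded; };                       /* stand-in spine carrying mark/forwarding state */
struct SparseEntry { Object *spine; void *dataAddr; uintptr_t size; }; /* stand-in table entry */

static void clearOrFixupSparseEntry(SparseEntry &entry)
{
	Object *spine = entry.spine;
	if (!spine->live) {
		/* dead spine: the patch calls freeSparseRegionAndUnmapFromHeapObject() here */
		std::printf("free off-heap region %p (%zu bytes)\n", entry.dataAddr, (size_t)entry.size);
	} else if ((nullptr != spine->forwarded) && (spine->forwarded != spine)) {
		/* moved spine: the patch calls updateSparseDataEntryAfterObjectHasMoved() to retarget the entry */
		entry.spine = spine->forwarded;
	}
	/* live and unmoved: nothing to do */
}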