Skip to content

WIP: Defragment reserved regions for off-heap, Step 1 #21817

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions runtime/gc_base/IndexableObjectAllocationModel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -366,11 +366,6 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm
break;
}

if (0 == arrayoidIndex) {
MM_HeapRegionDescriptorVLHGC *firstLeafRegionDescriptor = (MM_HeapRegionDescriptorVLHGC *)extensions->getHeap()->getHeapRegionManager()->tableDescriptorForAddress(leaf);
firstLeafRegionDescriptor->_sparseHeapAllocation = true;
}

/* Disable region for reads and writes, since that'll be done through the contiguous double mapped region */
void *highAddress = (void *)((uintptr_t)leaf + arrayletLeafSize);
bool ret = extensions->heap->decommitMemory(leaf, arrayletLeafSize, leaf, highAddress);
Expand All @@ -394,6 +389,11 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm

byteAmount = _dataSize;
void *virtualLargeObjectHeapAddress = extensions->largeObjectVirtualMemory->allocateSparseFreeEntryAndMapToHeapObject(spine, byteAmount);

PORT_ACCESS_FROM_ENVIRONMENT(env);
j9tty_printf(PORTLIB, "allocateSparseFreeEntryAndMapToHeapObject spine=%p, byteAmount=%zu\n", spine, byteAmount);


if (NULL != virtualLargeObjectHeapAddress) {
indexableObjectModel->setDataAddrForContiguous((J9IndexableObject *)spine, virtualLargeObjectHeapAddress);
} else {
Expand Down
30 changes: 19 additions & 11 deletions runtime/gc_base/RootScanner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@
#include "ParallelDispatcher.hpp"
#include "PointerArrayIterator.hpp"
#include "SlotObject.hpp"
#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
#include "SparseVirtualMemory.hpp"
#include "SparseAddressOrderedFixedSizeDataPool.hpp"
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
#include "StringTable.hpp"
#include "StringTableIncrementalIterator.hpp"
#include "Task.hpp"
Expand Down Expand Up @@ -238,7 +242,7 @@ MM_RootScanner::doStringTableSlot(J9Object **slotPtr, GC_StringTableIterator *st

#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
/**
 * Hook invoked for each live spine object discovered while scanning the virtual
 * large object (off-heap) area during root scanning.
 *
 * Base implementation is intentionally a no-op: there is no heap slot to update
 * for these objects, so doSlot() is not called. Declared virtual (see the class
 * header), so collector-specific subclasses may override this to act on each
 * spine object.
 *
 * @param objectPtr[in] the indexable object's spine (caller asserts non-NULL)
 */
void
MM_RootScanner::doObjectInVirtualLargeObjectHeap(J9Object *objectPtr)
{
/* No need to call doSlot() here since there's nothing to update */
}
Expand Down Expand Up @@ -944,17 +948,21 @@ void
MM_RootScanner::scanObjectsInVirtualLargeObjectHeap(MM_EnvironmentBase *env)
{
if (_singleThread || J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
GC_HeapRegionIteratorVLHGC regionIterator(_extensions->heap->getHeapRegionManager());
MM_HeapRegionDescriptorVLHGC *region = NULL;
PORT_ACCESS_FROM_ENVIRONMENT(env);
j9tty_printf(PORTLIB, "scanObjectsInVirtualLargeObjectHeap _singleThread=%zu, env=%p\n", _singleThread, env);
reportScanningStarted(RootScannerEntity_virtualLargeObjectHeapObjects);
while (NULL != (region = regionIterator.nextRegion())) {
if (region->isArrayletLeaf()) {
if (region->_sparseHeapAllocation) {
J9Object *spineObject = (J9Object *)region->_allocateData.getSpine();
Assert_MM_true(NULL != spineObject);
doObjectInVirtualLargeObjectHeap(spineObject, &region->_sparseHeapAllocation);
}
}

MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
J9HashTableState walkState;

MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState);
j9tty_printf(PORTLIB, "hashTableStartDo sparseDataEntry=%p, env=%p\n", sparseDataEntry, env);
while (NULL != sparseDataEntry) {
J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr;
Assert_MM_true(NULL != spineObject);
doObjectInVirtualLargeObjectHeap(spineObject);
j9tty_printf(PORTLIB, "hashTableNextDo &walkState=%p, spineObject=%p, size=%zu, env=%p\n", &walkState, spineObject, sparseDataEntry->_size, env);
sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState);
}
reportScanningEnded(RootScannerEntity_virtualLargeObjectHeapObjects);
}
Expand Down
2 changes: 1 addition & 1 deletion runtime/gc_base/RootScanner.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -570,7 +570,7 @@ class MM_RootScanner : public MM_BaseVirtual
*
* @param objectPtr[in] indexable object's spine
*/
virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr, bool *sparseHeapAllocation);
virtual void doObjectInVirtualLargeObjectHeap(J9Object *objectPtr);
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */

#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
Expand Down
42 changes: 22 additions & 20 deletions runtime/gc_vlhgc/AllocationContextBalanced.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -355,30 +355,32 @@ MM_AllocationContextBalanced::lockedAllocateArrayletLeaf(MM_EnvironmentBase *env
/* look up the spine region since we need to add this region to its leaf list */
MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC *)_heapRegionManager->tableDescriptorForAddress(spine);
/* the leaf requires a pointer back to the spine object so that it can verify its liveness elsewhere in the collector */
leafAllocateData->setSpine(spine);
freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
/* add the leaf to the spine region's leaf list */
/* We own the lock on the spine region's context when this call is made so we can safely manipulate this list.
* An exceptional scenario: A thread allocates a spine (and possibly a few arraylets), but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
* that age out regions to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
* we already hold is not sufficient any more. We need to additionally acquire common context' common lock, since multiple spines from different ACs could have come into this state,
* and worse multiple spines originally allocated from different ACs may end up in a single common context region.
*/
if (!MM_GCExtensions::getExtensions(env)->isVirtualLargeObjectHeapEnabled) {
leafAllocateData->setSpine(spine);
/* add the leaf to the spine region's leaf list */
/* We own the lock on the spine region's context when this call is made so we can safely manipulate this list.
* An exceptional scenario: A thread allocates a spine (and possibly a few arraylets), but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
* that age out regions to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
* we already hold is not sufficient any more. We need to additionally acquire common context' common lock, since multiple spines from different ACs could have come into this state,
* and worse multiple spines originally allocated from different ACs may end up in a single common context region.
*/

MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext;
if (this != spineContext) {
Assert_MM_true(env->getCommonAllocationContext() == spineContext);
/* The common allocation context is always an instance of AllocationContextBalanced */
((MM_AllocationContextBalanced *)spineContext)->lockCommon();
}
MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext;
if (this != spineContext) {
Assert_MM_true(env->getCommonAllocationContext() == spineContext);
/* The common allocation context is always an instance of AllocationContextBalanced */
((MM_AllocationContextBalanced *)spineContext)->lockCommon();
}

leafAllocateData->addToArrayletLeafList(spineRegion);

if (this != spineContext) {
/* The common allocation context is always an instance of AllocationContextBalanced */
((MM_AllocationContextBalanced *)spineContext)->unlockCommon();
leafAllocateData->addToArrayletLeafList(spineRegion);

if (this != spineContext) {
/* The common allocation context is always an instance of AllocationContextBalanced */
((MM_AllocationContextBalanced *)spineContext)->unlockCommon();
}
}

freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
/* store the base address of the leaf for the memset and the return */
return freeRegionForArrayletLeaf->getLowAddress();
}
Expand Down
75 changes: 73 additions & 2 deletions runtime/gc_vlhgc/CollectionSetDelegate.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,10 @@
#include "MarkMap.hpp"
#include "MemoryPool.hpp"
#include "RegionValidator.hpp"
#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
#include "SparseVirtualMemory.hpp"
#include "SparseAddressOrderedFixedSizeDataPool.hpp"
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */

MM_CollectionSetDelegate::MM_CollectionSetDelegate(MM_EnvironmentBase *env, MM_HeapRegionManager *manager)
: MM_BaseNonVirtual()
Expand Down Expand Up @@ -539,7 +543,7 @@ MM_CollectionSetDelegate::rateOfReturnCalculationBeforeSweep(MM_EnvironmentVLHGC
if(!region->getRememberedSetCardList()->isAccurate()) {
stats->_reclaimStats._regionCountOverflow += 1;
}
} else if(region->isArrayletLeaf()) {
} else if(region->isArrayletLeaf() && !_extensions->isVirtualLargeObjectHeapEnabled) {
MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)region->_allocateData.getSpine());
Assert_MM_true(parentRegion->containsObjects());
SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)];
Expand All @@ -556,6 +560,41 @@ MM_CollectionSetDelegate::rateOfReturnCalculationBeforeSweep(MM_EnvironmentVLHGC
}
}
}

#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
if (_extensions->isVirtualLargeObjectHeapEnabled) {
const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
uintptr_t arrayletLeafCount = 0;
J9HashTableState walkState;

MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState);
while (NULL != sparseDataEntry) {
J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr;
uintptr_t dataSize = sparseDataEntry->_size;
/* TODO: how fraction is counting here? */
// arrayletLeafCount = MM_Math::roundToFloor(arrayletLeafSize, dataSize) / arrayletLeafSize;
arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize;
MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)spineObject);
Assert_MM_true(parentRegion->containsObjects());
SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)];

stats->_reclaimStats._regionCountBefore += arrayletLeafCount;
stats->_reclaimStats._regionCountArrayletLeafBefore += arrayletLeafCount;

if(!parentRegion->_sweepData._alreadySwept) {
stats->_reclaimStats._reclaimableRegionCountBefore += arrayletLeafCount;
stats->_reclaimStats._reclaimableRegionCountArrayletLeafBefore += arrayletLeafCount;
}
if(!parentRegion->getRememberedSetCardList()->isAccurate()) {
stats->_reclaimStats._regionCountArrayletLeafOverflow += arrayletLeafCount;
}

sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState);
}

}
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
}
}

Expand All @@ -580,7 +619,7 @@ MM_CollectionSetDelegate::rateOfReturnCalculationAfterSweep(MM_EnvironmentVLHGC
stats->_reclaimStats._regionBytesFreeAfter += memoryPool->getActualFreeMemorySize();
stats->_reclaimStats._regionDarkMatterAfter += memoryPool->getDarkMatterBytes();
}
} else if(region->isArrayletLeaf()) {
} else if(region->isArrayletLeaf() && !_extensions->isVirtualLargeObjectHeapEnabled) {
MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)region->_allocateData.getSpine());
Assert_MM_true(parentRegion->containsObjects());
SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)];
Expand All @@ -595,6 +634,38 @@ MM_CollectionSetDelegate::rateOfReturnCalculationAfterSweep(MM_EnvironmentVLHGC
}
}

#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
if (_extensions->isVirtualLargeObjectHeapEnabled) {
const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
uintptr_t arrayletLeafCount = 0;
J9HashTableState walkState;

MM_SparseDataTableEntry *sparseDataEntry = (MM_SparseDataTableEntry *)hashTableStartDo(largeObjectVirtualMemory->getSparseDataPool()->getObjectToSparseDataTable(), &walkState);
while (NULL != sparseDataEntry) {
J9Object *spineObject = (J9Object *)sparseDataEntry->_proxyObjPtr;
uintptr_t dataSize = sparseDataEntry->_size;
/* TODO: how fraction is counting here? */
// arrayletLeafCount = MM_Math::roundToFloor(arrayletLeafSize, dataSize) / arrayletLeafSize;
arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, dataSize) / arrayletLeafSize;
MM_HeapRegionDescriptorVLHGC *parentRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->regionDescriptorForAddress((void *)spineObject);
Assert_MM_true(parentRegion->containsObjects());
SetSelectionData *stats = &_setSelectionDataTable[MM_CompactGroupManager::getCompactGroupNumber(env, parentRegion)];

stats->_reclaimStats._regionCountAfter += arrayletLeafCount;
stats->_reclaimStats._regionCountArrayletLeafAfter += arrayletLeafCount;

if(!parentRegion->_sweepData._alreadySwept) {
stats->_reclaimStats._reclaimableRegionCountAfter += arrayletLeafCount;
stats->_reclaimStats._reclaimableRegionCountArrayletLeafAfter += arrayletLeafCount;
}

sparseDataEntry = (MM_SparseDataTableEntry *)hashTableNextDo(&walkState);
}

}
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */

/* We now have an expected change as a result of tracing and sweeping (parts of) the heap. Calculate the rate-of-return (ROR) on
* tracing for age groups where work was done.
* Use a weighted running average to calculate the ROR, where the weight is the % of regions in an age group that we are examining.
Expand Down
Loading