Commit 0ecda6a

Merge branch 'resizable_cache'
2 parents db64841 + 095cd5e

7 files changed: 996 additions, 448 deletions

Diff for: include/nbl/core/alloc/AddressAllocatorBase.h (+20/-16)

@@ -14,9 +14,9 @@ namespace core
 {
 
 #define _NBL_DECLARE_ADDRESS_ALLOCATOR_TYPEDEFS(SIZE_TYPE) \
-    typedef SIZE_TYPE size_type;\
-    typedef typename std::make_signed<size_type>::type difference_type;\
-    typedef uint8_t* ubyte_pointer;\
+    using size_type = SIZE_TYPE;\
+    using difference_type = typename std::make_signed<size_type>::type;\
+    using ubyte_pointer = uint8_t*;\
     static constexpr size_type invalid_address = nbl::core::address_type_traits<size_type>::invalid_address
 
     template<typename CRTP, typename _size_type>
@@ -25,11 +25,7 @@ namespace core
         public:
             _NBL_DECLARE_ADDRESS_ALLOCATOR_TYPEDEFS(_size_type);
 
-            AddressAllocatorBase() :
-                reservedSpace(nullptr), addressOffset(invalid_address), alignOffset(invalid_address),
-                maxRequestableAlignment(invalid_address), combinedOffset(invalid_address) {}
-
-
+            AddressAllocatorBase() { invalidate(); }
 
             inline _size_type max_alignment() const noexcept
             {
@@ -72,9 +68,7 @@ namespace core
                 assert(core::isPoT(maxRequestableAlignment)); // this is not a proper alignment value
 #endif // _NBL_DEBUG
             }
-            AddressAllocatorBase(CRTP&& other, void* newReservedSpc) :
-                reservedSpace(nullptr), addressOffset(invalid_address), alignOffset(invalid_address),
-                maxRequestableAlignment(invalid_address), combinedOffset(invalid_address)
+            AddressAllocatorBase(CRTP&& other, void* newReservedSpc)
             {
                 operator=(std::move(other));
                 reservedSpace = newReservedSpc;
@@ -123,14 +117,24 @@ namespace core
 
             AddressAllocatorBase& operator=(AddressAllocatorBase&& other)
             {
-                std::swap(reservedSpace,other.reservedSpace);
-                std::swap(addressOffset,other.addressOffset);
-                std::swap(alignOffset,other.alignOffset);
-                std::swap(maxRequestableAlignment,other.maxRequestableAlignment);
-                std::swap(combinedOffset,other.combinedOffset);
+                reservedSpace = other.reservedSpace;
+                addressOffset = other.addressOffset;
+                alignOffset = other.alignOffset;
+                maxRequestableAlignment = other.maxRequestableAlignment;
+                combinedOffset = other.combinedOffset;
+                other.invalidate();
                 return *this;
             }
 
+            void invalidate()
+            {
+                reservedSpace = nullptr;
+                addressOffset = invalid_address;
+                alignOffset = invalid_address;
+                maxRequestableAlignment = invalid_address;
+                combinedOffset = invalid_address;
+            }
+
             // pointer to allocator specific state-keeping data, please note that irrBaW address allocators were designed to allocate memory they can't actually access
             void* reservedSpace;
             // automatic offset to be added to generated addresses
Diff for: include/nbl/core/alloc/IteratablePoolAddressAllocator.h (+30/-30)

@@ -21,61 +21,61 @@ namespace core
 template<typename _size_type>
 class IteratablePoolAddressAllocator : protected PoolAddressAllocator<_size_type>
 {
-        using Base = PoolAddressAllocator<_size_type>;
+        using base_t = PoolAddressAllocator<_size_type>;
     protected:
-        inline _size_type* begin() { return &Base::getFreeStack(Base::freeStackCtr); }
-        inline _size_type& getIteratorOffset(_size_type i) {return reinterpret_cast<_size_type*>(Base::reservedSpace)[Base::blockCount+i];}
-        inline const _size_type& getIteratorOffset(_size_type i) const {return reinterpret_cast<const _size_type*>(Base::reservedSpace)[Base::blockCount+i];}
+        inline _size_type* begin() { return &base_t::getFreeStack(base_t::freeStackCtr); }
+        inline _size_type& getIteratorOffset(_size_type i) {return reinterpret_cast<_size_type*>(base_t::reservedSpace)[base_t::blockCount+i];}
+        inline const _size_type& getIteratorOffset(_size_type i) const {return reinterpret_cast<const _size_type*>(base_t::reservedSpace)[base_t::blockCount+i];}
 
     private:
 
        void copySupplementaryState(const IteratablePoolAddressAllocator& other, _size_type newBuffSz)
        {
            std::copy(other.begin(),other.end(),begin());
-            for (auto i=0u; i<std::min(Base::blockCount,other.blockCount); i++)
+            for (auto i=0u; i<std::min(base_t::blockCount,other.blockCount); i++)
                getIteratorOffset(i) = other.getIteratorOffset(i);
        }
        // use [freeStackCtr,blockCount) as the iteratable range
        // use [blockCount,blockCount*2u) to store backreferences to iterators
    public:
        _NBL_DECLARE_ADDRESS_ALLOCATOR_TYPEDEFS(_size_type);
 
-        IteratablePoolAddressAllocator() : Base() {}
+        IteratablePoolAddressAllocator() : base_t() {}
        virtual ~IteratablePoolAddressAllocator() {}
 
        IteratablePoolAddressAllocator(void* reservedSpc, _size_type addressOffsetToApply, _size_type alignOffsetNeeded, _size_type maxAllocatableAlignment, _size_type bufSz, _size_type blockSz) noexcept :
-            Base(reservedSpc,addressOffsetToApply,alignOffsetNeeded,maxAllocatableAlignment,bufSz,blockSz) {}
+            base_t(reservedSpc,addressOffsetToApply,alignOffsetNeeded,maxAllocatableAlignment,bufSz,blockSz) {}
+
 
        //! When resizing we require that the copying of data buffer has already been handled by the user of the address allocator
        template<typename... Args>
-        IteratablePoolAddressAllocator(_size_type newBuffSz, IteratablePoolAddressAllocator&& other, Args&&... args) noexcept :
-            Base(newBuffSz,std::move(other),std::forward<Args>(args)...)
+        IteratablePoolAddressAllocator(_size_type newBuffSz, const IteratablePoolAddressAllocator& other, Args&&... args) noexcept :
+            base_t(newBuffSz, other, std::forward<Args>(args)...)
        {
-            copyState(other, newBuffSz);
+            copySupplementaryState(other, newBuffSz);
        }
-
+
        template<typename... Args>
-        IteratablePoolAddressAllocator(_size_type newBuffSz, const IteratablePoolAddressAllocator& other, Args&&... args) noexcept :
-            Base(newBuffSz,other,std::forward<Args>(args)...)
+        IteratablePoolAddressAllocator(_size_type newBuffSz, IteratablePoolAddressAllocator&& other, Args&&... args) noexcept :
+            IteratablePoolAddressAllocator(newBuffSz,other,std::forward<Args>(args)...)
        {
-            copyState(other, newBuffSz);
+            other.base_t::invalidate();
        }
 
        IteratablePoolAddressAllocator& operator=(IteratablePoolAddressAllocator&& other)
        {
-            Base::operator=(std::move(other));
+            base_t::operator=(std::move(other));
            return *this;
        }
 
-
        //! Functions that actually differ
        inline _size_type alloc_addr(_size_type bytes, _size_type alignment, _size_type hint=0ull) noexcept
        {
-            const _size_type allocatedAddress = Base::alloc_addr(bytes,alignment,hint);
+            const _size_type allocatedAddress = base_t::alloc_addr(bytes,alignment,hint);
            if (allocatedAddress!=invalid_address)
            {
                *begin() = allocatedAddress;
-                getIteratorOffset(addressToBlockID(allocatedAddress)) = Base::freeStackCtr;
+                getIteratorOffset(addressToBlockID(allocatedAddress)) = base_t::freeStackCtr;
            }
            return allocatedAddress;
        }
@@ -84,21 +84,21 @@ class IteratablePoolAddressAllocator : protected PoolAddressAllocator<_size_type
        {
            const _size_type iteratorOffset = getIteratorOffset(addressToBlockID(addr));
 #ifdef _NBL_DEBUG
-            assert(iteratorOffset>=Base::freeStackCtr);
+            assert(iteratorOffset>=base_t::freeStackCtr);
 #endif
            // swap the erased element with either end of the array in the contiguous array
            // not using a swap cause it doesn't matter where the erased element points
            const _size_type otherNodeOffset = *begin();
-            reinterpret_cast<_size_type*>(Base::reservedSpace)[iteratorOffset] = otherNodeOffset;
+            reinterpret_cast<_size_type*>(base_t::reservedSpace)[iteratorOffset] = otherNodeOffset;
            // but I need to patch up the back-link of the moved element
            getIteratorOffset(addressToBlockID(otherNodeOffset)) = iteratorOffset;
 
-            Base::free_addr(addr,bytes);
+            base_t::free_addr(addr,bytes);
        }
 
        // gets a range of all the allocated addresses
-        inline const _size_type* begin() const {return &Base::getFreeStack(Base::freeStackCtr);}
-        inline const _size_type* end() const {return &Base::getFreeStack(Base::blockCount);}
+        inline const _size_type* begin() const {return &base_t::getFreeStack(base_t::freeStackCtr);}
+        inline const _size_type* end() const {return &base_t::getFreeStack(base_t::blockCount);}
 
 
        inline _size_type safe_shrink_size(_size_type sizeBound, _size_type newBuffAlignmentWeCanGuarantee=1u) noexcept
@@ -124,31 +124,31 @@ class IteratablePoolAddressAllocator : protected PoolAddressAllocator<_size_type
 
        inline void reset()
        {
-            Base::reset();
+            base_t::reset();
        }
        inline _size_type max_size() const noexcept
        {
-            return Base::max_size();
+            return base_t::max_size();
        }
        inline _size_type min_size() const noexcept
        {
-            return Base::min_size();
+            return base_t::min_size();
        }
        inline _size_type get_free_size() const noexcept
        {
-            return Base::get_free_size();
+            return base_t::get_free_size();
        }
        inline _size_type get_allocated_size() const noexcept
        {
-            return Base::get_allocated_size();
+            return base_t::get_allocated_size();
        }
        inline _size_type get_total_size() const noexcept
        {
-            return Base::get_total_size();
+            return base_t::get_total_size();
        }
        inline _size_type addressToBlockID(_size_type addr) const noexcept
        {
-            return Base::addressToBlockID(addr);
+            return base_t::addressToBlockID(addr);
        }
 };
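The comments in free_addr describe the bookkeeping this allocator maintains: allocated addresses live in a contiguous range that can be iterated, and a back-reference table maps a block ID to its slot in that range, so erasing is O(1) by moving one element into the hole and patching one back-link. A self-contained sketch of that idea on plain std::vector storage (DenseBlockSet is a made-up illustrative type, not part of the commit):

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative only: a dense array of allocated block IDs plus a back-reference
// table (block ID -> index in the dense array). The iteratable pool allocator keeps
// analogous structures packed into its reservedSpace buffer.
struct DenseBlockSet
{
    explicit DenseBlockSet(uint32_t blockCount) : backRef(blockCount, ~0u) {}

    void insert(uint32_t blockID)
    {
        backRef[blockID] = static_cast<uint32_t>(dense.size());
        dense.push_back(blockID);
    }

    void erase(uint32_t blockID)
    {
        const uint32_t hole = backRef[blockID];
        const uint32_t last = dense.back();
        dense[hole] = last;      // move the last element into the hole...
        backRef[last] = hole;    // ...and patch the moved element's back-link
        dense.pop_back();
        backRef[blockID] = ~0u;
    }

    std::vector<uint32_t> dense;   // contiguous, iteratable range of allocated blocks
    std::vector<uint32_t> backRef; // block ID -> position in `dense`
};

int main()
{
    DenseBlockSet set(8u);
    set.insert(3u); set.insert(5u); set.insert(1u);
    set.erase(5u);                          // O(1): block 1 moves into block 5's slot
    assert(set.dense.size() == 2u && set.dense[1] == 1u && set.backRef[1] == 1u);
    return 0;
}

The allocator itself packs both structures into the single reservedSpace buffer, using [freeStackCtr,blockCount) as the iteratable range and [blockCount,blockCount*2u) for the back-references, but the erase logic is the same swap-with-end plus one back-link patch.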

Diff for: include/nbl/core/alloc/PoolAddressAllocator.h (+47/-28)

@@ -20,32 +20,32 @@ template<typename _size_type>
 class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_size_type>,_size_type>
 {
    private:
-        typedef AddressAllocatorBase<PoolAddressAllocator<_size_type>,_size_type> Base;
+        using base_t = AddressAllocatorBase<PoolAddressAllocator<_size_type>,_size_type>;
 
        void copyState(const PoolAddressAllocator& other, _size_type newBuffSz)
        {
            if (blockCount>other.blockCount)
                freeStackCtr = blockCount-other.blockCount;
 
 #ifdef _NBL_DEBUG
-            assert(Base::checkResize(newBuffSz,Base::alignOffset));
-            assert(freeStackCtr==0u);
+            assert(base_t::checkResize(newBuffSz,base_t::alignOffset));
 #endif // _NBL_DEBUG
 
            for (_size_type i=0u; i<freeStackCtr; i++)
-                getFreeStack(i) = (blockCount-1u-i)*blockSize+Base::combinedOffset;
+                getFreeStack(i) = (blockCount-1u-i)*blockSize+base_t::combinedOffset;
 
            for (_size_type i=0; i<other.freeStackCtr; i++)
            {
-                _size_type freeEntry = other.getFreeStack(i)-other.combinedOffset;
+                _size_type freeEntry = other.getFreeStack(i)-other.base_t::combinedOffset;
                // check in case of shrink
                if (freeEntry<blockCount*blockSize)
-                    getFreeStack(freeStackCtr++) = freeEntry+Base::combinedOffset;
+                    getFreeStack(freeStackCtr++) = freeEntry+base_t::combinedOffset;
            }
        }
+
        inline bool safe_shrink_size_common(_size_type& sizeBound, _size_type newBuffAlignmentWeCanGuarantee) noexcept
        {
-            _size_type capacity = get_total_size()-Base::alignOffset;
+            _size_type capacity = get_total_size()-base_t::alignOffset;
            if (sizeBound>=capacity)
                return false;
 
@@ -71,7 +71,7 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
        virtual ~PoolAddressAllocator() {}
 
        PoolAddressAllocator(void* reservedSpc, _size_type addressOffsetToApply, _size_type alignOffsetNeeded, _size_type maxAllocatableAlignment, size_type bufSz, size_type blockSz) noexcept :
-            Base(reservedSpc,addressOffsetToApply,alignOffsetNeeded,maxAllocatableAlignment),
+            base_t(reservedSpc,addressOffsetToApply,alignOffsetNeeded,maxAllocatableAlignment),
            blockCount((bufSz-alignOffsetNeeded)/blockSz), blockSize(blockSz), freeStackCtr(0u)
        {
            reset();
@@ -80,29 +80,28 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
        //! When resizing we require that the copying of data buffer has already been handled by the user of the address allocator
        template<typename... Args>
        PoolAddressAllocator(_size_type newBuffSz, PoolAddressAllocator&& other, Args&&... args) noexcept :
-            Base(std::move(other),std::forward<Args>(args)...),
-            blockCount((newBuffSz-Base::alignOffset)/other.blockSize), blockSize(other.blockSize), freeStackCtr(0u)
+            base_t(other,std::forward<Args>(args)...),
+            blockCount((newBuffSz-base_t::alignOffset)/other.blockSize), blockSize(other.blockSize), freeStackCtr(0u)
        {
            copyState(other, newBuffSz);
 
-            other.blockCount = invalid_address;
-            other.blockSize = invalid_address;
-            other.freeStackCtr = invalid_address;
+            other.invalidate();
        }
        template<typename... Args>
        PoolAddressAllocator(_size_type newBuffSz, const PoolAddressAllocator& other, Args&&... args) noexcept :
-            Base(other, std::forward<Args>(args)...),
-            blockCount((newBuffSz-Base::alignOffset)/other.blockSize), blockSize(other.blockSize), freeStackCtr(0u)
+            base_t(other, std::forward<Args>(args)...),
+            blockCount((newBuffSz-base_t::alignOffset)/other.blockSize), blockSize(other.blockSize), freeStackCtr(0u)
        {
            copyState(other, newBuffSz);
        }
 
        PoolAddressAllocator& operator=(PoolAddressAllocator&& other)
        {
-            Base::operator=(std::move(other));
-            std::swap(blockCount,other.blockCount);
-            std::swap(blockSize,other.blockSize);
-            std::swap(freeStackCtr,other.freeStackCtr);
+            base_t::operator=(std::move(other));
+            blockCount = other.blockCount;
+            blockSize = other.blockSize;
+            freeStackCtr = other.freeStackCtr;
+            other.invalidateLocal();
            return *this;
        }
 
@@ -118,15 +117,15 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
        inline void free_addr(size_type addr, size_type bytes) noexcept
        {
 #ifdef _NBL_DEBUG
-            assert(addr>=Base::combinedOffset && (addr-Base::combinedOffset)%blockSize==0 && freeStackCtr<blockCount);
+            assert(addr>=base_t::combinedOffset && (addr-base_t::combinedOffset)%blockSize==0 && freeStackCtr<blockCount);
 #endif // _NBL_DEBUG
            getFreeStack(freeStackCtr++) = addr;
        }
 
        inline void reset()
        {
            for (freeStackCtr=0u; freeStackCtr<blockCount; freeStackCtr++)
-                getFreeStack(freeStackCtr) = (blockCount-1u-freeStackCtr)*blockSize+Base::combinedOffset;
+                getFreeStack(freeStackCtr) = (blockCount-1u-freeStackCtr)*blockSize+base_t::combinedOffset;
        }
 
        //! conservative estimate, does not account for space lost to alignment
@@ -151,7 +150,7 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
            for (size_type i=0; i<freeStackCtr; i++)
            {
                auto freeAddr = getFreeStack(i);
-                if (freeAddr<sizeBound+Base::combinedOffset)
+                if (freeAddr<sizeBound+base_t::combinedOffset)
                    continue;
 
                tmpStackCopy[boundedCount++] = freeAddr;
@@ -162,7 +161,7 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
            std::make_heap(tmpStackCopy,tmpStackCopy+boundedCount);
            std::sort_heap(tmpStackCopy,tmpStackCopy+boundedCount);
            // could do sophisticated modified version of std::adjacent_find with a binary search, but F'it
-            size_type endAddr = (blockCount-1u)*blockSize+Base::combinedOffset;
+            size_type endAddr = (blockCount-1u)*blockSize+base_t::combinedOffset;
            size_type i=0u;
            for (;i<boundedCount; i++,endAddr-=blockSize)
            {
@@ -173,7 +172,7 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
                sizeBound -= i*blockSize;
            }
        }
-            return Base::safe_shrink_size(sizeBound,newBuffAlignmentWeCanGuarantee);
+            return base_t::safe_shrink_size(sizeBound,newBuffAlignmentWeCanGuarantee);
        }
 
 
@@ -197,16 +196,36 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
        }
        inline size_type get_total_size() const noexcept
        {
-            return blockCount*blockSize+Base::alignOffset;
+            return blockCount*blockSize+base_t::alignOffset;
        }
 
 
 
        inline size_type addressToBlockID(size_type addr) const noexcept
        {
-            return (addr-Base::combinedOffset)/blockSize;
+            return (addr-base_t::combinedOffset)/blockSize;
        }
    protected:
+
+        /**
+         * @brief Invalidates only fields from this class extension
+         */
+        void invalidateLocal()
+        {
+            blockCount = invalid_address;
+            blockSize = invalid_address;
+            freeStackCtr = invalid_address;
+        }
+
+        /**
+         * @brief Invalidates all fields
+         */
+        void invalidate()
+        {
+            base_t::invalidate();
+            invalidateLocal();
+        }
+
        size_type blockCount;
        size_type blockSize;
        // TODO: free address min-heap and allocated addresses max-heap, packed into the same memory (whatever is not allocated is free)
@@ -215,8 +234,8 @@ class PoolAddressAllocator : public AddressAllocatorBase<PoolAddressAllocator<_s
        // but then should probably have two pool allocators, because doing that changes insertion/removal from O(1) to O(log(N))
        size_type freeStackCtr;
 
-        inline size_type& getFreeStack(size_type i) {return reinterpret_cast<size_type*>(Base::reservedSpace)[i];}
-        inline const size_type& getFreeStack(size_type i) const {return reinterpret_cast<const size_type*>(Base::reservedSpace)[i];}
+        inline size_type& getFreeStack(size_type i) {return reinterpret_cast<size_type*>(base_t::reservedSpace)[i];}
+        inline const size_type& getFreeStack(size_type i) const {return reinterpret_cast<const size_type*>(base_t::reservedSpace)[i];}
 };

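copyState is what makes the pool resizable: freshly added blocks (when growing) go onto the free stack first, then the old allocator's free entries that still fit in the new buffer are carried over. A rough standalone model of that logic, ignoring addressOffset/alignOffset and using made-up names (rebuildFreeStack is not an API from this commit):

#include <cstdint>
#include <iostream>
#include <vector>

// Rough model of PoolAddressAllocator::copyState: when growing, the extra blocks
// become free immediately; when shrinking, old free entries that no longer fit
// in the new buffer are dropped.
std::vector<uint32_t> rebuildFreeStack(
    uint32_t newBlockCount, uint32_t blockSize,
    uint32_t oldBlockCount, const std::vector<uint32_t>& oldFreeStack)
{
    std::vector<uint32_t> freeStack;
    // newly added blocks (grow case) go onto the stack first, highest address first
    for (uint32_t i = 0u; i + oldBlockCount < newBlockCount; i++)
        freeStack.push_back((newBlockCount - 1u - i) * blockSize);
    // carry over old free entries that still fit in the new buffer (shrink case)
    for (const uint32_t freeEntry : oldFreeStack)
        if (freeEntry < newBlockCount * blockSize)
            freeStack.push_back(freeEntry);
    return freeStack;
}

int main()
{
    // grow a 4-block pool with blocks at addresses 16 and 48 free, to 6 blocks of size 16
    const auto grown = rebuildFreeStack(6u, 16u, 4u, {16u, 48u});
    for (const uint32_t addr : grown)
        std::cout << addr << ' ';   // prints: 80 64 16 48
    std::cout << '\n';
    return 0;
}

As the comment in the header notes, the allocator only rebuilds its own bookkeeping; copying the actual data buffer into the resized allocation remains the caller's responsibility.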
