core: hle: kernel: k_page_heap: Refresh.

This commit is contained in:
bunnei 2022-10-29 13:55:30 -07:00
parent 6257461684
commit 0cb9bc12fc
2 changed files with 108 additions and 17 deletions

View File

@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
return num_free; return num_free;
} }
PAddr KPageHeap::AllocateBlock(s32 index, bool random) { PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
const size_t needed_size = m_blocks[index].GetSize(); const size_t needed_size = m_blocks[index].GetSize();
for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
} }
@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
return 0; return 0;
} }
// Allocate num_pages pages with align_pages alignment, choosing both the source block and the
// in-block offset pseudo-randomly (heap-layout randomization). Returns the physical address of
// the allocation, or 0 if no sufficiently large free block exists.
PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
// Get the size and required alignment.
const size_t needed_size = num_pages * PageSize;
const size_t align_size = align_pages * PageSize;
// Determine meta-alignment of our desired alignment size.
// NOTE(review): std::countr_zero(0) returns the bit width, so the shifts below would be UB if
// align_pages were 0; this assumes align_pages is a nonzero power of two — TODO confirm callers.
const size_t align_shift = std::countr_zero(align_size);
// Decide on a block to allocate from.
constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
{
// By default, we'll want to look at all blocks larger than our current one.
s32 max_blocks = static_cast<s32>(m_num_blocks);
// Determine the maximum block we should try to allocate from.
size_t possible_alignments = 0;
for (s32 i = index; i < max_blocks; ++i) {
// Add the possible alignments from blocks at the current size.
// A free block of size S admits 1 + (S - needed_size) / align_size aligned placements.
possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
m_blocks[i].GetNumFreeBlocks();
// If there are enough possible alignments, we don't need to look at larger blocks.
if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
max_blocks = i + 1;
break;
}
}
// If we have any possible alignments which require a larger block, we need to pick one.
if (possible_alignments > 0 && index + 1 < max_blocks) {
// Select a random alignment from the possibilities.
const size_t rnd = m_rng.GenerateRandom(possible_alignments);
// Determine which block corresponds to the random alignment we chose.
// NOTE(review): this second pass must recompute the same running sum as the first, so
// free-block counts must not change between passes — presumably the heap lock is held
// by the caller; verify against call sites.
possible_alignments = 0;
for (s32 i = index; i < max_blocks; ++i) {
// Add the possible alignments from blocks at the current size.
possible_alignments +=
(1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
m_blocks[i].GetNumFreeBlocks();
// If the current block gets us to our random choice, use the current block.
if (rnd < possible_alignments) {
index = i;
break;
}
}
}
}
// Pop a block from the index we selected; PopBlock(true) requests a randomized pop.
if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
// Determine how much size we have left over.
if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
leftover_size > 0) {
// Determine how many valid alignments we can have.
const size_t possible_alignments = 1 + (leftover_size >> align_shift);
// Select a random valid alignment.
const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
// Free memory before the random offset.
if (random_offset != 0) {
this->Free(addr, random_offset / PageSize);
}
// Advance our block by the random offset.
addr += random_offset;
// Free memory after our allocated block, returning the tail to the free lists.
if (random_offset != leftover_size) {
this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
}
}
// Return the block we allocated.
return addr;
}
// No free block at or above `index` could satisfy the request; 0 signals failure.
return 0;
}
void KPageHeap::FreeBlock(PAddr block, s32 index) { void KPageHeap::FreeBlock(PAddr block, s32 index) {
do { do {
block = m_blocks[index++].PushBlock(block); block = m_blocks[index++].PushBlock(block);

View File

@ -14,13 +14,9 @@
namespace Kernel { namespace Kernel {
class KPageHeap final { class KPageHeap {
public: public:
YUZU_NON_COPYABLE(KPageHeap);
YUZU_NON_MOVEABLE(KPageHeap);
KPageHeap() = default; KPageHeap() = default;
~KPageHeap() = default;
constexpr PAddr GetAddress() const { constexpr PAddr GetAddress() const {
return m_heap_address; return m_heap_address;
@ -57,7 +53,20 @@ public:
m_initial_used_size = m_heap_size - free_size - reserved_size; m_initial_used_size = m_heap_size - free_size - reserved_size;
} }
PAddr AllocateBlock(s32 index, bool random); PAddr AllocateBlock(s32 index, bool random) {
if (random) {
const size_t block_pages = m_blocks[index].GetNumPages();
return this->AllocateByRandom(index, block_pages, block_pages);
} else {
return this->AllocateByLinearSearch(index);
}
}
// Allocate num_pages pages with align_pages alignment, starting the search at block list
// `index`. Delegates to the randomized allocator, which also handles the alignment logic.
PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
// TODO: linear search support?
return this->AllocateByRandom(index, num_pages, align_pages);
}
void Free(PAddr addr, size_t num_pages); void Free(PAddr addr, size_t num_pages);
static size_t CalculateManagementOverheadSize(size_t region_size) { static size_t CalculateManagementOverheadSize(size_t region_size) {
@ -68,7 +77,7 @@ public:
static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
const size_t target_pages = std::max(num_pages, align_pages); const size_t target_pages = std::max(num_pages, align_pages);
for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i); return static_cast<s32>(i);
} }
} }
@ -77,7 +86,7 @@ public:
static constexpr s32 GetBlockIndex(size_t num_pages) { static constexpr s32 GetBlockIndex(size_t num_pages) {
for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i; return i;
} }
} }
@ -85,7 +94,7 @@ public:
} }
static constexpr size_t GetBlockSize(size_t index) { static constexpr size_t GetBlockSize(size_t index) {
return size_t(1) << MemoryBlockPageShifts[index]; return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
} }
static constexpr size_t GetBlockNumPages(size_t index) { static constexpr size_t GetBlockNumPages(size_t index) {
@ -93,13 +102,9 @@ public:
} }
private: private:
class Block final { class Block {
public: public:
YUZU_NON_COPYABLE(Block);
YUZU_NON_MOVEABLE(Block);
Block() = default; Block() = default;
~Block() = default;
constexpr size_t GetShift() const { constexpr size_t GetShift() const {
return m_block_shift; return m_block_shift;
@ -201,6 +206,9 @@ private:
}; };
private: private:
PAddr AllocateByLinearSearch(s32 index);
PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
size_t num_block_shifts); size_t num_block_shifts);
@ -209,7 +217,8 @@ private:
size_t m_heap_size{}; size_t m_heap_size{};
size_t m_initial_used_size{}; size_t m_initial_used_size{};
size_t m_num_blocks{}; size_t m_num_blocks{};
std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; std::array<Block, NumMemoryBlockPageShifts> m_blocks;
KPageBitmap::RandomBitGenerator m_rng;
std::vector<u64> m_management_data; std::vector<u64> m_management_data;
}; };