author    | bunnei <bunneidev@gmail.com> | 2022-10-29 22:55:30 +0200
committer | bunnei <bunneidev@gmail.com> | 2022-11-04 05:17:06 +0100
commit    | 0cb9bc12fc80769676c48cb3fc173c4f46b7996e (patch)
tree      | 642107a7cacaabc714d248c07095df7e10cd3213 /src/core/hle
parent    | core: hle: kernel: k_page_group: Add KPageBufferSlabHeap. (diff)
Diffstat (limited to 'src/core/hle')
-rw-r--r-- | src/core/hle/kernel/k_page_heap.cpp | 86
-rw-r--r-- | src/core/hle/kernel/k_page_heap.h   | 39
2 files changed, 108 insertions, 17 deletions
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 5ede60168..7b02c7d8b 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
     return num_free;
 }
 
-PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
     const size_t needed_size = m_blocks[index].GetSize();
 
     for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
-        if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+        if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
            if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                 this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
             }
@@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
     return 0;
 }
 
+PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+    // Get the size and required alignment.
+    const size_t needed_size = num_pages * PageSize;
+    const size_t align_size = align_pages * PageSize;
+
+    // Determine meta-alignment of our desired alignment size.
+    const size_t align_shift = std::countr_zero(align_size);
+
+    // Decide on a block to allocate from.
+    constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
+    {
+        // By default, we'll want to look at all blocks larger than our current one.
+        s32 max_blocks = static_cast<s32>(m_num_blocks);
+
+        // Determine the maximum block we should try to allocate from.
+        size_t possible_alignments = 0;
+        for (s32 i = index; i < max_blocks; ++i) {
+            // Add the possible alignments from blocks at the current size.
+            possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
+                                   m_blocks[i].GetNumFreeBlocks();
+
+            // If there are enough possible alignments, we don't need to look at larger blocks.
+            if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
+                max_blocks = i + 1;
+                break;
+            }
+        }
+
+        // If we have any possible alignments which require a larger block, we need to pick one.
+        if (possible_alignments > 0 && index + 1 < max_blocks) {
+            // Select a random alignment from the possibilities.
+            const size_t rnd = m_rng.GenerateRandom(possible_alignments);
+
+            // Determine which block corresponds to the random alignment we chose.
+            possible_alignments = 0;
+            for (s32 i = index; i < max_blocks; ++i) {
+                // Add the possible alignments from blocks at the current size.
+                possible_alignments +=
+                    (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
+                    m_blocks[i].GetNumFreeBlocks();
+
+                // If the current block gets us to our random choice, use the current block.
+                if (rnd < possible_alignments) {
+                    index = i;
+                    break;
+                }
+            }
+        }
+    }
+
+    // Pop a block from the index we selected.
+    if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
+        // Determine how much size we have left over.
+        if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
+            leftover_size > 0) {
+            // Determine how many valid alignments we can have.
+            const size_t possible_alignments = 1 + (leftover_size >> align_shift);
+
+            // Select a random valid alignment.
+            const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
+
+            // Free memory before the random offset.
+            if (random_offset != 0) {
+                this->Free(addr, random_offset / PageSize);
+            }
+
+            // Advance our block by the random offset.
+            addr += random_offset;
+
+            // Free memory after our allocated block.
+            if (random_offset != leftover_size) {
+                this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
+            }
+        }
+
+        // Return the block we allocated.
+        return addr;
+    }
+
+    return 0;
+}
+
 void KPageHeap::FreeBlock(PAddr block, s32 index) {
     do {
         block = m_blocks[index++].PushBlock(block);
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 0917a8bed..9021edcf7 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -14,13 +14,9 @@
 namespace Kernel {
 
-class KPageHeap final {
+class KPageHeap {
 public:
-    YUZU_NON_COPYABLE(KPageHeap);
-    YUZU_NON_MOVEABLE(KPageHeap);
-
     KPageHeap() = default;
-    ~KPageHeap() = default;
 
     constexpr PAddr GetAddress() const {
         return m_heap_address;
     }
@@ -57,7 +53,20 @@
         m_initial_used_size = m_heap_size - free_size - reserved_size;
     }
 
-    PAddr AllocateBlock(s32 index, bool random);
+    PAddr AllocateBlock(s32 index, bool random) {
+        if (random) {
+            const size_t block_pages = m_blocks[index].GetNumPages();
+            return this->AllocateByRandom(index, block_pages, block_pages);
+        } else {
+            return this->AllocateByLinearSearch(index);
+        }
+    }
+
+    PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+        // TODO: linear search support?
+        return this->AllocateByRandom(index, num_pages, align_pages);
+    }
+
     void Free(PAddr addr, size_t num_pages);
 
     static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -68,7 +77,7 @@ public:
     static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
         const size_t target_pages = std::max(num_pages, align_pages);
         for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
-            if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+            if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return static_cast<s32>(i);
             }
         }
@@ -77,7 +86,7 @@ public:
     static constexpr s32 GetBlockIndex(size_t num_pages) {
         for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
-            if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+            if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return i;
             }
         }
@@ -85,7 +94,7 @@ public:
     }
 
     static constexpr size_t GetBlockSize(size_t index) {
-        return size_t(1) << MemoryBlockPageShifts[index];
+        return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
     }
 
     static constexpr size_t GetBlockNumPages(size_t index) {
@@ -93,13 +102,9 @@
     }
 
 private:
-    class Block final {
+    class Block {
     public:
-        YUZU_NON_COPYABLE(Block);
-        YUZU_NON_MOVEABLE(Block);
-
         Block() = default;
-        ~Block() = default;
 
         constexpr size_t GetShift() const {
             return m_block_shift;
@@ -201,6 +206,9 @@ private:
     };
 
 private:
+    PAddr AllocateByLinearSearch(s32 index);
+    PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
+
     static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
                                                   size_t num_block_shifts);
 
@@ -209,7 +217,8 @@ private:
     size_t m_heap_size{};
     size_t m_initial_used_size{};
     size_t m_num_blocks{};
-    std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+    std::array<Block, NumMemoryBlockPageShifts> m_blocks;
+    KPageBitmap::RandomBitGenerator m_rng;
     std::vector<u64> m_management_data;
 };
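
For orientation, the new AllocateByRandom works in two stages: it first counts how many aligned placements exist across all candidate block sizes and randomly picks which free list to pop from, then randomly slides the allocation within the popped block and returns the unused pages on either side to the heap. The sketch below reproduces the second stage's arithmetic in isolation. It is a minimal illustration, not yuzu code: std::mt19937_64 stands in for KPageBitmap::RandomBitGenerator, and the block and allocation sizes are made-up example values.

```cpp
// Standalone sketch of AllocateByRandom's placement math for one popped block.
// Assumptions: 4 KiB pages, std::mt19937_64 in place of the kernel RNG.
#include <bit>
#include <cstddef>
#include <iostream>
#include <random>

constexpr std::size_t PageSize = 0x1000;

int main() {
    std::mt19937_64 rng{0x1234};

    // Example: a 256 KiB block was popped, but only 64 KiB is needed,
    // aligned to 16 KiB.
    const std::size_t block_size = 256 * 1024;
    const std::size_t needed_size = 64 * 1024;
    const std::size_t align_size = 16 * 1024;
    const std::size_t align_shift = std::countr_zero(align_size);

    // One valid placement at offset 0, plus one per align_size step that
    // still leaves room for the allocation: the 1 + (x >> shift) term.
    const std::size_t leftover_size = block_size - needed_size;
    const std::size_t possible_alignments = 1 + (leftover_size >> align_shift);

    // Choose one placement uniformly at random (GenerateRandom's role).
    std::uniform_int_distribution<std::size_t> dist{0, possible_alignments - 1};
    const std::size_t random_offset = dist(rng) << align_shift;

    // Pages before the chosen offset and after the allocation would be
    // handed back via this->Free(...) in the real function.
    const std::size_t pages_freed_before = random_offset / PageSize;
    const std::size_t pages_freed_after = (leftover_size - random_offset) / PageSize;

    std::cout << "possible placements: " << possible_alignments << '\n'
              << "chosen offset:       0x" << std::hex << random_offset << std::dec << '\n'
              << "pages freed before:  " << pages_freed_before << '\n'
              << "pages freed after:   " << pages_freed_after << '\n';
}
```

With these example sizes there are 13 valid placements (offsets 0, 16 KiB, ..., 192 KiB), so the base address of repeated allocations becomes unpredictable while no memory is wasted: everything outside the chosen window goes straight back to the free lists, exactly as the two Free() calls in the diff do.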