From e63a5459e361d8f33fb9045bc9684d516d087b0c Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 17:27:48 -0700 Subject: [PATCH 01/25] core: hle: kernel: svc_common: Add WaitInfinite & cleanup. --- src/core/hle/kernel/svc_common.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h index 95750c3ebe..85506710ef 100644 --- a/src/core/hle/kernel/svc_common.h +++ b/src/core/hle/kernel/svc_common.h @@ -14,8 +14,11 @@ namespace Kernel::Svc { using namespace Common::Literals; -constexpr s32 ArgumentHandleCountMax = 0x40; -constexpr u32 HandleWaitMask{1u << 30}; +constexpr inline s32 ArgumentHandleCountMax = 0x40; + +constexpr inline u32 HandleWaitMask = 1u << 30; + +constexpr inline s64 WaitInfinite = -1; constexpr inline std::size_t HeapSizeAlignment = 2_MiB; From cb073f95dc6ef07934a47137b121a1328667df6b Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 17:29:14 -0700 Subject: [PATCH 02/25] core: hle: result: Add GetInnerValue and Includes methods. --- src/core/hle/result.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/core/hle/result.h b/src/core/hle/result.h index d67e68bae3..d714dea38e 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h @@ -135,6 +135,14 @@ union Result { [[nodiscard]] constexpr bool IsFailure() const { return !IsSuccess(); } + + [[nodiscard]] constexpr u32 GetInnerValue() const { + return static_cast(module.Value()) | (description << module.bits); + } + + [[nodiscard]] constexpr bool Includes(Result result) const { + return GetInnerValue() == result.GetInnerValue(); + } }; static_assert(std::is_trivial_v); From 47b8160666da8dcb679bb7cabe35a615a1786155 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 17:42:24 -0700 Subject: [PATCH 03/25] core: device_memory: Templatize GetPointer(..). --- src/core/device_memory.h | 10 ++++++---- src/core/hle/kernel/init/init_slab_setup.cpp | 6 +++--- src/core/hle/kernel/k_code_memory.cpp | 2 +- src/core/hle/kernel/k_memory_manager.cpp | 2 +- src/core/hle/kernel/k_page_buffer.cpp | 2 +- src/core/hle/kernel/k_page_table.cpp | 6 +++--- src/core/hle/kernel/k_shared_memory.cpp | 2 +- src/core/hle/kernel/k_shared_memory.h | 4 ++-- src/core/memory.cpp | 6 +++--- 9 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/core/device_memory.h b/src/core/device_memory.h index df61b0c0b6..90510733c8 100644 --- a/src/core/device_memory.h +++ b/src/core/device_memory.h @@ -31,12 +31,14 @@ public: DramMemoryMap::Base; } - u8* GetPointer(PAddr addr) { - return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); + template + T* GetPointer(PAddr addr) { + return reinterpret_cast(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); } - const u8* GetPointer(PAddr addr) const { - return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); + template + const T* GetPointer(PAddr addr) const { + return reinterpret_cast(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); } Common::HostMemory buffer; diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index 9b6b284d08..c84d36c8c2 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -94,8 +94,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd // TODO(bunnei): Fix this once we support the kernel virtual memory layout. 
     if (size > 0) {
-        void* backing_kernel_memory{
-            system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
+        void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
+            TranslateSlabAddrToPhysical(memory_layout, start))};
 
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
@@ -181,7 +181,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
     ASSERT(slab_address != 0);
 
     // Initialize the slabheap.
-    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
+    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<KPageBuffer>(slab_address),
                                     slab_size);
 }
 
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index da57ceb21e..4b1c134d40 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 
     // Clear the memory.
     for (const auto& block : m_page_group.Nodes()) {
-        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+        std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
     // Set remaining tracking members.
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 5b0a9963a8..6467115056 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
 
     // Set all the allocated memory.
     for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
+        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                     block.GetSize());
     }
 
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 1a0bf44393..0c16dded4b 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -12,7 +12,7 @@ namespace Kernel {
 
 KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
     ASSERT(Common::IsAligned(phys_addr, PageSize));
-    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index d975de8449..8ebb753381 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -1648,7 +1648,7 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg.Nodes()) {
-            std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+            std::memset(system.DeviceMemory().GetPointer<void>(it.GetAddress()), heap_fill_value,
                         it.GetSize());
         }
 
@@ -1805,9 +1805,9 @@ bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
 }
 
 bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
-    auto start_ptr = system.Memory().GetPointer(addr);
+    auto start_ptr = system.DeviceMemory().GetPointer<u8>(addr);
     for (u64 offset{}; offset < size; offset += PageSize) {
-        if (start_ptr != system.Memory().GetPointer(addr + offset)) {
+        if (start_ptr != system.DeviceMemory().GetPointer<u8>(addr + offset)) {
             return false;
         }
         start_ptr += PageSize;
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 8ff1545b6c..a039cc591c 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
+    std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
 
     return ResultSuccess;
 }
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 34cb984564..5620c3660a 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -54,7 +54,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     u8* GetPointer(std::size_t offset = 0) {
-        return device_memory->GetPointer(physical_address + offset);
+        return device_memory->GetPointer<u8>(physical_address + offset);
     }
 
     /**
@@ -63,7 +63,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     const u8* GetPointer(std::size_t offset = 0) const {
-        return device_memory->GetPointer(physical_address + offset);
+        return device_memory->GetPointer<u8>(physical_address + offset);
     }
 
     void Finalize() override;
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 2ac792566e..9637cb5b1e 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -65,7 +65,7 @@ struct Memory::Impl {
             return {};
         }
 
-        return system.DeviceMemory().GetPointer(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@@ -75,7 +75,7 @@ struct Memory::Impl {
             return {};
         }
 
-        return system.DeviceMemory().GetPointer(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
     u8 Read8(const VAddr addr) {
@@ -499,7 +499,7 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+                    system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
                 page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
 
                 ASSERT_MSG(page_table.pointers[base].Pointer(),

From 113a5ed68fd2ab0050ebfb520bbd17399cc51298 Mon Sep 17 00:00:00 2001
From: bunnei <bunneidev@gmail.com>
Date: Mon, 5 Sep 2022 17:43:36 -0700
Subject: [PATCH 04/25] core: hle: kernel: svc_types: Add
 SystemThreadPriorityHighest and ProcessState.
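
These values mirror the guest-visible SVC ABI: ProcessState is the value
reported to userland through svcGetProcessInfo, and
SystemThreadPriorityHighest marks the top of the priority range reserved
for system threads (used later in this series when terminating threads).

A minimal illustration (not part of this patch; the enumerators and
values are exactly those added to svc_types.h):

    namespace Kernel::Svc {
    // The raw values are part of the guest-facing ABI and must not change.
    static_assert(ProcessState_Running == 2);
    static_assert(ProcessState_Terminated == 6);
    } // namespace Kernel::Svc

The next patch maps KProcess's internal state enum directly onto these
values.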
--- src/core/hle/kernel/svc_types.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 79e15183aa..bb4f7b004b 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3; constexpr inline s32 LowestThreadPriority = 63; constexpr inline s32 HighestThreadPriority = 0; +constexpr inline s32 SystemThreadPriorityHighest = 16; + +enum ProcessState : u32 { + ProcessState_Created = 0, + ProcessState_CreatedAttached = 1, + ProcessState_Running = 2, + ProcessState_Crashed = 3, + ProcessState_RunningAttached = 4, + ProcessState_Terminating = 5, + ProcessState_Terminated = 6, + ProcessState_DebugBreak = 7, +}; + constexpr inline size_t ThreadLocalRegionSize = 0x200; } // namespace Kernel::Svc From 25dcaf1ecaeb3998a2cb8b03a7aa8a02402e0bad Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 17:47:00 -0700 Subject: [PATCH 05/25] core: hle: kernel: k_process: Change Status -> State. --- src/core/hle/kernel/k_process.cpp | 20 ++++++++-------- src/core/hle/kernel/k_process.h | 40 ++++++++++++------------------- src/core/hle/kernel/svc.cpp | 4 ++-- 3 files changed, 27 insertions(+), 37 deletions(-) diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index d3e99665f8..1d3157a9f4 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -72,7 +72,7 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process->name = std::move(process_name); process->resource_limit = res_limit; - process->status = ProcessStatus::Created; + process->state = State::Created; process->program_id = 0; process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() : kernel.CreateNewUserProcessID(); @@ -289,7 +289,7 @@ Result KProcess::Reset() { KScopedSchedulerLock sl{kernel}; // Validate that we're in a state that we can reset. - R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); + R_UNLESS(state != State::Terminated, ResultInvalidState); R_UNLESS(is_signaled, ResultInvalidState); // Clear signaled. @@ -304,8 +304,8 @@ Result KProcess::SetActivity(ProcessActivity activity) { KScopedSchedulerLock sl{kernel}; // Validate our state. - R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState); - R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); + R_UNLESS(state != State::Terminating, ResultInvalidState); + R_UNLESS(state != State::Terminated, ResultInvalidState); // Either pause or resume. 
if (activity == ProcessActivity::Paused) { @@ -411,13 +411,13 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) { const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError()); - ChangeStatus(ProcessStatus::Running); + ChangeState(State::Running); SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); } void KProcess::PrepareForTermination() { - ChangeStatus(ProcessStatus::Exiting); + ChangeState(State::Terminating); const auto stop_threads = [this](const std::vector& in_thread_list) { for (auto* thread : in_thread_list) { @@ -445,7 +445,7 @@ void KProcess::PrepareForTermination() { main_thread_stack_size + image_size); } - ChangeStatus(ProcessStatus::Exited); + ChangeState(State::Terminated); } void KProcess::Finalize() { @@ -652,12 +652,12 @@ KProcess::KProcess(KernelCore& kernel_) KProcess::~KProcess() = default; -void KProcess::ChangeStatus(ProcessStatus new_status) { - if (status == new_status) { +void KProcess::ChangeState(State new_state) { + if (state == new_state) { return; } - status = new_status; + state = new_state; is_signaled = true; NotifyAvailable(); } diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index d56d73bab1..b1c7da4543 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -45,24 +45,6 @@ enum class MemoryRegion : u16 { BASE = 3, }; -/** - * Indicates the status of a Process instance. - * - * @note These match the values as used by kernel, - * so new entries should only be added if RE - * shows that a new value has been introduced. - */ -enum class ProcessStatus { - Created, - CreatedWithDebuggerAttached, - Running, - WaitingForDebuggerToAttach, - DebuggerAttached, - Exiting, - Exited, - DebugBreak, -}; - enum class ProcessActivity : u32 { Runnable, Paused, @@ -89,6 +71,17 @@ public: explicit KProcess(KernelCore& kernel_); ~KProcess() override; + enum class State { + Created = Svc::ProcessState_Created, + CreatedAttached = Svc::ProcessState_CreatedAttached, + Running = Svc::ProcessState_Running, + Crashed = Svc::ProcessState_Crashed, + RunningAttached = Svc::ProcessState_RunningAttached, + Terminating = Svc::ProcessState_Terminating, + Terminated = Svc::ProcessState_Terminated, + DebugBreak = Svc::ProcessState_DebugBreak, + }; + enum : u64 { /// Lowest allowed process ID for a kernel initial process. InitialKIPIDMin = 1, @@ -163,8 +156,8 @@ public: } /// Gets the current status of the process - ProcessStatus GetStatus() const { - return status; + State GetState() const { + return state; } /// Gets the unique ID that identifies this particular process. @@ -415,10 +408,7 @@ private: pinned_threads[core_id] = nullptr; } - /// Changes the process status. If the status is different - /// from the current process status, then this will trigger - /// a process signal. - void ChangeStatus(ProcessStatus new_status); + void ChangeState(State new_state); /// Allocates the main thread stack for the process, given the stack size in bytes. 
Result AllocateMainThreadStack(std::size_t stack_size); @@ -427,7 +417,7 @@ private: std::unique_ptr page_table; /// Current status of the process - ProcessStatus status{}; + State state{}; /// The ID of this process u64 process_id = 0; diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 1d145ea91e..bac61fd096 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) { auto* current_process = system.Kernel().CurrentProcess(); LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); - ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, + ASSERT_MSG(current_process->GetState() == KProcess::State::Running, "Process has already exited"); system.Exit(); @@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand return ResultInvalidEnumValue; } - *out = static_cast(process->GetStatus()); + *out = static_cast(process->GetState()); return ResultSuccess; } From 345b9e6a08f7ce99bb71f7184157ed0fe22bf27d Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 17:51:50 -0700 Subject: [PATCH 06/25] core: hle: kernel: Add KDynamicPageManager. --- src/core/CMakeLists.txt | 1 + src/core/hle/kernel/k_dynamic_page_manager.h | 136 +++++++++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 src/core/hle/kernel/k_dynamic_page_manager.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index abeb5859b5..2bb4dea6a5 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -190,6 +190,7 @@ add_library(core STATIC hle/kernel/k_code_memory.h hle/kernel/k_condition_variable.cpp hle/kernel/k_condition_variable.h + hle/kernel/k_dynamic_page_manager.h hle/kernel/k_event.cpp hle/kernel/k_event.h hle/kernel/k_handle_table.cpp diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h new file mode 100644 index 0000000000..88d53776ae --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_page_manager.h @@ -0,0 +1,136 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/alignment.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_page_bitmap.h" +#include "core/hle/kernel/k_spin_lock.h" +#include "core/hle/kernel/memory_types.h" +#include "core/hle/kernel/svc_results.h" + +namespace Kernel { + +class KDynamicPageManager { +public: + class PageBuffer { + private: + u8 m_buffer[PageSize]; + }; + static_assert(sizeof(PageBuffer) == PageSize); + +public: + KDynamicPageManager() = default; + + template + T* GetPointer(VAddr addr) { + return reinterpret_cast(m_backing_memory.data() + (addr - m_address)); + } + + template + const T* GetPointer(VAddr addr) const { + return reinterpret_cast(m_backing_memory.data() + (addr - m_address)); + } + + Result Initialize(VAddr addr, size_t sz) { + // We need to have positive size. + R_UNLESS(sz > 0, ResultOutOfMemory); + m_backing_memory.resize(sz); + + // Calculate management overhead. + const size_t management_size = + KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer)); + const size_t allocatable_size = sz - management_size; + + // Set tracking fields. + m_address = addr; + m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer)); + m_count = allocatable_size / sizeof(PageBuffer); + R_UNLESS(m_count > 0, ResultOutOfMemory); + + // Clear the management region. 
+ u64* management_ptr = GetPointer(m_address + allocatable_size); + std::memset(management_ptr, 0, management_size); + + // Initialize the bitmap. + m_page_bitmap.Initialize(management_ptr, m_count); + + // Free the pages to the bitmap. + for (size_t i = 0; i < m_count; i++) { + // Ensure the freed page is all-zero. + std::memset(GetPointer(m_address) + i, 0, PageSize); + + // Set the bit for the free page. + m_page_bitmap.SetBit(i); + } + + return ResultSuccess; + } + + VAddr GetAddress() const { + return m_address; + } + size_t GetSize() const { + return m_size; + } + size_t GetUsed() const { + return m_used; + } + size_t GetPeak() const { + return m_peak; + } + size_t GetCount() const { + return m_count; + } + + PageBuffer* Allocate() { + // Take the lock. + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + KScopedSpinLock lk(m_lock); + + // Find a random free block. + s64 soffset = m_page_bitmap.FindFreeBlock(true); + if (soffset < 0) [[unlikely]] { + return nullptr; + } + + const size_t offset = static_cast(soffset); + + // Update our tracking. + m_page_bitmap.ClearBit(offset); + m_peak = std::max(m_peak, (++m_used)); + + return GetPointer(m_address) + offset; + } + + void Free(PageBuffer* pb) { + // Ensure all pages in the heap are zero. + std::memset(pb, 0, PageSize); + + // Take the lock. + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + KScopedSpinLock lk(m_lock); + + // Set the bit for the free page. + size_t offset = (reinterpret_cast(pb) - m_address) / sizeof(PageBuffer); + m_page_bitmap.SetBit(offset); + + // Decrement our used count. + --m_used; + } + +private: + KSpinLock m_lock; + KPageBitmap m_page_bitmap; + size_t m_used{}; + size_t m_peak{}; + size_t m_count{}; + VAddr m_address{}; + size_t m_size{}; + + // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. + std::vector m_backing_memory; +}; + +} // namespace Kernel From 9ec5f75f43c2ecbfdf52b45f78029b1fd1080658 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 17:53:44 -0700 Subject: [PATCH 07/25] core: hle: kernel: Add KDynamicSlabHeap. 
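
KDynamicSlabHeap layers a typed slab allocator on top of a
KDynamicPageManager: when its free list runs dry, Allocate() pulls a
fresh page from the page allocator and carves it into sizeof(T) objects,
updating the used/peak/count counters atomically.

Hypothetical usage sketch (the address, size, and the choice of
KMemoryBlock as the element type are illustrative only):

    KDynamicPageManager page_manager;
    ASSERT(page_manager.Initialize(address, size).IsSuccess());

    KDynamicSlabHeap<KMemoryBlock> heap;
    heap.Initialize(std::addressof(page_manager), 64);

    // On free-list exhaustion, Allocate() refills from page_manager.
    if (KMemoryBlock* block = heap.Allocate(std::addressof(page_manager))) {
        heap.Free(block);
    }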
---
 src/core/CMakeLists.txt                   |   1 +
 src/core/hle/kernel/k_dynamic_slab_heap.h | 122 ++++++++++++++++++++++
 2 files changed, 123 insertions(+)
 create mode 100644 src/core/hle/kernel/k_dynamic_slab_heap.h

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 2bb4dea6a5..2965717627 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -191,6 +191,7 @@ add_library(core STATIC
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
     hle/kernel/k_dynamic_page_manager.h
+    hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
new file mode 100644
index 0000000000..3a0ddd0500
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -0,0 +1,122 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_funcs.h"
+#include "core/hle/kernel/k_dynamic_page_manager.h"
+#include "core/hle/kernel/k_slab_heap.h"
+
+namespace Kernel {
+
+template <typename T, bool ClearNode = false>
+class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
+    YUZU_NON_COPYABLE(KDynamicSlabHeap);
+    YUZU_NON_MOVEABLE(KDynamicSlabHeap);
+
+public:
+    constexpr KDynamicSlabHeap() = default;
+
+    constexpr VAddr GetAddress() const {
+        return m_address;
+    }
+    constexpr size_t GetSize() const {
+        return m_size;
+    }
+    constexpr size_t GetUsed() const {
+        return m_used.load();
+    }
+    constexpr size_t GetPeak() const {
+        return m_peak.load();
+    }
+    constexpr size_t GetCount() const {
+        return m_count.load();
+    }
+
+    constexpr bool IsInRange(VAddr addr) const {
+        return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
+    }
+
+    void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
+        ASSERT(page_allocator != nullptr);
+
+        // Initialize members.
+        m_address = page_allocator->GetAddress();
+        m_size = page_allocator->GetSize();
+
+        // Initialize the base allocator.
+        KSlabHeapImpl::Initialize();
+
+        // Allocate until we have the correct number of objects.
+        while (m_count.load() < num_objects) {
+            auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
+            ASSERT(allocated != nullptr);
+
+            for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
+                KSlabHeapImpl::Free(allocated + i);
+            }
+
+            m_count += sizeof(PageBuffer) / sizeof(T);
+        }
+    }
+
+    T* Allocate(KDynamicPageManager* page_allocator) {
+        T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());
+
+        // If we successfully allocated and we should clear the node, do so.
+        if constexpr (ClearNode) {
+            if (allocated != nullptr) [[likely]] {
+                reinterpret_cast<Node*>(allocated)->next = nullptr;
+            }
+        }
+
+        // If we fail to allocate, try to get a new page from our next allocator.
+        if (allocated == nullptr) [[unlikely]] {
+            if (page_allocator != nullptr) {
+                allocated = reinterpret_cast<T*>(page_allocator->Allocate());
+                if (allocated != nullptr) {
+                    // If we succeeded in getting a page, free the rest to our slab.
+                    for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
+                        KSlabHeapImpl::Free(allocated + i);
+                    }
+                    m_count += sizeof(PageBuffer) / sizeof(T);
+                }
+            }
+        }
+
+        if (allocated != nullptr) [[likely]] {
+            // Construct the object.
+            std::construct_at(allocated);
+
+            // Update our tracking.
+ const size_t used = ++m_used; + size_t peak = m_peak.load(); + while (peak < used) { + if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { + break; + } + } + } + + return allocated; + } + + void Free(T* t) { + KSlabHeapImpl::Free(t); + --m_used; + } + +private: + using PageBuffer = KDynamicPageManager::PageBuffer; + +private: + std::atomic m_used{}; + std::atomic m_peak{}; + std::atomic m_count{}; + VAddr m_address{}; + size_t m_size{}; +}; + +} // namespace Kernel From d02ccfb15d1f3d4fcdb9feae60ae136fcfd99788 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 17:55:51 -0700 Subject: [PATCH 08/25] core: hle: kernel: Add KDynamicResourceManager. --- src/core/CMakeLists.txt | 1 + .../hle/kernel/k_dynamic_resource_manager.h | 58 +++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 src/core/hle/kernel/k_dynamic_resource_manager.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 2965717627..e7fe675cbf 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -191,6 +191,7 @@ add_library(core STATIC hle/kernel/k_condition_variable.cpp hle/kernel/k_condition_variable.h hle/kernel/k_dynamic_page_manager.h + hle/kernel/k_dynamic_resource_manager.h hle/kernel/k_dynamic_slab_heap.h hle/kernel/k_event.cpp hle/kernel/k_event.h diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h new file mode 100644 index 0000000000..1ce517e8e9 --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_resource_manager.h @@ -0,0 +1,58 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/common_funcs.h" +#include "core/hle/kernel/k_dynamic_slab_heap.h" +#include "core/hle/kernel/k_memory_block.h" + +namespace Kernel { + +template +class KDynamicResourceManager { + YUZU_NON_COPYABLE(KDynamicResourceManager); + YUZU_NON_MOVEABLE(KDynamicResourceManager); + +public: + using DynamicSlabType = KDynamicSlabHeap; + +public: + constexpr KDynamicResourceManager() = default; + + constexpr size_t GetSize() const { + return m_slab_heap->GetSize(); + } + constexpr size_t GetUsed() const { + return m_slab_heap->GetUsed(); + } + constexpr size_t GetPeak() const { + return m_slab_heap->GetPeak(); + } + constexpr size_t GetCount() const { + return m_slab_heap->GetCount(); + } + + void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) { + m_page_allocator = page_allocator; + m_slab_heap = slab_heap; + } + + T* Allocate() const { + return m_slab_heap->Allocate(m_page_allocator); + } + + void Free(T* t) const { + m_slab_heap->Free(t); + } + +private: + KDynamicPageManager* m_page_allocator{}; + DynamicSlabType* m_slab_heap{}; +}; + +class KMemoryBlockSlabManager : public KDynamicResourceManager {}; + +using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; + +} // namespace Kernel From 57a77e9ff4b4a63c106c0ac3448a8f1452b5384c Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 5 Sep 2022 18:19:30 -0700 Subject: [PATCH 09/25] core: hle: kernel: k_thread: Implement thread termination DPC. 
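
Thread termination now goes through a DPC (deferred procedure call):
RequestTerminate() registers the Terminating DPC, unpins and resumes the
thread, raises its priority above every system thread, and sends an
inter-processor interrupt to any other core the thread may be running
on. The core loop then exits the thread once it observes the pending
DPC; condensed from the arm_interface.cpp hunk below:

    // After returning from the JIT, check whether this thread was asked
    // to terminate, and if so exit it on this core.
    if (current_thread->HasDpc() && current_thread->IsTerminationRequested()) {
        current_thread->Exit(); // does not return
    }

Terminate() itself simply waits on the thread as a synchronization
object (with Svc::WaitInfinite) until it reaches Terminated.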
--- src/core/arm/arm_interface.cpp | 8 +++ src/core/hle/kernel/k_interrupt_manager.cpp | 8 +++ src/core/hle/kernel/k_interrupt_manager.h | 4 +- src/core/hle/kernel/k_thread.cpp | 76 +++++++++++++++++++++ src/core/hle/kernel/k_thread.h | 4 ++ 5 files changed, 99 insertions(+), 1 deletion(-) diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index 953d964399..29ba562dce 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp @@ -134,6 +134,14 @@ void ARM_Interface::Run() { } system.ExitDynarmicProfile(); + // If the thread is scheduled for termination, exit the thread. + if (current_thread->HasDpc()) { + if (current_thread->IsTerminationRequested()) { + current_thread->Exit(); + UNREACHABLE(); + } + } + // Notify the debugger and go to sleep if a breakpoint was hit, // or if the thread is unable to continue for any reason. if (Has(hr, breakpoint) || Has(hr, no_execute)) { diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp index 1b577a5b3e..ad73f3eab5 100644 --- a/src/core/hle/kernel/k_interrupt_manager.cpp +++ b/src/core/hle/kernel/k_interrupt_manager.cpp @@ -36,4 +36,12 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) { kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); } +void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) { + for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { + if (core_mask & (1ULL << core_id)) { + kernel.PhysicalCore(core_id).Interrupt(); + } + } +} + } // namespace Kernel::KInterruptManager diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h index f103dfe3f1..803dc92117 100644 --- a/src/core/hle/kernel/k_interrupt_manager.h +++ b/src/core/hle/kernel/k_interrupt_manager.h @@ -11,6 +11,8 @@ class KernelCore; namespace KInterruptManager { void HandleInterrupt(KernelCore& kernel, s32 core_id); -} +void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask); + +} // namespace KInterruptManager } // namespace Kernel diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 174afc80d7..89b32d509e 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -30,6 +30,7 @@ #include "core/hle/kernel/k_worker_task_manager.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" +#include "core/hle/kernel/svc_types.h" #include "core/hle/result.h" #include "core/memory.h" @@ -38,6 +39,9 @@ #endif namespace { + +constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; + static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, u32 entry_point, u32 arg) { context = {}; @@ -1073,6 +1077,78 @@ void KThread::Exit() { UNREACHABLE_MSG("KThread::Exit() would return"); } +Result KThread::Terminate() { + ASSERT(this != GetCurrentThreadPointer(kernel)); + + // Request the thread terminate if it hasn't already. + if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { + // If the thread isn't terminated, wait for it to terminate. 
+ s32 index; + KSynchronizationObject* objects[] = {this}; + R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, + Svc::WaitInfinite)); + } + + return ResultSuccess; +} + +ThreadState KThread::RequestTerminate() { + ASSERT(this != GetCurrentThreadPointer(kernel)); + + KScopedSchedulerLock sl{kernel}; + + // Determine if this is the first termination request. + const bool first_request = [&]() -> bool { + // Perform an atomic compare-and-swap from false to true. + bool expected = false; + return termination_requested.compare_exchange_strong(expected, true); + }(); + + // If this is the first request, start termination procedure. + if (first_request) { + // If the thread is in initialized state, just change state to terminated. + if (this->GetState() == ThreadState::Initialized) { + thread_state = ThreadState::Terminated; + return ThreadState::Terminated; + } + + // Register the terminating dpc. + this->RegisterDpc(DpcFlag::Terminating); + + // If the thread is pinned, unpin it. + if (this->GetStackParameters().is_pinned) { + this->GetOwnerProcess()->UnpinThread(this); + } + + // If the thread is suspended, continue it. + if (this->IsSuspended()) { + suspend_allowed_flags = 0; + this->UpdateState(); + } + + // Change the thread's priority to be higher than any system thread's. + if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) { + this->SetBasePriority(TerminatingThreadPriority); + } + + // If the thread is runnable, send a termination interrupt to other cores. + if (this->GetState() == ThreadState::Runnable) { + if (const u64 core_mask = + physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); + core_mask != 0) { + Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); + } + } + + // Wake up the thread. + if (this->GetState() == ThreadState::Waiting) { + wait_queue->CancelWait(this, ResultTerminationRequested, true); + } + } + + return this->GetState(); +} + Result KThread::Sleep(s64 timeout) { ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); ASSERT(this == GetCurrentThreadPointer(kernel)); diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 9ee20208eb..e2a27d6036 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -180,6 +180,10 @@ public: void Exit(); + Result Terminate(); + + ThreadState RequestTerminate(); + [[nodiscard]] u32 GetSuspendFlags() const { return suspend_allowed_flags & suspend_request_flags; } From 2bb41cffca7e5ec6383a59c513ef9d7e2def5f51 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 9 Sep 2022 21:12:37 -0700 Subject: [PATCH 10/25] core: hle: kernel: k_memory_block_manager: Update. 
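
This rewrites KMemoryBlockManager to match the real kernel's design: the
std::list of blocks becomes an intrusive red-black tree of
slab-allocated KMemoryBlocks, and every mutation goes through a
KMemoryBlockManagerUpdateAllocator, which pre-allocates the at most two
blocks a split can require so that Update()/UpdateLock() cannot fail
mid-operation. A debug CheckState() auditor asserts that the tree stays
sorted and fully coalesced around every update.

Hypothetical call-site sketch (the member names and chosen
state/permission values are illustrative; real call sites are wired up
in later patches of this series):

    Result UpdateExample(VAddr addr, size_t num_pages) {
        // Reserve the blocks this update might need up front.
        Result rc = ResultSuccess;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(rc),
                                                     m_memory_block_slab_manager);
        R_TRY(rc);

        // Apply the new state; reserved-but-unused blocks are returned
        // to the slab when the allocator is destroyed.
        m_memory_block_manager.Update(
            std::addressof(allocator), addr, num_pages, KMemoryState::Normal,
            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
            KMemoryBlockDisableMergeAttribute::Normal,
            KMemoryBlockDisableMergeAttribute::None);
        return ResultSuccess;
    }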
--- .../hle/kernel/k_memory_block_manager.cpp | 441 +++++++++++------- src/core/hle/kernel/k_memory_block_manager.h | 147 ++++-- 2 files changed, 397 insertions(+), 191 deletions(-) diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp index 3ddb9984fa..c908af75a9 100644 --- a/src/core/hle/kernel/k_memory_block_manager.cpp +++ b/src/core/hle/kernel/k_memory_block_manager.cpp @@ -2,221 +2,336 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "core/hle/kernel/k_memory_block_manager.h" -#include "core/hle/kernel/memory_types.h" namespace Kernel { -KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) - : start_addr{start_addr_}, end_addr{end_addr_} { - const u64 num_pages{(end_addr - start_addr) / PageSize}; - memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None); +KMemoryBlockManager::KMemoryBlockManager() = default; + +Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) { + // Allocate a block to encapsulate the address space, insert it into the tree. + KMemoryBlock* start_block = slab_manager->Allocate(); + R_UNLESS(start_block != nullptr, ResultOutOfResource); + + // Set our start and end. + m_start_address = st; + m_end_address = nd; + ASSERT(Common::IsAligned(m_start_address, PageSize)); + ASSERT(Common::IsAligned(m_end_address, PageSize)); + + // Initialize and insert the block. + start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, + KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None); + m_memory_block_tree.insert(*start_block); + + return ResultSuccess; } -KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { - auto node{memory_block_tree.begin()}; - while (node != end()) { - const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; - if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { - return node; - } - node = std::next(node); +void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager, + HostUnmapCallback&& host_unmap_callback) { + // Erase every block until we have none left. 
+ auto it = m_memory_block_tree.begin(); + while (it != m_memory_block_tree.end()) { + KMemoryBlock* block = std::addressof(*it); + it = m_memory_block_tree.erase(it); + slab_manager->Free(block); + host_unmap_callback(block->GetAddress(), block->GetSize()); } - return end(); + + ASSERT(m_memory_block_tree.empty()); } -VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, - std::size_t num_pages, std::size_t align, - std::size_t offset, std::size_t guard_pages) { - if (num_pages == 0) { - return {}; - } +VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages, + size_t num_pages, size_t alignment, size_t offset, + size_t guard_pages) const { + if (num_pages > 0) { + const VAddr region_end = region_start + region_num_pages * PageSize; + const VAddr region_last = region_end - 1; + for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); + it++) { + const KMemoryInfo info = it->GetMemoryInfo(); + if (region_last < info.GetAddress()) { + break; + } + if (info.m_state != KMemoryState::Free) { + continue; + } - const VAddr region_end{region_start + region_num_pages * PageSize}; - const VAddr region_last{region_end - 1}; - for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { - const auto info{it->GetMemoryInfo()}; - if (region_last < info.GetAddress()) { - break; - } + VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress(); + area += guard_pages * PageSize; - if (info.state != KMemoryState::Free) { - continue; - } + const VAddr offset_area = Common::AlignDown(area, alignment) + offset; + area = (area <= offset_area) ? offset_area : offset_area + alignment; - VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; - area += guard_pages * PageSize; + const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize; + const VAddr area_last = area_end - 1; - const VAddr offset_area{Common::AlignDown(area, align) + offset}; - area = (area <= offset_area) ? offset_area : offset_area + align; - - const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; - const VAddr area_last{area_end - 1}; - - if (info.GetAddress() <= area && area < area_last && area_last <= region_last && - area_last <= info.GetLastAddress()) { - return area; + if (info.GetAddress() <= area && area < area_last && area_last <= region_last && + area_last <= info.GetLastAddress()) { + return area; + } } } return {}; } -void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, - KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, - KMemoryState state, KMemoryPermission perm, - KMemoryAttribute attribute) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; +void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, + VAddr address, size_t num_pages) { + // Find the iterator now that we've updated. 
+ iterator it = this->FindIterator(address); + if (address != m_start_address) { + it--; + } - prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped; - - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; - - if (addr < cur_end_addr && cur_addr < update_end_addr) { - if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { - node = next_node; - continue; - } - - iterator new_node{node}; - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); - } - - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); - } - - new_node->Update(state, perm, attribute); - - MergeAdjacent(new_node, next_node); - } - - if (cur_end_addr - 1 >= update_end_addr - 1) { + // Coalesce blocks that we can. + while (true) { + iterator prev = it++; + if (it == m_memory_block_tree.end()) { break; } - node = next_node; - } -} - -void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state, - KMemoryPermission perm, KMemoryAttribute attribute) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; - - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; - - if (addr < cur_end_addr && cur_addr < update_end_addr) { - iterator new_node{node}; - - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); - } - - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); - } - - new_node->Update(state, perm, attribute); - - MergeAdjacent(new_node, next_node); + if (prev->CanMergeWith(*it)) { + KMemoryBlock* block = std::addressof(*it); + m_memory_block_tree.erase(it); + prev->Add(*block); + allocator->Free(block); + it = prev; } - if (cur_end_addr - 1 >= update_end_addr - 1) { + if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { break; } - - node = next_node; } } -void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, +void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr, + KMemoryBlockDisableMergeAttribute set_disable_attr, + KMemoryBlockDisableMergeAttribute clear_disable_attr) { + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); + ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == + KMemoryAttribute::None); + + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); + + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); + if (it->HasProperties(state, perm, attr)) { + // If we already have the right properties, just advance. 
+ if (cur_address + remaining_size < cur_info.GetEndAddress()) { + remaining_pages = 0; + cur_address += remaining_size; + } else { + remaining_pages = + (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; + cur_address = cur_info.GetEndAddress(); + } + } else { + // If we need to, create a new block before and insert it. + if (cur_info.GetAddress() != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; + + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); + } + + // If we need to, create a new block after and insert it. + if (cur_info.GetSize() > remaining_size) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); + + cur_info = it->GetMemoryInfo(); + } + + // Update block state. + it->Update(state, perm, attr, cur_address == address, static_cast(set_disable_attr), + static_cast(clear_disable_attr)); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + } + it++; + } + + this->CoalesceForUpdate(allocator, address, num_pages); +} + +void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, + VAddr address, size_t num_pages, KMemoryState test_state, + KMemoryPermission test_perm, KMemoryAttribute test_attr, + KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr) { + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); + ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == + KMemoryAttribute::None); + + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); + + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); + if (it->HasProperties(test_state, test_perm, test_attr) && + !it->HasProperties(state, perm, attr)) { + // If we need to, create a new block before and insert it. + if (cur_info.GetAddress() != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; + + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); + } + + // If we need to, create a new block after and insert it. + if (cur_info.GetSize() > remaining_size) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); + + cur_info = it->GetMemoryInfo(); + } + + // Update block state. + it->Update(state, perm, attr, false, 0, 0); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + } else { + // If we already have the right properties, just advance. 
+ if (cur_address + remaining_size < cur_info.GetEndAddress()) { + remaining_pages = 0; + cur_address += remaining_size; + } else { + remaining_pages = + (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; + cur_address = cur_info.GetEndAddress(); + } + } + it++; + } + + this->CoalesceForUpdate(allocator, address, num_pages); +} + +void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); - if (addr < cur_end_addr && cur_addr < update_end_addr) { - iterator new_node{node}; + const VAddr end_address = address + (num_pages * PageSize); - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); - } + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); - } + // If we need to, create a new block before and insert it. + if (cur_info.m_address != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); - lock_func(new_node, perm); + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; - MergeAdjacent(new_node, next_node); + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); } - if (cur_end_addr - 1 >= update_end_addr - 1) { - break; + if (cur_info.GetSize() > remaining_size) { + // If we need to, create a new block after and insert it. + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); + + cur_info = it->GetMemoryInfo(); } - node = next_node; + // Call the locked update function. + (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, + cur_info.GetEndAddress() == end_address); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + it++; } + + this->CoalesceForUpdate(allocator, address, num_pages); } -void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { - const_iterator it{FindIterator(start)}; - KMemoryInfo info{}; - do { - info = it->GetMemoryInfo(); - func(info); - it = std::next(it); - } while (info.addr + info.size - 1 < end - 1 && it != cend()); -} +// Debug. +bool KMemoryBlockManager::CheckState() const { + // Loop over every block, ensuring that we are sorted and coalesced. 
+ auto it = m_memory_block_tree.cbegin(); + auto prev = it++; + while (it != m_memory_block_tree.cend()) { + const KMemoryInfo prev_info = prev->GetMemoryInfo(); + const KMemoryInfo cur_info = it->GetMemoryInfo(); -void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) { - KMemoryBlock* block{&(*it)}; - - auto EraseIt = [&](const iterator it_to_erase) { - if (next_it == it_to_erase) { - next_it = std::next(next_it); + // Sequential blocks which can be merged should be merged. + if (prev->CanMergeWith(*it)) { + return false; } - memory_block_tree.erase(it_to_erase); - }; - if (it != memory_block_tree.begin()) { - KMemoryBlock* prev{&(*std::prev(it))}; + // Sequential blocks should be sequential. + if (prev_info.GetEndAddress() != cur_info.GetAddress()) { + return false; + } - if (block->HasSameProperties(*prev)) { - const iterator prev_it{std::prev(it)}; + // If the block is ipc locked, it must have a count. + if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && + cur_info.m_ipc_lock_count == 0) { + return false; + } - prev->Add(block->GetNumPages()); - EraseIt(it); + // If the block is device shared, it must have a count. + if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && + cur_info.m_device_use_count == 0) { + return false; + } - it = prev_it; - block = prev; + // Advance the iterator. + prev = it++; + } + + // Our loop will miss checking the last block, potentially, so check it. + if (prev != m_memory_block_tree.cend()) { + const KMemoryInfo prev_info = prev->GetMemoryInfo(); + // If the block is ipc locked, it must have a count. + if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && + prev_info.m_ipc_lock_count == 0) { + return false; + } + + // If the block is device shared, it must have a count. + if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && + prev_info.m_device_use_count == 0) { + return false; } } - if (it != cend()) { - const KMemoryBlock* const next{&(*std::next(it))}; - - if (block->HasSameProperties(*next)) { - block->Add(next->GetNumPages()); - EraseIt(std::next(it)); - } - } + return true; } } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h index e14741b898..b4ee4e319d 100644 --- a/src/core/hle/kernel/k_memory_block_manager.h +++ b/src/core/hle/kernel/k_memory_block_manager.h @@ -4,63 +4,154 @@ #pragma once #include -#include +#include "common/common_funcs.h" #include "common/common_types.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_memory_block.h" namespace Kernel { +class KMemoryBlockManagerUpdateAllocator { +public: + static constexpr size_t MaxBlocks = 2; + +private: + KMemoryBlock* m_blocks[MaxBlocks]; + size_t m_index; + KMemoryBlockSlabManager* m_slab_manager; + +private: + Result Initialize(size_t num_blocks) { + // Check num blocks. + ASSERT(num_blocks <= MaxBlocks); + + // Set index. + m_index = MaxBlocks - num_blocks; + + // Allocate the blocks. 
+        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
+            m_blocks[m_index + i] = m_slab_manager->Allocate();
+            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
+        }
+
+        return ResultSuccess;
+    }
+
+public:
+    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
+                                       size_t num_blocks = MaxBlocks)
+        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
+        *out_result = this->Initialize(num_blocks);
+    }
+
+    ~KMemoryBlockManagerUpdateAllocator() {
+        for (const auto& block : m_blocks) {
+            if (block != nullptr) {
+                m_slab_manager->Free(block);
+            }
+        }
+    }
+
+    KMemoryBlock* Allocate() {
+        ASSERT(m_index < MaxBlocks);
+        ASSERT(m_blocks[m_index] != nullptr);
+        KMemoryBlock* block = nullptr;
+        std::swap(block, m_blocks[m_index++]);
+        return block;
+    }
+
+    void Free(KMemoryBlock* block) {
+        ASSERT(m_index <= MaxBlocks);
+        ASSERT(block != nullptr);
+        if (m_index == 0) {
+            m_slab_manager->Free(block);
+        } else {
+            m_blocks[--m_index] = block;
+        }
+    }
+};
+
 class KMemoryBlockManager final {
 public:
-    using MemoryBlockTree = std::list<KMemoryBlock>;
+    using MemoryBlockTree =
+        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
+    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
+                                                           bool right);
     using iterator = MemoryBlockTree::iterator;
     using const_iterator = MemoryBlockTree::const_iterator;
 
 public:
-    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
+    KMemoryBlockManager();
+
+    using HostUnmapCallback = std::function<void(VAddr, u64)>;
+
+    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
 
     iterator end() {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator end() const {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator cend() const {
-        return memory_block_tree.cend();
+        return m_memory_block_tree.cend();
     }
 
-    iterator FindIterator(VAddr addr);
+    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                       size_t alignment, size_t offset, size_t guard_pages) const;
 
-    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
-                       std::size_t align, std::size_t offset, std::size_t guard_pages);
+    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+                KMemoryBlockDisableMergeAttribute set_disable_attr,
+                KMemoryBlockDisableMergeAttribute clear_disable_attr);
+    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
-                KMemoryPermission perm, KMemoryAttribute attribute);
+    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
+                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
+                       KMemoryAttribute attr);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                KMemoryPermission perm = KMemoryPermission::None,
-                KMemoryAttribute attribute = KMemoryAttribute::None);
+    iterator FindIterator(VAddr address) const {
+        return m_memory_block_tree.find(KMemoryBlock(
+            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
+    }
 
-    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
-    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
-                    KMemoryPermission perm);
+    const KMemoryBlock* FindBlock(VAddr address) const {
+        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
+            return std::addressof(*it);
+        }
 
-    using IterateFunc = std::function<void(const KMemoryInfo&)>;
-    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
+        return nullptr;
+    }
 
-    KMemoryBlock& FindBlock(VAddr addr) {
-        return *FindIterator(addr);
+    // Debug.
+    bool CheckState() const;
+
+private:
+    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                           size_t num_pages);
+
+    MemoryBlockTree m_memory_block_tree;
+    VAddr m_start_address{};
+    VAddr m_end_address{};
+};
+
+class KScopedMemoryBlockManagerAuditor {
+public:
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
+        ASSERT(m_manager->CheckState());
+    }
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
+        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
+    ~KScopedMemoryBlockManagerAuditor() {
+        ASSERT(m_manager->CheckState());
     }
 
 private:
-    void MergeAdjacent(iterator it, iterator& next_it);
-
-    [[maybe_unused]] const VAddr start_addr;
-    [[maybe_unused]] const VAddr end_addr;
-
-    MemoryBlockTree memory_block_tree;
+    KMemoryBlockManager* m_manager;
 };
 
 } // namespace Kernel

From 58eb6953d1417d667af36461ac8391e005f49457 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 9 Sep 2022 21:17:52 -0700
Subject: [PATCH 11/25] core: hle: kernel: k_memory_block: Update.

---
 src/core/hle/kernel/k_memory_block.h | 514 ++++++++++++++++++++-------
 src/core/hle/service/ldr/ldr.cpp     |   4 +-
 2 files changed, 395 insertions(+), 123 deletions(-)

diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 18df1f836a..9444f6bd28 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -6,6 +6,7 @@
 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
 #include "core/hle/kernel/memory_types.h"
 #include "core/hle/kernel/svc_types.h"
 
@@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
 enum class KMemoryAttribute : u8 {
     None = 0x00,
-    Mask = 0x7F,
-    All = Mask,
-    DontCareMask = 0x80,
+    All = 0xFF,
+    UserMask = All,
 
     Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
     IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
     DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
     Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
 
     SetMask = Uncached,
-
-    IpcAndDeviceMapped = IpcLocked | DeviceShared,
-    LockedAndIpcLocked = Locked | IpcLocked,
-    DeviceSharedAndUncached = DeviceShared | Uncached
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
 
-static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
-               static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
+enum class KMemoryBlockDisableMergeAttribute : u8 {
+    None = 0,
+    Normal = (1u << 0),
+    DeviceLeft = (1u << 1),
+    IpcLeft = (1u << 2),
+    Locked = (1u << 3),
+    DeviceRight = (1u << 4),
+
+    AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
+    AllRight = DeviceRight,
+};
+DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
 
 struct KMemoryInfo {
-    VAddr addr{};
-    std::size_t size{};
-    KMemoryState state{};
-    KMemoryPermission perm{};
-    KMemoryAttribute attribute{};
-    KMemoryPermission original_perm{};
-    u16 ipc_lock_count{};
-    u16 device_use_count{};
+    uintptr_t m_address;
+    size_t m_size;
+    KMemoryState m_state;
+    u16 m_device_disable_merge_left_count;
+    u16 m_device_disable_merge_right_count;
+    u16 m_ipc_lock_count;
+    u16 m_device_use_count;
+    u16 m_ipc_disable_merge_count;
+    KMemoryPermission m_permission;
+    KMemoryAttribute m_attribute;
+    KMemoryPermission m_original_permission;
+    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
 
     constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
         return {
-            addr,
-            size,
-            static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
-            static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
-            static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
-            ipc_lock_count,
-            device_use_count,
+            .addr = m_address,
+            .size = m_size,
+            .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
+            .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
+            .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
+            .ipc_refcount = m_ipc_lock_count,
+            .device_refcount = m_device_use_count,
+            .padding = {},
         };
     }
 
-    constexpr VAddr GetAddress() const {
-        return addr;
+    constexpr uintptr_t GetAddress() const {
+        return m_address;
     }
-    constexpr std::size_t GetSize() const {
-        return size;
+
+    constexpr size_t GetSize() const {
+        return m_size;
     }
-    constexpr std::size_t GetNumPages() const {
-        return GetSize() / PageSize;
+
+    constexpr size_t GetNumPages() const {
+        return this->GetSize() / PageSize;
     }
-    constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
+
+    constexpr uintptr_t GetEndAddress() const {
+        return this->GetAddress() + this->GetSize();
     }
-    constexpr VAddr GetLastAddress() const {
-        return GetEndAddress() - 1;
+
+    constexpr uintptr_t GetLastAddress() const {
+        return this->GetEndAddress() - 1;
     }
+
+    constexpr u16 GetIpcLockCount() const {
+        return m_ipc_lock_count;
+    }
+
+    constexpr u16 GetIpcDisableMergeCount() const {
+        return m_ipc_disable_merge_count;
+    }
+
     constexpr KMemoryState GetState() const {
-        return state;
-    }
-    constexpr KMemoryAttribute GetAttribute() const {
-        return attribute;
+        return m_state;
     }
+
     constexpr KMemoryPermission GetPermission() const {
-        return perm;
+        return m_permission;
+    }
+
+    constexpr KMemoryPermission GetOriginalPermission() const {
+        return m_original_permission;
+    }
+
+    constexpr KMemoryAttribute GetAttribute() const {
+        return m_attribute;
+    }
+
+    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
+        return m_disable_merge_attribute;
     }
 };
 
-class KMemoryBlock final {
-    friend class KMemoryBlockManager;
-
+class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
 private:
-    VAddr addr{};
-    std::size_t num_pages{};
-    KMemoryState state{KMemoryState::None};
-    u16 ipc_lock_count{};
-    u16 device_use_count{};
-    KMemoryPermission perm{KMemoryPermission::None};
-    KMemoryPermission original_perm{KMemoryPermission::None};
-    KMemoryAttribute attribute{KMemoryAttribute::None};
+    u16 m_device_disable_merge_left_count;
+    u16 m_device_disable_merge_right_count;
+    VAddr m_address;
+    size_t m_num_pages;
+    KMemoryState m_memory_state;
+    u16 m_ipc_lock_count;
+    u16 m_device_use_count;
+    u16 m_ipc_disable_merge_count;
+    KMemoryPermission m_permission;
+    KMemoryPermission m_original_permission;
+    KMemoryAttribute m_attribute;
+    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
 
 public:
     static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +297,349 @@ public:
     }
 
 public:
-    constexpr KMemoryBlock() = default;
-    constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
-                           KMemoryPermission perm_, KMemoryAttribute attribute_)
-        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
-
     constexpr VAddr GetAddress() const {
-        return addr;
+        return m_address;
     }
 
-    constexpr std::size_t GetNumPages() const {
-        return num_pages;
+    constexpr size_t GetNumPages() const {
+        return m_num_pages;
     }
 
-    constexpr std::size_t GetSize() const {
-        return GetNumPages() * PageSize;
+    constexpr size_t GetSize() const {
+        return this->GetNumPages() * PageSize;
     }
 
     constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
+        return this->GetAddress() + this->GetSize();
     }
 
     constexpr VAddr GetLastAddress() const {
-        return GetEndAddress() - 1;
+        return this->GetEndAddress() - 1;
+    }
+
+    constexpr u16 GetIpcLockCount() const {
+        return m_ipc_lock_count;
+    }
+
+    constexpr u16 GetIpcDisableMergeCount() const {
+        return m_ipc_disable_merge_count;
+    }
+
+    constexpr KMemoryPermission GetPermission() const {
+        return m_permission;
+    }
+
+    constexpr KMemoryPermission GetOriginalPermission() const {
+        return m_original_permission;
+    }
+
+    constexpr KMemoryAttribute GetAttribute() const {
+        return m_attribute;
     }
 
     constexpr KMemoryInfo GetMemoryInfo() const {
         return {
-            GetAddress(), GetSize(), state, perm,
-            attribute, original_perm, ipc_lock_count, device_use_count,
+            .m_address = this->GetAddress(),
+            .m_size = this->GetSize(),
+            .m_state = m_memory_state,
+            .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
+            .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
+            .m_ipc_lock_count = m_ipc_lock_count,
+            .m_device_use_count = m_device_use_count,
+            .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
+            .m_permission = m_permission,
+            .m_attribute = m_attribute,
+            .m_original_permission = m_original_permission,
+            .m_disable_merge_attribute = m_disable_merge_attribute,
         };
     }
 
-    void ShareToDevice(KMemoryPermission /*new_perm*/) {
-        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
-               device_use_count == 0);
-        attribute |= KMemoryAttribute::DeviceShared;
-        const u16 new_use_count{++device_use_count};
-        ASSERT(new_use_count > 0);
+public:
+    explicit KMemoryBlock() = default;
+
+    constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+                           KMemoryAttribute attr)
+        : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
+          m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
+          m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
+          m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
+          m_original_permission(KMemoryPermission::None), m_attribute(attr),
+          m_disable_merge_attribute() {}
+
+    constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+                              KMemoryAttribute attr) {
+        m_device_disable_merge_left_count = 0;
+        m_device_disable_merge_right_count = 0;
+        m_address = addr;
+        m_num_pages = np;
+        m_memory_state = ms;
+        m_ipc_lock_count = 0;
+        m_device_use_count = 0;
+        m_permission = p;
+        m_original_permission = KMemoryPermission::None;
+        m_attribute = attr;
+        m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
     }
 
-    void UnshareToDevice(KMemoryPermission /*new_perm*/) {
-        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
-        const u16 prev_use_count{device_use_count--};
-        ASSERT(prev_use_count > 0);
-        if (prev_use_count == 1) {
-            attribute &= ~KMemoryAttribute::DeviceShared;
-        }
-    }
-
-private:
     constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
-        constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
-                                                       KMemoryAttribute::IpcLocked |
-                                                       KMemoryAttribute::DeviceShared};
-        return state == s && perm == p &&
-               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
+        constexpr auto AttributeIgnoreMask =
+            KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+        return m_memory_state == s && m_permission == p &&
+               (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
     }
 
     constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
-        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
-               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
-               device_use_count == rhs.device_use_count;
+        return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
+               m_original_permission == rhs.m_original_permission &&
+               m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
+               m_device_use_count == rhs.m_device_use_count;
     }
 
-    constexpr bool Contains(VAddr start) const {
-        return GetAddress() <= start && start <= GetEndAddress();
+    constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
+        return this->HasSameProperties(rhs) &&
+               (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
+                   KMemoryBlockDisableMergeAttribute::None &&
+               (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
+                   KMemoryBlockDisableMergeAttribute::None;
     }
 
-    constexpr void Add(std::size_t count) {
-        ASSERT(count > 0);
-        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
-
-        num_pages += count;
+    constexpr bool Contains(VAddr addr) const {
+        return this->GetAddress() <= addr && addr <= this->GetEndAddress();
     }
 
-    constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
-                          KMemoryAttribute new_attribute) {
-        ASSERT(original_perm == KMemoryPermission::None);
-        ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
+    constexpr void Add(const KMemoryBlock& added_block) {
+        ASSERT(added_block.GetNumPages() > 0);
+        ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
+               this->GetEndAddress() + added_block.GetSize() - 1);
 
-        state = new_state;
-        perm = new_perm;
-
-        attribute = static_cast<KMemoryAttribute>(
-            new_attribute |
-            (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+        m_num_pages += added_block.GetNumPages();
+        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+            m_disable_merge_attribute | added_block.m_disable_merge_attribute);
+        m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
     }
 
-    constexpr KMemoryBlock Split(VAddr split_addr) {
-        ASSERT(GetAddress() < split_addr);
-        ASSERT(Contains(split_addr));
-        ASSERT(Common::IsAligned(split_addr, PageSize));
+    constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
+                          bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
+        ASSERT(m_original_permission == KMemoryPermission::None);
+        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
 
-        KMemoryBlock block;
-        block.addr = addr;
-        block.num_pages = (split_addr - GetAddress()) / PageSize;
-        block.state = state;
-        block.ipc_lock_count = ipc_lock_count;
-        block.device_use_count = device_use_count;
-        block.perm = perm;
-        block.original_perm = original_perm;
-        block.attribute = attribute;
+        m_memory_state = s;
+        m_permission = p;
+        m_attribute = static_cast<KMemoryAttribute>(
+            a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
 
-        addr = split_addr;
-        num_pages -= block.num_pages;
+        if (set_disable_merge_attr && set_mask != 0) {
+            m_disable_merge_attribute = m_disable_merge_attribute |
+                                        static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
+        }
+        if (clear_mask != 0) {
+            m_disable_merge_attribute = m_disable_merge_attribute &
+                                        static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
+        }
+    }
 
-        return block;
+    constexpr void Split(KMemoryBlock* block, VAddr addr) {
+        ASSERT(this->GetAddress() < addr);
+        ASSERT(this->Contains(addr));
+        ASSERT(Common::IsAligned(addr, PageSize));
+
+        block->m_address = m_address;
+        block->m_num_pages = (addr - this->GetAddress()) / PageSize;
+        block->m_memory_state = m_memory_state;
+        block->m_ipc_lock_count = m_ipc_lock_count;
+        block->m_device_use_count = m_device_use_count;
+        block->m_permission = m_permission;
+        block->m_original_permission = m_original_permission;
+        block->m_attribute = m_attribute;
+        block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
+        block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
+        block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
+        block->m_device_disable_merge_right_count = 0;
+
+        m_address = addr;
+        m_num_pages -= block->m_num_pages;
+
+        m_ipc_disable_merge_count = 0;
+        m_device_disable_merge_left_count = 0;
+        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForShareLeft(
+        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+        if (left) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
+            const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
+            ASSERT(new_device_disable_merge_left_count > 0);
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForShareRight(
+        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+        if (right) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
+            const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
+            ASSERT(new_device_disable_merge_right_count > 0);
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
+                                                         bool right) {
+        this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
+        this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
+    }
+
+    constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
+                                 bool right) {
+        // We must either be shared or have a zero lock count.
+        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
+               m_device_use_count == 0);
+
+        // Share.
+        const u16 new_count = ++m_device_use_count;
+        ASSERT(new_count > 0);
+
+        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
+
+        this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
+        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+
+        if (left) {
+            if (!m_device_disable_merge_left_count) {
+                return;
+            }
+            --m_device_disable_merge_left_count;
+        }
+
+        m_device_disable_merge_left_count =
+            std::min(m_device_disable_merge_left_count, m_device_use_count);
+
+        if (m_device_disable_merge_left_count == 0) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
+        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+        if (right) {
+            const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
+            ASSERT(old_device_disable_merge_right_count > 0);
+            if (old_device_disable_merge_right_count == 1) {
+                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
+            }
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
+                                                           bool right) {
+        this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
+        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
+    }
+
+    constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
+                                   bool right) {
+        // We must be shared.
+        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
+
+        // Unshare.
+        const u16 old_count = m_device_use_count--;
+        ASSERT(old_count > 0);
+
+        if (old_count == 1) {
+            m_attribute =
+                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
+        }
+
+        this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
+    }
+
+    constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
+                                        bool right) {
+
+        // We must be shared.
+        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
+
+        // Unshare.
+        const u16 old_count = m_device_use_count--;
+        ASSERT(old_count > 0);
+
+        if (old_count == 1) {
+            m_attribute =
+                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
+        }
+
+        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
+    }
+
+    constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+        // We must either be locked or have a zero lock count.
+        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
+               m_ipc_lock_count == 0);
+
+        // Lock.
+        const u16 new_lock_count = ++m_ipc_lock_count;
+        ASSERT(new_lock_count > 0);
+
+        // If this is our first lock, update our permissions.
+        if (new_lock_count == 1) {
+            ASSERT(m_original_permission == KMemoryPermission::None);
+            ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
+                   (m_permission | KMemoryPermission::NotMapped));
+            ASSERT((m_permission & KMemoryPermission::UserExecute) !=
+                       KMemoryPermission::UserExecute ||
+                   (new_perm == KMemoryPermission::UserRead));
+            m_original_permission = m_permission;
+            m_permission = static_cast<KMemoryPermission>(
+                (new_perm & KMemoryPermission::IpcLockChangeMask) |
+                (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
+        }
+        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
+
+        if (left) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
+            const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
+            ASSERT(new_ipc_disable_merge_count > 0);
+        }
+    }
+
+    constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
+                                [[maybe_unused]] bool right) {
+        // We must be locked.
+        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
+
+        // Unlock.
+        const u16 old_lock_count = m_ipc_lock_count--;
+        ASSERT(old_lock_count > 0);
+
+        // If this is our last unlock, update our permissions.
+        if (old_lock_count == 1) {
+            ASSERT(m_original_permission != KMemoryPermission::None);
+            m_permission = m_original_permission;
+            m_original_permission = KMemoryPermission::None;
+            m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
+        }
+
+        if (left) {
+            const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
+            ASSERT(old_ipc_disable_merge_count > 0);
+            if (old_ipc_disable_merge_count == 1) {
+                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
+            }
+        }
+    }
+
+    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
+        return m_disable_merge_attribute;
     }
 };
 
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);

diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index becd6d1b9f..652441bc29 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -290,7 +290,7 @@ public:
         const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};
 
-        if (start_info.state != Kernel::KMemoryState::Free) {
+        if (start_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
         }
 
@@ -300,7 +300,7 @@ public:
 
         const auto end_info{page_table.QueryInfo(start + size)};
 
-        if (end_info.state != Kernel::KMemoryState::Free) {
+        if (end_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
         }
 

From ed591934fbffa32af0151302fd07e9fce776eb17 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 9 Sep 2022 21:38:28 -0700
Subject: [PATCH 12/25] core: hle: kernel: k_page_table: Update, and integrate
 with new KMemoryBlockManager/SlabManager.
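
The page table now pre-allocates every KMemoryBlock node an operation may
need from the KMemoryBlockSlabManager before touching the page table or the
block tree, so the block update itself can no longer fail partway through an
operation. The recurring call shape is: check the memory state (counting how
many allocator blocks the update may need), construct a
KMemoryBlockManagerUpdateAllocator, perform the page-table operation, then
publish the block update. The sketch below is an illustrative composite of
that shape, modeled on the UnmapPages path in this patch; the address, size,
state, and permission arguments are placeholders rather than a specific call
site:

    // Validate the state of the range and learn how many block nodes the
    // update may require (splits at unaligned boundaries need extra nodes).
    size_t num_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                 KMemoryState::All, state, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::All,
                                 KMemoryAttribute::None));

    // Pre-allocate the nodes from the slab; this is the only point that can
    // fail for lack of block memory, before any unmapping has happened.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager,
                                                 num_allocator_blocks);
    R_TRY(allocator_result);

    // Perform the page-table operation, then apply the now-infallible
    // block-tree update using the pre-allocated nodes.
    R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
    memory_block_manager.Update(std::addressof(allocator), address, num_pages,
                                KMemoryState::Free, KMemoryPermission::None,
                                KMemoryAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::Normal);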
--- src/core/hle/kernel/k_page_table.cpp | 619 ++++++++++++++++----------- src/core/hle/kernel/k_page_table.h | 25 +- 2 files changed, 393 insertions(+), 251 deletions(-) diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 8ebb753381..2cf46af0a4 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -49,6 +49,7 @@ KPageTable::~KPageTable() = default; Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, VAddr code_addr, std::size_t code_size, + KMemoryBlockSlabManager* mem_block_slab_manager, KMemoryManager::Pool pool) { const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { @@ -113,6 +114,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type address_space_start = start; address_space_end = end; is_kernel = false; + memory_block_slab_manager = mem_block_slab_manager; // Determine the region we can place our undetermineds in VAddr alloc_start{}; @@ -254,7 +256,14 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type page_table_impl.Resize(address_space_width, PageBits); - return InitializeMemoryLayout(start, end); + return memory_block_manager.Initialize(address_space_start, address_space_end, + memory_block_slab_manager); +} + +void KPageTable::Finalize() { + memory_block_manager.Finalize(memory_block_slab_manager, [&](VAddr addr, u64 size) { + system.Memory().UnmapRegion(page_table_impl, addr, size); + }); } Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, @@ -271,6 +280,13 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager); + + // Allocate and open. KPageGroup pg; R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( &pg, num_pages, @@ -278,7 +294,10 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); - block_manager->Update(addr, num_pages, state, perm); + // Update the blocks. + memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } @@ -307,6 +326,18 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + // Create an update allocator for the source. + Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator( + std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator( + std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + // Map the code memory. { // Determine the number of pages being operated on. 
@@ -335,10 +366,14 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size unprot_guard.Cancel(); // Apply the memory block updates. - block_manager->Update(src_address, num_pages, src_state, new_perm, - KMemoryAttribute::Locked); - block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm, - KMemoryAttribute::None); + memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, + src_state, new_perm, KMemoryAttribute::Locked, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); } return ResultSuccess; @@ -370,7 +405,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Determine whether any pages being unmapped are code. bool any_code_pages = false; { - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address); + KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(dst_address); while (true) { // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -408,6 +443,20 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Determine the number of pages being operated on. const std::size_t num_pages = size / PageSize; + // Create an update allocator for the source. + Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + // Unmap the aliased copy of the pages. R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); @@ -416,9 +465,14 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si OperationType::ChangePermissions)); // Apply the memory block updates. - block_manager->Update(dst_address, num_pages, KMemoryState::None); - block_manager->Update(src_address, num_pages, KMemoryState::Normal, - KMemoryPermission::UserReadWrite); + memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, + KMemoryState::Normal, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); // Note that we reprotected pages. reprotected_pages = true; @@ -434,55 +488,12 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, if (num_pages <= region_num_pages) { if (this->IsAslrEnabled()) { - // Try to directly find a free area up to 8 times. 
- for (std::size_t i = 0; i < 8; i++) { - const std::size_t random_offset = - KSystemControl::GenerateRandomRange( - 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * - alignment; - const VAddr candidate = - Common::AlignDown((region_start + random_offset), alignment) + offset; - - KMemoryInfo info = this->QueryInfoImpl(candidate); - - if (info.state != KMemoryState::Free) { - continue; - } - if (region_start > candidate) { - continue; - } - if (info.GetAddress() + guard_pages * PageSize > candidate) { - continue; - } - - const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1; - if (candidate_end > info.GetLastAddress()) { - continue; - } - if (candidate_end > region_start + region_num_pages * PageSize - 1) { - continue; - } - - address = candidate; - break; - } - // Fall back to finding the first free area with a random offset. - if (address == 0) { - // NOTE: Nintendo does not account for guard pages here. - // This may theoretically cause an offset to be chosen that cannot be mapped. We - // will account for guard pages. - const std::size_t offset_pages = KSystemControl::GenerateRandomRange( - 0, region_num_pages - num_pages - guard_pages); - address = block_manager->FindFreeArea(region_start + offset_pages * PageSize, - region_num_pages - offset_pages, num_pages, - alignment, offset, guard_pages); - } + UNIMPLEMENTED(); } - // Find the first free area. if (address == 0) { - address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages, - alignment, offset, guard_pages); + address = memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, + alignment, offset, guard_pages); } } @@ -649,11 +660,19 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Apply the memory block update. - block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None, - KMemoryAttribute::None); + memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); system.InvalidateCpuInstructionCaches(); @@ -682,10 +701,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; mapped_size = 0; - auto it = block_manager->FindIterator(cur_address); + auto it = memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -739,10 +758,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { size_t checked_mapped_size = 0; cur_address = address; - auto it = block_manager->FindIterator(cur_address); + auto it = memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != memory_block_manager.end()); // Get the memory info. 
const KMemoryInfo info = it->GetMemoryInfo(); @@ -782,6 +801,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { } } + // Create an update allocator. + ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, + num_allocator_blocks); + R_TRY(allocator_result); + // Reset the current tracking address, and make sure we clean up on failure. cur_address = address; auto unmap_guard = detail::ScopeExit([&] { @@ -791,10 +818,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { // Iterate, unmapping the pages. cur_address = address; - auto it = block_manager->FindIterator(cur_address); + auto it = memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -830,10 +857,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); - auto it = block_manager->FindIterator(cur_address); + auto it = memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -889,10 +916,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { mapped_physical_memory_size += (size - mapped_size); // Update the relevant memory blocks. - block_manager->Update(address, size / PageSize, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryState::Normal, KMemoryPermission::UserReadWrite, - KMemoryAttribute::None); + memory_block_manager.UpdateIfMatch( + std::addressof(allocator), address, size / PageSize, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None); // Cancel our guard. unmap_guard.Cancel(); @@ -924,10 +951,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; mapped_size = 0; - auto it = block_manager->FindIterator(cur_address); + auto it = memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1022,6 +1049,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { } ASSERT(pg.GetNumPages() == mapped_size / PageSize); + // Create an update allocator. + ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Reset the current tracking address, and make sure we clean up on failure. cur_address = address; auto remap_guard = detail::ScopeExit([&] { @@ -1030,7 +1064,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; // Iterate over the memory we unmapped. 
- auto it = block_manager->FindIterator(cur_address); + auto it = memory_block_manager.FindIterator(cur_address); auto pg_it = pg.Nodes().begin(); PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); @@ -1085,10 +1119,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { }); // Iterate over the memory, unmapping as we go. - auto it = block_manager->FindIterator(cur_address); + auto it = memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1120,8 +1154,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); // Update memory blocks. - block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, - KMemoryAttribute::None); + memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, + KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // TODO(bunnei): This is a workaround until the next set of changes, where we add reference // counting for mapped pages. Until then, we must manually close the reference to the page @@ -1134,83 +1170,134 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { return ResultSuccess; } -Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { +Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { + // Lock the table. KScopedLightLock lk(general_lock); - KMemoryState src_state{}; - CASCADE_CODE(CheckMemoryState( - &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, - KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite, - KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + // Validate that the source address's state is valid. + KMemoryState src_state; + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, + std::addressof(num_src_allocator_blocks), src_address, size, + KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, + KMemoryPermission::All, KMemoryPermission::UserReadWrite, + KMemoryAttribute::All, KMemoryAttribute::None)); - if (IsRegionMapped(dst_addr, size)) { - return ResultInvalidCurrentMemory; - } + // Validate that the dst address's state is valid. + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, + KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + // Create an update allocator for the source. + Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator( + std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator( + std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + + // Map the memory. 
KPageGroup page_linked_list; const std::size_t num_pages{size / PageSize}; + const KMemoryPermission new_src_perm = static_cast( + KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); + const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; - AddRegionToPages(src_addr, num_pages, page_linked_list); - + AddRegionToPages(src_address, num_pages, page_linked_list); { + // Reprotect the source as kernel-read/not mapped. auto block_guard = detail::ScopeExit([&] { - Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, + Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, OperationType::ChangePermissions); }); - - CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, - OperationType::ChangePermissions)); - CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite)); + R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions)); + R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite)); block_guard.Cancel(); } - block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None, - KMemoryAttribute::Locked); - block_manager->Update(dst_addr, num_pages, KMemoryState::Stack, - KMemoryPermission::UserReadWrite); + // Apply the memory block updates. + memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + new_src_perm, new_src_attr, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::Stack, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } -Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { +Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { + // Lock the table. KScopedLightLock lk(general_lock); - KMemoryState src_state{}; - CASCADE_CODE(CheckMemoryState( - &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, - KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None, - KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); + // Validate that the source address's state is valid. + KMemoryState src_state; + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState( + std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), + src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, + KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, + KMemoryAttribute::All, KMemoryAttribute::Locked)); - KMemoryPermission dst_perm{}; - CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size, - KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, - KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + // Validate that the dst address's state is valid. + KMemoryPermission dst_perm; + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryState( + nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), + dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + + // Create an update allocator for the source. 
+ Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator( + std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator( + std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + R_TRY(dst_allocator_result); KPageGroup src_pages; KPageGroup dst_pages; const std::size_t num_pages{size / PageSize}; - AddRegionToPages(src_addr, num_pages, src_pages); - AddRegionToPages(dst_addr, num_pages, dst_pages); + AddRegionToPages(src_address, num_pages, src_pages); + AddRegionToPages(dst_address, num_pages, dst_pages); if (!dst_pages.IsEqual(src_pages)) { return ResultInvalidMemoryRegion; } { - auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); + auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); - CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); - CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, - OperationType::ChangePermissions)); + R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); + R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, + OperationType::ChangePermissions)); block_guard.Cancel(); } - block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite); - block_manager->Update(dst_addr, num_pages, KMemoryState::Free); + // Apply the memory block updates. + memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); + memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::None, KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } @@ -1254,11 +1341,18 @@ Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemory KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager); + // Map the pages. R_TRY(MapPages(address, page_linked_list, perm)); // Update the blocks. - block_manager->Update(address, num_pages, state, perm); + memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } @@ -1288,6 +1382,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t KMemoryAttribute::None, KMemoryAttribute::None) .IsSuccess()); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager); + // Perform mapping operation. 
if (is_pa_valid) { R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); @@ -1296,7 +1395,9 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t } // Update the blocks. - block_manager->Update(addr, num_pages, state, perm); + memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); // We successfully mapped the pages. *out_addr = addr; @@ -1321,25 +1422,36 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { return ResultSuccess; } -Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) { +Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { // Check that the unmap is in range. const std::size_t num_pages{page_linked_list.GetNumPages()}; const std::size_t size{num_pages * PageSize}; - R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. KScopedLightLock lk(general_lock); // Check the memory state. - R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None, + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, state, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform the unmap. - R_TRY(UnmapPages(addr, page_linked_list)); + R_TRY(UnmapPages(address, page_linked_list)); // Update the blocks. - block_manager->Update(addr, num_pages, state, KMemoryPermission::None); + memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } @@ -1359,11 +1471,20 @@ Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform the unmap. R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Update the blocks. - block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None); + memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } @@ -1435,13 +1556,21 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, // Succeed if there's nothing to do. R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); + // Create an update allocator. 
+ Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform mapping operation. const auto operation = was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions; R_TRY(Operate(addr, num_pages, new_perm, operation)); // Update the blocks. - block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None); + memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // Ensure cache coherency, if we're setting pages as executable. if (is_x) { @@ -1454,51 +1583,30 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { KScopedLightLock lk(general_lock); - return block_manager->FindBlock(addr).GetMemoryInfo(); + return memory_block_manager.FindBlock(addr)->GetMemoryInfo(); } KMemoryInfo KPageTable::QueryInfo(VAddr addr) { if (!Contains(addr, 1)) { - return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible, - KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None}; + return { + .m_address = address_space_end, + .m_size = 0 - address_space_end, + .m_state = static_cast(Svc::MemoryState::Inaccessible), + .m_device_disable_merge_left_count = 0, + .m_device_disable_merge_right_count = 0, + .m_ipc_lock_count = 0, + .m_device_use_count = 0, + .m_ipc_disable_merge_count = 0, + .m_permission = KMemoryPermission::None, + .m_attribute = KMemoryAttribute::None, + .m_original_permission = KMemoryPermission::None, + .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None, + }; } return QueryInfoImpl(addr); } -Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) { - KScopedLightLock lk(general_lock); - - KMemoryState state{}; - KMemoryAttribute attribute{}; - - R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryPermission::All, KMemoryPermission::UserReadWrite, - KMemoryAttribute::Mask, KMemoryAttribute::None, - KMemoryAttribute::IpcAndDeviceMapped)); - - block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked); - - return ResultSuccess; -} - -Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryState state{}; - - R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); - - block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite); - return ResultSuccess; -} - Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm) { const size_t num_pages = size / PageSize; @@ -1509,20 +1617,30 @@ Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, // Verify we can change the memory permission. 
KMemoryState old_state; KMemoryPermission old_perm; - R_TRY(this->CheckMemoryState( - std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size, - KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None, - KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, + std::addressof(num_allocator_blocks), addr, size, + KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::All, KMemoryAttribute::None)); // Determine new perm. const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); R_SUCCEED_IF(old_perm == new_perm); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform mapping operation. R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); // Update the blocks. - block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None); + memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } @@ -1548,6 +1666,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Determine the new attribute. const KMemoryAttribute new_attr = static_cast(((old_attr & static_cast(~mask)) | @@ -1557,7 +1681,9 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); // Update the blocks. - block_manager->Update(addr, num_pages, old_state, old_perm, new_attr); + memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, + new_attr, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } @@ -1603,6 +1729,12 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator( + std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Unmap the end of the heap. const auto num_pages = (GetHeapSize() - size) / PageSize; R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None, @@ -1613,8 +1745,12 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { LimitableResource::PhysicalMemory, num_pages * PageSize); // Apply the memory block update. 
- block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None); + memory_block_manager.Update(std::addressof(allocator), heap_region_start + size, + num_pages, KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + size == 0 ? KMemoryBlockDisableMergeAttribute::Normal + : KMemoryBlockDisableMergeAttribute::None); // Update the current heap end. current_heap_end = heap_region_start + size; @@ -1667,6 +1803,12 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator( + std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Map the pages. const auto num_pages = allocation_size / PageSize; R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); @@ -1681,8 +1823,12 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { memory_reservation.Commit(); // Apply the memory block update. - block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None); + memory_block_manager.Update( + std::addressof(allocator), current_heap_end, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + heap_region_start == current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal + : KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // Update the current heap end. current_heap_end = heap_region_start + size; @@ -1713,6 +1859,11 @@ ResultVal KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, return ResultOutOfMemory; } + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager); + if (is_map_only) { R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); } else { @@ -1723,53 +1874,38 @@ ResultVal KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); } - block_manager->Update(addr, needed_num_pages, state, perm); + // Update the blocks. + memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return addr; } -Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { +Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. KScopedLightLock lk(general_lock); - KMemoryPermission perm{}; - if (const Result result{CheckMemoryState( - nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, - KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, - KMemoryAttribute::DeviceSharedAndUncached)}; - result.IsError()) { - return result; - } + // Check the memory state. 
+ size_t num_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); - block_manager->UpdateLock( - addr, size / PageSize, - [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { - block->ShareToDevice(permission); - }, - perm); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); - return ResultSuccess; -} - -Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryPermission perm{}; - if (const Result result{CheckMemoryState( - nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, - KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, - KMemoryAttribute::DeviceSharedAndUncached)}; - result.IsError()) { - return result; - } - - block_manager->UpdateLock( - addr, size / PageSize, - [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { - block->UnshareToDevice(permission); - }, - perm); + // Update the memory blocks. + memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, + &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); return ResultSuccess; } @@ -1791,19 +1927,6 @@ Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPage KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); } -Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { - block_manager = std::make_unique(start, end); - - return ResultSuccess; -} - -bool KPageTable::IsRegionMapped(VAddr address, u64 size) { - return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, - KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped) - .IsError(); -} - bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { auto start_ptr = system.DeviceMemory().GetPointer(addr); for (u64 offset{}; offset < size; offset += PageSize) { @@ -1831,8 +1954,8 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page if (is_aslr_enabled) { UNIMPLEMENTED(); } - return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, - IsKernel() ? 1 : 4); + return memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 1 : 4); } Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, @@ -2008,9 +2131,9 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_ KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { // Validate the states match expectation. 
- R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory); - R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory); - R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory); + R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); + R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); return ResultSuccess; } @@ -2024,7 +2147,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); + KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2042,7 +2165,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA // Advance our iterator. it++; - ASSERT(it != block_manager->cend()); + ASSERT(it != memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2067,7 +2190,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); + KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2075,14 +2198,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; // Validate all blocks in the range have correct state. - const KMemoryState first_state = info.state; - const KMemoryPermission first_perm = info.perm; - const KMemoryAttribute first_attr = info.attribute; + const KMemoryState first_state = info.m_state; + const KMemoryPermission first_perm = info.m_permission; + const KMemoryAttribute first_attr = info.m_attribute; while (true) { // Validate the current block. - R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory); - R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory); - R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), + R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); + R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), ResultInvalidCurrentMemory); // Validate against the provided masks. @@ -2095,7 +2218,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Advance our iterator. it++; - ASSERT(it != block_manager->cend()); + ASSERT(it != memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2162,6 +2285,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); } + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Decide on new perm and attr. new_perm = (new_perm != KMemoryPermission::None) ? 
new_perm : old_perm; KMemoryAttribute new_attr = static_cast(old_attr | lock_attr); @@ -2172,7 +2301,9 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr } // Apply the memory block updates. - block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); + memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } @@ -2213,13 +2344,21 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; KMemoryAttribute new_attr = static_cast(old_attr & ~lock_attr); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Update permission, if we need to. if (new_perm != old_perm) { R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); } // Apply the memory block updates. - block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); + memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); return ResultSuccess; } diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 25774f2321..fa11a0fe36 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -9,8 +9,10 @@ #include "common/common_types.h" #include "common/page_table.h" #include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" #include "core/hle/result.h" @@ -34,7 +36,12 @@ public: ~KPageTable(); Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); + VAddr code_addr, std::size_t code_size, + KMemoryBlockSlabManager* mem_block_slab_manager, + KMemoryManager::Pool pool); + + void Finalize(); + Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, KMemoryPermission perm); Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); @@ -58,8 +65,6 @@ public: Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); KMemoryInfo QueryInfo(VAddr addr); - Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); - Result ResetTransferMemory(VAddr addr, std::size_t size); Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); Result SetMaxHeapSize(std::size_t size); @@ -68,7 +73,6 @@ public: bool is_map_only, VAddr region_start, std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm, PAddr map_addr = 0); - Result LockForDeviceAddressSpace(VAddr addr, std::size_t size); Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); Result LockForCodeMemory(KPageGroup* out, 
VAddr addr, std::size_t size);
 Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
@@ -96,17 +100,14 @@ private:
 ChangePermissionsAndRefresh,
 };
- static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
- KMemoryAttribute::IpcLocked |
- KMemoryAttribute::DeviceShared;
+ static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
- Result InitializeMemoryLayout(VAddr start, VAddr end);
 Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
 Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
 bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
 KMemoryState state, KMemoryPermission perm);
 Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
- bool IsRegionMapped(VAddr address, u64 size);
 bool IsRegionContiguous(VAddr addr, u64 size) const;
 void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
 KMemoryInfo QueryInfoImpl(VAddr addr);
@@ -194,8 +195,6 @@ private:
 mutable KLightLock general_lock;
 mutable KLightLock map_physical_memory_lock;
- std::unique_ptr<KMemoryBlockManager> block_manager;
-
 public:
 constexpr VAddr GetAddressSpaceStart() const {
 return address_space_start;
@@ -346,9 +345,13 @@ private:
 std::size_t max_physical_memory_size{};
 std::size_t address_space_width{};
+ KMemoryBlockManager memory_block_manager;
+
 bool is_kernel{};
 bool is_aslr_enabled{};
+ KMemoryBlockSlabManager* memory_block_slab_manager{};
+
 u32 heap_fill_value{};
 const KMemoryRegion* cached_physical_heap_region{};

From 1baedfa12cc84efd878567e91672f7e0f6de7b5a Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 16 Sep 2022 23:33:47 -0700
Subject: [PATCH 13/25] core: hle: kernel: Integrate application memory block slab manager.

---
 src/core/hle/kernel/k_process.cpp | 6 +++---
 src/core/hle/kernel/kernel.cpp | 34 +++++++++++++++++++++++++++++++
 src/core/hle/kernel/kernel.h | 7 +++++++
 3 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 1d3157a9f4..abc2115bd9 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -356,9 +356,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
 return ResultLimitReached;
 }
 // Initialize process address space
- if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
- 0x8000000, code_size,
- KMemoryManager::Pool::Application)};
+ if (const Result result{page_table->InitializeForProcess(
+ metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
+ &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
 result.IsError()) {
 return result;
 }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 9251f29ad7..d572394727 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -24,6 +24,7 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_client_port.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
@@ -76,6 +77,14 @@ struct KernelCore::Impl {
 InitializePreemption(kernel);
 InitializePhysicalCores();

+ // Initialize the Dynamic Slab Heaps.
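+ // (These heaps back the application KMemoryBlockSlabManager that each
+ // process page table now receives in this patch.)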
+ {
+ const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
+ ASSERT(pt_heap_region.GetEndAddress() != 0);
+
+ InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
+ }
+
 RegisterHostThread();
 }
@@ -257,6 +266,18 @@ struct KernelCore::Impl {
 system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
 }
+ void InitializeResourceManagers(VAddr address, size_t size) {
+ dynamic_page_manager = std::make_unique<KDynamicPageManager>();
+ memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
+ app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
+
+ dynamic_page_manager->Initialize(address, size);
+ static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
+ memory_block_heap->Initialize(dynamic_page_manager.get(),
+ ApplicationMemoryBlockSlabHeapSize);
+ app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
+ }
+
 void InitializeShutdownThreads() {
 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
 shutdown_threads[core_id] = KThread::Create(system.Kernel());
@@ -770,6 +791,11 @@ struct KernelCore::Impl {
 // Kernel memory management
 std::unique_ptr<KMemoryManager> memory_manager;
+ // Dynamic slab managers
+ std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
+ std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
+ std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
+
 // Shared memory for services
 Kernel::KSharedMemory* hid_shared_mem{};
 Kernel::KSharedMemory* font_shared_mem{};
@@ -1041,6 +1067,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
 return *impl->memory_manager;
 }
+KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
+ return *impl->app_memory_block_manager;
+}
+
+const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
+ return *impl->app_memory_block_manager;
+}
+
 Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
 return *impl->hid_shared_mem;
 }
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 0847cbcbf7..79e66483ee 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -37,6 +37,7 @@ class KClientSession;
 class KEvent;
 class KHandleTable;
 class KLinkedListNode;
+class KMemoryBlockSlabManager;
 class KMemoryLayout;
 class KMemoryManager;
 class KPageBuffer;
@@ -238,6 +239,12 @@ public:
 /// Gets the virtual memory manager for the kernel.
 const KMemoryManager& MemoryManager() const;
+ /// Gets the application memory block manager for the kernel.
+ KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
+
+ /// Gets the application memory block manager for the kernel.
+ const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
+
 /// Gets the shared memory object for HID services.
 Kernel::KSharedMemory& GetHidSharedMem();

From d00245d4440d30a2217c025572cf8a47f4ea2573 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 10 Sep 2022 23:59:34 -0700
Subject: [PATCH 14/25] video_core: renderer_vulkan: vk_query_cache: Avoid shutdown crash in QueryPool::Reserve.
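The pool lookup in QueryPool::Reserve could fail during emulator
shutdown, presumably because queries can still be released after their
owning query pool has already been destroyed; the old ASSERT then
crashed on exit. With this change the reservation simply becomes a
no-op when the pool is no longer present. A minimal standalone sketch
of the defensive pattern (hypothetical names and container types, not
the exact yuzu ones):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    constexpr std::size_t GROW_STEP = 512; // usage slots added per pool

    // Mark a slot as free only if its owning pool still exists; during
    // shutdown the pool may already have been destroyed, in which case
    // dropping the reservation is harmless.
    void ReserveSlot(std::vector<int>& pools, std::vector<bool>& usage,
                     int pool, std::size_t slot) {
        const auto it = std::find(pools.begin(), pools.end(), pool);
        if (it == pools.end()) {
            return;
        }
        const auto index = static_cast<std::size_t>(std::distance(pools.begin(), it));
        usage[index * GROW_STEP + slot] = false;
    }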
--- src/video_core/renderer_vulkan/vk_query_cache.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 7cb02631c4..4b15c0f85b 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -59,10 +59,11 @@ void QueryPool::Reserve(std::pair query) { std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) { return query_pool == *pool; }); - ASSERT(it != std::end(pools)); - const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); - usage[pool_index * GROW_STEP + static_cast(query.second)] = false; + if (it != std::end(pools)) { + const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); + usage[pool_index * GROW_STEP + static_cast(query.second)] = false; + } } QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, From ff26190d422599ded0166cba686e7456c59163a5 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 1 Oct 2022 14:08:47 -0700 Subject: [PATCH 15/25] core: hle: kernel: k_page_table: Impl. LockForUn/MapDeviceAddressSpace, cleanup. --- src/core/hle/kernel/k_page_table.cpp | 881 ++++++++++--------- src/core/hle/kernel/k_page_table.h | 269 +++--- src/core/hle/service/nvdrv/devices/nvmap.cpp | 3 +- 3 files changed, 616 insertions(+), 537 deletions(-) diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 2cf46af0a4..fcffc0b88d 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -25,7 +25,7 @@ namespace { using namespace Common::Literals; -constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { +constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { switch (as_type) { case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is32BitNoMap: @@ -43,28 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT } // namespace KPageTable::KPageTable(Core::System& system_) - : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {} + : m_general_lock{system_.Kernel()}, + m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {} KPageTable::~KPageTable() = default; Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, std::size_t code_size, + VAddr code_addr, size_t code_size, KMemoryBlockSlabManager* mem_block_slab_manager, KMemoryManager::Pool pool) { const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); }; const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); }; // Set our width and heap/alias sizes - address_space_width = GetAddressSpaceWidthFromType(as_type); + m_address_space_width = GetAddressSpaceWidthFromType(as_type); const VAddr start = 0; - const VAddr end{1ULL << address_space_width}; - std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; - std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; + const VAddr 
end{1ULL << m_address_space_width}; + size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; + size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; ASSERT(code_addr < code_addr + code_size); ASSERT(code_addr + code_size - 1 <= end - 1); @@ -76,67 +77,68 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Set code regions and determine remaining - constexpr std::size_t RegionAlignment{2_MiB}; + constexpr size_t RegionAlignment{2_MiB}; VAddr process_code_start{}; VAddr process_code_end{}; - std::size_t stack_region_size{}; - std::size_t kernel_map_region_size{}; + size_t stack_region_size{}; + size_t kernel_map_region_size{}; - if (address_space_width == 39) { + if (m_address_space_width == 39) { alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); - alias_code_region_start = code_region_start; - alias_code_region_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = m_code_region_end; process_code_start = Common::AlignDown(code_addr, RegionAlignment); process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); } else { stack_region_size = 0; kernel_map_region_size = 0; - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - stack_region_start = code_region_start; - alias_code_region_start = code_region_start; - alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + - GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); - stack_region_end = code_region_end; - kernel_map_region_start = code_region_start; - kernel_map_region_end = code_region_end; - process_code_start = code_region_start; - process_code_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + m_stack_region_start = m_code_region_start; + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + + GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); + m_stack_region_end = m_code_region_end; + m_kernel_map_region_start = m_code_region_start; + m_kernel_map_region_end = m_code_region_end; + process_code_start = m_code_region_start; + process_code_end = m_code_region_end; } // Set other basic fields - is_aslr_enabled = enable_aslr; - address_space_start = start; - address_space_end = end; - is_kernel = false; - memory_block_slab_manager = mem_block_slab_manager; + m_enable_aslr = enable_aslr; + m_enable_device_address_space_merge = false; + m_address_space_start = start; + m_address_space_end = end; + m_is_kernel = false; + m_memory_block_slab_manager = mem_block_slab_manager; // Determine the region we can place our undetermineds in VAddr alloc_start{}; - std::size_t alloc_size{}; - if ((process_code_start - code_region_start) >= (end - 
process_code_end)) { - alloc_start = code_region_start; - alloc_size = process_code_start - code_region_start; + size_t alloc_size{}; + if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { + alloc_start = m_code_region_start; + alloc_size = process_code_start - m_code_region_start; } else { alloc_start = process_code_end; alloc_size = end - process_code_end; } - const std::size_t needed_size{ + const size_t needed_size{ (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; if (alloc_size < needed_size) { ASSERT(false); return ResultOutOfMemory; } - const std::size_t remaining_size{alloc_size - needed_size}; + const size_t remaining_size{alloc_size - needed_size}; // Determine random placements for each region - std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; + size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; if (enable_aslr) { alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment; @@ -149,124 +151,124 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Setup heap and alias regions - alias_region_start = alloc_start + alias_rnd; - alias_region_end = alias_region_start + alias_region_size; - heap_region_start = alloc_start + heap_rnd; - heap_region_end = heap_region_start + heap_region_size; + m_alias_region_start = alloc_start + alias_rnd; + m_alias_region_end = m_alias_region_start + alias_region_size; + m_heap_region_start = alloc_start + heap_rnd; + m_heap_region_end = m_heap_region_start + heap_region_size; if (alias_rnd <= heap_rnd) { - heap_region_start += alias_region_size; - heap_region_end += alias_region_size; + m_heap_region_start += alias_region_size; + m_heap_region_end += alias_region_size; } else { - alias_region_start += heap_region_size; - alias_region_end += heap_region_size; + m_alias_region_start += heap_region_size; + m_alias_region_end += heap_region_size; } // Setup stack region if (stack_region_size) { - stack_region_start = alloc_start + stack_rnd; - stack_region_end = stack_region_start + stack_region_size; + m_stack_region_start = alloc_start + stack_rnd; + m_stack_region_end = m_stack_region_start + stack_region_size; if (alias_rnd < stack_rnd) { - stack_region_start += alias_region_size; - stack_region_end += alias_region_size; + m_stack_region_start += alias_region_size; + m_stack_region_end += alias_region_size; } else { - alias_region_start += stack_region_size; - alias_region_end += stack_region_size; + m_alias_region_start += stack_region_size; + m_alias_region_end += stack_region_size; } if (heap_rnd < stack_rnd) { - stack_region_start += heap_region_size; - stack_region_end += heap_region_size; + m_stack_region_start += heap_region_size; + m_stack_region_end += heap_region_size; } else { - heap_region_start += stack_region_size; - heap_region_end += stack_region_size; + m_heap_region_start += stack_region_size; + m_heap_region_end += stack_region_size; } } // Setup kernel map region if (kernel_map_region_size) { - kernel_map_region_start = alloc_start + kmap_rnd; - kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; + m_kernel_map_region_start = alloc_start + kmap_rnd; + m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; if (alias_rnd < kmap_rnd) { - kernel_map_region_start += alias_region_size; - kernel_map_region_end += alias_region_size; + m_kernel_map_region_start += alias_region_size; + m_kernel_map_region_end += alias_region_size; } else { 
- alias_region_start += kernel_map_region_size;
- alias_region_end += kernel_map_region_size;
+ m_alias_region_start += kernel_map_region_size;
+ m_alias_region_end += kernel_map_region_size;
 }

 if (heap_rnd < kmap_rnd) {
- kernel_map_region_start += heap_region_size;
- kernel_map_region_end += heap_region_size;
+ m_kernel_map_region_start += heap_region_size;
+ m_kernel_map_region_end += heap_region_size;
 } else {
- heap_region_start += kernel_map_region_size;
- heap_region_end += kernel_map_region_size;
+ m_heap_region_start += kernel_map_region_size;
+ m_heap_region_end += kernel_map_region_size;
 }

 if (stack_region_size) {
 if (stack_rnd < kmap_rnd) {
- kernel_map_region_start += stack_region_size;
- kernel_map_region_end += stack_region_size;
+ m_kernel_map_region_start += stack_region_size;
+ m_kernel_map_region_end += stack_region_size;
 } else {
- stack_region_start += kernel_map_region_size;
- stack_region_end += kernel_map_region_size;
+ m_stack_region_start += kernel_map_region_size;
+ m_stack_region_end += kernel_map_region_size;
 }
 }
 }

 // Set heap members
- current_heap_end = heap_region_start;
- max_heap_size = 0;
- max_physical_memory_size = 0;
+ m_current_heap_end = m_heap_region_start;
+ m_max_heap_size = 0;
+ m_max_physical_memory_size = 0;

 // Ensure that our regions are inside our address space
 auto IsInAddressSpace = [&](VAddr addr) {
- return address_space_start <= addr && addr <= address_space_end;
+ return m_address_space_start <= addr && addr <= m_address_space_end;
 };
- ASSERT(IsInAddressSpace(alias_region_start));
- ASSERT(IsInAddressSpace(alias_region_end));
- ASSERT(IsInAddressSpace(heap_region_start));
- ASSERT(IsInAddressSpace(heap_region_end));
- ASSERT(IsInAddressSpace(stack_region_start));
- ASSERT(IsInAddressSpace(stack_region_end));
- ASSERT(IsInAddressSpace(kernel_map_region_start));
- ASSERT(IsInAddressSpace(kernel_map_region_end));
+ ASSERT(IsInAddressSpace(m_alias_region_start));
+ ASSERT(IsInAddressSpace(m_alias_region_end));
+ ASSERT(IsInAddressSpace(m_heap_region_start));
+ ASSERT(IsInAddressSpace(m_heap_region_end));
+ ASSERT(IsInAddressSpace(m_stack_region_start));
+ ASSERT(IsInAddressSpace(m_stack_region_end));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_end));

 // Ensure that we selected regions that don't overlap
- const VAddr alias_start{alias_region_start};
- const VAddr alias_last{alias_region_end - 1};
- const VAddr heap_start{heap_region_start};
- const VAddr heap_last{heap_region_end - 1};
- const VAddr stack_start{stack_region_start};
- const VAddr stack_last{stack_region_end - 1};
- const VAddr kmap_start{kernel_map_region_start};
- const VAddr kmap_last{kernel_map_region_end - 1};
+ const VAddr alias_start{m_alias_region_start};
+ const VAddr alias_last{m_alias_region_end - 1};
+ const VAddr heap_start{m_heap_region_start};
+ const VAddr heap_last{m_heap_region_end - 1};
+ const VAddr stack_start{m_stack_region_start};
+ const VAddr stack_last{m_stack_region_end - 1};
+ const VAddr kmap_start{m_kernel_map_region_start};
+ const VAddr kmap_last{m_kernel_map_region_end - 1};

 ASSERT(alias_last < heap_start || heap_last < alias_start);
 ASSERT(alias_last < stack_start || stack_last < alias_start);
 ASSERT(alias_last < kmap_start || kmap_last < alias_start);
 ASSERT(heap_last < stack_start || stack_last < heap_start);
 ASSERT(heap_last < kmap_start || kmap_last < heap_start);

- current_heap_end = heap_region_start;
- max_heap_size = 0;
- mapped_physical_memory_size = 0;
- memory_pool = pool;
+
m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_mapped_physical_memory_size = 0; + m_memory_pool = pool; - page_table_impl.Resize(address_space_width, PageBits); + m_page_table_impl.Resize(m_address_space_width, PageBits); - return memory_block_manager.Initialize(address_space_start, address_space_end, - memory_block_slab_manager); + return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, + m_memory_block_slab_manager); } void KPageTable::Finalize() { - memory_block_manager.Finalize(memory_block_slab_manager, [&](VAddr addr, u64 size) { - system.Memory().UnmapRegion(page_table_impl, addr, size); + m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { + m_system.Memory().UnmapRegion(m_page_table_impl, addr, size); }); } -Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, +Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) { const u64 size{num_pages * PageSize}; @@ -274,7 +276,7 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the destination memory is unmapped. R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, @@ -284,43 +286,43 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager); + m_memory_block_slab_manager); // Allocate and open. KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( &pg, num_pages, - KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); + KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } -Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the source memory is normal heap. KMemoryState src_state{}; KMemoryPermission src_perm{}; - std::size_t num_src_allocator_blocks{}; + size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); // Verify that the destination memory is unmapped. 
- std::size_t num_dst_allocator_blocks{}; + size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, @@ -328,20 +330,22 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size // Create an update allocator for the source. Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator( - std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator( - std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); R_TRY(dst_allocator_result); // Map the code memory. { // Determine the number of pages being operated on. - const std::size_t num_pages = size / PageSize; + const size_t num_pages = size / PageSize; // Create page groups for the memory being mapped. KPageGroup pg; @@ -366,37 +370,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size unprot_guard.Cancel(); // Apply the memory block updates. - memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, - src_state, new_perm, KMemoryAttribute::Locked, - KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, + src_state, new_perm, KMemoryAttribute::Locked, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); } return ResultSuccess; } -Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, +Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, ICacheInvalidationStrategy icache_invalidation_strategy) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the source memory is locked normal heap. - std::size_t num_src_allocator_blocks{}; + size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::Locked)); // Verify that the destination memory is aliasable code. 
- std::size_t num_dst_allocator_blocks{}; + size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryStateContiguous( std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, @@ -405,7 +409,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Determine whether any pages being unmapped are code. bool any_code_pages = false; { - KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(dst_address); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); while (true) { // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -431,9 +435,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si SCOPE_EXIT({ if (reprotected_pages && any_code_pages) { if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { - system.InvalidateCpuInstructionCacheRange(dst_address, size); + m_system.InvalidateCpuInstructionCacheRange(dst_address, size); } else { - system.InvalidateCpuInstructionCaches(); + m_system.InvalidateCpuInstructionCaches(); } } }); @@ -441,19 +445,19 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Unmap. { // Determine the number of pages being operated on. - const std::size_t num_pages = size / PageSize; + const size_t num_pages = size / PageSize; // Create an update allocator for the source. Result src_allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), - memory_block_slab_manager, + m_memory_block_slab_manager, num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), - memory_block_slab_manager, + m_memory_block_slab_manager, num_dst_allocator_blocks); R_TRY(dst_allocator_result); @@ -465,14 +469,14 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si OperationType::ChangePermissions)); // Apply the memory block updates. - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::None, KMemoryPermission::None, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); - memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, - KMemoryState::Normal, KMemoryPermission::UserReadWrite, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Locked); + m_memory_block_manager.Update( + std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update( + std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); // Note that we reprotected pages. 
reprotected_pages = true; @@ -481,9 +485,8 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si return ResultSuccess; } -VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, - std::size_t num_pages, std::size_t alignment, std::size_t offset, - std::size_t guard_pages) { +VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages) { VAddr address = 0; if (num_pages <= region_num_pages) { @@ -492,8 +495,8 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, } // Find the first free area. if (address == 0) { - address = memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, - alignment, offset, guard_pages); + address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, + alignment, offset, guard_pages); } } @@ -511,7 +514,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory); + R_UNLESS(m_page_table_impl.BeginTraversal(next_entry, context, addr), + ResultInvalidCurrentMemory); // Prepare tracking variables. PAddr cur_addr = next_entry.phys_addr; @@ -519,9 +523,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { size_t tot_size = cur_size; // Iterate, adding to group as we go. - const auto& memory_layout = system.Kernel().MemoryLayout(); + const auto& memory_layout = m_system.Kernel().MemoryLayout(); while (tot_size < size) { - R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context), + R_UNLESS(m_page_table_impl.ContinueTraversal(next_entry, context), ResultInvalidCurrentMemory); if (next_entry.phys_addr != (cur_addr + cur_size)) { @@ -557,7 +561,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu const size_t size = num_pages * PageSize; const auto& pg = pg_ll.Nodes(); - const auto& memory_layout = system.Kernel().MemoryLayout(); + const auto& memory_layout = m_system.Kernel().MemoryLayout(); // Empty groups are necessarily invalid. if (pg.empty()) { @@ -584,7 +588,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - if (!page_table_impl.BeginTraversal(next_entry, context, addr)) { + if (!m_page_table_impl.BeginTraversal(next_entry, context, addr)) { return false; } @@ -595,7 +599,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Iterate, comparing expected to actual. 
while (tot_size < size) { - if (!page_table_impl.ContinueTraversal(next_entry, context)) { + if (!m_page_table_impl.ContinueTraversal(next_entry, context)) { return false; } @@ -641,11 +645,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); } -Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, +Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, VAddr src_addr) { - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; // Check that the memory is mapped in the destination process. size_t num_allocator_blocks; @@ -663,48 +667,48 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Apply the memory block update. - memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); - system.InvalidateCpuInstructionCaches(); + m_system.InvalidateCpuInstructionCaches(); return ResultSuccess; } -Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { +Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); // Calculate the last address for convenience. const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address; - std::size_t mapped_size; + size_t mapped_size; // The entire mapping process can be retried. while (true) { // Check if the memory is already mapped. { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Iterate over the memory. cur_address = address; mapped_size = 0; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -735,20 +739,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { { // Reserve the memory from the process resource limit. KScopedResourceReservation memory_reservation( - system.Kernel().CurrentProcess()->GetResourceLimit(), + m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, size - mapped_size); R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate pages for the new memory. 
KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( &pg, (size - mapped_size) / PageSize, - KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); + KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); // Map the memory. { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); size_t num_allocator_blocks = 0; @@ -758,10 +762,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { size_t checked_mapped_size = 0; cur_address = address; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -805,7 +809,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); @@ -818,10 +822,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { // Iterate, unmapping the pages. cur_address = address; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -857,10 +861,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -913,10 +917,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { memory_reservation.Commit(); // Increase our tracked mapped size. - mapped_physical_memory_size += (size - mapped_size); + m_mapped_physical_memory_size += (size - mapped_size); // Update the relevant memory blocks. - memory_block_manager.UpdateIfMatch( + m_memory_block_manager.UpdateIfMatch( std::addressof(allocator), address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, KMemoryPermission::UserReadWrite, KMemoryAttribute::None); @@ -930,20 +934,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { } } -Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { +Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Calculate the last address for convenience. 
const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address = 0; - std::size_t mapped_size = 0; - std::size_t num_allocator_blocks = 0; + size_t mapped_size = 0; + size_t num_allocator_blocks = 0; // Check if the memory is mapped. { @@ -951,10 +955,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; mapped_size = 0; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1053,7 +1057,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Reset the current tracking address, and make sure we clean up on failure. @@ -1064,7 +1068,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; // Iterate over the memory we unmapped. - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); auto pg_it = pg.Nodes().begin(); PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); @@ -1119,10 +1123,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { }); // Iterate over the memory, unmapping as we go. - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1149,20 +1153,20 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { } // Release the memory resource. - mapped_physical_memory_size -= mapped_size; - auto process{system.Kernel().CurrentProcess()}; + m_mapped_physical_memory_size -= mapped_size; + auto process{m_system.Kernel().CurrentProcess()}; process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); // Update memory blocks. - memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, - KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // TODO(bunnei): This is a workaround until the next set of changes, where we add reference // counting for mapped pages. Until then, we must manually close the reference to the page // group. - system.Kernel().MemoryManager().Close(pg); + m_system.Kernel().MemoryManager().Close(pg); // We succeeded. 
remap_guard.Cancel(); @@ -1170,9 +1174,9 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { return ResultSuccess; } -Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Validate that the source address's state is valid. KMemoryState src_state; @@ -1192,19 +1196,21 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t s // Create an update allocator for the source. Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator( - std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator( - std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); R_TRY(dst_allocator_result); // Map the memory. KPageGroup page_linked_list; - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; const KMemoryPermission new_src_perm = static_cast( KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; @@ -1223,21 +1229,21 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t s } // Apply the memory block updates. - memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, - new_src_perm, new_src_attr, - KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::Stack, KMemoryPermission::UserReadWrite, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + new_src_perm, new_src_attr, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::Stack, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } -Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Validate that the source address's state is valid. KMemoryState src_state; @@ -1258,19 +1264,21 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t // Create an update allocator for the source. 
Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator( - std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator( - std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); R_TRY(dst_allocator_result); KPageGroup src_pages; KPageGroup dst_pages; - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; AddRegionToPages(src_address, num_pages, src_pages); AddRegionToPages(dst_address, num_pages, dst_pages); @@ -1290,14 +1298,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t } // Apply the memory block updates. - memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Locked); - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::None, KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } @@ -1312,7 +1320,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, if (const auto result{ Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; result.IsError()) { - const std::size_t num_pages{(addr - cur_addr) / PageSize}; + const size_t num_pages{(addr - cur_addr) / PageSize}; ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) .IsSuccess()); @@ -1329,12 +1337,12 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, KMemoryPermission perm) { // Check that the map is in range. - const std::size_t num_pages{page_linked_list.GetNumPages()}; - const std::size_t size{num_pages * PageSize}; + const size_t num_pages{page_linked_list.GetNumPages()}; + const size_t size{num_pages * PageSize}; R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, @@ -1344,23 +1352,22 @@ Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemory // Create an update allocator. 
Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager); + m_memory_block_slab_manager); // Map the pages. R_TRY(MapPages(address, page_linked_list, perm)); // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } -Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, - PAddr phys_addr, bool is_pa_valid, VAddr region_start, - std::size_t region_num_pages, KMemoryState state, - KMemoryPermission perm) { +Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, + bool is_pa_valid, VAddr region_start, size_t region_num_pages, + KMemoryState state, KMemoryPermission perm) { ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); // Ensure this is a valid map request. @@ -1369,7 +1376,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Find a random address to map at. VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, @@ -1385,7 +1392,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager); + m_memory_block_slab_manager); // Perform mapping operation. if (is_pa_valid) { @@ -1395,9 +1402,9 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t } // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); // We successfully mapped the pages. *out_addr = addr; @@ -1424,12 +1431,12 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { // Check that the unmap is in range. - const std::size_t num_pages{page_linked_list.GetNumPages()}; - const std::size_t size{num_pages * PageSize}; + const size_t num_pages{page_linked_list.GetNumPages()}; + const size_t size{num_pages * PageSize}; R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. size_t num_allocator_blocks; @@ -1441,31 +1448,31 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo // Create an update allocator. 
Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Perform the unmap. R_TRY(UnmapPages(address, page_linked_list)); // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } -Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { +Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { // Check that the unmap is in range. - const std::size_t size = num_pages * PageSize; + const size_t size = num_pages * PageSize; R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. - std::size_t num_allocator_blocks{}; + size_t num_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState::All, state, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, @@ -1474,17 +1481,17 @@ Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Perform the unmap. R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } @@ -1501,7 +1508,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check if state allows us to create the group. R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, @@ -1514,12 +1521,12 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n return ResultSuccess; } -Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, +Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) { const size_t num_pages = size / PageSize; // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify we can change the memory permission. 
KMemoryState old_state;
@@ -1559,7 +1566,7 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Perform mapping operation.
@@ -1568,29 +1575,29 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     R_TRY(Operate(addr, num_pages, new_perm, operation));
 
     // Update the blocks.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
-                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     // Ensure cache coherency, if we're setting pages as executable.
     if (is_x) {
-        system.InvalidateCpuInstructionCacheRange(addr, size);
+        m_system.InvalidateCpuInstructionCacheRange(addr, size);
     }
 
     return ResultSuccess;
 }
 
 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
-    return memory_block_manager.FindBlock(addr)->GetMemoryInfo();
+    return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
 }
 
 KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     if (!Contains(addr, 1)) {
         return {
-            .m_address = address_space_end,
-            .m_size = 0 - address_space_end,
+            .m_address = m_address_space_end,
+            .m_size = 0 - m_address_space_end,
             .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
             .m_device_disable_merge_left_count = 0,
             .m_device_disable_merge_right_count = 0,
@@ -1607,12 +1614,11 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     return QueryInfoImpl(addr);
 }
 
-Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
-                                       Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -1631,27 +1637,27 @@ Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Perform mapping operation.
     R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
 
     // Update the blocks.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     return ResultSuccess;
 }
 
-Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
     const size_t num_pages = size / PageSize;
     ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
            KMemoryAttribute::SetMask);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory attribute.
     KMemoryState old_state;
@@ -1669,7 +1675,7 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Determine the new attribute.
@@ -1681,124 +1687,125 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
     this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
 
     // Update the blocks.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
-                                new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     return ResultSuccess;
 }
 
-Result KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(size_t size) {
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Only process page tables are allowed to set heap size.
     ASSERT(!this->IsKernel());
 
-    max_heap_size = size;
+    m_max_heap_size = size;
 
     return ResultSuccess;
 }
 
-Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     // Lock the physical memory mutex.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Try to perform a reduction in heap, instead of an extension.
     VAddr cur_address{};
-    std::size_t allocation_size{};
+    size_t allocation_size{};
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
         // Validate that setting heap size is possible at all.
-        R_UNLESS(!is_kernel, ResultOutOfMemory);
-        R_UNLESS(size <= static_cast<size_t>(heap_region_end - heap_region_start),
+        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
                  ResultOutOfMemory);
-        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
 
         if (size < GetHeapSize()) {
            // The size being requested is less than the current size, so we need to free the end of
            // the heap.
 
            // Validate memory state.
-            std::size_t num_allocator_blocks;
+            size_t num_allocator_blocks;
             R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                         heap_region_start + size, GetHeapSize() - size,
+                                         m_heap_region_start + size, GetHeapSize() - size,
                                          KMemoryState::All, KMemoryState::Normal,
                                          KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                          KMemoryAttribute::All, KMemoryAttribute::None));
 
             // Create an update allocator.
             Result allocator_result{ResultSuccess};
-            KMemoryBlockManagerUpdateAllocator allocator(
-                std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks);
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_allocator_blocks);
             R_TRY(allocator_result);
 
             // Unmap the end of the heap.
             const auto num_pages = (GetHeapSize() - size) / PageSize;
-            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+            R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
                           OperationType::Unmap));
 
             // Release the memory from the resource limit.
-            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+            m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
                 LimitableResource::PhysicalMemory, num_pages * PageSize);
 
             // Apply the memory block update.
-            memory_block_manager.Update(std::addressof(allocator), heap_region_start + size,
-                                        num_pages, KMemoryState::Free, KMemoryPermission::None,
-                                        KMemoryAttribute::None,
-                                        KMemoryBlockDisableMergeAttribute::None,
-                                        size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
-                                                  : KMemoryBlockDisableMergeAttribute::None);
+            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
+                                          KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::None,
+                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+                                                    : KMemoryBlockDisableMergeAttribute::None);
 
             // Update the current heap end.
-            current_heap_end = heap_region_start + size;
+            m_current_heap_end = m_heap_region_start + size;
 
             // Set the output.
-            *out = heap_region_start;
+            *out = m_heap_region_start;
             return ResultSuccess;
         } else if (size == GetHeapSize()) {
             // The size requested is exactly the current size.
-            *out = heap_region_start;
+            *out = m_heap_region_start;
             return ResultSuccess;
         } else {
             // We have to allocate memory. Determine how much to allocate and where while the table
             // is locked.
-            cur_address = current_heap_end;
+            cur_address = m_current_heap_end;
             allocation_size = size - GetHeapSize();
         }
     }
 
     // Reserve memory for the heap extension.
     KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+        m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
         allocation_size);
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
     KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
-        KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+        KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg.Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer<void>(it.GetAddress()), heap_fill_value,
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
 
     // Map the pages.
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
         // Ensure that the heap hasn't changed since we began executing.
-        ASSERT(cur_address == current_heap_end);
+        ASSERT(cur_address == m_current_heap_end);
 
         // Check the memory state.
-        std::size_t num_allocator_blocks{};
-        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+        size_t num_allocator_blocks{};
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
                                      allocation_size, KMemoryState::All, KMemoryState::Free,
                                      KMemoryPermission::None, KMemoryPermission::None,
                                      KMemoryAttribute::None, KMemoryAttribute::None));
@@ -1806,16 +1813,16 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
         // Create an update allocator.
         Result allocator_result{ResultSuccess};
         KMemoryBlockManagerUpdateAllocator allocator(
-            std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks);
+            std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
         R_TRY(allocator_result);
 
         // Map the pages.
         const auto num_pages = allocation_size / PageSize;
-        R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));
+        R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
 
         // Clear all the newly allocated pages.
-        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
-            std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+        for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+            std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
                         PageSize);
         }
 
@@ -1823,27 +1830,27 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
         memory_reservation.Commit();
 
         // Apply the memory block update.
-        memory_block_manager.Update(
-            std::addressof(allocator), current_heap_end, num_pages, KMemoryState::Normal,
+        m_memory_block_manager.Update(
+            std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
             KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-            heap_region_start == current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
-                                                  : KMemoryBlockDisableMergeAttribute::None,
+            m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+                                                      : KMemoryBlockDisableMergeAttribute::None,
             KMemoryBlockDisableMergeAttribute::None);
 
         // Update the current heap end.
-        current_heap_end = heap_region_start + size;
+        m_current_heap_end = m_heap_region_start + size;
 
         // Set the output.
-        *out = heap_region_start;
+        *out = m_heap_region_start;
         return ResultSuccess;
     }
 }
 
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
+ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
                                                   bool is_map_only, VAddr region_start,
-                                                  std::size_t region_num_pages, KMemoryState state,
+                                                  size_t region_num_pages, KMemoryState state,
                                                   KMemoryPermission perm, PAddr map_addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     if (!CanContain(region_start, region_num_pages * PageSize, state)) {
         return ResultInvalidCurrentMemory;
@@ -1862,33 +1869,98 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
 
     // Create an update allocator.
Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager); + m_memory_block_slab_manager); if (is_map_only) { R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); } else { KPageGroup page_group; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( &page_group, needed_num_pages, - KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); + KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); } // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return addr; } -Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size) { +Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, + bool is_aligned) { // Lightly validate the range before doing anything else. const size_t num_pages = size / PageSize; R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + const auto test_state = + (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, + test_state, perm, perm, + KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, + KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, + &KMemoryBlock::ShareToDevice, KMemoryPermission::None); + + return ResultSuccess; +} + +Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_allocator_blocks), address, size, + KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. 
+    const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+        m_enable_device_address_space_merge
+            ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+            : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+                                      KMemoryPermission::None);
+
+    return ResultSuccess;
+}
+
+Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
 
     // Check the memory state.
     size_t num_allocator_blocks;
@@ -1900,17 +1972,17 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size)
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Update the memory blocks.
-    memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
-                                    &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
 
     return ResultSuccess;
 }
 
-Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
+Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
     return this->LockMemoryAndOpen(
         out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -1920,7 +1992,7 @@ Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t si
         KMemoryAttribute::Locked);
 }
 
-Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
+Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
     return this->UnlockMemory(
         addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
@@ -1928,9 +2000,9 @@ Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPage
 }
 
 bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
-    auto start_ptr = system.DeviceMemory().GetPointer<u8>(addr);
+    auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
     for (u64 offset{}; offset < size; offset += PageSize) {
-        if (start_ptr != system.DeviceMemory().GetPointer<u8>(addr + offset)) {
+        if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
             return false;
         }
         start_ptr += PageSize;
@@ -1938,8 +2010,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
     return true;
 }
 
-void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
-                                  KPageGroup& page_linked_list) {
+void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
     VAddr addr{start};
     while (addr < start + (num_pages * PageSize)) {
         const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1949,16 +2020,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
     }
 }
 
-VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
-                                        u64 needed_num_pages, std::size_t align) {
-    if (is_aslr_enabled) {
+VAddr 
KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, + size_t align) { + if (m_enable_aslr) { UNIMPLEMENTED(); } - return memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, - IsKernel() ? 1 : 4); + return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 1 : 4); } -Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, +Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, OperationType operation) { ASSERT(this->IsLockedByCurrentThread()); @@ -1967,11 +2038,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& ASSERT(num_pages == page_group.GetNumPages()); for (const auto& node : page_group.Nodes()) { - const std::size_t size{node.GetNumPages() * PageSize}; + const size_t size{node.GetNumPages() * PageSize}; switch (operation) { case OperationType::MapGroup: - system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); + m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, size, node.GetAddress()); break; default: ASSERT(false); @@ -1983,7 +2054,7 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& return ResultSuccess; } -Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, +Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, PAddr map_addr) { ASSERT(this->IsLockedByCurrentThread()); @@ -1993,12 +2064,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission switch (operation) { case OperationType::Unmap: - system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); + m_system.Memory().UnmapRegion(m_page_table_impl, addr, num_pages * PageSize); break; case OperationType::Map: { ASSERT(map_addr); ASSERT(Common::IsAligned(map_addr, PageSize)); - system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); + m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, num_pages * PageSize, map_addr); break; } case OperationType::ChangePermissions: @@ -2014,18 +2085,18 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: - return address_space_start; + return m_address_space_start; case KMemoryState::Normal: - return heap_region_start; + return m_heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return alias_region_start; + return m_alias_region_start; case KMemoryState::Stack: - return stack_region_start; + return m_stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return kernel_map_region_start; + return m_kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -2036,31 +2107,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return alias_code_region_start; + return m_alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return code_region_start; + return m_code_region_start; default: UNREACHABLE(); } } -std::size_t KPageTable::GetRegionSize(KMemoryState state) const { +size_t KPageTable::GetRegionSize(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: 
- return address_space_end - address_space_start; + return m_address_space_end - m_address_space_start; case KMemoryState::Normal: - return heap_region_end - heap_region_start; + return m_heap_region_end - m_heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return alias_region_end - alias_region_start; + return m_alias_region_end - m_alias_region_start; case KMemoryState::Stack: - return stack_region_end - stack_region_start; + return m_stack_region_end - m_stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return kernel_map_region_end - kernel_map_region_start; + return m_kernel_map_region_end - m_kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -2071,16 +2142,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return alias_code_region_end - alias_code_region_start; + return m_alias_code_region_end - m_alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return code_region_end - code_region_start; + return m_code_region_end - m_code_region_start; default: UNREACHABLE(); } } -bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { +bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { const VAddr end = addr + size; const VAddr last = end - 1; @@ -2089,10 +2160,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1; - const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || - heap_region_start == heap_region_end); - const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || - alias_region_start == alias_region_end); + const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || + m_heap_region_start == m_heap_region_end); + const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || + m_alias_region_start == m_alias_region_end); switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: @@ -2138,16 +2209,16 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_ return ResultSuccess; } -Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, - std::size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, - KMemoryPermission perm, KMemoryAttribute attr_mask, +Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) const { ASSERT(this->IsLockedByCurrentThread()); // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2165,7 +2236,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA // Advance our iterator. 
it++; - ASSERT(it != memory_block_manager.cend()); + ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2181,8 +2252,8 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA } Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, - VAddr addr, std::size_t size, KMemoryState state_mask, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, + VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { @@ -2190,7 +2261,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2218,7 +2289,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Advance our iterator. it++; - ASSERT(it != memory_block_manager.cend()); + ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2257,7 +2328,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check that the output page group is empty, if it exists. if (out_pg) { @@ -2288,7 +2359,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Decide on new perm and attr. @@ -2301,9 +2372,9 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr } // Apply the memory block updates. - memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, - new_attr, KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } @@ -2322,7 +2393,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the state. KMemoryState old_state{}; @@ -2347,7 +2418,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Update permission, if we need to. 
@@ -2356,9 +2427,9 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
     }
 
     // Apply the memory block updates.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::Locked);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Locked);
 
     return ResultSuccess;
 }
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index fa11a0fe36..2258543197 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -36,60 +36,66 @@ public:
     ~KPageTable();
 
     Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                VAddr code_addr, std::size_t code_size,
+                                VAddr code_addr, size_t code_size,
                                 KMemoryBlockSlabManager* mem_block_slab_manager,
                                 KMemoryManager::Pool pool);
 
     void Finalize();
 
-    Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+    Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
                           KMemoryPermission perm);
-    Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
-    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+    Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
+    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                            ICacheInvalidationStrategy icache_invalidation_strategy);
-    Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+    Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                               VAddr src_addr);
-    Result MapPhysicalMemory(VAddr addr, std::size_t size);
-    Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
-    Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    Result MapPhysicalMemory(VAddr addr, size_t size);
+    Result UnmapPhysicalMemory(VAddr addr, size_t size);
+    Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
+    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
     Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
                     KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                     KMemoryState state, KMemoryPermission perm) {
         return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
                               this->GetRegionAddress(state),
                               this->GetRegionSize(state) / PageSize, state, perm);
     }
     Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
-    Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
-    Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
+    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
+    Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
-    Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
-    Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
-    Result SetMaxHeapSize(std::size_t size);
-    Result SetHeapSize(VAddr* out, std::size_t size);
-    ResultVal<VAddr> 
AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
-                                      bool is_map_only, VAddr region_start,
-                                      std::size_t region_num_pages, KMemoryState state,
-                                      KMemoryPermission perm, PAddr map_addr = 0);
-    Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
-    Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+    Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
+    Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
+    Result SetMaxHeapSize(size_t size);
+    Result SetHeapSize(VAddr* out, size_t size);
+    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
+                                          VAddr region_start, size_t region_num_pages,
+                                          KMemoryState state, KMemoryPermission perm,
+                                          PAddr map_addr = 0);
+
+    Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
+                                        bool is_aligned);
+    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
+
+    Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+
+    Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
+    Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
     Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                 KMemoryState state_mask, KMemoryState state,
                                 KMemoryPermission perm_mask, KMemoryPermission perm,
                                 KMemoryAttribute attr_mask, KMemoryAttribute attr);
 
     Common::PageTable& PageTableImpl() {
-        return page_table_impl;
+        return m_page_table_impl;
     }
 
     const Common::PageTable& PageTableImpl() const {
-        return page_table_impl;
+        return m_page_table_impl;
     }
 
-    bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
+    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 private:
     enum class OperationType : u32 {
@@ -104,30 +110,30 @@ private:
         KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
 
     Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
-                    bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
                     KMemoryState state, KMemoryPermission perm);
     Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
-    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
+    void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
-    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
-                                std::size_t align);
-    Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+    VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
+                                size_t align);
+    Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
                    OperationType operation);
-    Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
-                   OperationType operation, PAddr map_addr = 0);
+    Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
+                   PAddr map_addr = 0);
     VAddr GetRegionAddress(KMemoryState state) const;
-    std::size_t GetRegionSize(KMemoryState state) const;
+    size_t 
GetRegionSize(KMemoryState state) const; - VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, - std::size_t alignment, std::size_t offset, std::size_t guard_pages); + VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages); - Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, + Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; - Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, + Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { @@ -139,12 +145,12 @@ private: KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, - std::size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr, + size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; - Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, + Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, @@ -152,8 +158,8 @@ private: return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); } - Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, + Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, @@ -175,13 +181,13 @@ private: bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); bool IsLockedByCurrentThread() const { - return general_lock.IsLockedByCurrentThread(); + return m_general_lock.IsLockedByCurrentThread(); } bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { ASSERT(this->IsLockedByCurrentThread()); - return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); + return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); } bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { @@ -192,93 +198,93 @@ private: return *out != 0; } - mutable KLightLock general_lock; - mutable KLightLock map_physical_memory_lock; + mutable KLightLock m_general_lock; + mutable KLightLock m_map_physical_memory_lock; public: constexpr VAddr GetAddressSpaceStart() const { - return address_space_start; + 
return m_address_space_start; } constexpr VAddr GetAddressSpaceEnd() const { - return address_space_end; + return m_address_space_end; } - constexpr std::size_t GetAddressSpaceSize() const { - return address_space_end - address_space_start; + constexpr size_t GetAddressSpaceSize() const { + return m_address_space_end - m_address_space_start; } constexpr VAddr GetHeapRegionStart() const { - return heap_region_start; + return m_heap_region_start; } constexpr VAddr GetHeapRegionEnd() const { - return heap_region_end; + return m_heap_region_end; } - constexpr std::size_t GetHeapRegionSize() const { - return heap_region_end - heap_region_start; + constexpr size_t GetHeapRegionSize() const { + return m_heap_region_end - m_heap_region_start; } constexpr VAddr GetAliasRegionStart() const { - return alias_region_start; + return m_alias_region_start; } constexpr VAddr GetAliasRegionEnd() const { - return alias_region_end; + return m_alias_region_end; } - constexpr std::size_t GetAliasRegionSize() const { - return alias_region_end - alias_region_start; + constexpr size_t GetAliasRegionSize() const { + return m_alias_region_end - m_alias_region_start; } constexpr VAddr GetStackRegionStart() const { - return stack_region_start; + return m_stack_region_start; } constexpr VAddr GetStackRegionEnd() const { - return stack_region_end; + return m_stack_region_end; } - constexpr std::size_t GetStackRegionSize() const { - return stack_region_end - stack_region_start; + constexpr size_t GetStackRegionSize() const { + return m_stack_region_end - m_stack_region_start; } constexpr VAddr GetKernelMapRegionStart() const { - return kernel_map_region_start; + return m_kernel_map_region_start; } constexpr VAddr GetKernelMapRegionEnd() const { - return kernel_map_region_end; + return m_kernel_map_region_end; } constexpr VAddr GetCodeRegionStart() const { - return code_region_start; + return m_code_region_start; } constexpr VAddr GetCodeRegionEnd() const { - return code_region_end; + return m_code_region_end; } constexpr VAddr GetAliasCodeRegionStart() const { - return alias_code_region_start; + return m_alias_code_region_start; } constexpr VAddr GetAliasCodeRegionSize() const { - return alias_code_region_end - alias_code_region_start; + return m_alias_code_region_end - m_alias_code_region_start; } - std::size_t GetNormalMemorySize() { - KScopedLightLock lk(general_lock); - return GetHeapSize() + mapped_physical_memory_size; + size_t GetNormalMemorySize() { + KScopedLightLock lk(m_general_lock); + return GetHeapSize() + m_mapped_physical_memory_size; } - constexpr std::size_t GetAddressSpaceWidth() const { - return address_space_width; + constexpr size_t GetAddressSpaceWidth() const { + return m_address_space_width; } - constexpr std::size_t GetHeapSize() const { - return current_heap_end - heap_region_start; + constexpr size_t GetHeapSize() const { + return m_current_heap_end - m_heap_region_start; } - constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { - return address_space_start <= address && address + size - 1 <= address_space_end - 1; + constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const { + return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1; } - constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { - return alias_region_start > address || address + size - 1 > alias_region_end - 1; + constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const { + return m_alias_region_start > address || address + size - 
1 > m_alias_region_end - 1; } - constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { - return stack_region_start > address || address + size - 1 > stack_region_end - 1; + constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const { + return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1; } - constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { + constexpr bool IsInvalidRegion(VAddr address, size_t size) const { return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; } - constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { - return address + size > heap_region_start && heap_region_end > address; + constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const { + return address + size > m_heap_region_start && m_heap_region_end > address; } - constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { - return address + size > alias_region_start && alias_region_end > address; + constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const { + return address + size > m_alias_region_start && m_alias_region_end > address; } - constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { + constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const { if (IsInvalidRegion(address, size)) { return true; } @@ -290,77 +296,78 @@ public: } return {}; } - constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { + constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const { return !IsOutsideASLRRegion(address, size); } - constexpr std::size_t GetNumGuardPages() const { + constexpr size_t GetNumGuardPages() const { return IsKernel() ? 1 : 4; } PAddr GetPhysicalAddr(VAddr addr) const { - const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; + const auto backing_addr = m_page_table_impl.backing_addr[addr >> PageBits]; ASSERT(backing_addr); return backing_addr + addr; } constexpr bool Contains(VAddr addr) const { - return address_space_start <= addr && addr <= address_space_end - 1; + return m_address_space_start <= addr && addr <= m_address_space_end - 1; } - constexpr bool Contains(VAddr addr, std::size_t size) const { - return address_space_start <= addr && addr < addr + size && - addr + size - 1 <= address_space_end - 1; + constexpr bool Contains(VAddr addr, size_t size) const { + return m_address_space_start <= addr && addr < addr + size && + addr + size - 1 <= m_address_space_end - 1; } private: constexpr bool IsKernel() const { - return is_kernel; + return m_is_kernel; } constexpr bool IsAslrEnabled() const { - return is_aslr_enabled; + return m_enable_aslr; } - constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { - return (address_space_start <= addr) && - (num_pages <= (address_space_end - address_space_start) / PageSize) && - (addr + num_pages * PageSize - 1 <= address_space_end - 1); + constexpr bool ContainsPages(VAddr addr, size_t num_pages) const { + return (m_address_space_start <= addr) && + (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && + (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); } private: - VAddr address_space_start{}; - VAddr address_space_end{}; - VAddr heap_region_start{}; - VAddr heap_region_end{}; - VAddr current_heap_end{}; - VAddr alias_region_start{}; - VAddr alias_region_end{}; - VAddr stack_region_start{}; - VAddr stack_region_end{}; - VAddr kernel_map_region_start{}; - VAddr 
kernel_map_region_end{};
-    VAddr code_region_start{};
-    VAddr code_region_end{};
-    VAddr alias_code_region_start{};
-    VAddr alias_code_region_end{};
+    VAddr m_address_space_start{};
+    VAddr m_address_space_end{};
+    VAddr m_heap_region_start{};
+    VAddr m_heap_region_end{};
+    VAddr m_current_heap_end{};
+    VAddr m_alias_region_start{};
+    VAddr m_alias_region_end{};
+    VAddr m_stack_region_start{};
+    VAddr m_stack_region_end{};
+    VAddr m_kernel_map_region_start{};
+    VAddr m_kernel_map_region_end{};
+    VAddr m_code_region_start{};
+    VAddr m_code_region_end{};
+    VAddr m_alias_code_region_start{};
+    VAddr m_alias_code_region_end{};
 
-    std::size_t mapped_physical_memory_size{};
-    std::size_t max_heap_size{};
-    std::size_t max_physical_memory_size{};
-    std::size_t address_space_width{};
+    size_t m_mapped_physical_memory_size{};
+    size_t m_max_heap_size{};
+    size_t m_max_physical_memory_size{};
+    size_t m_address_space_width{};
 
-    KMemoryBlockManager memory_block_manager;
+    KMemoryBlockManager m_memory_block_manager;
 
-    bool is_kernel{};
-    bool is_aslr_enabled{};
+    bool m_is_kernel{};
+    bool m_enable_aslr{};
+    bool m_enable_device_address_space_merge{};
 
-    KMemoryBlockSlabManager* memory_block_slab_manager{};
+    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
 
-    u32 heap_fill_value{};
-    const KMemoryRegion* cached_physical_heap_region{};
+    u32 m_heap_fill_value{};
+    const KMemoryRegion* m_cached_physical_heap_region{};
 
-    KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
-    KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
+    KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
+    KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
 
-    Common::PageTable page_table_impl;
+    Common::PageTable m_page_table_impl;
 
-    Core::System& system;
+    Core::System& m_system;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index ddf273b5ec..b606790217 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
     }
     ASSERT(system.CurrentProcess()
                ->PageTable()
-               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
+                                             Kernel::KMemoryPermission::None, true)
                .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
     return result;

From 8d4e026d0575fe705957d6f17dc90a57f1bc0bf7 Mon Sep 17 00:00:00 2001
From: bunnei 
Date: Sun, 11 Sep 2022 00:06:41 -0700
Subject: [PATCH 16/25] core: hle: kernel: Remove junk.

---
 src/core/hle/kernel/kernel.cpp | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index d572394727..b6bbd4984b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -108,10 +108,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (auto& core : cores) {
-            core = nullptr;
-        }
-
         global_handle_table->Finalize();
         global_handle_table.reset();
 
@@ -365,11 +361,6 @@ struct KernelCore::Impl {
     static inline thread_local KThread* current_thread{nullptr};
 
     KThread* GetCurrentEmuThread() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (IsShuttingDown()) {
-            return {};
-        }
-
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();

From 79bcb38321cbde163280b04ee5a03773d54edfd9 Mon Sep 17 00:00:00 2001
From: bunnei 
Date: Sun, 2 Oct 2022 02:06:13 -0700
Subject: [PATCH 17/25] core: hle: kernel: k_interrupt_manager: HandleInterrupt
 should not depend on current process.

---
 src/core/hle/kernel/k_interrupt_manager.cpp | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index ad73f3eab5..4a6b60d268 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -11,25 +11,22 @@ namespace Kernel::KInterruptManager {
 
 void HandleInterrupt(KernelCore& kernel, s32 core_id) {
-    auto* process = kernel.CurrentProcess();
-    if (!process) {
-        return;
-    }
-
     // Acknowledge the interrupt.
     kernel.PhysicalCore(core_id).ClearInterrupt();
 
     auto& current_thread = GetCurrentThread(kernel);
 
-    // If the user disable count is set, we may need to pin the current thread.
-    if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
-        KScopedSchedulerLock sl{kernel};
+    if (auto* process = kernel.CurrentProcess(); process) {
+        // If the user disable count is set, we may need to pin the current thread.
+        if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
+            KScopedSchedulerLock sl{kernel};
 
-        // Pin the current thread.
-        process->PinCurrentThread(core_id);
+            // Pin the current thread.
+            process->PinCurrentThread(core_id);
 
-        // Set the interrupt flag for the thread.
-        GetCurrentThread(kernel).SetInterruptFlag();
+            // Set the interrupt flag for the thread.
+            GetCurrentThread(kernel).SetInterruptFlag();
+        }
     }
 
     // Request interrupt scheduling.

From abcc009dff5d98b5a04229f3a82baab23d568244 Mon Sep 17 00:00:00 2001
From: bunnei 
Date: Sun, 2 Oct 2022 14:26:30 -0700
Subject: [PATCH 18/25] core: hle: kernel: k_process: Improve management of
 page table & cleanup.

---
 src/core/hle/kernel/k_page_table.cpp | 23 +++++++----
 src/core/hle/kernel/k_page_table.h   |  8 ++--
 src/core/hle/kernel/k_process.cpp    | 62 +++++++++++++++++-----------
 src/core/hle/kernel/k_process.h      | 31 ++++++++------
 src/core/hle/kernel/kernel.cpp       | 23 +++++++----
 src/core/hle/kernel/kernel.h         |  3 ++
 src/core/hle/kernel/svc.cpp          |  2 +-
 7 files changed, 92 insertions(+), 60 deletions(-)

diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index fcffc0b88d..22098c056e 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -256,16 +256,21 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     m_mapped_physical_memory_size = 0;
     m_memory_pool = pool;
 
-    m_page_table_impl.Resize(m_address_space_width, PageBits);
+    m_page_table_impl = std::make_unique<Common::PageTable>();
+    m_page_table_impl->Resize(m_address_space_width, PageBits);
 
     return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
                                              m_memory_block_slab_manager);
 }
 
 void KPageTable::Finalize() {
+    // Finalize memory blocks.
     m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) {
-        m_system.Memory().UnmapRegion(m_page_table_impl, addr, size);
+        m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
     });
+
+    // Close the backing page table, as the destructor is not called for guest objects.
+ m_page_table_impl.reset(); } Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, @@ -514,7 +519,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - R_UNLESS(m_page_table_impl.BeginTraversal(next_entry, context, addr), + R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory); // Prepare tracking variables. @@ -525,7 +530,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { // Iterate, adding to group as we go. const auto& memory_layout = m_system.Kernel().MemoryLayout(); while (tot_size < size) { - R_UNLESS(m_page_table_impl.ContinueTraversal(next_entry, context), + R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context), ResultInvalidCurrentMemory); if (next_entry.phys_addr != (cur_addr + cur_size)) { @@ -588,7 +593,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - if (!m_page_table_impl.BeginTraversal(next_entry, context, addr)) { + if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) { return false; } @@ -599,7 +604,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Iterate, comparing expected to actual. while (tot_size < size) { - if (!m_page_table_impl.ContinueTraversal(next_entry, context)) { + if (!m_page_table_impl->ContinueTraversal(next_entry, context)) { return false; } @@ -2042,7 +2047,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_ switch (operation) { case OperationType::MapGroup: - m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, size, node.GetAddress()); + m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); break; default: ASSERT(false); @@ -2064,12 +2069,12 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, switch (operation) { case OperationType::Unmap: - m_system.Memory().UnmapRegion(m_page_table_impl, addr, num_pages * PageSize); + m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); break; case OperationType::Map: { ASSERT(map_addr); ASSERT(Common::IsAligned(map_addr, PageSize)); - m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, num_pages * PageSize, map_addr); + m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); break; } case OperationType::ChangePermissions: diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 2258543197..1811d3e2d2 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -88,11 +88,11 @@ public: KMemoryAttribute attr_mask, KMemoryAttribute attr); Common::PageTable& PageTableImpl() { - return m_page_table_impl; + return *m_page_table_impl; } const Common::PageTable& PageTableImpl() const { - return m_page_table_impl; + return *m_page_table_impl; } bool CanContain(VAddr addr, size_t size, KMemoryState state) const; @@ -303,7 +303,7 @@ public: return IsKernel() ? 
1 : 4;
     }
 
     PAddr GetPhysicalAddr(VAddr addr) const {
-        const auto backing_addr = m_page_table_impl.backing_addr[addr >> PageBits];
+        const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
         return backing_addr + addr;
     }
@@ -365,7 +365,7 @@ private:
     KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
     KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
 
-    Common::PageTable m_page_table_impl;
+    std::unique_ptr<Common::PageTable> m_page_table_impl;
 
     Core::System& m_system;
 };
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index abc2115bd9..1a0aec56a7 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -72,6 +72,7 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
 
     process->name = std::move(process_name);
     process->resource_limit = res_limit;
+    process->system_resource_address = 0;
     process->state = State::Created;
     process->program_id = 0;
     process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
@@ -92,6 +93,7 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
     process->exception_thread = nullptr;
     process->is_suspended = false;
     process->schedule_count = 0;
+    process->is_handle_table_initialized = false;
 
     // Open a reference to the resource limit.
     process->resource_limit->Open();
@@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
     }
 }
 
-u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
+u64 KProcess::GetTotalPhysicalMemoryAvailable() {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
-                       page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
+                       page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
     if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
@@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
     return memory_usage_capacity;
 }
 
-u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
+u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
     return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
 }
 
-u64 KProcess::GetTotalPhysicalMemoryUsed() const {
-    return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
+u64 KProcess::GetTotalPhysicalMemoryUsed() {
+    return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
            GetSystemResourceSize();
 }
 
-u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
+u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
     return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
 }
 
@@ -348,6 +350,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     system_resource_size = metadata.GetSystemResourceSize();
     image_size = code_size;
 
+    // We currently do not support process-specific system resource
+    UNIMPLEMENTED_IF(system_resource_size != 0);
+
     KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                   code_size + system_resource_size);
     if (!memory_reservation.Succeeded()) {
@@ -356,7 +361,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
         return ResultLimitReached;
     }
     // Initialize process address space
-    if (const Result result{page_table->InitializeForProcess(
+    if (const Result
result{page_table.InitializeForProcess( metadata.GetAddressSpaceType(), false, 0x8000000, code_size, &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; result.IsError()) { @@ -364,9 +369,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: } // Map process code region - if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(), - code_size / PageSize, KMemoryState::Code, - KMemoryPermission::None)}; + if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(), + code_size / PageSize, KMemoryState::Code, + KMemoryPermission::None)}; result.IsError()) { return result; } @@ -374,7 +379,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: // Initialize process capabilities const auto& caps{metadata.GetKernelCapabilities()}; if (const Result result{ - capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)}; + capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; result.IsError()) { return result; } @@ -384,12 +389,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is36Bit: case FileSys::ProgramAddressSpaceType::Is39Bit: - memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); + memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart(); break; case FileSys::ProgramAddressSpaceType::Is32BitNoMap: - memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + - page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); + memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() + + page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart(); break; default: @@ -397,7 +402,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: } // Create TLS region - R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); + R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); memory_reservation.Commit(); return handle_table.Initialize(capabilities.GetHandleTableSize()); @@ -409,7 +414,7 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) { resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; - ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError()); + ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); ChangeState(State::Running); @@ -437,8 +442,8 @@ void KProcess::PrepareForTermination() { stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); - this->DeleteThreadLocalRegion(tls_region_address); - tls_region_address = 0; + this->DeleteThreadLocalRegion(plr_address); + plr_address = 0; if (resource_limit) { resource_limit->Release(LimitableResource::PhysicalMemory, @@ -474,7 +479,7 @@ void KProcess::Finalize() { } // Finalize the page table. - page_table.reset(); + page_table.Finalize(); // Perform inherited finalization. 
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
 }
 
@@ -628,7 +633,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                       Svc::MemoryPermission permission) {
-        page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+        page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };
 
     kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -645,8 +650,7 @@ bool KProcess::IsSignaled() const {
 }
 
 KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
-                                                        kernel_.System())},
+    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
       handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
       state_lock{kernel_}, list_lock{kernel_} {}
 
@@ -668,11 +672,11 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
     // The kernel always ensures that the given stack size is page aligned.
     main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
 
-    const VAddr start{page_table->GetStackRegionStart()};
-    const std::size_t size{page_table->GetStackRegionEnd() - start};
+    const VAddr start{page_table.GetStackRegionStart()};
+    const std::size_t size{page_table.GetStackRegionEnd() - start};
 
     CASCADE_RESULT(main_thread_stack_top,
-                   page_table->AllocateAndMapMemory(
+                   page_table.AllocateAndMapMemory(
                        main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
                        KMemoryState::Stack, KMemoryPermission::UserReadWrite));
 
@@ -681,4 +685,12 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
     return ResultSuccess;
 }
 
+void KProcess::FinalizeHandleTable() {
+    // Finalize the table.
+    handle_table.Finalize();
+
+    // Note that the table is finalized.
+    is_handle_table_initialized = false;
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index b1c7da4543..fcc2897f99 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -13,6 +13,7 @@
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_worker_task.h"
@@ -31,7 +32,6 @@ class ProgramMetadata;
 namespace Kernel {
 
 class KernelCore;
-class KPageTable;
 class KResourceLimit;
 class KThread;
 class KSharedMemoryInfo;
@@ -107,12 +107,12 @@ public:
 
     /// Gets a reference to the process' page table.
     KPageTable& PageTable() {
-        return *page_table;
+        return page_table;
    }
 
     /// Gets a const reference to the process' page table.
     const KPageTable& PageTable() const {
-        return *page_table;
+        return page_table;
     }
 
     /// Gets a reference to the process' handle table.
@@ -150,9 +150,8 @@ public:
         return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
     }
 
-    /// Gets the address to the process' dedicated TLS region.
-    VAddr GetTLSRegionAddress() const {
-        return tls_region_address;
+    VAddr GetProcessLocalRegionAddress() const {
+        return plr_address;
     }
 
     /// Gets the current status of the process
@@ -279,18 +278,18 @@ public:
     }
 
     /// Retrieves the total physical memory available to this process in bytes.
- u64 GetTotalPhysicalMemoryAvailable() const; + u64 GetTotalPhysicalMemoryAvailable(); /// Retrieves the total physical memory available to this process in bytes, /// without the size of the personal system resource heap added to it. - u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; + u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource(); /// Retrieves the total physical memory used by this process in bytes. - u64 GetTotalPhysicalMemoryUsed() const; + u64 GetTotalPhysicalMemoryUsed(); /// Retrieves the total physical memory used by this process in bytes, /// without the size of the personal system resource heap added to it. - u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; + u64 GetTotalPhysicalMemoryUsedWithoutSystemResource(); /// Gets the list of all threads created with this process as their owner. std::list& GetThreadList() { @@ -413,8 +412,10 @@ private: /// Allocates the main thread stack for the process, given the stack size in bytes. Result AllocateMainThreadStack(std::size_t stack_size); + void FinalizeHandleTable(); + /// Memory manager for this process - std::unique_ptr page_table; + KPageTable page_table; /// Current status of the process State state{}; @@ -433,6 +434,8 @@ private: /// Resource limit descriptor for this process KResourceLimit* resource_limit{}; + VAddr system_resource_address{}; + /// The ideal CPU core for this process, threads are scheduled on this core by default. u8 ideal_core = 0; @@ -459,7 +462,7 @@ private: KConditionVariable condition_var; /// Address indicating the location of the process' dedicated TLS region. - VAddr tls_region_address = 0; + VAddr plr_address = 0; /// Random values for svcGetInfo RandomEntropy std::array random_entropy{}; @@ -485,8 +488,12 @@ private: /// Schedule count of this process s64 schedule_count{}; + size_t memory_release_hint{}; + bool is_signaled{}; bool is_suspended{}; + bool is_immortal{}; + bool is_handle_table_initialized{}; bool is_initialized{}; std::atomic num_running_threads{}; diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index b6bbd4984b..6879de9ef3 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -95,6 +95,15 @@ struct KernelCore::Impl { } } + void CloseCurrentProcess() { + (*current_process).Finalize(); + // current_process->Close(); + // TODO: The current process should be destroyed based on accurate ref counting after + // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. + (*current_process).Destroy(); + current_process = nullptr; + } + void Shutdown() { is_shutting_down.store(true, std::memory_order_relaxed); SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); @@ -157,15 +166,7 @@ struct KernelCore::Impl { } } - // Shutdown all processes. - if (current_process) { - (*current_process).Finalize(); - // current_process->Close(); - // TODO: The current process should be destroyed based on accurate ref counting after - // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. 
- (*current_process).Destroy(); - current_process = nullptr; - } + CloseCurrentProcess(); // Track kernel objects that were not freed on shutdown { @@ -870,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const { return impl->current_process; } +void KernelCore::CloseCurrentProcess() { + impl->CloseCurrentProcess(); +} + const std::vector& KernelCore::GetProcessList() const { return impl->process_list; } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 79e66483ee..6eded95393 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -131,6 +131,9 @@ public: /// Retrieves a const pointer to the current process. const KProcess* CurrentProcess() const; + /// Closes the current process. + void CloseCurrentProcess(); + /// Retrieves the list of processes. const std::vector& GetProcessList() const; diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index bac61fd096..b07ae3f027 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han return ResultSuccess; case GetInfoType::UserExceptionContextAddr: - *result = process->GetTLSRegionAddress(); + *result = process->GetProcessLocalRegionAddress(); return ResultSuccess; case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: From 1b787adbd0b58986e6efdf6d536dcc949362c108 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 10 Sep 2022 23:45:07 -0700 Subject: [PATCH 19/25] core: hle: kernel: Fix InitializePreemption order. --- src/core/hle/kernel/kernel.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 6879de9ef3..eed2dc9f3e 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -74,8 +74,8 @@ struct KernelCore::Impl { InitializeMemoryLayout(); Init::InitializeKPageBufferSlabHeap(system); InitializeShutdownThreads(); - InitializePreemption(kernel); InitializePhysicalCores(); + InitializePreemption(kernel); // Initialize the Dynamic Slab Heaps. { From a4d11f4427859cbe82fc825f8493844e17bb668f Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 10 Sep 2022 01:48:15 -0700 Subject: [PATCH 20/25] core: Partially persist emulation state across game boots. 
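With this change, a System object is initialized once per frontend launch
(backing device memory, the core timing thread, and default filesystem and
applet providers), and only per-boot state is torn down when a title exits, so
another title can boot into the same host process. A rough usage sketch of the
new lifecycle follows; the loop and the queued_games name are illustrative and
not part of this patch, and the real call sites are in the yuzu/bootmanager.cpp
and yuzu_cmd/yuzu.cpp hunks below:

    // One-time host setup, before any title is booted.
    Core::System system{};
    system.Initialize();

    // Per-title boot/shutdown cycle; host state persists across iterations.
    for (const std::string& game_path : queued_games) {
        if (system.Load(emu_window, game_path) == Core::SystemResultStatus::Success) {
            void(system.Run());
            // ... emulate until the frontend requests a stop ...
            void(system.Pause());
            system.ShutdownMainProcess();
        }
    }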
---
 src/core/core.cpp              | 65 +++++++++++++++++++---------------
 src/core/core.h                | 10 ++++--
 src/core/core_timing.cpp       | 29 +++++++--------
 src/core/core_timing.h         |  7 ++--
 src/tests/core/core_timing.cpp |  3 --
 src/yuzu/bootmanager.cpp       |  4 +--
 src/yuzu/main.cpp              |  1 +
 src/yuzu_cmd/yuzu.cpp          |  4 ++-
 8 files changed, 65 insertions(+), 58 deletions(-)

diff --git a/src/core/core.cpp b/src/core/core.cpp
index 1deeee1545..2c4c0dbe40 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -133,6 +133,30 @@ struct System::Impl {
         : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
           cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
 
+    void Initialize(System& system) {
+        device_memory = std::make_unique<Core::DeviceMemory>();
+
+        is_multicore = Settings::values.use_multi_core.GetValue();
+
+        core_timing.SetMulticore(is_multicore);
+        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
+
+        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
+        const auto current_time =
+            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
+        Settings::values.custom_rtc_differential =
+            Settings::values.custom_rtc.value_or(current_time) - current_time;
+
+        // Create a default fs if one doesn't already exist.
+        if (virtual_filesystem == nullptr)
+            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
+        if (content_provider == nullptr)
+            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
+
+        // Create default implementations of applets if one is not provided.
+        applet_manager.SetDefaultAppletsIfMissing();
+    }
+
     SystemResultStatus Run() {
         std::unique_lock lk(suspend_guard);
         status = SystemResultStatus::Success;
@@ -178,37 +202,17 @@ struct System::Impl {
         debugger = std::make_unique<Debugger>(system, port);
     }
 
-    SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
+    SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
         LOG_DEBUG(Core, "initialized OK");
 
-        device_memory = std::make_unique<Core::DeviceMemory>();
-
-        is_multicore = Settings::values.use_multi_core.GetValue();
         is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
 
         kernel.SetMulticore(is_multicore);
         cpu_manager.SetMulticore(is_multicore);
         cpu_manager.SetAsyncGpu(is_async_gpu);
-        core_timing.SetMulticore(is_multicore);
 
         kernel.Initialize();
         cpu_manager.Initialize();
-        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
-
-        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
-        const auto current_time =
-            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
-        Settings::values.custom_rtc_differential =
-            Settings::values.custom_rtc.value_or(current_time) - current_time;
-
-        // Create a default fs if one doesn't already exist.
-        if (virtual_filesystem == nullptr)
-            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
-        if (content_provider == nullptr)
-            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
-
-        /// Create default implementations of applets if one is not provided.
-        applet_manager.SetDefaultAppletsIfMissing();
 
         /// Reset all glue registrations
         arp_manager.ResetAll();
@@ -253,11 +257,11 @@ struct System::Impl {
             return SystemResultStatus::ErrorGetLoader;
         }
 
-        SystemResultStatus init_result{Init(system, emu_window)};
+        SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
         if (init_result != SystemResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
                          static_cast<int>(init_result));
-            Shutdown();
+            ShutdownMainProcess();
             return init_result;
         }
 
@@ -276,7 +280,7 @@ struct System::Impl {
         const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
         if (load_result != Loader::ResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
-            Shutdown();
+            ShutdownMainProcess();
 
             return static_cast<SystemResultStatus>(
                 static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +339,7 @@ struct System::Impl {
         return status;
     }
 
-    void Shutdown() {
+    void ShutdownMainProcess() {
         SetShuttingDown(true);
 
         // Log last frame performance stats if game was loaded
@@ -369,7 +373,7 @@ struct System::Impl {
         cheat_engine.reset();
         telemetry_session.reset();
         time_manager.Shutdown();
-        core_timing.Shutdown();
+        core_timing.ClearPendingEvents();
         app_loader.reset();
         audio_core.reset();
         gpu_core.reset();
@@ -377,7 +381,6 @@ struct System::Impl {
         perf_stats.reset();
         kernel.Shutdown();
         memory.Reset();
-        applet_manager.ClearAll();
 
         if (auto room_member = room_network.GetRoomMember().lock()) {
             Network::GameInfo game_info{};
@@ -520,6 +523,10 @@ const CpuManager& System::GetCpuManager() const {
     return impl->cpu_manager;
 }
 
+void System::Initialize() {
+    impl->Initialize(*this);
+}
+
 SystemResultStatus System::Run() {
     return impl->Run();
 }
@@ -540,8 +547,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
     impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
 }
 
-void System::Shutdown() {
-    impl->Shutdown();
+void System::ShutdownMainProcess() {
+    impl->ShutdownMainProcess();
 }
 
 bool System::IsShuttingDown() const {
diff --git a/src/core/core.h b/src/core/core.h
index 7843cc8ad9..4ebedffd91 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -142,6 +142,12 @@ public:
     System(System&&) = delete;
     System& operator=(System&&) = delete;
 
+    /**
+     * Initializes the system
+     * This function will initialize core functionality used for system emulation
+     */
+    void Initialize();
+
     /**
      * Run the OS and Application
     * This function will start emulation and run the relevant devices
@@ -166,8 +172,8 @@ public:
 
     void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
 
-    /// Shutdown the emulated system.
-    void Shutdown();
+    /// Shutdown the main emulated process.
+    void ShutdownMainProcess();
 
     /// Check if the core is shutting down.
[[nodiscard]] bool IsShuttingDown() const; diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 2678ce5328..2afb2696cc 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp @@ -40,7 +40,17 @@ struct CoreTiming::Event { CoreTiming::CoreTiming() : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {} -CoreTiming::~CoreTiming() = default; +CoreTiming::~CoreTiming() { + paused = true; + shutting_down = true; + pause_event.Set(); + event.Set(); + if (timer_thread) { + timer_thread->join(); + } + timer_thread.reset(); + has_started = false; +} void CoreTiming::ThreadEntry(CoreTiming& instance) { constexpr char name[] = "HostTiming"; @@ -65,17 +75,8 @@ void CoreTiming::Initialize(std::function&& on_thread_init_) { } } -void CoreTiming::Shutdown() { - paused = true; - shutting_down = true; - pause_event.Set(); - event.Set(); - if (timer_thread) { - timer_thread->join(); - } - ClearPendingEvents(); - timer_thread.reset(); - has_started = false; +void CoreTiming::ClearPendingEvents() { + event_queue.clear(); } void CoreTiming::Pause(bool is_paused) { @@ -196,10 +197,6 @@ u64 CoreTiming::GetClockTicks() const { return CpuCyclesToClockCycles(ticks); } -void CoreTiming::ClearPendingEvents() { - event_queue.clear(); -} - void CoreTiming::RemoveEvent(const std::shared_ptr& event_type) { std::scoped_lock lock{basic_lock}; diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 3259397b28..7996b529fe 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -61,8 +61,8 @@ public: /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. void Initialize(std::function&& on_thread_init_); - /// Tears down all timing related functionality. - void Shutdown(); + /// Clear all pending events. This should ONLY be done on exit. + void ClearPendingEvents(); /// Sets if emulation is multicore or single core, must be set before Initialize void SetMulticore(bool is_multicore_) { @@ -136,9 +136,6 @@ public: private: struct Event; - /// Clear all pending events. This should ONLY be done on exit. 
- void ClearPendingEvents(); - static void ThreadEntry(CoreTiming& instance); void ThreadLoop(); diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp index 7c432a63c5..284b2ae66f 100644 --- a/src/tests/core/core_timing.cpp +++ b/src/tests/core/core_timing.cpp @@ -40,9 +40,6 @@ struct ScopeInit final { core_timing.SetMulticore(true); core_timing.Initialize([]() {}); } - ~ScopeInit() { - core_timing.Shutdown(); - } Core::Timing::CoreTiming core_timing; }; diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 24251247d2..6acfb7b06c 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp @@ -120,8 +120,8 @@ void EmuThread::run() { } } - // Shutdown the core emulation - system.Shutdown(); + // Shutdown the main emulated process + system.ShutdownMainProcess(); #if MICROPROFILE_ENABLED MicroProfileOnThreadExit(); diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index a94624be63..501c342551 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp @@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr config_, bool has_broken_vulkan #ifdef __linux__ SetupSigInterrupts(); #endif + system->Initialize(); Common::Log::Initialize(); LoadTranslation(); diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp index 3a0f33cba1..e16f79eb40 100644 --- a/src/yuzu_cmd/yuzu.cpp +++ b/src/yuzu_cmd/yuzu.cpp @@ -302,6 +302,8 @@ int main(int argc, char** argv) { } Core::System system{}; + system.Initialize(); + InputCommon::InputSubsystem input_subsystem{}; // Apply the command line arguments @@ -392,7 +394,7 @@ int main(int argc, char** argv) { } system.DetachDebugger(); void(system.Pause()); - system.Shutdown(); + system.ShutdownMainProcess(); detached_tasks.WaitForAllTasks(); return 0; From 829e82e264504696ce1d0ae9421a53d16bf104ea Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 14 Oct 2022 22:55:51 -0700 Subject: [PATCH 21/25] core: hle: kernel: Use result macros for new/changed code. 
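The result macros used here live in src/core/hle/result.h and follow the
Atmosphère-style result conventions. As a simplified sketch of what the
commonly used ones boil down to (illustrative only; the real definitions in
result.h are the authority):

    // Return success, or return/propagate a concrete Result.
    #define R_SUCCEED() return ResultSuccess
    #define R_RETURN(res_expr) return (res_expr)

    // Early-out with `result` unless `expr` holds.
    #define R_UNLESS(expr, result)                                            \
        do {                                                                  \
            if (!(expr)) {                                                    \
                R_RETURN(result);                                             \
            }                                                                 \
        } while (false)

    // Evaluate a Result-returning expression and propagate any failure.
    #define R_TRY(res_expr)                                                   \
        do {                                                                  \
            if (const Result _tmp = (res_expr); _tmp.IsFailure()) {           \
                R_RETURN(_tmp);                                               \
            }                                                                 \
        } while (false)

So a plain `return ResultSuccess;` becomes R_SUCCEED(), early-out `if` blocks
become R_UNLESS, and forwarded results become R_RETURN or R_TRY, as in the
hunks below.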
--- src/core/hle/kernel/k_dynamic_page_manager.h | 2 +- .../hle/kernel/k_memory_block_manager.cpp | 2 +- src/core/hle/kernel/k_memory_block_manager.h | 2 +- src/core/hle/kernel/k_page_table.cpp | 111 ++++++++---------- src/core/hle/kernel/k_page_table.h | 19 +-- src/core/hle/kernel/k_process.cpp | 42 +++---- src/core/hle/kernel/k_process.h | 16 ++- src/core/hle/kernel/k_thread.cpp | 41 +++---- src/core/hle/result.h | 3 - 9 files changed, 110 insertions(+), 128 deletions(-) diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h index 88d53776ae..9076c8fa3c 100644 --- a/src/core/hle/kernel/k_dynamic_page_manager.h +++ b/src/core/hle/kernel/k_dynamic_page_manager.h @@ -65,7 +65,7 @@ public: m_page_bitmap.SetBit(i); } - return ResultSuccess; + R_SUCCEED(); } VAddr GetAddress() const { diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp index c908af75a9..cf4c1e371b 100644 --- a/src/core/hle/kernel/k_memory_block_manager.cpp +++ b/src/core/hle/kernel/k_memory_block_manager.cpp @@ -23,7 +23,7 @@ Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManag KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None); m_memory_block_tree.insert(*start_block); - return ResultSuccess; + R_SUCCEED(); } void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager, diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h index b4ee4e319d..9b5873883d 100644 --- a/src/core/hle/kernel/k_memory_block_manager.h +++ b/src/core/hle/kernel/k_memory_block_manager.h @@ -35,7 +35,7 @@ private: R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource); } - return ResultSuccess; + R_SUCCEED(); } public: diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 22098c056e..307e491cb5 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -128,12 +128,9 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type alloc_start = process_code_end; alloc_size = end - process_code_end; } - const size_t needed_size{ - (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; - if (alloc_size < needed_size) { - ASSERT(false); - return ResultOutOfMemory; - } + const size_t needed_size = + (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); + R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); const size_t remaining_size{alloc_size - needed_size}; @@ -259,8 +256,9 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type m_page_table_impl = std::make_unique(); m_page_table_impl->Resize(m_address_space_width, PageBits); - return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, - m_memory_block_slab_manager); + // Initialize our memory block manager. 
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, + m_memory_block_slab_manager)); } void KPageTable::Finalize() { @@ -306,7 +304,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { @@ -385,7 +383,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si KMemoryBlockDisableMergeAttribute::None); } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, @@ -487,7 +485,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t reprotected_pages = true; } - return ResultSuccess; + R_SUCCEED(); } VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, @@ -558,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); R_TRY(pg.AddBlock(cur_addr, cur_pages)); - return ResultSuccess; + R_SUCCEED(); } bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) { @@ -685,7 +683,7 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s m_system.InvalidateCpuInstructionCaches(); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { @@ -933,7 +931,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { // Cancel our guard. unmap_guard.Cancel(); - return ResultSuccess; + R_SUCCEED(); } } } @@ -1176,7 +1174,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { // We succeeded. 
remap_guard.Cancel(); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) { @@ -1243,7 +1241,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { @@ -1288,9 +1286,7 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size AddRegionToPages(src_address, num_pages, src_pages); AddRegionToPages(dst_address, num_pages, dst_pages); - if (!dst_pages.IsEqual(src_pages)) { - return ResultInvalidMemoryRegion; - } + R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion); { auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); @@ -1312,7 +1308,7 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, @@ -1330,13 +1326,13 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) .IsSuccess()); - return result; + R_RETURN(result); } cur_addr += node.GetNumPages() * PageSize; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, @@ -1367,7 +1363,7 @@ Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemory KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, @@ -1413,7 +1409,7 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, // We successfully mapped the pages. *out_addr = addr; - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { @@ -1425,13 +1421,13 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, OperationType::Unmap)}; result.IsError()) { - return result; + R_RETURN(result); } cur_addr += node.GetNumPages() * PageSize; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { @@ -1465,7 +1461,7 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { @@ -1498,7 +1494,7 @@ Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState stat KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, @@ -1523,7 +1519,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n // Create a new page group for the region. 
R_TRY(this->MakePageGroup(*out, address, num_pages)); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, @@ -1589,7 +1585,7 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, m_system.InvalidateCpuInstructionCacheRange(addr, size); } - return ResultSuccess; + R_SUCCEED(); } KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { @@ -1653,7 +1649,7 @@ Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermi KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) { @@ -1696,7 +1692,7 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 att new_attr, KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::SetMaxHeapSize(size_t size) { @@ -1708,7 +1704,7 @@ Result KPageTable::SetMaxHeapSize(size_t size) { m_max_heap_size = size; - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::SetHeapSize(VAddr* out, size_t size) { @@ -1769,11 +1765,11 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { // Set the output. *out = m_heap_region_start; - return ResultSuccess; + R_SUCCEED(); } else if (size == GetHeapSize()) { // The size requested is exactly the current size. *out = m_heap_region_start; - return ResultSuccess; + R_SUCCEED(); } else { // We have to allocate memory. Determine how much to allocate and where while the table // is locked. @@ -1847,7 +1843,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { // Set the output. *out = m_heap_region_start; - return ResultSuccess; + R_SUCCEED(); } } @@ -1857,19 +1853,12 @@ ResultVal KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ KMemoryPermission perm, PAddr map_addr) { KScopedLightLock lk(m_general_lock); - if (!CanContain(region_start, region_num_pages * PageSize, state)) { - return ResultInvalidCurrentMemory; - } - - if (region_num_pages <= needed_num_pages) { - return ResultOutOfMemory; - } - + R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state), + ResultInvalidCurrentMemory); + R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory); const VAddr addr{ AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; - if (!addr) { - return ResultOutOfMemory; - } + R_UNLESS(addr, ResultOutOfMemory); // Create an update allocator. 
Result allocator_result{ResultSuccess}; @@ -1922,7 +1911,7 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { @@ -1956,7 +1945,7 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { @@ -1984,24 +1973,24 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { - return this->LockMemoryAndOpen( + R_RETURN(this->LockMemoryAndOpen( out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, static_cast(KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite), - KMemoryAttribute::Locked); + KMemoryAttribute::Locked)); } Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) { - return this->UnlockMemory( + R_RETURN(this->UnlockMemory( addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, - KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); + KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); } bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { @@ -2056,7 +2045,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_ addr += size; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, @@ -2083,7 +2072,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, default: ASSERT(false); } - return ResultSuccess; + R_SUCCEED(); } VAddr KPageTable::GetRegionAddress(KMemoryState state) const { @@ -2211,7 +2200,7 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_ R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, @@ -2253,7 +2242,7 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, @@ -2315,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* if (out_blocks_needed != nullptr) { *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* 
out_paddr, VAddr addr, size_t size, @@ -2381,7 +2370,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr new_attr, KMemoryBlockDisableMergeAttribute::Locked, KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, @@ -2436,7 +2425,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask new_attr, KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 1811d3e2d2..c6aeacd96c 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -57,9 +57,9 @@ public: KMemoryPermission perm); Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, KMemoryState state, KMemoryPermission perm) { - return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, - this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, - state, perm); + R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, + this->GetRegionAddress(state), + this->GetRegionSize(state) / PageSize, state, perm)); } Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state); @@ -137,8 +137,8 @@ private: KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { - return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, - perm, attr_mask, attr); + R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, + perm, attr_mask, attr)); } Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, @@ -155,15 +155,16 @@ private: KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, - state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); + R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, + state_mask, state, perm_mask, perm, attr_mask, attr, + ignore_attr)); } Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, - attr_mask, attr, ignore_attr); + R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, + attr_mask, attr, ignore_attr)); } Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 1a0aec56a7..8c3495e5a5 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -98,7 +98,7 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string // Open a reference to the resource limit. 
process->resource_limit->Open(); - return ResultSuccess; + R_SUCCEED(); } void KProcess::DoWorkerTaskImpl() { @@ -246,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad shmem->Open(); shemen_info->Open(); - return ResultSuccess; + R_SUCCEED(); } void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, @@ -296,7 +296,7 @@ Result KProcess::Reset() { // Clear signaled. is_signaled = false; - return ResultSuccess; + R_SUCCEED(); } Result KProcess::SetActivity(ProcessActivity activity) { @@ -312,9 +312,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { // Either pause or resume. if (activity == ProcessActivity::Paused) { // Verify that we're not suspended. - if (is_suspended) { - return ResultInvalidState; - } + R_UNLESS(!is_suspended, ResultInvalidState); // Suspend all threads. for (auto* thread : GetThreadList()) { @@ -327,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { ASSERT(activity == ProcessActivity::Runnable); // Verify that we're suspended. - if (!is_suspended) { - return ResultInvalidState; - } + R_UNLESS(is_suspended, ResultInvalidState); // Resume all threads. for (auto* thread : GetThreadList()) { @@ -340,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { SetSuspended(false); } - return ResultSuccess; + R_SUCCEED(); } Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { @@ -358,14 +354,14 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: if (!memory_reservation.Succeeded()) { LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", code_size + system_resource_size); - return ResultLimitReached; + R_RETURN(ResultLimitReached); } // Initialize proces address space if (const Result result{page_table.InitializeForProcess( metadata.GetAddressSpaceType(), false, 0x8000000, code_size, &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; result.IsError()) { - return result; + R_RETURN(result); } // Map process code region @@ -373,7 +369,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: code_size / PageSize, KMemoryState::Code, KMemoryPermission::None)}; result.IsError()) { - return result; + R_RETURN(result); } // Initialize process capabilities @@ -381,7 +377,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: if (const Result result{ capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; result.IsError()) { - return result; + R_RETURN(result); } // Set memory usage capacity @@ -405,7 +401,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); memory_reservation.Commit(); - return handle_table.Initialize(capabilities.GetHandleTableSize()); + R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize())); } void KProcess::Run(s32 main_thread_priority, u64 stack_size) { @@ -504,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { } *out = tlr; - return ResultSuccess; + R_SUCCEED(); } } @@ -533,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { // We succeeded! 
tlp_guard.Cancel();
    *out = tlr;

-    return ResultSuccess;
+    R_SUCCEED();
}

Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -581,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
        KThreadLocalPage::Free(kernel, page_to_free);
    }

-    return ResultSuccess;
+    R_SUCCEED();
}

bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -682,15 +678,7 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {

    main_thread_stack_top += main_thread_stack_size;

-    return ResultSuccess;
-}
-
-void KProcess::FinalizeHandleTable() {
-    // Finalize the table.
-    handle_table.Finalize();
-
-    // Note that the table is finalized.
-    is_handle_table_initialized = false;
+    R_SUCCEED();
}

} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index fcc2897f99..788faec1d5 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -138,16 +138,16 @@ public:
    }

    Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
-        return condition_var.Wait(address, cv_key, tag, ns);
+        R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
    }

    Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
-        return address_arbiter.SignalToAddress(address, signal_type, value, count);
+        R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
    }

    Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                              s64 timeout) {
-        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
+        R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
    }

    VAddr GetProcessLocalRegionAddress() const {
@@ -407,13 +407,19 @@ private:
        pinned_threads[core_id] = nullptr;
    }

+    void FinalizeHandleTable() {
+        // Finalize the table.
+        handle_table.Finalize();
+
+        // Note that the table is finalized.
+        is_handle_table_initialized = false;
+    }
+
    void ChangeState(State new_state);

    /// Allocates the main thread stack for the process, given the stack size in bytes.
    Result AllocateMainThreadStack(std::size_t stack_size);

-    void FinalizeHandleTable();
-
    /// Memory manager for this process
    KPageTable page_table;

diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 89b32d509e..b7bfcdce31 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -245,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
        }
    }

-    return ResultSuccess;
+    R_SUCCEED();
}

Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -258,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
    thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
    thread->is_single_core = !Settings::values.use_multi_core.GetValue();

-    return ResultSuccess;
+    R_SUCCEED();
}

Result KThread::InitializeDummyThread(KThread* thread) {
@@ -268,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
    // Initialize emulation parameters.
thread->stack_parameters.disable_count = 0;

-    return ResultSuccess;
+    R_SUCCEED();
}

Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
-    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            system.GetCpuManager().GetGuestActivateFunc());
+    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+                               ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
}

Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
-    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            system.GetCpuManager().GetIdleThreadStartFunc());
+    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+                               ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
}

Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                             KThreadFunction func, uintptr_t arg, s32 virt_core) {
-    return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
-                            system.GetCpuManager().GetShutdownThreadStartFunc());
+    R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
+                               ThreadType::HighPriority,
+                               system.GetCpuManager().GetShutdownThreadStartFunc()));
}

Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                     uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
                                     KProcess* owner) {
    system.Kernel().GlobalSchedulerContext().AddThread(thread);
-    return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
-                            ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
+    R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
+                               ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
}

void KThread::PostDestroy(uintptr_t arg) {
@@ -542,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
    *out_ideal_core = virtual_ideal_core_id;
    *out_affinity_mask = virtual_affinity_mask;

-    return ResultSuccess;
+    R_SUCCEED();
}

Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -558,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
        *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
    }

-    return ResultSuccess;
+    R_SUCCEED();
}

Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -670,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
        } while (retry_update);
    }

-    return ResultSuccess;
+    R_SUCCEED();
}

void KThread::SetBasePriority(s32 value) {
@@ -843,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
        } while (thread_is_current);
    }

-    return ResultSuccess;
+    R_SUCCEED();
}

Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -878,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
        }
    }

-    return ResultSuccess;
+    R_SUCCEED();
}

void KThread::AddWaiterImpl(KThread* thread) {
@@ -1042,7 +1043,7 @@ Result KThread::Run() {
        // Set our state and finish.
        SetState(ThreadState::Runnable);

-        return ResultSuccess;
+        R_SUCCEED();
    }
}

Result KThread::Terminate() {
@@ -1089,7 +1090,7 @@ Result KThread::Terminate() {
                                Svc::WaitInfinite));
    }

-    return ResultSuccess;
+    R_SUCCEED();
}

ThreadState KThread::RequestTerminate() {
@@ -1162,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) {
    // Check if the thread should terminate.
if (this->IsTerminationRequested()) { slp.CancelSleep(); - return ResultTerminationRequested; + R_THROW(ResultTerminationRequested); } // Wait for the sleep to end. @@ -1170,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) { SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); } - return ResultSuccess; + R_SUCCEED(); } void KThread::IfDummyThreadTryWait() { diff --git a/src/core/hle/result.h b/src/core/hle/result.h index d714dea38e..ef4b2d4173 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h @@ -470,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess; #define R_UNLESS(expr, res) \ { \ if (!(expr)) { \ - if (res.IsError()) { \ - LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \ - } \ R_THROW(res); \ } \ } From 11f85ea7130a5245bd8d2090f0dd76ba65f15d23 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 14 Oct 2022 23:37:02 -0700 Subject: [PATCH 22/25] core: core_timing: Remove unused IsHostTiming. --- src/core/core_timing.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 7996b529fe..bd21dd904c 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -69,11 +69,6 @@ public: is_multicore = is_multicore_; } - /// Check if it's using host timing. - bool IsHostTiming() const { - return is_multicore; - } - /// Pauses/Unpauses the execution of the timer thread. void Pause(bool is_paused); From 638fa6170a8a4c36ffa644055e683a7e50aa7ae5 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 15 Oct 2022 00:48:28 -0700 Subject: [PATCH 23/25] core: core_timing: Re-initialize if single/multicore state changes. --- src/core/core.cpp | 25 ++++++++++++++++++++----- src/core/core_timing.cpp | 23 ++++++++++++++--------- src/core/core_timing.h | 2 ++ 3 files changed, 36 insertions(+), 14 deletions(-) diff --git a/src/core/core.cpp b/src/core/core.cpp index 2c4c0dbe40..622a20510c 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -155,6 +155,24 @@ struct System::Impl { // Create default implementations of applets if one is not provided. applet_manager.SetDefaultAppletsIfMissing(); + + is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); + + kernel.SetMulticore(is_multicore); + cpu_manager.SetMulticore(is_multicore); + cpu_manager.SetAsyncGpu(is_async_gpu); + } + + void ReinitializeIfNecessary(System& system) { + if (is_multicore == Settings::values.use_multi_core.GetValue()) { + return; + } + + LOG_DEBUG(Kernel, "Re-initializing"); + + is_multicore = Settings::values.use_multi_core.GetValue(); + + Initialize(system); } SystemResultStatus Run() { @@ -205,11 +223,8 @@ struct System::Impl { SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) { LOG_DEBUG(Core, "initialized OK"); - is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); - - kernel.SetMulticore(is_multicore); - cpu_manager.SetMulticore(is_multicore); - cpu_manager.SetAsyncGpu(is_async_gpu); + // Setting changes may require a full system reinitialization (e.g., disabling multicore). 
+        ReinitializeIfNecessary(system);

        kernel.Initialize();
        cpu_manager.Initialize();

diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 2afb2696cc..0e7b5f9436 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -41,15 +41,7 @@ CoreTiming::CoreTiming()
    : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}

CoreTiming::~CoreTiming() {
-    paused = true;
-    shutting_down = true;
-    pause_event.Set();
-    event.Set();
-    if (timer_thread) {
-        timer_thread->join();
-    }
-    timer_thread.reset();
-    has_started = false;
+    Reset();
}

void CoreTiming::ThreadEntry(CoreTiming& instance) {
@@ -63,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
}

void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
+    Reset();
    on_thread_init = std::move(on_thread_init_);
    event_fifo_id = 0;
    shutting_down = false;
@@ -304,6 +297,18 @@ void CoreTiming::ThreadLoop() {
    }
}

+void CoreTiming::Reset() {
+    paused = true;
+    shutting_down = true;
+    pause_event.Set();
+    event.Set();
+    if (timer_thread) {
+        timer_thread->join();
+    }
+    timer_thread.reset();
+    has_started = false;
+}
+
std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
    if (is_multicore) {
        return clock->GetTimeNS();

diff --git a/src/core/core_timing.h b/src/core/core_timing.h index bd21dd904c..b5925193c7 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -134,6 +134,8 @@ private:
    static void ThreadEntry(CoreTiming& instance);
    void ThreadLoop();

+    void Reset();
+
    std::unique_ptr<Common::WallClock> clock;

    s64 global_timer = 0;

From a264b54022d97824a5889c711f4977bc4ecdbca3 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Tue, 18 Oct 2022 19:12:18 -0700
Subject: [PATCH 24/25] core: Initialize: Add missing braces.

--- src/core/core.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/core/core.cpp b/src/core/core.cpp index 622a20510c..7fb8bc0195 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -148,10 +148,12 @@ struct System::Impl {
            Settings::values.custom_rtc.value_or(current_time) - current_time;

        // Create a default fs if one doesn't already exist.
-        if (virtual_filesystem == nullptr)
+        if (virtual_filesystem == nullptr) {
            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
-        if (content_provider == nullptr)
+        }
+        if (content_provider == nullptr) {
            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
+        }

        // Create default implementations of applets if one is not provided.
        applet_manager.SetDefaultAppletsIfMissing();

From 97879faea43c1fad6cbb0b63573c75644705e2e9 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Tue, 18 Oct 2022 19:13:20 -0700
Subject: [PATCH 25/25] core: hle: kernel: Migrate ProcessState to enum class.
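
Scoped enums do not implicitly convert to integers, so call sites that
previously relied on the unscoped ProcessState_* enumerators converting
to u32 must now spell the conversion out. A minimal sketch of the
call-site impact (illustrative only, not code from this patch):

    // Before: the unscoped enumerator converted to an integer implicitly.
    u64 raw_state = Svc::ProcessState_Running;

    // After: the cast is explicit, which catches accidental mixing of
    // process state values with unrelated integers at compile time.
    u64 raw_state = static_cast<u64>(Svc::ProcessState::Running);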
--- src/core/hle/kernel/k_process.h | 16 ++++++++-------- src/core/hle/kernel/svc_types.h | 18 +++++++++--------- 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index 788faec1d5..2e0cc3d0bc 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -72,14 +72,14 @@ public:
    ~KProcess() override;

    enum class State {
-        Created = Svc::ProcessState_Created,
-        CreatedAttached = Svc::ProcessState_CreatedAttached,
-        Running = Svc::ProcessState_Running,
-        Crashed = Svc::ProcessState_Crashed,
-        RunningAttached = Svc::ProcessState_RunningAttached,
-        Terminating = Svc::ProcessState_Terminating,
-        Terminated = Svc::ProcessState_Terminated,
-        DebugBreak = Svc::ProcessState_DebugBreak,
+        Created = static_cast<u32>(Svc::ProcessState::Created),
+        CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
+        Running = static_cast<u32>(Svc::ProcessState::Running),
+        Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
+        RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
+        Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
+        Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
+        DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
    };

    enum : u64 {

diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index bb4f7b004b..abb9847fe8 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -97,15 +97,15 @@ constexpr inline s32 HighestThreadPriority = 0;

constexpr inline s32 SystemThreadPriorityHighest = 16;

-enum ProcessState : u32 {
-    ProcessState_Created = 0,
-    ProcessState_CreatedAttached = 1,
-    ProcessState_Running = 2,
-    ProcessState_Crashed = 3,
-    ProcessState_RunningAttached = 4,
-    ProcessState_Terminating = 5,
-    ProcessState_Terminated = 6,
-    ProcessState_DebugBreak = 7,
+enum class ProcessState : u32 {
+    Created = 0,
+    CreatedAttached = 1,
+    Running = 2,
+    Crashed = 3,
+    RunningAttached = 4,
+    Terminating = 5,
+    Terminated = 6,
+    DebugBreak = 7,
};

constexpr inline size_t ThreadLocalRegionSize = 0x200;
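
A closing note on the KProcess::State mapping above (a sketch, not part of
the patch): with both enums scoped, kernel code that reports a process's
state through the SVC interface also needs explicit casts. The handler
shape and the names process and out_info below are hypothetical:

    // Hypothetical reporting path: map the kernel-side state to the
    // SVC-visible enum, then to the raw integer the SVC ABI expects.
    const KProcess::State state = process->GetState();
    const auto svc_state = static_cast<Svc::ProcessState>(state);
    *out_info = static_cast<u64>(svc_state);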