From 7b7f6f1cb777aef0184169891b42ece902a616ae Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 30 Oct 2021 02:32:07 +0200 Subject: [PATCH 01/68] NvHost: Fix some regressions and correct signaling on timeout. --- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 44 ++++++++----------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 527531f294..a3c11ad8a4 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -105,30 +105,32 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector auto& event = events_interface.events[event_id]; auto& gpu = system.GPU(); - - // This is mostly to take into account unimplemented features. As synced - // gpu is always synced. - if (!gpu.IsAsync()) { - event.event->GetWritableEvent().Signal(); - return NvResult::Success; - } const u32 current_syncpoint_value = event.fence.value; const s32 diff = current_syncpoint_value - params.threshold; - if (diff >= 0) { - event.event->GetWritableEvent().Signal(); - params.value = current_syncpoint_value; - std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.failed[event_id] = false; - return NvResult::Success; - } - const u32 target_value = current_syncpoint_value - diff; + const u32 target_value = params.value; if (!is_async) { params.value = 0; } + const auto check_failing = [&]() { + if (events_interface.failed[event_id]) { + { + auto lk = system.StallProcesses(); + gpu.WaitFence(params.syncpt_id, target_value); + system.UnstallProcesses(); + } + std::memcpy(output.data(), ¶ms, sizeof(params)); + events_interface.failed[event_id] = false; + return true; + } + return false; + }; + if (params.timeout == 0) { - std::memcpy(output.data(), ¶ms, sizeof(params)); + if (check_failing()) { + return NvResult::Success; + } return NvResult::Timeout; } @@ -147,15 +149,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000; } params.value |= event_id; - event.event->GetWritableEvent().Clear(); - if (events_interface.failed[event_id]) { - { - auto lk = system.StallProcesses(); - gpu.WaitFence(params.syncpt_id, target_value); - system.UnstallProcesses(); - } - std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.failed[event_id] = false; + if (check_failing()) { return NvResult::Success; } gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); From ac104a24d11c5875e25daee5dd405ee9fe5f3a21 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 30 Oct 2021 11:35:05 +0200 Subject: [PATCH 02/68] NvHost: Try a different approach to blocking. 
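Review note on the IocCtrlEventWait changes above (patch 01): the wait path keeps the signed-difference threshold test (`const s32 diff = current_syncpoint_value - params.threshold`). Syncpoints are free-running 32-bit counters, so the comparison must tolerate wraparound. A minimal standalone sketch of that idiom, with illustrative names that are not part of the patch:

    #include <cstdint>

    // True once 'current' has reached 'threshold' on a free-running u32
    // counter. The unsigned subtraction wraps modulo 2^32 and the signed
    // cast turns the result into a +/-2^31 window, which is what the
    // 'diff >= 0' check in IocCtrlEventWait relies on.
    bool SyncpointReached(std::uint32_t current, std::uint32_t threshold) {
        return static_cast<std::int32_t>(current - threshold) >= 0;
    }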
--- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 15 ++++++--------- src/core/hle/service/nvdrv/nvdrv.h | 2 +- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index a3c11ad8a4..bfe1faf48c 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -91,7 +91,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id); std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.failed[event_id] = false; + events_interface.fails[event_id] = 0; return NvResult::Success; } @@ -99,29 +99,26 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { params.value = new_value; std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.failed[event_id] = false; + events_interface.fails[event_id] = 0; return NvResult::Success; } - auto& event = events_interface.events[event_id]; auto& gpu = system.GPU(); - const u32 current_syncpoint_value = event.fence.value; - const s32 diff = current_syncpoint_value - params.threshold; - const u32 target_value = params.value; + const u32 target_value = syncpoint_manager.GetSyncpointMax(params.syncpt_id); if (!is_async) { params.value = 0; } const auto check_failing = [&]() { - if (events_interface.failed[event_id]) { + if (events_interface.fails[event_id] > 1) { { auto lk = system.StallProcesses(); gpu.WaitFence(params.syncpt_id, target_value); system.UnstallProcesses(); } std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.failed[event_id] = false; + events_interface.fails[event_id] = 0; return true; } return false; @@ -207,7 +204,7 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector& input, std::v if (events_interface.status[event_id] == EventState::Waiting) { events_interface.LiberateEvent(event_id); } - events_interface.failed[event_id] = true; + events_interface.fails[event_id]++; syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id); diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index c929e51062..4c4aa7dab9 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -50,7 +50,7 @@ struct EventInterface { // Tells if an NVEvent is registered or not std::array registered{}; // Tells the NVEvent that it has failed. - std::array failed{}; + std::array fails{}; // When an NVEvent is waiting on GPU interrupt, this is the sync_point // associated with it. std::array assigned_syncpt{}; From 39a5ce4e696716e48bdd1c980abc792827c68184 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 1 Nov 2021 00:51:29 +0100 Subject: [PATCH 03/68] NvHost: Remake Ctrl Implementation. 
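Review note on patch 02: replacing the `failed` bool array with a `fails` counter changes the blocking heuristic; a single cancelled wait no longer forces the synchronous fallback, only repeated cancellations do. A hedged sketch of the shape of that policy (the threshold and all names here are illustrative, not the driver's real interface):

    #include <array>
    #include <cstdint>

    struct EventWaitPolicy {
        std::array<std::uint32_t, 64> fails{}; // one counter per event slot

        // IocCtrlClearEventWait bumps the counter for the cancelled slot.
        void OnWaitCancelled(std::uint32_t slot) { ++fails[slot]; }

        // IocCtrlEventWait consults it: past the threshold, the driver
        // stalls the guest and waits on the GPU fence synchronously
        // instead of re-arming the event.
        bool ShouldWaitSynchronously(std::uint32_t slot) const {
            return fails[slot] > 1;
        }

        // A completed wait resets the counter.
        void OnWaitCompleted(std::uint32_t slot) { fails[slot] = 0; }
    };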
--- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 182 +++++++++++------- .../hle/service/nvdrv/devices/nvhost_ctrl.h | 47 +++-- src/core/hle/service/nvdrv/nvdata.h | 18 +- src/core/hle/service/nvdrv/nvdrv.cpp | 128 ++++++++++-- src/core/hle/service/nvdrv/nvdrv.h | 96 ++++----- .../hle/service/nvdrv/nvdrv_interface.cpp | 13 +- src/video_core/gpu.h | 2 +- 7 files changed, 314 insertions(+), 172 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index bfe1faf48c..f2b015c8f4 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -1,11 +1,14 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #include #include #include "common/assert.h" #include "common/logging/log.h" +#include "common/scope_exit.h" #include "core/core.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_writable_event.h" @@ -30,9 +33,9 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& case 0x1c: return IocCtrlClearEventWait(input, output); case 0x1d: - return IocCtrlEventWait(input, output, false); - case 0x1e: return IocCtrlEventWait(input, output, true); + case 0x1e: + return IocCtrlEventWait(input, output, false); case 0x1f: return IocCtrlEventRegister(input, output); case 0x20: @@ -71,54 +74,65 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector& input, std::vector } NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector& output, - bool is_async) { + bool is_allocation) { IocCtrlEventWaitParams params{}; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}", - params.syncpt_id, params.threshold, params.timeout, is_async); + LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}", + params.fence.id, params.fence.value, params.timeout, is_allocation); - if (params.syncpt_id >= MaxSyncPoints) { + bool must_unmark_fail = !is_allocation; + const u32 event_id = params.value.raw; + SCOPE_EXIT({ + std::memcpy(output.data(), ¶ms, sizeof(params)); + if (must_unmark_fail) { + events_interface.fails[event_id] = 0; + } + }); + + const u32 fence_id = static_cast(params.fence.id); + + if (fence_id >= MaxSyncPoints) { return NvResult::BadParameter; } - u32 event_id = params.value & 0x00FF; - - if (event_id >= MaxNvEvents) { - std::memcpy(output.data(), ¶ms, sizeof(params)); - return NvResult::BadParameter; - } - - if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { - params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id); - std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.fails[event_id] = 0; + if (params.fence.value == 0) { + params.value.raw = syncpoint_manager.GetSyncpointMin(fence_id); return NvResult::Success; } - if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id); - syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { - params.value = new_value; - std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.fails[event_id] = 0; + if (syncpoint_manager.IsSyncpointExpired(fence_id, 
params.fence.value)) { + params.value.raw = syncpoint_manager.GetSyncpointMin(fence_id); + return NvResult::Success; + } + + if (const auto new_value = syncpoint_manager.RefreshSyncpoint(fence_id); + syncpoint_manager.IsSyncpointExpired(fence_id, params.fence.value)) { + params.value.raw = new_value; return NvResult::Success; } auto& gpu = system.GPU(); - const u32 target_value = syncpoint_manager.GetSyncpointMax(params.syncpt_id); + const u32 target_value = params.fence.value; - if (!is_async) { - params.value = 0; - } + auto lock = events_interface.Lock(); + + u32 slot = [&]() { + if (is_allocation) { + params.value.raw = 0; + return events_interface.FindFreeEvent(fence_id); + } else { + return params.value.raw; + } + }(); const auto check_failing = [&]() { - if (events_interface.fails[event_id] > 1) { + if (events_interface.fails[slot] > 1) { { auto lk = system.StallProcesses(); - gpu.WaitFence(params.syncpt_id, target_value); + gpu.WaitFence(fence_id, target_value); system.UnstallProcesses(); } - std::memcpy(output.data(), ¶ms, sizeof(params)); - events_interface.fails[event_id] = 0; + params.value.raw = target_value; return true; } return false; @@ -131,47 +145,76 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector return NvResult::Timeout; } - EventState status = events_interface.status[event_id]; - const bool bad_parameter = status == EventState::Busy; - if (bad_parameter) { - std::memcpy(output.data(), ¶ms, sizeof(params)); + if (slot >= MaxNvEvents) { return NvResult::BadParameter; } - events_interface.SetEventStatus(event_id, EventState::Waiting); - events_interface.assigned_syncpt[event_id] = params.syncpt_id; - events_interface.assigned_value[event_id] = target_value; - if (is_async) { - params.value = params.syncpt_id << 4; - } else { - params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000; + + auto* event = events_interface.events[slot]; + + if (!event) { + return NvResult::BadParameter; } - params.value |= event_id; + + if (events_interface.IsBeingUsed(slot)) { + return NvResult::BadParameter; + } + if (check_failing()) { return NvResult::Success; } - gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); - std::memcpy(output.data(), ¶ms, sizeof(params)); + + params.value.raw = 0; + + events_interface.status[slot].store(EventState::Waiting, std::memory_order_release); + events_interface.assigned_syncpt[slot] = fence_id; + events_interface.assigned_value[slot] = target_value; + if (is_allocation) { + params.value.syncpoint_id_for_allocation.Assign(static_cast(fence_id)); + params.value.event_allocated.Assign(1); + } else { + params.value.syncpoint_id.Assign(fence_id); + } + params.value.raw |= slot; + + gpu.RegisterSyncptInterrupt(fence_id, target_value); return NvResult::Timeout; } +NvResult nvhost_ctrl::FreeEvent(u32 slot) { + if (slot >= MaxNvEvents) { + return NvResult::BadParameter; + } + + if (!events_interface.registered[slot]) { + return NvResult::Success; + } + + if (events_interface.IsBeingUsed(slot)) { + return NvResult::Busy; + } + + events_interface.Free(slot); + return NvResult::Success; +} + NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector& input, std::vector& output) { IocCtrlEventRegisterParams params{}; std::memcpy(¶ms, input.data(), sizeof(params)); - const u32 event_id = params.user_event_id & 0x00FF; + const u32 event_id = params.user_event_id; LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); if (event_id >= MaxNvEvents) { return NvResult::BadParameter; } + + auto lock = 
events_interface.Lock(); + if (events_interface.registered[event_id]) { - const auto event_state = events_interface.status[event_id]; - if (event_state != EventState::Free) { - LOG_WARNING(Service_NVDRV, "Event already registered! Unregistering previous event"); - events_interface.UnregisterEvent(event_id); - } else { - return NvResult::BadParameter; + const auto result = FreeEvent(event_id); + if (result != NvResult::Success) { + return result; } } - events_interface.RegisterEvent(event_id); + events_interface.Create(event_id); return NvResult::Success; } @@ -181,32 +224,33 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector& input, std::memcpy(¶ms, input.data(), sizeof(params)); const u32 event_id = params.user_event_id & 0x00FF; LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); - if (event_id >= MaxNvEvents) { - return NvResult::BadParameter; - } - if (!events_interface.registered[event_id]) { - return NvResult::BadParameter; - } - events_interface.UnregisterEvent(event_id); - return NvResult::Success; + + auto lock = events_interface.Lock(); + return FreeEvent(event_id); } NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector& input, std::vector& output) { - IocCtrlEventSignalParams params{}; + IocCtrlEventClearParams params{}; std::memcpy(¶ms, input.data(), sizeof(params)); - u32 event_id = params.event_id & 0x00FF; - LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id); + u32 event_id = params.event_id.slot; + LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id); if (event_id >= MaxNvEvents) { return NvResult::BadParameter; } - if (events_interface.status[event_id] == EventState::Waiting) { - events_interface.LiberateEvent(event_id); + + auto lock = events_interface.Lock(); + + if (events_interface.status[event_id].exchange( + EventState::Cancelling, std::memory_order_acq_rel) == EventState::Waiting) { + system.GPU().CancelSyncptInterrupt(events_interface.assigned_syncpt[event_id], + events_interface.assigned_value[event_id]); + syncpoint_manager.RefreshSyncpoint(events_interface.assigned_syncpt[event_id]); } events_interface.fails[event_id]++; - - syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id); + events_interface.status[event_id].store(EventState::Cancelled, std::memory_order_release); + events_interface.events[event_id]->GetWritableEvent().Clear(); return NvResult::Success; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 4fbb89b155..0471c09b23 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -5,6 +5,7 @@ #include #include +#include "common/bit_field.h" #include "common/common_types.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" #include "core/hle/service/nvdrv/nvdrv.h" @@ -27,6 +28,24 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; + union SyncpointEventValue { + u32 raw; + + union { + BitField<0, 4, u32> partial_slot; + BitField<4, 28, u32> syncpoint_id; + }; + + struct { + u16 slot; + union { + BitField<0, 12, u16> syncpoint_id_for_allocation; + BitField<12, 1, u16> event_allocated; + }; + }; + }; + static_assert(sizeof(SyncpointEventValue) == sizeof(u32)); + private: struct IocSyncptReadParams { u32_le id{}; @@ -83,27 +102,18 @@ private: }; static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size"); - struct IocCtrlEventSignalParams { - u32_le event_id{}; + 
struct IocCtrlEventClearParams { + SyncpointEventValue event_id{}; }; - static_assert(sizeof(IocCtrlEventSignalParams) == 4, - "IocCtrlEventSignalParams is incorrect size"); + static_assert(sizeof(IocCtrlEventClearParams) == 4, + "IocCtrlEventClearParams is incorrect size"); struct IocCtrlEventWaitParams { - u32_le syncpt_id{}; - u32_le threshold{}; - s32_le timeout{}; - u32_le value{}; - }; - static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size"); - - struct IocCtrlEventWaitAsyncParams { - u32_le syncpt_id{}; - u32_le threshold{}; + NvFence fence{}; u32_le timeout{}; - u32_le value{}; + SyncpointEventValue value{}; }; - static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16, + static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitAsyncParams is incorrect size"); struct IocCtrlEventRegisterParams { @@ -124,11 +134,14 @@ private: static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size"); NvResult NvOsGetConfigU32(const std::vector& input, std::vector& output); - NvResult IocCtrlEventWait(const std::vector& input, std::vector& output, bool is_async); + NvResult IocCtrlEventWait(const std::vector& input, std::vector& output, + bool is_allocation); NvResult IocCtrlEventRegister(const std::vector& input, std::vector& output); NvResult IocCtrlEventUnregister(const std::vector& input, std::vector& output); NvResult IocCtrlClearEventWait(const std::vector& input, std::vector& output); + NvResult FreeEvent(u32 slot); + EventInterface& events_interface; SyncpointManager& syncpoint_manager; }; diff --git a/src/core/hle/service/nvdrv/nvdata.h b/src/core/hle/service/nvdrv/nvdata.h index 1d00394c80..2ee91f9c43 100644 --- a/src/core/hle/service/nvdrv/nvdata.h +++ b/src/core/hle/service/nvdrv/nvdata.h @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #pragma once @@ -78,11 +80,15 @@ enum class NvResult : u32 { ModuleNotPresent = 0xA000E, }; +// obtained from +// https://github.com/skyline-emu/skyline/blob/nvdec-dev/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost/ctrl.h#L47 enum class EventState { - Free = 0, - Registered = 1, - Waiting = 2, - Busy = 3, + Available = 0, + Waiting = 1, + Cancelling = 2, + Signalling = 3, + Signalled = 4, + Cancelled = 5, }; union Ioctl { diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 756eb74537..fa259abed2 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -1,6 +1,9 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. 
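Review note on the new EventState enum above: it backs a small lock-free state machine in which the signal path and the cancel path both atomically exchange the slot's state and only act if they observed Waiting, so exactly one side consumes a pending wait. Condensed sketch of the signal-side transition, assuming a std::atomic<EventState> per slot (kernel-event plumbing elided):

    #include <atomic>

    enum class EventState { Available, Waiting, Cancelling, Signalling, Signalled, Cancelled };

    void Signal(std::atomic<EventState>& status) {
        // Only the thread that swaps Waiting out fires the kernel event; a
        // concurrent cancel would have swapped in Cancelling first and won.
        if (status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
            EventState::Waiting) {
            // event->GetWritableEvent().Signal(); (elided)
        }
        status.store(EventState::Signalled, std::memory_order_release);
    }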
+#include #include #include @@ -26,6 +29,73 @@ namespace Service::Nvidia { +std::unique_lock EventInterface::Lock() { + return std::unique_lock(events_mutex); +} + +void EventInterface::Signal(u32 event_id) { + if (status[event_id].exchange(EventState::Signalling, std::memory_order_acq_rel) == + EventState::Waiting) { + events[event_id]->GetWritableEvent().Signal(); + } + status[event_id].store(EventState::Signalled, std::memory_order_release); +} + +void EventInterface::Create(u32 event_id) { + ASSERT(!events[event_id]); + ASSERT(!registered[event_id]); + ASSERT(!IsBeingUsed(event_id)); + events[event_id] = backup[event_id]; + status[event_id] = EventState::Available; + registered[event_id] = true; + const u64 mask = 1ULL << event_id; + fails[event_id] = 0; + events_mask |= mask; + LOG_CRITICAL(Service_NVDRV, "Created Event {}", event_id); +} + +void EventInterface::Free(u32 event_id) { + ASSERT(events[event_id]); + ASSERT(registered[event_id]); + ASSERT(!IsBeingUsed(event_id)); + + backup[event_id]->GetWritableEvent().Clear(); + events[event_id] = nullptr; + status[event_id] = EventState::Available; + registered[event_id] = false; + const u64 mask = ~(1ULL << event_id); + events_mask &= mask; + LOG_CRITICAL(Service_NVDRV, "Freed Event {}", event_id); +} + +u32 EventInterface::FindFreeEvent(u32 syncpoint_id) { + u32 slot{MaxNvEvents}; + u32 free_slot{MaxNvEvents}; + for (u32 i = 0; i < MaxNvEvents; i++) { + if (registered[i]) { + if (!IsBeingUsed(i)) { + slot = i; + if (assigned_syncpt[i] == syncpoint_id) { + return slot; + } + } + } else if (free_slot == MaxNvEvents) { + free_slot = i; + } + } + if (free_slot < MaxNvEvents) { + Create(free_slot); + return free_slot; + } + + if (slot < MaxNvEvents) { + return slot; + } + + LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event"); + return 0; +} + void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, Core::System& system) { auto module_ = std::make_shared(system); @@ -38,12 +108,14 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger } Module::Module(Core::System& system) - : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} { + : syncpoint_manager{system.GPU()}, events_interface{*this}, service_context{system, "nvdrv"} { + events_interface.events_mask = 0; for (u32 i = 0; i < MaxNvEvents; i++) { - events_interface.events[i].event = - service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i)); - events_interface.status[i] = EventState::Free; + events_interface.status[i] = EventState::Available; + events_interface.events[i] = nullptr; events_interface.registered[i] = false; + events_interface.backup[i] = + service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i)); } auto nvmap_dev = std::make_shared(system); devices["/dev/nvhost-as-gpu"] = std::make_shared(system, nvmap_dev); @@ -62,8 +134,12 @@ Module::Module(Core::System& system) } Module::~Module() { + auto lock = events_interface.Lock(); for (u32 i = 0; i < MaxNvEvents; i++) { - service_context.CloseEvent(events_interface.events[i].event); + if (events_interface.registered[i]) { + events_interface.Free(i); + } + service_context.CloseEvent(events_interface.backup[i]); } } @@ -169,21 +245,41 @@ NvResult Module::Close(DeviceFD fd) { } void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { - for (u32 i = 0; i < MaxNvEvents; i++) { - if (events_interface.assigned_syncpt[i] == syncpoint_id && + const u32 max = MaxNvEvents - std::countl_zero(events_interface.events_mask); + const 
u32 min = std::countr_zero(events_interface.events_mask); + for (u32 i = min; i < max; i++) { + if (events_interface.registered[i] && events_interface.assigned_syncpt[i] == syncpoint_id && events_interface.assigned_value[i] == value) { - events_interface.LiberateEvent(i); - events_interface.events[i].event->GetWritableEvent().Signal(); + events_interface.Signal(i); } } } -Kernel::KReadableEvent& Module::GetEvent(const u32 event_id) { - return events_interface.events[event_id].event->GetReadableEvent(); -} +Kernel::KEvent* Module::GetEvent(u32 event_id) { + const auto event = Devices::nvhost_ctrl::SyncpointEventValue{.raw = event_id}; -Kernel::KWritableEvent& Module::GetEventWriteable(const u32 event_id) { - return events_interface.events[event_id].event->GetWritableEvent(); + const bool allocated = event.event_allocated.Value() != 0; + const u32 slot{allocated ? event.partial_slot.Value() : static_cast(event.slot)}; + if (slot >= MaxNvEvents) { + ASSERT(false); + return nullptr; + } + + const u32 syncpoint_id{allocated ? event.syncpoint_id_for_allocation.Value() + : event.syncpoint_id.Value()}; + + auto lock = events_interface.Lock(); + + if (events_interface.registered[slot] && + events_interface.assigned_syncpt[slot] == syncpoint_id) { + ASSERT(events_interface.events[slot]); + return events_interface.events[slot]; + } + // Temporary hack. + events_interface.Create(slot); + events_interface.assigned_syncpt[slot] = syncpoint_id; + ASSERT(false); + return events_interface.events[slot]; } } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index 4c4aa7dab9..8ce036508a 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #pragma once @@ -34,19 +36,20 @@ namespace Devices { class nvdevice; } -/// Represents an Nvidia event -struct NvEvent { - Kernel::KEvent* event{}; - NvFence fence{}; -}; +class Module; -struct EventInterface { - // Mask representing currently busy events +class EventInterface { +public: + EventInterface(Module& module_) : module{module_} {} + + // Mask representing registered events u64 events_mask{}; // Each kernel event associated to an NV event - std::array events; + std::array events{}; + // Backup NV event + std::array backup{}; // The status of the current NVEvent - std::array status{}; + std::array, MaxNvEvents> status{}; // Tells if an NVEvent is registered or not std::array registered{}; // Tells the NVEvent that it has failed. @@ -59,50 +62,26 @@ struct EventInterface { std::array assigned_value{}; // Constant to denote an unasigned syncpoint. 
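Review note on the events_mask scan in Module::SignalSyncpt above: the C++20 <bit> helpers bound the loop to the occupied slot range instead of always walking all 64 entries, and a zero mask yields min = 64, max = 0, so the loop body never runs. Standalone illustration (names are illustrative):

    #include <bit>
    #include <cstdint>

    void ForEachPossiblyRegistered(std::uint64_t mask) {
        const unsigned max = 64 - static_cast<unsigned>(std::countl_zero(mask));
        const unsigned min = static_cast<unsigned>(std::countr_zero(mask));
        for (unsigned i = min; i < max; ++i) {
            // registered[i] / assigned_syncpt[i] checks go here (elided)
        }
    }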
static constexpr u32 unassigned_syncpt = 0xFFFFFFFF; - std::optional GetFreeEvent() const { - u64 mask = events_mask; - for (u32 i = 0; i < MaxNvEvents; i++) { - const bool is_free = (mask & 0x1) == 0; - if (is_free) { - if (status[i] == EventState::Registered || status[i] == EventState::Free) { - return {i}; - } - } - mask = mask >> 1; - } - return std::nullopt; - } - void SetEventStatus(const u32 event_id, EventState new_status) { - EventState old_status = status[event_id]; - if (old_status == new_status) { - return; - } - status[event_id] = new_status; - if (new_status == EventState::Registered) { - registered[event_id] = true; - } - if (new_status == EventState::Waiting || new_status == EventState::Busy) { - events_mask |= (1ULL << event_id); - } - } - void RegisterEvent(const u32 event_id) { - registered[event_id] = true; - if (status[event_id] == EventState::Free) { - status[event_id] = EventState::Registered; - } - } - void UnregisterEvent(const u32 event_id) { - registered[event_id] = false; - if (status[event_id] == EventState::Registered) { - status[event_id] = EventState::Free; - } - } - void LiberateEvent(const u32 event_id) { - status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free; - events_mask &= ~(1ULL << event_id); - assigned_syncpt[event_id] = unassigned_syncpt; - assigned_value[event_id] = 0; + + bool IsBeingUsed(u32 event_id) { + const auto current_status = status[event_id].load(std::memory_order_acquire); + return current_status == EventState::Waiting || current_status == EventState::Cancelling || + current_status == EventState::Signalling; } + + std::unique_lock Lock(); + + void Signal(u32 event_id); + + void Create(u32 event_id); + + void Free(u32 event_id); + + u32 FindFreeEvent(u32 syncpoint_id); + +private: + std::mutex events_mutex; + Module& module; }; class Module final { @@ -139,11 +118,11 @@ public: void SignalSyncpt(const u32 syncpoint_id, const u32 value); - Kernel::KReadableEvent& GetEvent(u32 event_id); - - Kernel::KWritableEvent& GetEventWriteable(u32 event_id); + Kernel::KEvent* GetEvent(u32 event_id); private: + friend class EventInterface; + /// Manages syncpoints on the host SyncpointManager syncpoint_manager; @@ -159,6 +138,9 @@ private: EventInterface events_interface; KernelHelpers::ServiceContext service_context; + + void CreateEvent(u32 event_id); + void FreeEvent(u32 event_id); }; /// Registers all NVDRV services with the specified service manager. 
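Review note on EventInterface::Lock above: returning a std::unique_lock by value instead of exposing the mutex keeps the critical section scoped by RAII and lets callers (Create, Free, FindFreeEvent, QueryEvent) hold it across several table operations. Minimal usage sketch under the same assumption:

    #include <mutex>

    class EventTable {
    public:
        // Movable lock handle; unlocks automatically at end of scope.
        std::unique_lock<std::mutex> Lock() {
            return std::unique_lock<std::mutex>(events_mutex);
        }

    private:
        std::mutex events_mutex;
    };

    void RegisterSomething(EventTable& table) {
        auto lock = table.Lock();
        // mutate registration state while the lock is held
    } // lock released here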
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index b5a9803840..07883feb2f 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp @@ -5,6 +5,7 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_readable_event.h" #include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/nvdrv/nvdrv.h" @@ -164,8 +165,7 @@ void NVDRV::Initialize(Kernel::HLERequestContext& ctx) { void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const auto fd = rp.Pop(); - const auto event_id = rp.Pop() & 0x00FF; - LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id); + const auto event_id = rp.Pop(); if (!is_initialized) { ServiceError(ctx, NvResult::NotInitialized); @@ -180,12 +180,13 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { return; } - if (event_id < MaxNvEvents) { + auto* event = nvdrv->GetEvent(event_id); + + if (event) { IPC::ResponseBuilder rb{ctx, 3, 1}; rb.Push(ResultSuccess); - auto& event = nvdrv->GetEvent(event_id); - event.Clear(); - rb.PushCopyObjects(event); + auto& readable_event = event->GetReadableEvent(); + rb.PushCopyObjects(readable_event); rb.PushEnum(NvResult::Success); } else { IPC::ResponseBuilder rb{ctx, 3}; diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index b939ba315d..42c91954f7 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -201,7 +201,7 @@ public: void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value); - [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value); + bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value); [[nodiscard]] u64 GetTicks() const; From d30b885d713fa2b9393aeb3c515f4d881bf838f2 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 1 Nov 2021 15:02:47 +0100 Subject: [PATCH 04/68] NVDRV: Implement QueryEvent. --- src/core/hle/service/nvdrv/devices/nvdevice.h | 8 ++++ .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 23 +++++++++ .../hle/service/nvdrv/devices/nvhost_ctrl.h | 2 + .../service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 20 +++++++- .../service/nvdrv/devices/nvhost_ctrl_gpu.h | 14 +++++- .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 26 +++++++++- .../hle/service/nvdrv/devices/nvhost_gpu.h | 13 ++++- src/core/hle/service/nvdrv/nvdrv.cpp | 48 +++++++++---------- src/core/hle/service/nvdrv/nvdrv.h | 6 ++- .../hle/service/nvdrv/nvdrv_interface.cpp | 15 ++---- 10 files changed, 134 insertions(+), 41 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index 696e8121e5..204b0e7572 100644 --- a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h @@ -11,6 +11,10 @@ namespace Core { class System; } +namespace Kernel { +class KEvent; +} + namespace Service::Nvidia::Devices { /// Represents an abstract nvidia device node. 
It is to be subclassed by concrete device nodes to @@ -64,6 +68,10 @@ public: */ virtual void OnClose(DeviceFD fd) = 0; + virtual Kernel::KEvent* QueryEvent(u32 event_id) { + return nullptr; + } + protected: Core::System& system; }; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index f2b015c8f4..9b393521aa 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -255,4 +255,27 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector& input, std::v return NvResult::Success; } +Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) { + const auto event = SyncpointEventValue{.raw = event_id}; + + const bool allocated = event.event_allocated.Value() != 0; + const u32 slot{allocated ? event.partial_slot.Value() : static_cast(event.slot)}; + if (slot >= MaxNvEvents) { + ASSERT(false); + return nullptr; + } + + const u32 syncpoint_id{allocated ? event.syncpoint_id_for_allocation.Value() + : event.syncpoint_id.Value()}; + + auto lock = events_interface.Lock(); + + if (events_interface.registered[slot] && + events_interface.assigned_syncpt[slot] == syncpoint_id) { + ASSERT(events_interface.events[slot]); + return events_interface.events[slot]; + } + return nullptr; +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 0471c09b23..d548a78277 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -28,6 +28,8 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; + Kernel::KEvent* QueryEvent(u32 event_id) override; + union SyncpointEventValue { u32 raw; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index 2b3b7efea7..e353408eb9 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp @@ -7,10 +7,15 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" +#include "core/hle/service/nvdrv/nvdrv.h" namespace Service::Nvidia::Devices { -nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_) : nvdevice{system_} {} +nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_) + : nvdevice{system_}, events_interface{events_interface_} { + error_notifier_event = events_interface.CreateNonCtrlEvent("CtrlGpuErrorNotifier"); + unknown_event = events_interface.CreateNonCtrlEvent("CtrlGpuUknownEvent"); +} nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default; NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -286,4 +291,17 @@ NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector& input, std::vector& input, @@ -27,6 +31,8 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; + Kernel::KEvent* QueryEvent(u32 event_id) override; + private: struct IoctlGpuCharacteristics { u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200) @@ -160,6 +166,12 @@ private: NvResult ZBCQueryTable(const std::vector& input, std::vector& output); NvResult FlushL2(const std::vector& input, std::vector& output); NvResult GetGpuTime(const std::vector& input, std::vector& output); + + EventInterface& events_interface; + + // Events + Kernel::KEvent* error_notifier_event; + Kernel::KEvent* unknown_event; }; } // 
namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index b98e63011f..e87fa59924 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -6,6 +6,7 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" +#include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/nvdrv/syncpoint_manager.h" #include "core/memory.h" #include "video_core/gpu.h" @@ -21,10 +22,16 @@ Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoi } // namespace nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} { + EventInterface& events_interface_, SyncpointManager& syncpoint_manager_) + : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, events_interface{events_interface_}, + syncpoint_manager{syncpoint_manager_} { channel_fence.id = syncpoint_manager_.AllocateSyncpoint(); channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); + sm_exception_breakpoint_int_report_event = + events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointInt"); + sm_exception_breakpoint_pause_report_event = + events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointPause"); + error_notifier_event = events_interface.CreateNonCtrlEvent("GpuChannelErrorNotifier"); } nvhost_gpu::~nvhost_gpu() = default; @@ -328,4 +335,19 @@ NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector& input, std::vect return NvResult::Success; } +Kernel::KEvent* nvhost_gpu::QueryEvent(u32 event_id) { + switch (event_id) { + case 1: + return sm_exception_breakpoint_int_report_event; + case 2: + return sm_exception_breakpoint_pause_report_event; + case 3: + return error_notifier_event; + default: { + LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id); + } + } + return nullptr; +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 8a9f7775ad..eb4936df0e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -15,7 +15,8 @@ namespace Service::Nvidia { class SyncpointManager; -} +class EventInterface; +} // namespace Service::Nvidia namespace Service::Nvidia::Devices { @@ -23,7 +24,7 @@ class nvmap; class nvhost_gpu final : public nvdevice { public: explicit nvhost_gpu(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_); + EventInterface& events_interface_, SyncpointManager& syncpoint_manager_); ~nvhost_gpu() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -36,6 +37,8 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; + Kernel::KEvent* QueryEvent(u32 event_id) override; + private: enum class CtxObjects : u32_le { Ctx2D = 0x902D, @@ -192,8 +195,14 @@ private: NvResult ChannelSetTimeslice(const std::vector& input, std::vector& output); std::shared_ptr nvmap_dev; + EventInterface& events_interface; SyncpointManager& syncpoint_manager; NvFence channel_fence; + + // Events + Kernel::KEvent* sm_exception_breakpoint_int_report_event; + Kernel::KEvent* sm_exception_breakpoint_pause_report_event; + Kernel::KEvent* error_notifier_event; }; } // 
namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index fa259abed2..7c0f89c127 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -96,6 +96,12 @@ u32 EventInterface::FindFreeEvent(u32 syncpoint_id) { return 0; } +Kernel::KEvent* EventInterface::CreateNonCtrlEvent(std::string name) { + Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name)); + basic_events.push_back(new_event); + return new_event; +} + void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, Core::System& system) { auto module_ = std::make_shared(system); @@ -119,9 +125,10 @@ Module::Module(Core::System& system) } auto nvmap_dev = std::make_shared(system); devices["/dev/nvhost-as-gpu"] = std::make_shared(system, nvmap_dev); - devices["/dev/nvhost-gpu"] = - std::make_shared(system, nvmap_dev, syncpoint_manager); - devices["/dev/nvhost-ctrl-gpu"] = std::make_shared(system); + devices["/dev/nvhost-gpu"] = std::make_shared( + system, nvmap_dev, events_interface, syncpoint_manager); + devices["/dev/nvhost-ctrl-gpu"] = + std::make_shared(system, events_interface); devices["/dev/nvmap"] = nvmap_dev; devices["/dev/nvdisp_disp0"] = std::make_shared(system, nvmap_dev); devices["/dev/nvhost-ctrl"] = @@ -255,31 +262,24 @@ void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { } } -Kernel::KEvent* Module::GetEvent(u32 event_id) { - const auto event = Devices::nvhost_ctrl::SyncpointEventValue{.raw = event_id}; - - const bool allocated = event.event_allocated.Value() != 0; - const u32 slot{allocated ? event.partial_slot.Value() : static_cast(event.slot)}; - if (slot >= MaxNvEvents) { - ASSERT(false); - return nullptr; +NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) { + if (fd < 0) { + LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd); + return NvResult::InvalidState; } - const u32 syncpoint_id{allocated ? event.syncpoint_id_for_allocation.Value() - : event.syncpoint_id.Value()}; + const auto itr = open_files.find(fd); - auto lock = events_interface.Lock(); - - if (events_interface.registered[slot] && - events_interface.assigned_syncpt[slot] == syncpoint_id) { - ASSERT(events_interface.events[slot]); - return events_interface.events[slot]; + if (itr == open_files.end()) { + LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd); + return NvResult::NotImplemented; } - // Temporary hack. 
- events_interface.Create(slot); - events_interface.assigned_syncpt[slot] = syncpoint_id; - ASSERT(false); - return events_interface.events[slot]; + + event = itr->second->QueryEvent(event_id); + if (!event) { + return NvResult::BadParameter; + } + return NvResult::Success; } } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index 8ce036508a..be8813c978 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -6,6 +6,7 @@ #pragma once #include +#include #include #include @@ -79,9 +80,12 @@ public: u32 FindFreeEvent(u32 syncpoint_id); + Kernel::KEvent* CreateNonCtrlEvent(std::string name); + private: std::mutex events_mutex; Module& module; + std::vector basic_events; }; class Module final { @@ -118,7 +122,7 @@ public: void SignalSyncpt(const u32 syncpoint_id, const u32 value); - Kernel::KEvent* GetEvent(u32 event_id); + NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event); private: friend class EventInterface; diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index 07883feb2f..81ee28f319 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp @@ -173,25 +173,20 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { return; } - const auto nv_result = nvdrv->VerifyFD(fd); - if (nv_result != NvResult::Success) { - LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd); - ServiceError(ctx, nv_result); - return; - } + Kernel::KEvent* event = nullptr; + NvResult result = nvdrv->QueryEvent(fd, event_id, event); - auto* event = nvdrv->GetEvent(event_id); - - if (event) { + if (result == NvResult::Success) { IPC::ResponseBuilder rb{ctx, 3, 1}; rb.Push(ResultSuccess); auto& readable_event = event->GetReadableEvent(); rb.PushCopyObjects(readable_event); rb.PushEnum(NvResult::Success); } else { + LOG_ERROR(Service_NVDRV, "Invalid event request!"); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); - rb.PushEnum(NvResult::BadParameter); + rb.PushEnum(result); } } From a21b8824fb8a0bd7bcb44fd50559f26718f5dbc4 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 1 Nov 2021 15:25:06 +0100 Subject: [PATCH 05/68] NVDRV: Cleanup. 
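Review note on patch 04's QueryEvent rework: event lookup is now routed through the device that owns the fd rather than a global event table, with the base nvdevice returning nullptr and only event-bearing devices overriding it. Hedged sketch of the dispatch shape (container and type names are illustrative):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    struct KEvent; // opaque kernel event

    enum class NvResult { Success, BadParameter, NotImplemented, InvalidState };

    struct Device {
        virtual ~Device() = default;
        // Base case: no events. nvhost_ctrl, nvhost_gpu and nvhost_ctrl_gpu
        // override this, mirroring nvdevice::QueryEvent in the patch.
        virtual KEvent* QueryEvent(std::uint32_t /*event_id*/) { return nullptr; }
    };

    NvResult QueryEvent(std::unordered_map<int, std::shared_ptr<Device>>& open_files,
                        int fd, std::uint32_t event_id, KEvent*& out_event) {
        if (fd < 0) {
            return NvResult::InvalidState;
        }
        const auto itr = open_files.find(fd);
        if (itr == open_files.end()) {
            return NvResult::NotImplemented; // unknown fd
        }
        out_event = itr->second->QueryEvent(event_id);
        return out_event ? NvResult::Success : NvResult::BadParameter;
    }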
--- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 4 +- src/core/hle/service/nvdrv/nvdrv.cpp | 53 ++++++++++--------- src/core/hle/service/nvdrv/nvdrv.h | 9 ++-- .../hle/service/nvdrv/nvdrv_interface.cpp | 6 ++- 4 files changed, 40 insertions(+), 32 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 9b393521aa..55acd4f78e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -125,8 +125,10 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector } }(); + must_unmark_fail = true; + const auto check_failing = [&]() { - if (events_interface.fails[slot] > 1) { + if (events_interface.fails[slot] > 2) { { auto lk = system.StallProcesses(); gpu.WaitFence(fence_id, target_value); diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 7c0f89c127..df46562403 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -29,6 +29,29 @@ namespace Service::Nvidia { +EventInterface::EventInterface(Module& module_) : module{module_} { + events_mask = 0; + for (u32 i = 0; i < MaxNvEvents; i++) { + status[i] = EventState::Available; + events[i] = nullptr; + registered[i] = false; + } +} + +EventInterface::~EventInterface() { + auto lk = Lock(); + for (u32 i = 0; i < MaxNvEvents; i++) { + if (registered[i]) { + module.service_context.CloseEvent(events[i]); + events[i] = nullptr; + registered[i] = false; + } + } + for (auto* event : basic_events) { + module.service_context.CloseEvent(event); + } +} + std::unique_lock EventInterface::Lock() { return std::unique_lock(events_mutex); } @@ -45,27 +68,25 @@ void EventInterface::Create(u32 event_id) { ASSERT(!events[event_id]); ASSERT(!registered[event_id]); ASSERT(!IsBeingUsed(event_id)); - events[event_id] = backup[event_id]; + events[event_id] = + module.service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", event_id)); status[event_id] = EventState::Available; registered[event_id] = true; const u64 mask = 1ULL << event_id; fails[event_id] = 0; events_mask |= mask; - LOG_CRITICAL(Service_NVDRV, "Created Event {}", event_id); } void EventInterface::Free(u32 event_id) { ASSERT(events[event_id]); ASSERT(registered[event_id]); ASSERT(!IsBeingUsed(event_id)); - - backup[event_id]->GetWritableEvent().Clear(); + module.service_context.CloseEvent(events[event_id]); events[event_id] = nullptr; status[event_id] = EventState::Available; registered[event_id] = false; const u64 mask = ~(1ULL << event_id); events_mask &= mask; - LOG_CRITICAL(Service_NVDRV, "Freed Event {}", event_id); } u32 EventInterface::FindFreeEvent(u32 syncpoint_id) { @@ -114,15 +135,7 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger } Module::Module(Core::System& system) - : syncpoint_manager{system.GPU()}, events_interface{*this}, service_context{system, "nvdrv"} { - events_interface.events_mask = 0; - for (u32 i = 0; i < MaxNvEvents; i++) { - events_interface.status[i] = EventState::Available; - events_interface.events[i] = nullptr; - events_interface.registered[i] = false; - events_interface.backup[i] = - service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i)); - } + : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"}, events_interface{*this} { auto nvmap_dev = std::make_shared(system); devices["/dev/nvhost-as-gpu"] = std::make_shared(system, nvmap_dev); devices["/dev/nvhost-gpu"] = 
std::make_shared( @@ -140,15 +153,7 @@ Module::Module(Core::System& system) std::make_shared(system, nvmap_dev, syncpoint_manager); } -Module::~Module() { - auto lock = events_interface.Lock(); - for (u32 i = 0; i < MaxNvEvents; i++) { - if (events_interface.registered[i]) { - events_interface.Free(i); - } - service_context.CloseEvent(events_interface.backup[i]); - } -} +Module::~Module() = default; NvResult Module::VerifyFD(DeviceFD fd) const { if (fd < 0) { @@ -255,7 +260,7 @@ void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { const u32 max = MaxNvEvents - std::countl_zero(events_interface.events_mask); const u32 min = std::countr_zero(events_interface.events_mask); for (u32 i = min; i < max; i++) { - if (events_interface.registered[i] && events_interface.assigned_syncpt[i] == syncpoint_id && + if (events_interface.assigned_syncpt[i] == syncpoint_id && events_interface.assigned_value[i] == value) { events_interface.Signal(i); } diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index be8813c978..d24b575399 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -41,14 +41,13 @@ class Module; class EventInterface { public: - EventInterface(Module& module_) : module{module_} {} + EventInterface(Module& module_); + ~EventInterface(); // Mask representing registered events u64 events_mask{}; // Each kernel event associated to an NV event std::array events{}; - // Backup NV event - std::array backup{}; // The status of the current NVEvent std::array, MaxNvEvents> status{}; // Tells if an NVEvent is registered or not @@ -139,10 +138,10 @@ private: /// Mapping of device node names to their implementation. std::unordered_map> devices; - EventInterface events_interface; - KernelHelpers::ServiceContext service_context; + EventInterface events_interface; + void CreateEvent(u32 event_id); void FreeEvent(u32 event_id); }; diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index 81ee28f319..bd41205b88 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #include #include "common/logging/log.h" From 3cbe352c18f69596d91c4862382d61a3d6515140 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 1 Nov 2021 18:53:32 +0100 Subject: [PATCH 06/68] NVDRV: Refactor and add new NvMap. 
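Review note on the member reorder in patch 05 (service_context is now declared before events_interface in Module): class members are destroyed in reverse declaration order, and EventInterface's new destructor closes its events through module.service_context, so service_context must outlive events_interface. Tiny illustration of the rule:

    #include <cstdio>

    struct A { ~A() { std::puts("~A"); } };
    struct B { ~B() { std::puts("~B"); } };

    struct Holder {
        A a; // constructed first, destroyed last
        B b; // constructed second, destroyed first
    };

    int main() {
        Holder h;
        return 0;
    } // prints "~B" then "~A"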
--- src/common/bit_field.h | 13 +- src/core/CMakeLists.txt | 8 +- src/core/hle/service/nvdrv/core/container.cpp | 41 +++ src/core/hle/service/nvdrv/core/container.h | 38 +++ src/core/hle/service/nvdrv/core/nvmap.cpp | 245 ++++++++++++++++++ src/core/hle/service/nvdrv/core/nvmap.h | 155 +++++++++++ .../nvdrv/{ => core}/syncpoint_manager.cpp | 6 +- .../nvdrv/{ => core}/syncpoint_manager.h | 4 +- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 8 +- .../hle/service/nvdrv/devices/nvhost_ctrl.h | 10 +- .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 9 +- .../hle/service/nvdrv/devices/nvhost_gpu.h | 10 +- .../service/nvdrv/devices/nvhost_nvdec.cpp | 4 +- .../hle/service/nvdrv/devices/nvhost_nvdec.h | 2 +- .../nvdrv/devices/nvhost_nvdec_common.cpp | 8 +- .../nvdrv/devices/nvhost_nvdec_common.h | 9 +- .../hle/service/nvdrv/devices/nvhost_vic.cpp | 4 +- .../hle/service/nvdrv/devices/nvhost_vic.h | 2 +- src/core/hle/service/nvdrv/nvdrv.cpp | 16 +- src/core/hle/service/nvdrv/nvdrv.h | 11 +- 20 files changed, 558 insertions(+), 45 deletions(-) create mode 100644 src/core/hle/service/nvdrv/core/container.cpp create mode 100644 src/core/hle/service/nvdrv/core/container.h create mode 100644 src/core/hle/service/nvdrv/core/nvmap.cpp create mode 100644 src/core/hle/service/nvdrv/core/nvmap.h rename src/core/hle/service/nvdrv/{ => core}/syncpoint_manager.cpp (88%) rename src/core/hle/service/nvdrv/{ => core}/syncpoint_manager.h (97%) diff --git a/src/common/bit_field.h b/src/common/bit_field.h index 7e1df62b1c..368b7b98c9 100644 --- a/src/common/bit_field.h +++ b/src/common/bit_field.h @@ -127,11 +127,14 @@ public: } } - // This constructor and assignment operator might be considered ambiguous: - // Would they initialize the storage or just the bitfield? - // Hence, delete them. Use the Assign method to set bitfield values! - BitField(T val) = delete; - BitField& operator=(T val) = delete; + BitField(T val) { + Assign(val); + } + + BitField& operator=(T val) { + Assign(val); + return *this; + } constexpr BitField() noexcept = default; diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 8e3fd4505c..3ef19f9c26 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -550,6 +550,12 @@ add_library(core STATIC hle/service/ns/ns.h hle/service/ns/pdm_qry.cpp hle/service/ns/pdm_qry.h + hle/service/nvdrv/core/container.cpp + hle/service/nvdrv/core/container.h + hle/service/nvdrv/core/nvmap.cpp + hle/service/nvdrv/core/nvmap.h + hle/service/nvdrv/core/syncpoint_manager.cpp + hle/service/nvdrv/core/syncpoint_manager.h hle/service/nvdrv/devices/nvdevice.h hle/service/nvdrv/devices/nvdisp_disp0.cpp hle/service/nvdrv/devices/nvdisp_disp0.h @@ -578,8 +584,6 @@ add_library(core STATIC hle/service/nvdrv/nvdrv_interface.h hle/service/nvdrv/nvmemp.cpp hle/service/nvdrv/nvmemp.h - hle/service/nvdrv/syncpoint_manager.cpp - hle/service/nvdrv/syncpoint_manager.h hle/service/nvflinger/binder.h hle/service/nvflinger/buffer_item.h hle/service/nvflinger/buffer_item_consumer.cpp diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp new file mode 100644 index 0000000000..97b5b2c86f --- /dev/null +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -0,0 +1,41 @@ +// Copyright 2021 yuzu emulator team +// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
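Review note on the bit_field.h change above: re-enabling BitField(T) and operator=(T) (both previously deleted as ambiguous) is what lets packed values such as nvhost_ctrl's SyncpointEventValue be written with plain assignment. Hedged illustration, assuming the BitField<Position, Bits, T> template from common/bit_field.h and its intended union-overlay usage:

    #include "common/bit_field.h"
    #include "common/common_types.h"

    union PackedValue {
        u32 raw;
        BitField<0, 4, u32> slot;
        BitField<4, 28, u32> syncpoint_id;
    };

    u32 Pack(u32 slot, u32 syncpoint_id) {
        PackedValue value{.raw = 0};
        value.slot = slot;                 // now legal: operator=(T) forwards to Assign()
        value.syncpoint_id = syncpoint_id; // previously required .Assign(...)
        return value.raw;
    }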
+ +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" +#include "video_core/gpu.h" + +namespace Service::Nvidia::NvCore { + +struct ContainerImpl { + ContainerImpl(Tegra::GPU& gpu_) : file{}, manager{gpu_} {} + NvMap file; + SyncpointManager manager; +}; + +Container::Container(Tegra::GPU& gpu_) { + impl = std::make_unique(gpu_); +} + +Container::~Container() = default; + +NvMap& Container::GetNvMapFile() { + return impl->file; +} + +const NvMap& Container::GetNvMapFile() const { + return impl->file; +} + +SyncpointManager& Container::GetSyncpointManager() { + return impl->manager; +} + +const SyncpointManager& Container::GetSyncpointManager() const { + return impl->manager; +} + +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h new file mode 100644 index 0000000000..91ac2305ab --- /dev/null +++ b/src/core/hle/service/nvdrv/core/container.h @@ -0,0 +1,38 @@ +// Copyright 2021 yuzu emulator team +// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include + +namespace Tegra { +class GPU; +} + +namespace Service::Nvidia::NvCore { + +class NvMap; +class SyncpointManager; + +struct ContainerImpl; + +class Container { +public: + Container(Tegra::GPU& gpu_); + ~Container(); + + NvMap& GetNvMapFile(); + + const NvMap& GetNvMapFile() const; + + SyncpointManager& GetSyncpointManager(); + + const SyncpointManager& GetSyncpointManager() const; + +private: + std::unique_ptr impl; +}; + +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp new file mode 100644 index 0000000000..d3f227f526 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -0,0 +1,245 @@ +// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/logging/log.h" +#include "core/hle/service/nvdrv/core/nvmap.h" +#include "core/memory.h" + +using Core::Memory::YUZU_PAGESIZE; + +namespace Service::Nvidia::NvCore { +NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) {} + +NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { + std::scoped_lock lock(mutex); + + // Handles cannot be allocated twice + if (allocated) [[unlikely]] + return NvResult::AccessDenied; + + flags = pFlags; + kind = pKind; + align = pAlign < YUZU_PAGESIZE ? 
YUZU_PAGESIZE : pAlign; + + // This flag is only applicable for handles with an address passed + if (pAddress) + flags.keep_uncached_after_free = 0; + else + LOG_CRITICAL(Service_NVDRV, + "Mapping nvmap handles without a CPU side address is unimplemented!"); + + size = Common::AlignUp(size, YUZU_PAGESIZE); + aligned_size = Common::AlignUp(size, align); + address = pAddress; + + // TODO: pin init + + allocated = true; + + return NvResult::Success; +} + +NvResult NvMap::Handle::Duplicate(bool internal_session) { + // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS) + if (!allocated) [[unlikely]] + return NvResult::BadValue; + + std::scoped_lock lock(mutex); + + // If we internally use FromId the duplication tracking of handles won't work accurately due to + // us not implementing per-process handle refs. + if (internal_session) + internal_dupes++; + else + dupes++; + + return NvResult::Success; +} + +NvMap::NvMap() = default; + +void NvMap::AddHandle(std::shared_ptr handleDesc) { + std::scoped_lock lock(handles_lock); + + handles.emplace(handleDesc->id, std::move(handleDesc)); +} + +void NvMap::UnmapHandle(Handle& handleDesc) { + // Remove pending unmap queue entry if needed + if (handleDesc.unmap_queue_entry) { + unmap_queue.erase(*handleDesc.unmap_queue_entry); + handleDesc.unmap_queue_entry.reset(); + } + + // Free and unmap the handle from the SMMU + /* + state.soc->smmu.Unmap(handleDesc.pin_virt_address, static_cast(handleDesc.aligned_size)); + smmuAllocator.Free(handleDesc.pin_virt_address, static_cast(handleDesc.aligned_size)); + handleDesc.pin_virt_address = 0; + */ +} + +bool NvMap::TryRemoveHandle(const Handle& handleDesc) { + // No dupes left, we can remove from handle map + if (handleDesc.dupes == 0 && handleDesc.internal_dupes == 0) { + std::scoped_lock lock(handles_lock); + + auto it{handles.find(handleDesc.id)}; + if (it != handles.end()) + handles.erase(it); + + return true; + } else { + return false; + } +} + +NvResult NvMap::CreateHandle(u64 size, std::shared_ptr& result_out) { + if (!size) [[unlikely]] + return NvResult::BadValue; + + u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)}; + auto handleDesc{std::make_shared(size, id)}; + AddHandle(handleDesc); + + result_out = handleDesc; + return NvResult::Success; +} + +std::shared_ptr NvMap::GetHandle(Handle::Id handle) { + std::scoped_lock lock(handles_lock); + try { + return handles.at(handle); + } catch ([[maybe_unused]] std::out_of_range& e) { + return nullptr; + } +} + +u32 NvMap::PinHandle(NvMap::Handle::Id handle) { + UNIMPLEMENTED_MSG("pinning"); + return 0; + /* + auto handleDesc{GetHandle(handle)}; + if (!handleDesc) + [[unlikely]] return 0; + + std::scoped_lock lock(handleDesc->mutex); + if (!handleDesc->pins) { + // If we're in the unmap queue we can just remove ourselves and return since we're already + // mapped + { + // Lock now to prevent our queue entry from being removed for allocation in-between the + // following check and erase + std::scoped_lock queueLock(unmap_queue_lock); + if (handleDesc->unmap_queue_entry) { + unmap_queue.erase(*handleDesc->unmap_queue_entry); + handleDesc->unmap_queue_entry.reset(); + + handleDesc->pins++; + return handleDesc->pin_virt_address; + } + } + + // If not then allocate some space and map it + u32 address{}; + while (!(address = smmuAllocator.Allocate(static_cast(handleDesc->aligned_size)))) { + // Free handles until the allocation succeeds + std::scoped_lock queueLock(unmap_queue_lock); + if (auto 
freeHandleDesc{unmap_queue.front()}) { + // Handles in the unmap queue are guaranteed not to be pinned so don't bother + // checking if they are before unmapping + std::scoped_lock freeLock(freeHandleDesc->mutex); + if (handleDesc->pin_virt_address) + UnmapHandle(*freeHandleDesc); + } else { + LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); + } + } + + state.soc->smmu.Map(address, handleDesc->GetPointer(), + static_cast(handleDesc->aligned_size)); + handleDesc->pin_virt_address = address; + } + + handleDesc->pins++; + return handleDesc->pin_virt_address; + */ +} + +void NvMap::UnpinHandle(Handle::Id handle) { + UNIMPLEMENTED_MSG("Unpinning"); + /* + auto handleDesc{GetHandle(handle)}; + if (!handleDesc) + return; + + std::scoped_lock lock(handleDesc->mutex); + if (--handleDesc->pins < 0) { + LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!"); + } else if (!handleDesc->pins) { + std::scoped_lock queueLock(unmap_queue_lock); + + // Add to the unmap queue allowing this handle's memory to be freed if needed + unmap_queue.push_back(handleDesc); + handleDesc->unmap_queue_entry = std::prev(unmap_queue.end()); + } + */ +} + +std::optional NvMap::FreeHandle(Handle::Id handle, bool internal_session) { + std::weak_ptr hWeak{GetHandle(handle)}; + FreeInfo freeInfo; + + // We use a weak ptr here so we can tell when the handle has been freed and report that back to + // guest + if (auto handleDesc = hWeak.lock()) { + std::scoped_lock lock(handleDesc->mutex); + + if (internal_session) { + if (--handleDesc->internal_dupes < 0) + LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!"); + } else { + if (--handleDesc->dupes < 0) { + LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); + } else if (handleDesc->dupes == 0) { + // Force unmap the handle + if (handleDesc->pin_virt_address) { + std::scoped_lock queueLock(unmap_queue_lock); + UnmapHandle(*handleDesc); + } + + handleDesc->pins = 0; + } + } + + // Try to remove the shared ptr to the handle from the map, if nothing else is using the + // handle then it will now be freed when `handleDesc` goes out of scope + if (TryRemoveHandle(*handleDesc)) + LOG_ERROR(Service_NVDRV, "Removed nvmap handle: {}", handle); + else + LOG_ERROR(Service_NVDRV, + "Tried to free nvmap handle: {} but didn't as it still has duplicates", + handle); + + freeInfo = { + .address = handleDesc->address, + .size = handleDesc->size, + .was_uncached = handleDesc->flags.map_uncached.Value() != 0, + }; + } else { + return std::nullopt; + } + + // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed + if (!hWeak.expired()) { + LOG_ERROR(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle); + freeInfo.address = 0; + } + + return freeInfo; +} + +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h new file mode 100644 index 0000000000..e47aa755d6 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -0,0 +1,155 @@ +// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
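
The nvmap.h header that follows defines the ownership model used throughout this patch: NvMap owns every Handle through a shared_ptr map, while guest-visible lifetime is tracked with explicit dupe counters. A reduced, self-contained sketch of that model, assuming nothing beyond the standard library (DemoHandle and DemoMap are illustrative names, not yuzu types):

    #include <cstdint>
    #include <memory>
    #include <mutex>
    #include <unordered_map>

    // One owning shared_ptr per handle; guest dupes and emulator-internal
    // dupes are plain counters guarded by the per-handle mutex.
    struct DemoHandle {
        std::mutex mutex;
        int dupes{1};          // guest references; a new handle starts at 1
        int internal_dupes{0}; // emulator-internal references
    };

    class DemoMap {
    public:
        uint32_t Create() {
            std::scoped_lock lk{lock};
            const uint32_t id = next_id;
            next_id += 4; // mirrors HandleIdIncrement in the header below
            map.emplace(id, std::make_shared<DemoHandle>());
            return id;
        }

        // Returns true when the last dupe is gone and the entry was erased.
        bool Free(uint32_t id) {
            std::scoped_lock lk{lock};
            const auto it = map.find(id);
            if (it == map.end()) {
                return false;
            }
            auto handle = it->second; // keep alive past a possible erase
            std::scoped_lock hlk{handle->mutex};
            if (--handle->dupes == 0 && handle->internal_dupes == 0) {
                map.erase(it); // handle's mutex stays valid: we still own it
                return true;
            }
            return false;
        }

    private:
        std::mutex lock;
        std::unordered_map<uint32_t, std::shared_ptr<DemoHandle>> map;
        uint32_t next_id{4};
    };

    int main() {
        DemoMap map;
        const uint32_t id = map.Create();
        return map.Free(id) ? 0 : 1; // single dupe: freed immediately
    }

FreeHandle in nvmap.cpp above follows the same shape, with the extra twist that a std::weak_ptr is consulted afterwards to tell the guest whether the final reference actually died.
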
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "common/bit_field.h" +#include "common/common_types.h" +#include "core/hle/service/nvdrv/nvdata.h" + +namespace Service::Nvidia::NvCore { +/** + * @brief The nvmap core class holds the global state for nvmap and provides methods to manage + * handles + */ +class NvMap { +public: + /** + * @brief A handle to a contiguous block of memory in an application's address space + */ + struct Handle { + std::mutex mutex; + + u64 align{}; //!< The alignment to use when pinning the handle onto the SMMU + u64 size; //!< Page-aligned size of the memory the handle refers to + u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to + u64 orig_size; //!< Original unaligned size of the memory this handle refers to + + s32 dupes{1}; //!< How many guest references there are to this handle + s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle + + using Id = u32; + Id id; //!< A globally unique identifier for this handle + + s32 pins{}; + u32 pin_virt_address{}; + std::optional>::iterator> unmap_queue_entry{}; + + union Flags { + BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached + BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was + //!< allocated with a fixed address + BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins + } flags{}; + static_assert(sizeof(Flags) == sizeof(u32)); + + u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to, + //!< this can also be in the nvdrv tmem + bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC + //!< call + + u8 kind{}; //!< Used for memory compression + bool allocated{}; //!< If the handle has been allocated with `Alloc` + + Handle(u64 size, Id id); + + /** + * @brief Sets up the handle with the given memory config, can allocate memory from the tmem + * if a 0 address is passed + */ + [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress); + + /** + * @brief Increases the dupe counter of the handle for the given session + */ + [[nodiscard]] NvResult Duplicate(bool internal_session); + + /** + * @brief Obtains a pointer to the handle's memory and marks the handle it as having been + * mapped + */ + u8* GetPointer() { + if (!address) { + return nullptr; + } + + is_shared_mem_mapped = true; + return reinterpret_cast(address); + } + }; + +private: + std::list> unmap_queue; + std::mutex unmap_queue_lock; //!< Protects access to `unmap_queue` + + std::unordered_map> handles; //!< Main owning map of handles + std::mutex handles_lock; //!< Protects access to `handles` + + static constexpr u32 HandleIdIncrement{ + 4}; //!< Each new handle ID is an increment of 4 from the previous + std::atomic next_handle_id{HandleIdIncrement}; + + void AddHandle(std::shared_ptr handle); + + /** + * @brief Unmaps and frees the SMMU memory region a handle is mapped to + * @note Both `unmap_queue_lock` and `handleDesc.mutex` MUST be locked when calling this + */ + void UnmapHandle(Handle& handleDesc); + + /** + * @brief Removes a handle from the map taking its dupes into account + * @note handleDesc.mutex MUST be locked when calling this + * @return If the handle was removed from the map + */ + bool TryRemoveHandle(const Handle& handleDesc); + +public: + /** + * @brief Encapsulates the result of a FreeHandle operation + */ + struct FreeInfo { + u64 address; //!< Address the handle 
referred to before deletion + u64 size; //!< Page-aligned handle size + bool was_uncached; //!< If the handle was allocated as uncached + }; + + NvMap(); + + /** + * @brief Creates an unallocated handle of the given size + */ + [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr& result_out); + + std::shared_ptr GetHandle(Handle::Id handle); + + /** + * @brief Maps a handle into the SMMU address space + * @note This operation is refcounted, the number of calls to this must eventually match the + * number of calls to `UnpinHandle` + * @return The SMMU virtual address that the handle has been mapped to + */ + u32 PinHandle(Handle::Id handle); + + /** + * @brief When this has been called an equal number of times to `PinHandle` for the supplied + * handle it will be added to a list of handles to be freed when necessary + */ + void UnpinHandle(Handle::Id handle); + + /** + * @brief Tries to free a handle and remove a single dupe + * @note If a handle has no dupes left and has no other users a FreeInfo struct will be returned + * describing the prior state of the handle + */ + std::optional FreeHandle(Handle::Id handle, bool internal_session); +}; +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp similarity index 88% rename from src/core/hle/service/nvdrv/syncpoint_manager.cpp rename to src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index a6fa943e8a..ff6cbb37e5 100644 --- a/src/core/hle/service/nvdrv/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "common/assert.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "video_core/gpu.h" -namespace Service::Nvidia { +namespace Service::Nvidia::NvCore { SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {} @@ -35,4 +35,4 @@ u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) { return GetSyncpointMax(syncpoint_id); } -} // namespace Service::Nvidia +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h similarity index 97% rename from src/core/hle/service/nvdrv/syncpoint_manager.h rename to src/core/hle/service/nvdrv/core/syncpoint_manager.h index 7f080f76e2..cf7f0b4bee 100644 --- a/src/core/hle/service/nvdrv/syncpoint_manager.h +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h @@ -13,7 +13,7 @@ namespace Tegra { class GPU; } -namespace Service::Nvidia { +namespace Service::Nvidia::NvCore { class SyncpointManager final { public: @@ -81,4 +81,4 @@ private: Tegra::GPU& gpu; }; -} // namespace Service::Nvidia +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 55acd4f78e..5e2155e6c7 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -12,15 +12,17 @@ #include "core/core.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_writable_event.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" #include "video_core/gpu.h" namespace Service::Nvidia::Devices { nvhost_ctrl::nvhost_ctrl(Core::System& system_, 
EventInterface& events_interface_, - SyncpointManager& syncpoint_manager_) - : nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{ - syncpoint_manager_} {} + NvCore::Container& core_) + : nvdevice{system_}, events_interface{events_interface_}, core{core_}, + syncpoint_manager{core_.GetSyncpointManager()} {} nvhost_ctrl::~nvhost_ctrl() = default; NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index d548a78277..9fd46ea5f0 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -10,12 +10,17 @@ #include "core/hle/service/nvdrv/devices/nvdevice.h" #include "core/hle/service/nvdrv/nvdrv.h" +namespace Service::Nvidia::NvCore { +class Container; +class SyncpointManager; +} // namespace Service::Nvidia::NvCore + namespace Service::Nvidia::Devices { class nvhost_ctrl final : public nvdevice { public: explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, - SyncpointManager& syncpoint_manager_); + NvCore::Container& core); ~nvhost_ctrl() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -145,7 +150,8 @@ private: NvResult FreeEvent(u32 slot); EventInterface& events_interface; - SyncpointManager& syncpoint_manager; + NvCore::Container& core; + NvCore::SyncpointManager& syncpoint_manager; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index e87fa59924..a480bfc47f 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -5,9 +5,10 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" #include "core/hle/service/nvdrv/nvdrv.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" #include "core/memory.h" #include "video_core/gpu.h" @@ -22,10 +23,10 @@ Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoi } // namespace nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr nvmap_dev_, - EventInterface& events_interface_, SyncpointManager& syncpoint_manager_) + EventInterface& events_interface_, NvCore::Container& core_) : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, events_interface{events_interface_}, - syncpoint_manager{syncpoint_manager_} { - channel_fence.id = syncpoint_manager_.AllocateSyncpoint(); + core{core_}, syncpoint_manager{core_.GetSyncpointManager()} { + channel_fence.id = syncpoint_manager.AllocateSyncpoint(); channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); sm_exception_breakpoint_int_report_event = events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointInt"); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index eb4936df0e..4f73a7bae3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -14,7 +14,12 @@ #include "video_core/dma_pusher.h" namespace Service::Nvidia { + +namespace NvCore { +class Container; class SyncpointManager; +} // namespace NvCore + class EventInterface; } // namespace Service::Nvidia @@ -24,7 +29,7 @@ class 
nvmap; class nvhost_gpu final : public nvdevice { public: explicit nvhost_gpu(Core::System& system_, std::shared_ptr nvmap_dev_, - EventInterface& events_interface_, SyncpointManager& syncpoint_manager_); + EventInterface& events_interface_, NvCore::Container& core); ~nvhost_gpu() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -196,7 +201,8 @@ private: std::shared_ptr nvmap_dev; EventInterface& events_interface; - SyncpointManager& syncpoint_manager; + NvCore::Container& core; + NvCore::SyncpointManager& syncpoint_manager; NvFence channel_fence; // Events diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index a7385fce81..2c9158c7c8 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -11,8 +11,8 @@ namespace Service::Nvidia::Devices { nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_) - : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {} + NvCore::Container& core) + : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {} nvhost_nvdec::~nvhost_nvdec() = default; NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index 29b3e6a360..04da4a9136 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h @@ -11,7 +11,7 @@ namespace Service::Nvidia::Devices { class nvhost_nvdec final : public nvhost_nvdec_common { public: explicit nvhost_nvdec(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_); + NvCore::Container& core); ~nvhost_nvdec() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 8b2cd9bf1d..5a9c59f371 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -8,9 +8,10 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" #include "core/hle/service/nvdrv/devices/nvmap.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" #include "core/memory.h" #include "video_core/memory_manager.h" #include "video_core/renderer_base.h" @@ -45,8 +46,9 @@ std::size_t WriteVectors(std::vector& dst, const std::vector& src, std::s } // Anonymous namespace nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {} + NvCore::Container& core_) + : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, core{core_}, + syncpoint_manager{core.GetSyncpointManager()} {} nvhost_nvdec_common::~nvhost_nvdec_common() = default; NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector& input) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 12d39946db..cccc94a584 100644 --- 
a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -9,7 +9,11 @@ #include "core/hle/service/nvdrv/devices/nvdevice.h" namespace Service::Nvidia { + +namespace NvCore { class SyncpointManager; +class Container; +} // namespace NvCore namespace Devices { class nvmap; @@ -17,7 +21,7 @@ class nvmap; class nvhost_nvdec_common : public nvdevice { public: explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_); + NvCore::Container& core); ~nvhost_nvdec_common() override; protected: @@ -114,7 +118,8 @@ protected: s32_le nvmap_fd{}; u32_le submit_timeout{}; std::shared_ptr nvmap_dev; - SyncpointManager& syncpoint_manager; + NvCore::Container& core; + NvCore::SyncpointManager& syncpoint_manager; std::array device_syncpoints{}; }; }; // namespace Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index f58e8bada5..66558c331b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -9,8 +9,8 @@ namespace Service::Nvidia::Devices { nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_) - : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {} + NvCore::Container& core) + : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {} nvhost_vic::~nvhost_vic() = default; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index b41b195aee..6f9838b2dc 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -10,7 +10,7 @@ namespace Service::Nvidia::Devices { class nvhost_vic final : public nvhost_nvdec_common { public: explicit nvhost_vic(Core::System& system_, std::shared_ptr nvmap_dev_, - SyncpointManager& syncpoint_manager_); + NvCore::Container& core); ~nvhost_vic(); NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index df46562403..824c0e2906 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -11,6 +11,7 @@ #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_writable_event.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" @@ -24,8 +25,8 @@ #include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/nvdrv/nvdrv_interface.h" #include "core/hle/service/nvdrv/nvmemp.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" #include "core/hle/service/nvflinger/nvflinger.h" +#include "video_core/gpu.h" namespace Service::Nvidia { @@ -75,6 +76,7 @@ void EventInterface::Create(u32 event_id) { const u64 mask = 1ULL << event_id; fails[event_id] = 0; events_mask |= mask; + assigned_syncpt[event_id] = 0; } void EventInterface::Free(u32 event_id) { @@ -135,22 +137,22 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger } Module::Module(Core::System& system) - : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"}, events_interface{*this} { + : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} { 
auto nvmap_dev = std::make_shared(system); devices["/dev/nvhost-as-gpu"] = std::make_shared(system, nvmap_dev); - devices["/dev/nvhost-gpu"] = std::make_shared( - system, nvmap_dev, events_interface, syncpoint_manager); + devices["/dev/nvhost-gpu"] = + std::make_shared(system, nvmap_dev, events_interface, container); devices["/dev/nvhost-ctrl-gpu"] = std::make_shared(system, events_interface); devices["/dev/nvmap"] = nvmap_dev; devices["/dev/nvdisp_disp0"] = std::make_shared(system, nvmap_dev); devices["/dev/nvhost-ctrl"] = - std::make_shared(system, events_interface, syncpoint_manager); + std::make_shared(system, events_interface, container); devices["/dev/nvhost-nvdec"] = - std::make_shared(system, nvmap_dev, syncpoint_manager); + std::make_shared(system, nvmap_dev, container); devices["/dev/nvhost-nvjpg"] = std::make_shared(system); devices["/dev/nvhost-vic"] = - std::make_shared(system, nvmap_dev, syncpoint_manager); + std::make_shared(system, nvmap_dev, container); } Module::~Module() = default; diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index d24b575399..96adf2ffbc 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -12,8 +12,8 @@ #include "common/common_types.h" #include "core/hle/service/kernel_helpers.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/nvdata.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" #include "core/hle/service/nvflinger/ui/fence.h" #include "core/hle/service/service.h" @@ -31,7 +31,10 @@ class NVFlinger; namespace Service::Nvidia { +namespace NvCore { +class Container; class SyncpointManager; +} // namespace NvCore namespace Devices { class nvdevice; @@ -126,9 +129,6 @@ public: private: friend class EventInterface; - /// Manages syncpoints on the host - SyncpointManager syncpoint_manager; - /// Id to use for the next open file descriptor. 
DeviceFD next_fd = 1; @@ -142,6 +142,9 @@ private: EventInterface events_interface; + /// Manages syncpoints on the host + NvCore::Container container; + void CreateEvent(u32 event_id); void FreeEvent(u32 event_id); }; From de0e8eff429b4374c18e3325ad3747db55bddddd Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 4 Nov 2021 12:51:17 +0100 Subject: [PATCH 07/68] NVDRV: Implement new NvMap --- src/core/hle/service/nvdrv/core/nvmap.cpp | 119 +++++----- src/core/hle/service/nvdrv/core/nvmap.h | 12 +- .../service/nvdrv/devices/nvdisp_disp0.cpp | 14 +- .../hle/service/nvdrv/devices/nvdisp_disp0.h | 12 +- .../service/nvdrv/devices/nvhost_as_gpu.cpp | 54 +++-- .../hle/service/nvdrv/devices/nvhost_as_gpu.h | 12 +- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 2 + .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 9 +- .../hle/service/nvdrv/devices/nvhost_gpu.h | 7 +- .../service/nvdrv/devices/nvhost_nvdec.cpp | 5 +- .../hle/service/nvdrv/devices/nvhost_nvdec.h | 3 +- .../nvdrv/devices/nvhost_nvdec_common.cpp | 26 ++- .../nvdrv/devices/nvhost_nvdec_common.h | 11 +- .../hle/service/nvdrv/devices/nvhost_vic.cpp | 5 +- .../hle/service/nvdrv/devices/nvhost_vic.h | 3 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 218 +++++++++--------- src/core/hle/service/nvdrv/devices/nvmap.h | 55 ++--- src/core/hle/service/nvdrv/nvdrv.cpp | 15 +- 18 files changed, 306 insertions(+), 276 deletions(-) diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index d3f227f526..281381cc41 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -17,7 +17,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) std::scoped_lock lock(mutex); // Handles cannot be allocated twice - if (allocated) [[unlikely]] + if (allocated) return NvResult::AccessDenied; flags = pFlags; @@ -61,33 +61,34 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) { NvMap::NvMap() = default; -void NvMap::AddHandle(std::shared_ptr handleDesc) { +void NvMap::AddHandle(std::shared_ptr handle_description) { std::scoped_lock lock(handles_lock); - handles.emplace(handleDesc->id, std::move(handleDesc)); + handles.emplace(handle_description->id, std::move(handle_description)); } -void NvMap::UnmapHandle(Handle& handleDesc) { +void NvMap::UnmapHandle(Handle& handle_description) { // Remove pending unmap queue entry if needed - if (handleDesc.unmap_queue_entry) { - unmap_queue.erase(*handleDesc.unmap_queue_entry); - handleDesc.unmap_queue_entry.reset(); + if (handle_description.unmap_queue_entry) { + unmap_queue.erase(*handle_description.unmap_queue_entry); + handle_description.unmap_queue_entry.reset(); } // Free and unmap the handle from the SMMU /* - state.soc->smmu.Unmap(handleDesc.pin_virt_address, static_cast(handleDesc.aligned_size)); - smmuAllocator.Free(handleDesc.pin_virt_address, static_cast(handleDesc.aligned_size)); - handleDesc.pin_virt_address = 0; + state.soc->smmu.Unmap(handle_description.pin_virt_address, + static_cast(handle_description.aligned_size)); + smmuAllocator.Free(handle_description.pin_virt_address, + static_cast(handle_description.aligned_size)); handle_description.pin_virt_address = 0; */ } -bool NvMap::TryRemoveHandle(const Handle& handleDesc) { +bool NvMap::TryRemoveHandle(const Handle& handle_description) { // No dupes left, we can remove from handle map - if (handleDesc.dupes == 0 && handleDesc.internal_dupes == 0) { + if (handle_description.dupes == 0 && handle_description.internal_dupes == 
0) { std::scoped_lock lock(handles_lock); - auto it{handles.find(handleDesc.id)}; + auto it{handles.find(handle_description.id)}; if (it != handles.end()) handles.erase(it); @@ -102,10 +103,10 @@ NvResult NvMap::CreateHandle(u64 size, std::shared_ptr& result_ou return NvResult::BadValue; u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)}; - auto handleDesc{std::make_shared(size, id)}; - AddHandle(handleDesc); + auto handle_description{std::make_shared(size, id)}; + AddHandle(handle_description); - result_out = handleDesc; + result_out = handle_description; return NvResult::Success; } @@ -118,73 +119,83 @@ std::shared_ptr NvMap::GetHandle(Handle::Id handle) { } } +VAddr NvMap::GetHandleAddress(Handle::Id handle) { + std::scoped_lock lock(handles_lock); + try { + return handles.at(handle)->address; + } catch ([[maybe_unused]] std::out_of_range& e) { + return 0; + } +} + u32 NvMap::PinHandle(NvMap::Handle::Id handle) { UNIMPLEMENTED_MSG("pinning"); return 0; /* - auto handleDesc{GetHandle(handle)}; - if (!handleDesc) + auto handle_description{GetHandle(handle)}; + if (!handle_description) [[unlikely]] return 0; - std::scoped_lock lock(handleDesc->mutex); - if (!handleDesc->pins) { + std::scoped_lock lock(handle_description->mutex); + if (!handle_description->pins) { // If we're in the unmap queue we can just remove ourselves and return since we're already // mapped { // Lock now to prevent our queue entry from being removed for allocation in-between the // following check and erase std::scoped_lock queueLock(unmap_queue_lock); - if (handleDesc->unmap_queue_entry) { - unmap_queue.erase(*handleDesc->unmap_queue_entry); - handleDesc->unmap_queue_entry.reset(); + if (handle_description->unmap_queue_entry) { + unmap_queue.erase(*handle_description->unmap_queue_entry); + handle_description->unmap_queue_entry.reset(); - handleDesc->pins++; - return handleDesc->pin_virt_address; + handle_description->pins++; + return handle_description->pin_virt_address; } } // If not then allocate some space and map it u32 address{}; - while (!(address = smmuAllocator.Allocate(static_cast(handleDesc->aligned_size)))) { + while (!(address = + smmuAllocator.Allocate(static_cast(handle_description->aligned_size)))) { // Free handles until the allocation succeeds std::scoped_lock queueLock(unmap_queue_lock); if (auto freeHandleDesc{unmap_queue.front()}) { // Handles in the unmap queue are guaranteed not to be pinned so don't bother // checking if they are before unmapping std::scoped_lock freeLock(freeHandleDesc->mutex); - if (handleDesc->pin_virt_address) + if (handle_description->pin_virt_address) UnmapHandle(*freeHandleDesc); } else { LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); } } - state.soc->smmu.Map(address, handleDesc->GetPointer(), - static_cast(handleDesc->aligned_size)); - handleDesc->pin_virt_address = address; + state.soc->smmu.Map(address, handle_description->GetPointer(), + static_cast(handle_description->aligned_size)); + handle_description->pin_virt_address = address; } - handleDesc->pins++; - return handleDesc->pin_virt_address; + handle_description->pins++; + return handle_description->pin_virt_address; */ } void NvMap::UnpinHandle(Handle::Id handle) { UNIMPLEMENTED_MSG("Unpinning"); /* - auto handleDesc{GetHandle(handle)}; - if (!handleDesc) + auto handle_description{GetHandle(handle)}; + if (!handle_description) return; - std::scoped_lock lock(handleDesc->mutex); - if (--handleDesc->pins < 0) { + std::scoped_lock lock(handle_description->mutex); + if 
(--handle_description->pins < 0) { LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!"); - } else if (!handleDesc->pins) { + } else if (!handle_description->pins) { std::scoped_lock queueLock(unmap_queue_lock); // Add to the unmap queue allowing this handle's memory to be freed if needed - unmap_queue.push_back(handleDesc); - handleDesc->unmap_queue_entry = std::prev(unmap_queue.end()); + unmap_queue.push_back(handle_description); + handle_description->unmap_queue_entry = std::prev(unmap_queue.end()); } */ } @@ -195,39 +206,39 @@ std::optional NvMap::FreeHandle(Handle::Id handle, bool interna // We use a weak ptr here so we can tell when the handle has been freed and report that back to // guest - if (auto handleDesc = hWeak.lock()) { - std::scoped_lock lock(handleDesc->mutex); + if (auto handle_description = hWeak.lock()) { + std::scoped_lock lock(handle_description->mutex); if (internal_session) { - if (--handleDesc->internal_dupes < 0) + if (--handle_description->internal_dupes < 0) LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!"); } else { - if (--handleDesc->dupes < 0) { + if (--handle_description->dupes < 0) { LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); - } else if (handleDesc->dupes == 0) { + } else if (handle_description->dupes == 0) { // Force unmap the handle - if (handleDesc->pin_virt_address) { + if (handle_description->pin_virt_address) { std::scoped_lock queueLock(unmap_queue_lock); - UnmapHandle(*handleDesc); + UnmapHandle(*handle_description); } - handleDesc->pins = 0; + handle_description->pins = 0; } } // Try to remove the shared ptr to the handle from the map, if nothing else is using the - // handle then it will now be freed when `handleDesc` goes out of scope - if (TryRemoveHandle(*handleDesc)) - LOG_ERROR(Service_NVDRV, "Removed nvmap handle: {}", handle); + // handle then it will now be freed when `handle_description` goes out of scope + if (TryRemoveHandle(*handle_description)) + LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle); else - LOG_ERROR(Service_NVDRV, + LOG_DEBUG(Service_NVDRV, "Tried to free nvmap handle: {} but didn't as it still has duplicates", handle); freeInfo = { - .address = handleDesc->address, - .size = handleDesc->size, - .was_uncached = handleDesc->flags.map_uncached.Value() != 0, + .address = handle_description->address, + .size = handle_description->size, + .was_uncached = handle_description->flags.map_uncached.Value() != 0, }; } else { return std::nullopt; diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index e47aa755d6..994c70e6fa 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -59,6 +59,8 @@ public: u8 kind{}; //!< Used for memory compression bool allocated{}; //!< If the handle has been allocated with `Alloc` + u64 dma_map_addr{}; //! remove me after implementing pinning. 
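
PinHandle and UnpinHandle above are still stubbed out with UNIMPLEMENTED_MSG, but the commented-out Skyline implementation fixes the contract: the first pin maps the handle into the SMMU, later pins only bump a counter, and the last unpin parks the handle on a deferred unmap queue instead of unmapping eagerly. A standalone sketch of that contract; SmmuMap, Pinnable and PinTracker are stand-ins for the SMMU pieces yuzu has not implemented yet, and the real code additionally rescues a still-mapped handle from the unmap queue rather than remapping it (omitted here):

    #include <cstdint>
    #include <list>
    #include <mutex>

    struct Pinnable {
        std::mutex mutex;
        int pins{};
        uint32_t pin_virt_address{};
        uint64_t aligned_size{};
    };

    // Fake SMMU mapping call; the real patch would route this through the
    // SMMU allocator once pinning is implemented.
    uint32_t SmmuMap(uint64_t /*aligned_size*/) {
        return 0x10000;
    }

    class PinTracker {
    public:
        // First pin maps the memory; later pins just bump the count.
        uint32_t Pin(Pinnable& h) {
            std::scoped_lock lk{h.mutex};
            if (h.pins == 0) {
                h.pin_virt_address = SmmuMap(h.aligned_size);
            }
            ++h.pins;
            return h.pin_virt_address;
        }

        // The last unpin does not unmap eagerly: the handle joins a queue so
        // its SMMU range is only recycled when a later allocation needs room.
        void Unpin(Pinnable& h) {
            std::scoped_lock lk{h.mutex};
            if (--h.pins == 0) {
                std::scoped_lock qlk{queue_mutex};
                unmap_queue.push_back(&h);
            }
        }

    private:
        std::mutex queue_mutex;
        std::list<Pinnable*> unmap_queue;
    };

    int main() {
        PinTracker tracker;
        Pinnable h;
        h.aligned_size = 0x2000;
        const uint32_t va = tracker.Pin(h); // maps on first pin
        tracker.Pin(h);                     // second pin: same address
        tracker.Unpin(h);
        tracker.Unpin(h);                   // pins hit 0: queued for unmap
        return va == 0 ? 1 : 0;
    }
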
+ Handle(u64 size, Id id); /** @@ -101,16 +103,16 @@ private: /** * @brief Unmaps and frees the SMMU memory region a handle is mapped to - * @note Both `unmap_queue_lock` and `handleDesc.mutex` MUST be locked when calling this + * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this */ - void UnmapHandle(Handle& handleDesc); + void UnmapHandle(Handle& handle_description); /** * @brief Removes a handle from the map taking its dupes into account - * @note handleDesc.mutex MUST be locked when calling this + * @note handle_description.mutex MUST be locked when calling this * @return If the handle was removed from the map */ - bool TryRemoveHandle(const Handle& handleDesc); + bool TryRemoveHandle(const Handle& handle_description); public: /** @@ -131,6 +133,8 @@ public: std::shared_ptr GetHandle(Handle::Id handle); + VAddr GetHandleAddress(Handle::Id handle); + /** * @brief Maps a handle into the SMMU address space * @note This operation is refcounted, the number of calls to this must eventually match the diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 6047119142..b1c0e9eb2d 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -5,15 +5,16 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/core_timing.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" -#include "core/hle/service/nvdrv/devices/nvmap.h" #include "core/perf_stats.h" #include "video_core/gpu.h" namespace Service::Nvidia::Devices { -nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr nvmap_dev_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} +nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core) + : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {} nvdisp_disp0::~nvdisp_disp0() = default; NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -40,7 +41,7 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {} void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, u32 stride, android::BufferTransformFlags transform, const Common::Rectangle& crop_rect) { - const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); + const VAddr addr = nvmap.GetHandleAddress(buffer_handle); LOG_TRACE(Service, "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", addr, offset, width, height, stride, format); @@ -54,4 +55,9 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form system.GetPerfStats().BeginSystemFrame(); } +Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) { + LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id); + return nullptr; +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index 67b105e020..1ca9b2e74a 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h @@ -11,13 +11,18 @@ #include "core/hle/service/nvflinger/buffer_transform_flags.h" #include "core/hle/service/nvflinger/pixel_format.h" +namespace Service::Nvidia::NvCore { +class Container; +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::Nvidia::Devices { 
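
nvdisp_disp0 below is rewired the same way as every other device in this patch: the constructor takes the shared NvCore::Container and the class caches references to the services it actually uses. A reduced sketch of that wiring, with FakeGpu, NvMapService and SyncpointService as placeholders for Tegra::GPU, NvCore::NvMap and NvCore::SyncpointManager:

    struct FakeGpu {};

    struct NvMapService {};
    struct SyncpointService {
        explicit SyncpointService(FakeGpu&) {}
    };

    // Stand-in for NvCore::Container: owns the per-module services.
    class Container {
    public:
        explicit Container(FakeGpu& gpu) : manager{gpu} {}
        NvMapService& GetNvMapFile() {
            return file;
        }
        SyncpointService& GetSyncpointManager() {
            return manager;
        }

    private:
        NvMapService file;
        SyncpointService manager;
    };

    // Stand-in for a device: one Container& parameter replaces the old
    // per-service constructor arguments.
    class Device {
    public:
        explicit Device(Container& core)
            : container{core}, nvmap{core.GetNvMapFile()},
              syncpoints{core.GetSyncpointManager()} {}

    private:
        Container& container;
        NvMapService& nvmap;
        SyncpointService& syncpoints;
    };

    int main() {
        FakeGpu gpu;
        Container core{gpu};
        Device disp{core};
    }

Caching the references up front keeps hot paths free of repeated GetNvMapFile() calls, and the references remain valid as long as the Module that owns the Container outlives the devices.
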
class nvmap; class nvdisp_disp0 final : public nvdevice { public: - explicit nvdisp_disp0(Core::System& system_, std::shared_ptr nvmap_dev_); + explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core); ~nvdisp_disp0() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -35,8 +40,11 @@ public: u32 stride, android::BufferTransformFlags transform, const Common::Rectangle& crop_rect); + Kernel::KEvent* QueryEvent(u32 event_id) override; + private: - std::shared_ptr nvmap_dev; + NvCore::Container& container; + NvCore::NvMap& nvmap; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 9867a648d8..9283d6aec9 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -7,15 +7,16 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" -#include "core/hle/service/nvdrv/devices/nvmap.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" namespace Service::Nvidia::Devices { -nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr nvmap_dev_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} +nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, NvCore::Container& core) + : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {} nvhost_as_gpu::~nvhost_as_gpu() = default; NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -143,7 +144,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector& input, std::vector& out LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", entry.offset, entry.nvmap_handle, entry.pages); - const auto object{nvmap_dev->GetObject(entry.nvmap_handle)}; + const auto object{nvmap.GetHandle(entry.nvmap_handle)}; if (!object) { LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle); result = NvResult::InvalidState; @@ -153,7 +154,8 @@ NvResult nvhost_as_gpu::Remap(const std::vector& input, std::vector& out const auto offset{static_cast(entry.offset) << 0x10}; const auto size{static_cast(entry.pages) << 0x10}; const auto map_offset{static_cast(entry.map_offset) << 0x10}; - const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)}; + const auto addr{ + system.GPU().MemoryManager().Map(object->address + map_offset, offset, size)}; if (!addr) { LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); @@ -176,24 +178,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectorGetObject(params.nvmap_handle)}; - if (!object) { - LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle); - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::InvalidState; - } - - // The real nvservices doesn't make a distinction between handles and ids, and - // object can only have one handle and it will be the same as its id. Assert that this is the - // case to prevent unexpected behavior. 
- ASSERT(object->id == params.nvmap_handle); auto& gpu = system.GPU(); - - u64 page_size{params.page_size}; - if (!page_size) { - page_size = object->align; - } - if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) { if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) { const auto cpu_addr{static_cast(buffer_map->CpuAddr() + params.buffer_offset)}; @@ -220,10 +205,24 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectorstatus == nvmap::Object::Status::Allocated); + const auto object{nvmap.GetHandle(params.nvmap_handle)}; + if (!object) { + LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle); + std::memcpy(output.data(), ¶ms, output.size()); + return NvResult::InvalidState; + } - const auto physical_address{object->addr + params.buffer_offset}; + // The real nvservices doesn't make a distinction between handles and ids, and + // object can only have one handle and it will be the same as its id. Assert that this is the + // case to prevent unexpected behavior. + ASSERT(object->id == params.nvmap_handle); + + u64 page_size{params.page_size}; + if (!page_size) { + page_size = object->align; + } + + const auto physical_address{object->address + params.buffer_offset}; u64 size{params.mapping_size}; if (!size) { size = object->size; @@ -363,4 +362,9 @@ std::optional nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) { return std::nullopt; } +Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) { + LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id); + return nullptr; +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 555843a6f4..67d2f1e877 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -13,6 +13,11 @@ #include "common/swap.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" +namespace Service::Nvidia::NvCore { +class Container; +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::Nvidia::Devices { constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16; @@ -29,7 +34,7 @@ DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags); class nvhost_as_gpu final : public nvdevice { public: - explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr nvmap_dev_); + explicit nvhost_as_gpu(Core::System& system_, NvCore::Container& core); ~nvhost_as_gpu() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -42,6 +47,8 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; + Kernel::KEvent* QueryEvent(u32 event_id) override; + private: class BufferMap final { public: @@ -180,7 +187,8 @@ private: void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); std::optional RemoveBufferMap(GPUVAddr gpu_addr); - std::shared_ptr nvmap_dev; + NvCore::Container& container; + NvCore::NvMap& nvmap; // This is expected to be ordered, therefore we must use a map, not unordered_map std::map buffer_mappings; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 5e2155e6c7..51c40f6202 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -279,6 +279,8 @@ Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) { ASSERT(events_interface.events[slot]); return events_interface.events[slot]; } + // Is this 
possible in hardware? + ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id); return nullptr; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index a480bfc47f..e7921ade23 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -6,6 +6,7 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" #include "core/hle/service/nvdrv/nvdrv.h" @@ -22,10 +23,10 @@ Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoi } } // namespace -nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr nvmap_dev_, - EventInterface& events_interface_, NvCore::Container& core_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, events_interface{events_interface_}, - core{core_}, syncpoint_manager{core_.GetSyncpointManager()} { +nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_, + NvCore::Container& core_) + : nvdevice{system_}, events_interface{events_interface_}, core{core_}, + syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} { channel_fence.id = syncpoint_manager.AllocateSyncpoint(); channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); sm_exception_breakpoint_int_report_event = diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 4f73a7bae3..440c0c42d5 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -17,6 +17,7 @@ namespace Service::Nvidia { namespace NvCore { class Container; +class NvMap; class SyncpointManager; } // namespace NvCore @@ -28,8 +29,8 @@ namespace Service::Nvidia::Devices { class nvmap; class nvhost_gpu final : public nvdevice { public: - explicit nvhost_gpu(Core::System& system_, std::shared_ptr nvmap_dev_, - EventInterface& events_interface_, NvCore::Container& core); + explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_, + NvCore::Container& core); ~nvhost_gpu() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -199,10 +200,10 @@ private: NvResult ChannelSetTimeout(const std::vector& input, std::vector& output); NvResult ChannelSetTimeslice(const std::vector& input, std::vector& output); - std::shared_ptr nvmap_dev; EventInterface& events_interface; NvCore::Container& core; NvCore::SyncpointManager& syncpoint_manager; + NvCore::NvMap& nvmap; NvFence channel_fence; // Events diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index 2c9158c7c8..aa1a00832a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -10,9 +10,8 @@ namespace Service::Nvidia::Devices { -nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr nvmap_dev_, - NvCore::Container& core) - : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {} +nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core) + : nvhost_nvdec_common{system_, core} {} nvhost_nvdec::~nvhost_nvdec() = default; NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git 
a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index 04da4a9136..fef4b32167 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h @@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices { class nvhost_nvdec final : public nvhost_nvdec_common { public: - explicit nvhost_nvdec(Core::System& system_, std::shared_ptr nvmap_dev_, - NvCore::Container& core); + explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core); ~nvhost_nvdec() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 5a9c59f371..e76c9e5eda 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -9,9 +9,9 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" -#include "core/hle/service/nvdrv/devices/nvmap.h" #include "core/memory.h" #include "video_core/memory_manager.h" #include "video_core/renderer_base.h" @@ -45,10 +45,9 @@ std::size_t WriteVectors(std::vector& dst, const std::vector& src, std::s } } // Anonymous namespace -nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr nvmap_dev_, - NvCore::Container& core_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, core{core_}, - syncpoint_manager{core.GetSyncpointManager()} {} +nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_) + : nvdevice{system_}, core{core_}, + syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} {} nvhost_nvdec_common::~nvhost_nvdec_common() = default; NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector& input) { @@ -90,10 +89,10 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector& input, } } for (const auto& cmd_buffer : command_buffers) { - const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id); + const auto object = nvmap.GetHandle(cmd_buffer.memory_id); ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); - system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(), + system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), cmdlist.size() * sizeof(u32)); gpu.PushCommandBuffer(fd_to_id[fd], cmdlist); } @@ -125,6 +124,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector& input, std::ve NvResult nvhost_nvdec_common::GetWaitbase(const std::vector& input, std::vector& output) { IoctlGetWaitbase params{}; + LOG_CRITICAL(Service_NVDRV, "called WAITBASE"); std::memcpy(¶ms, input.data(), sizeof(IoctlGetWaitbase)); params.value = 0; // Seems to be hard coded at 0 std::memcpy(output.data(), ¶ms, sizeof(IoctlGetWaitbase)); @@ -141,7 +141,7 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector& input, std::vecto auto& gpu = system.GPU(); for (auto& cmd_buffer : cmd_buffer_handles) { - auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)}; + auto object{nvmap.GetHandle(cmd_buffer.map_handle)}; if (!object) { LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle); 
std::memcpy(output.data(), ¶ms, output.size()); @@ -150,7 +150,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector& input, std::vecto if (object->dma_map_addr == 0) { // NVDEC and VIC memory is in the 32-bit address space // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space - const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size); + const GPUVAddr low_addr = + gpu.MemoryManager().MapAllocate32(object->address, object->size); object->dma_map_addr = static_cast(low_addr); // Ensure that the dma_map_addr is indeed in the lower 32-bit address space. ASSERT(object->dma_map_addr == low_addr); @@ -158,7 +159,7 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector& input, std::vecto if (!object->dma_map_addr) { LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size); } else { - cmd_buffer.map_address = object->dma_map_addr; + cmd_buffer.map_address = static_cast(object->dma_map_addr); } } std::memcpy(output.data(), ¶ms, sizeof(IoctlMapBuffer)); @@ -184,4 +185,9 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector& input, return NvResult::Success; } +Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) { + LOG_CRITICAL(Service_NVDRV, "Unknown HOSTX1 Event {}", event_id); + return nullptr; +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index cccc94a584..74231d5c5c 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -11,17 +11,16 @@ namespace Service::Nvidia { namespace NvCore { -class SyncpointManager; class Container; +class NvMap; +class SyncpointManager; } // namespace NvCore namespace Devices { -class nvmap; class nvhost_nvdec_common : public nvdevice { public: - explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr nvmap_dev_, - NvCore::Container& core); + explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core); ~nvhost_nvdec_common() override; protected: @@ -114,12 +113,14 @@ protected: NvResult UnmapBuffer(const std::vector& input, std::vector& output); NvResult SetSubmitTimeout(const std::vector& input, std::vector& output); + Kernel::KEvent* QueryEvent(u32 event_id) override; + std::unordered_map fd_to_id{}; s32_le nvmap_fd{}; u32_le submit_timeout{}; - std::shared_ptr nvmap_dev; NvCore::Container& core; NvCore::SyncpointManager& syncpoint_manager; + NvCore::NvMap& nvmap; std::array device_syncpoints{}; }; }; // namespace Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index 66558c331b..358e89aa8e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -8,9 +8,8 @@ #include "video_core/renderer_base.h" namespace Service::Nvidia::Devices { -nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr nvmap_dev_, - NvCore::Container& core) - : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {} +nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core) + : nvhost_nvdec_common{system_, core} {} nvhost_vic::~nvhost_vic() = default; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index 6f9838b2dc..252b1e6f2a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ 
b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices { class nvhost_vic final : public nvhost_nvdec_common { public: - explicit nvhost_vic(Core::System& system_, std::shared_ptr nvmap_dev_, - NvCore::Container& core); + explicit nvhost_vic(Core::System& system_, NvCore::Container& core); ~nvhost_vic(); NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index d8518149d7..2aee68f5cb 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -2,19 +2,24 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include +#include #include +#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" +#include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvmap.h" +#include "core/memory.h" + +using Core::Memory::YUZU_PAGESIZE; namespace Service::Nvidia::Devices { -nvmap::nvmap(Core::System& system_) : nvdevice{system_} { - // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to - // represent this. - CreateObject(0); -} +nvmap::nvmap(Core::System& system_, NvCore::Container& container_) + : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {} nvmap::~nvmap() = default; @@ -63,38 +68,32 @@ void nvmap::OnOpen(DeviceFD fd) {} void nvmap::OnClose(DeviceFD fd) {} VAddr nvmap::GetObjectAddress(u32 handle) const { - auto object = GetObject(handle); - ASSERT(object); - ASSERT(object->status == Object::Status::Allocated); - return object->addr; + auto obj = file.GetHandle(handle); + if (obj) { + return obj->address; + } + return 0; } -u32 nvmap::CreateObject(u32 size) { - // Create a new nvmap object and obtain a handle to it. 
- auto object = std::make_shared(); - object->id = next_id++; - object->size = size; - object->status = Object::Status::Created; - object->refcount = 1; - - const u32 handle = next_handle++; - - handles.insert_or_assign(handle, std::move(object)); - - return handle; +std::shared_ptr nvmap::GetObject(u32 handle) const { + return file.GetHandle(handle); } NvResult nvmap::IocCreate(const std::vector& input, std::vector& output) { IocCreateParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size); + LOG_WARNING(Service_NVDRV, "called, size=0x{:08X}", params.size); - if (!params.size) { - LOG_ERROR(Service_NVDRV, "Size is 0"); - return NvResult::BadValue; + std::shared_ptr handle_description{}; + auto result = + file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Failed to create Object"); + return result; } - - params.handle = CreateObject(params.size); + handle_description->orig_size = params.size; // Orig size is the unaligned size + params.handle = handle_description->id; + LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size); std::memcpy(output.data(), ¶ms, sizeof(params)); return NvResult::Success; @@ -103,42 +102,42 @@ NvResult nvmap::IocCreate(const std::vector& input, std::vector& output) NvResult nvmap::IocAlloc(const std::vector& input, std::vector& output) { IocAllocParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr); + LOG_WARNING(Service_NVDRV, "called, addr={:X}", params.address); if (!params.handle) { - LOG_ERROR(Service_NVDRV, "Handle is 0"); + LOG_CRITICAL(Service_NVDRV, "Handle is 0"); return NvResult::BadValue; } if ((params.align - 1) & params.align) { - LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align); + LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align); return NvResult::BadValue; } - const u32 min_alignment = 0x1000; - if (params.align < min_alignment) { - params.align = min_alignment; + // Force page size alignment at a minimum + if (params.align < YUZU_PAGESIZE) { + params.align = YUZU_PAGESIZE; } - auto object = GetObject(params.handle); - if (!object) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); + auto handle_description{file.GetHandle(params.handle)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); return NvResult::BadValue; } - if (object->status == Object::Status::Allocated) { - LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle); + if (handle_description->allocated) { + LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle); return NvResult::InsufficientMemory; } - object->flags = params.flags; - object->align = params.align; - object->kind = params.kind; - object->addr = params.addr; - object->status = Object::Status::Allocated; - + const auto result = + handle_description->Alloc(params.flags, params.align, params.kind, params.address); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); + return result; + } std::memcpy(output.data(), ¶ms, sizeof(params)); - return NvResult::Success; + return result; } NvResult nvmap::IocGetId(const std::vector& input, std::vector& output) { @@ 
-147,19 +146,20 @@ NvResult nvmap::IocGetId(const std::vector& input, std::vector& output) LOG_WARNING(Service_NVDRV, "called"); + // See the comment in FromId for extra info on this function if (!params.handle) { - LOG_ERROR(Service_NVDRV, "Handle is zero"); + LOG_CRITICAL(Service_NVDRV, "Error!"); return NvResult::BadValue; } - auto object = GetObject(params.handle); - if (!object) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); - return NvResult::BadValue; + auto handle_description{file.GetHandle(params.handle)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Error!"); + return NvResult::AccessDenied; // This will always return EPERM irrespective of if the + // handle exists or not } - params.id = object->id; - + params.id = handle_description->id; std::memcpy(output.data(), ¶ms, sizeof(params)); return NvResult::Success; } @@ -168,26 +168,29 @@ NvResult nvmap::IocFromId(const std::vector& input, std::vector& output) IocFromIdParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "(STUBBED) called"); + LOG_WARNING(Service_NVDRV, "called, id:{}"); - auto itr = std::find_if(handles.begin(), handles.end(), - [&](const auto& entry) { return entry.second->id == params.id; }); - if (itr == handles.end()) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); + // Handles and IDs are always the same value in nvmap however IDs can be used globally given the + // right permissions. + // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and + // so this function just does simple validation and passes through the handle id. + if (!params.id) { + LOG_CRITICAL(Service_NVDRV, "Error!"); return NvResult::BadValue; } - auto& object = itr->second; - if (object->status != Object::Status::Allocated) { - LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); + auto handle_description{file.GetHandle(params.id)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Error!"); return NvResult::BadValue; } - itr->second->refcount++; - - // Return the existing handle instead of creating a new one. 
- params.handle = itr->first; - + auto result = handle_description->Duplicate(false); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Error!"); + return result; + } + params.handle = handle_description->id; std::memcpy(output.data(), ¶ms, sizeof(params)); return NvResult::Success; } @@ -198,35 +201,43 @@ NvResult nvmap::IocParam(const std::vector& input, std::vector& output) IocParamParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "(STUBBED) called type={}", params.param); + LOG_WARNING(Service_NVDRV, "called type={}", params.param); - auto object = GetObject(params.handle); - if (!object) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); + if (!params.handle) { + LOG_CRITICAL(Service_NVDRV, "Error!"); return NvResult::BadValue; } - if (object->status != Object::Status::Allocated) { - LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); + auto handle_description{file.GetHandle(params.handle)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Error!"); return NvResult::BadValue; } - switch (static_cast(params.param)) { - case ParamTypes::Size: - params.result = object->size; + switch (params.param) { + case HandleParameterType::Size: + params.result = static_cast(handle_description->orig_size); break; - case ParamTypes::Alignment: - params.result = object->align; + case HandleParameterType::Alignment: + params.result = static_cast(handle_description->align); break; - case ParamTypes::Heap: - // TODO(Subv): Seems to be a hardcoded value? - params.result = 0x40000000; + case HandleParameterType::Base: + params.result = static_cast(-22); // posix EINVAL break; - case ParamTypes::Kind: - params.result = object->kind; + case HandleParameterType::Heap: + if (handle_description->allocated) + params.result = 0x40000000; + else + params.result = 0x40000000; + break; + case HandleParameterType::Kind: + params.result = handle_description->kind; + break; + case HandleParameterType::IsSharedMemMapped: + params.result = handle_description->is_shared_mem_mapped; break; default: - UNIMPLEMENTED(); + return NvResult::BadValue; } std::memcpy(output.data(), ¶ms, sizeof(params)); @@ -234,46 +245,25 @@ NvResult nvmap::IocParam(const std::vector& input, std::vector& output) } NvResult nvmap::IocFree(const std::vector& input, std::vector& output) { - // TODO(Subv): These flags are unconfirmed. - enum FreeFlags { - Freed = 0, - NotFreedYet = 1, - }; - IocFreeParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "(STUBBED) called"); + LOG_WARNING(Service_NVDRV, "called"); - auto itr = handles.find(params.handle); - if (itr == handles.end()) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); - return NvResult::BadValue; - } - if (!itr->second->refcount) { - LOG_ERROR( - Service_NVDRV, - "There is no references to this object. The object is already freed. handle={:08X}", - params.handle); - return NvResult::BadValue; + if (!params.handle) { + LOG_CRITICAL(Service_NVDRV, "Handle null freed?"); + return NvResult::Success; } - itr->second->refcount--; - - params.size = itr->second->size; - - if (itr->second->refcount == 0) { - params.flags = Freed; - // The address of the nvmap is written to the output if we're finally freeing it, otherwise - // 0 is written. 
- params.address = itr->second->addr; + if (auto freeInfo{file.FreeHandle(params.handle, false)}) { + params.address = freeInfo->address; + params.size = static_cast(freeInfo->size); + params.flags = NvCore::NvMap::Handle::Flags{.map_uncached = freeInfo->was_uncached}; } else { - params.flags = NotFreedYet; - params.address = 0; + // This is possible when there's internel dups or other duplicates. + LOG_CRITICAL(Service_NVDRV, "Not freed"); } - handles.erase(params.handle); - std::memcpy(output.data(), ¶ms, sizeof(params)); return NvResult::Success; } diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index d5360d6e58..c22eb57a40 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h @@ -9,15 +9,23 @@ #include "common/common_funcs.h" #include "common/common_types.h" #include "common/swap.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" +namespace Service::Nvidia::NvCore { +class Container; +} // namespace Service::Nvidia::NvCore + namespace Service::Nvidia::Devices { class nvmap final : public nvdevice { public: - explicit nvmap(Core::System& system_); + explicit nvmap(Core::System& system_, NvCore::Container& container); ~nvmap() override; + nvmap(nvmap const&) = delete; + nvmap& operator=(nvmap const&) = delete; + NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, std::vector& output) override; NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector& input, @@ -31,27 +39,16 @@ public: /// Returns the allocated address of an nvmap object given its handle. VAddr GetObjectAddress(u32 handle) const; - /// Represents an nvmap object. - struct Object { - enum class Status { Created, Allocated }; - u32 id; - u32 size; - u32 flags; - u32 align; - u8 kind; - VAddr addr; - Status status; - u32 refcount; - u32 dma_map_addr; - }; + std::shared_ptr GetObject(u32 handle) const; - std::shared_ptr GetObject(u32 handle) const { - auto itr = handles.find(handle); - if (itr != handles.end()) { - return itr->second; - } - return {}; - } + enum class HandleParameterType : u32_le { + Size = 1, + Alignment = 2, + Base = 3, + Heap = 4, + Kind = 5, + IsSharedMemMapped = 6 + }; private: /// Id to use for the next handle that is created. @@ -60,9 +57,6 @@ private: /// Id to use for the next object that is created. u32 next_id = 0; - /// Mapping of currently allocated handles to the objects they represent. 
- std::unordered_map> handles; - struct IocCreateParams { // Input u32_le size{}; @@ -83,11 +77,11 @@ private: // Input u32_le handle{}; u32_le heap_mask{}; - u32_le flags{}; + NvCore::NvMap::Handle::Flags flags{}; u32_le align{}; u8 kind{}; INSERT_PADDING_BYTES(7); - u64_le addr{}; + u64_le address{}; }; static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size"); @@ -96,14 +90,14 @@ private: INSERT_PADDING_BYTES(4); u64_le address{}; u32_le size{}; - u32_le flags{}; + NvCore::NvMap::Handle::Flags flags{}; }; static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size"); struct IocParamParams { // Input u32_le handle{}; - u32_le param{}; + HandleParameterType param{}; // Output u32_le result{}; }; @@ -117,14 +111,15 @@ private: }; static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); - u32 CreateObject(u32 size); - NvResult IocCreate(const std::vector& input, std::vector& output); NvResult IocAlloc(const std::vector& input, std::vector& output); NvResult IocGetId(const std::vector& input, std::vector& output); NvResult IocFromId(const std::vector& input, std::vector& output); NvResult IocParam(const std::vector& input, std::vector& output); NvResult IocFree(const std::vector& input, std::vector& output); + + NvCore::Container& container; + NvCore::NvMap& file; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 824c0e2906..f4914d5399 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -138,21 +138,18 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger Module::Module(Core::System& system) : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} { - auto nvmap_dev = std::make_shared(system); - devices["/dev/nvhost-as-gpu"] = std::make_shared(system, nvmap_dev); + devices["/dev/nvhost-as-gpu"] = std::make_shared(system, container); devices["/dev/nvhost-gpu"] = - std::make_shared(system, nvmap_dev, events_interface, container); + std::make_shared(system, events_interface, container); devices["/dev/nvhost-ctrl-gpu"] = std::make_shared(system, events_interface); - devices["/dev/nvmap"] = nvmap_dev; - devices["/dev/nvdisp_disp0"] = std::make_shared(system, nvmap_dev); + devices["/dev/nvmap"] = std::make_shared(system, container); + devices["/dev/nvdisp_disp0"] = std::make_shared(system, container); devices["/dev/nvhost-ctrl"] = std::make_shared(system, events_interface, container); - devices["/dev/nvhost-nvdec"] = - std::make_shared(system, nvmap_dev, container); + devices["/dev/nvhost-nvdec"] = std::make_shared(system, container); devices["/dev/nvhost-nvjpg"] = std::make_shared(system); - devices["/dev/nvhost-vic"] = - std::make_shared(system, nvmap_dev, container); + devices["/dev/nvhost-vic"] = std::make_shared(system, container); } Module::~Module() = default; From af35dbcf633d35450b333eb33334b3dd1bc050a1 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 5 Nov 2021 01:44:11 +0100 Subject: [PATCH 08/68] NVDRV: Fix Open/Close and make sure each device is correctly created. 
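Note: the change below replaces the single, name-keyed map of shared device objects with one builder
function per device node, so that every Open() call constructs a fresh device instance bound to its own
file descriptor instead of handing out the same shared object. What follows is a minimal sketch of that
pattern only, using simplified stand-in types (Device, DeviceFD, INVALID_FD) rather than the real
nvdevice hierarchy:

    #include <functional>
    #include <memory>
    #include <string>
    #include <unordered_map>

    using DeviceFD = int;
    constexpr DeviceFD INVALID_FD = -1;

    struct Device {}; // stand-in for the nvdevice hierarchy

    class Module {
    public:
        Module() {
            // One builder per device node; every Open() gets a fresh instance.
            builders["/dev/nvmap"] = [this](DeviceFD fd) {
                open_files.emplace(fd, std::make_shared<Device>());
            };
        }

        DeviceFD Open(const std::string& device_name) {
            const auto it = builders.find(device_name);
            if (it == builders.end()) {
                return INVALID_FD; // unknown device node
            }
            const DeviceFD fd = next_fd++;
            it->second(fd); // construct and register the per-fd instance
            return fd;
        }

    private:
        DeviceFD next_fd = 1;
        std::unordered_map<DeviceFD, std::shared_ptr<Device>> open_files;
        std::unordered_map<std::string, std::function<void(DeviceFD)>> builders;
    };

With this shape the fd-to-device table becomes the single source of truth for later VerifyFD/Ioctl/Close
dispatch, which is what lets per-instance state (events, channels) be created and torn down per open file.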
--- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 178 ++++++++++++----- .../hle/service/nvdrv/devices/nvhost_ctrl.h | 42 ++++ .../service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 9 +- .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 12 +- .../service/nvdrv/devices/nvhost_nvdec.cpp | 2 + .../hle/service/nvdrv/devices/nvhost_nvdec.h | 2 +- .../nvdrv/devices/nvhost_nvdec_common.cpp | 2 + .../nvdrv/devices/nvhost_nvdec_common.h | 2 +- .../hle/service/nvdrv/devices/nvhost_vic.cpp | 3 + .../hle/service/nvdrv/devices/nvhost_vic.h | 2 +- src/core/hle/service/nvdrv/nvdrv.cpp | 182 +++++++----------- src/core/hle/service/nvdrv/nvdrv.h | 56 ++---- src/core/hle/service/nvflinger/nvflinger.cpp | 7 +- src/core/hle/service/nvflinger/nvflinger.h | 1 + 14 files changed, 296 insertions(+), 204 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 51c40f6202..122c1d5e12 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -3,9 +3,11 @@ // SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 // or any later version Refer to the license.txt file included. +#include #include #include +#include #include "common/assert.h" #include "common/logging/log.h" #include "common/scope_exit.h" @@ -22,8 +24,19 @@ namespace Service::Nvidia::Devices { nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, NvCore::Container& core_) : nvdevice{system_}, events_interface{events_interface_}, core{core_}, - syncpoint_manager{core_.GetSyncpointManager()} {} -nvhost_ctrl::~nvhost_ctrl() = default; + syncpoint_manager{core_.GetSyncpointManager()} { + events_interface.RegisterForSignal(this); +} + +nvhost_ctrl::~nvhost_ctrl() { + events_interface.UnregisterForSignal(this); + for (auto& event : events) { + if (!event.registered) { + continue; + } + events_interface.FreeEvent(event.kevent); + } +} NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, std::vector& output) { @@ -87,7 +100,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector SCOPE_EXIT({ std::memcpy(output.data(), ¶ms, sizeof(params)); if (must_unmark_fail) { - events_interface.fails[event_id] = 0; + events[event_id].fails = 0; } }); @@ -116,12 +129,12 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector auto& gpu = system.GPU(); const u32 target_value = params.fence.value; - auto lock = events_interface.Lock(); + auto lock = NvEventsLock(); u32 slot = [&]() { if (is_allocation) { params.value.raw = 0; - return events_interface.FindFreeEvent(fence_id); + return FindFreeNvEvent(fence_id); } else { return params.value.raw; } @@ -130,7 +143,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector must_unmark_fail = true; const auto check_failing = [&]() { - if (events_interface.fails[slot] > 2) { + if (events[slot].fails > 2) { { auto lk = system.StallProcesses(); gpu.WaitFence(fence_id, target_value); @@ -142,6 +155,10 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector return false; }; + if (slot >= MaxNvEvents) { + return NvResult::BadParameter; + } + if (params.timeout == 0) { if (check_failing()) { return NvResult::Success; @@ -149,17 +166,13 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector return NvResult::Timeout; } - if (slot >= MaxNvEvents) { + auto& event = events[slot]; + + if (!event.registered) { return 
NvResult::BadParameter; } - auto* event = events_interface.events[slot]; - - if (!event) { - return NvResult::BadParameter; - } - - if (events_interface.IsBeingUsed(slot)) { + if (event.IsBeingUsed()) { return NvResult::BadParameter; } @@ -169,9 +182,9 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector params.value.raw = 0; - events_interface.status[slot].store(EventState::Waiting, std::memory_order_release); - events_interface.assigned_syncpt[slot] = fence_id; - events_interface.assigned_value[slot] = target_value; + event.status.store(EventState::Waiting, std::memory_order_release); + event.assigned_syncpt = fence_id; + event.assigned_value = target_value; if (is_allocation) { params.value.syncpoint_id_for_allocation.Assign(static_cast(fence_id)); params.value.event_allocated.Assign(1); @@ -189,15 +202,17 @@ NvResult nvhost_ctrl::FreeEvent(u32 slot) { return NvResult::BadParameter; } - if (!events_interface.registered[slot]) { + auto& event = events[slot]; + + if (!event.registered) { return NvResult::Success; } - if (events_interface.IsBeingUsed(slot)) { + if (event.IsBeingUsed()) { return NvResult::Busy; } - events_interface.Free(slot); + FreeNvEvent(slot); return NvResult::Success; } @@ -210,15 +225,15 @@ NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector& input, std::ve return NvResult::BadParameter; } - auto lock = events_interface.Lock(); + auto lock = NvEventsLock(); - if (events_interface.registered[event_id]) { + if (events[event_id].registered) { const auto result = FreeEvent(event_id); if (result != NvResult::Success) { return result; } } - events_interface.Create(event_id); + CreateNvEvent(event_id); return NvResult::Success; } @@ -229,7 +244,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector& input, const u32 event_id = params.user_event_id & 0x00FF; LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); - auto lock = events_interface.Lock(); + auto lock = NvEventsLock(); return FreeEvent(event_id); } @@ -244,44 +259,121 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector& input, std::v return NvResult::BadParameter; } - auto lock = events_interface.Lock(); + auto lock = NvEventsLock(); - if (events_interface.status[event_id].exchange( - EventState::Cancelling, std::memory_order_acq_rel) == EventState::Waiting) { - system.GPU().CancelSyncptInterrupt(events_interface.assigned_syncpt[event_id], - events_interface.assigned_value[event_id]); - syncpoint_manager.RefreshSyncpoint(events_interface.assigned_syncpt[event_id]); + auto& event = events[event_id]; + if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) == + EventState::Waiting) { + system.GPU().CancelSyncptInterrupt(event.assigned_syncpt, event.assigned_value); + syncpoint_manager.RefreshSyncpoint(event.assigned_syncpt); } - events_interface.fails[event_id]++; - events_interface.status[event_id].store(EventState::Cancelled, std::memory_order_release); - events_interface.events[event_id]->GetWritableEvent().Clear(); + event.fails++; + event.status.store(EventState::Cancelled, std::memory_order_release); + event.kevent->GetWritableEvent().Clear(); return NvResult::Success; } Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) { - const auto event = SyncpointEventValue{.raw = event_id}; + const auto desired_event = SyncpointEventValue{.raw = event_id}; - const bool allocated = event.event_allocated.Value() != 0; - const u32 slot{allocated ? 
event.partial_slot.Value() : static_cast(event.slot)}; + const bool allocated = desired_event.event_allocated.Value() != 0; + const u32 slot{allocated ? desired_event.partial_slot.Value() + : static_cast(desired_event.slot)}; if (slot >= MaxNvEvents) { ASSERT(false); return nullptr; } - const u32 syncpoint_id{allocated ? event.syncpoint_id_for_allocation.Value() - : event.syncpoint_id.Value()}; + const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value() + : desired_event.syncpoint_id.Value()}; - auto lock = events_interface.Lock(); + auto lock = NvEventsLock(); - if (events_interface.registered[slot] && - events_interface.assigned_syncpt[slot] == syncpoint_id) { - ASSERT(events_interface.events[slot]); - return events_interface.events[slot]; + auto& event = events[slot]; + if (event.registered && event.assigned_syncpt == syncpoint_id) { + ASSERT(event.kevent); + return event.kevent; } // Is this possible in hardware? ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id); return nullptr; } +std::unique_lock nvhost_ctrl::NvEventsLock() { + return std::unique_lock(events_mutex); +} + +void nvhost_ctrl::CreateNvEvent(u32 event_id) { + auto& event = events[event_id]; + ASSERT(!event.kevent); + ASSERT(!event.registered); + ASSERT(!event.IsBeingUsed()); + event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id)); + event.status = EventState::Available; + event.registered = true; + const u64 mask = 1ULL << event_id; + event.fails = 0; + events_mask |= mask; + event.assigned_syncpt = 0; +} + +void nvhost_ctrl::FreeNvEvent(u32 event_id) { + auto& event = events[event_id]; + ASSERT(event.kevent); + ASSERT(event.registered); + ASSERT(!event.IsBeingUsed()); + events_interface.FreeEvent(event.kevent); + event.kevent = nullptr; + event.status = EventState::Available; + event.registered = false; + const u64 mask = ~(1ULL << event_id); + events_mask &= mask; +} + +u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) { + u32 slot{MaxNvEvents}; + u32 free_slot{MaxNvEvents}; + for (u32 i = 0; i < MaxNvEvents; i++) { + auto& event = events[i]; + if (event.registered) { + if (!event.IsBeingUsed()) { + slot = i; + if (event.assigned_syncpt == syncpoint_id) { + return slot; + } + } + } else if (free_slot == MaxNvEvents) { + free_slot = i; + } + } + if (free_slot < MaxNvEvents) { + CreateNvEvent(free_slot); + return free_slot; + } + + if (slot < MaxNvEvents) { + return slot; + } + + LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event"); + return 0; +} + +void nvhost_ctrl::SignalNvEvent(u32 syncpoint_id, u32 value) { + const u32 max = MaxNvEvents - std::countl_zero(events_mask); + const u32 min = std::countr_zero(events_mask); + for (u32 i = min; i < max; i++) { + auto& event = events[i]; + if (event.assigned_syncpt != syncpoint_id || event.assigned_value != value) { + continue; + } + if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) == + EventState::Waiting) { + event.kevent->GetWritableEvent().Signal(); + } + event.status.store(EventState::Signalled, std::memory_order_release); + } +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 9fd46ea5f0..f2fc5d0472 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -53,7 +53,49 @@ public: }; static_assert(sizeof(SyncpointEventValue) == sizeof(u32)); + void SignalNvEvent(u32 syncpoint_id, 
u32 value); + private: + struct InternalEvent { + // Mask representing registered events + + // Each kernel event associated to an NV event + Kernel::KEvent* kevent{}; + // The status of the current NVEvent + std::atomic status{}; + + // Tells the NVEvent that it has failed. + u32 fails{}; + // When an NVEvent is waiting on GPU interrupt, this is the sync_point + // associated with it. + u32 assigned_syncpt{}; + // This is the value of the GPU interrupt for which the NVEvent is waiting + // for. + u32 assigned_value{}; + + // Tells if an NVEvent is registered or not + bool registered{}; + + bool IsBeingUsed() { + const auto current_status = status.load(std::memory_order_acquire); + return current_status == EventState::Waiting || + current_status == EventState::Cancelling || + current_status == EventState::Signalling; + } + }; + + std::unique_lock NvEventsLock(); + + void CreateNvEvent(u32 event_id); + + void FreeNvEvent(u32 event_id); + + u32 FindFreeNvEvent(u32 syncpoint_id); + + std::array events{}; + std::mutex events_mutex; + u64 events_mask{}; + struct IocSyncptReadParams { u32_le id{}; u32_le value{}; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index e353408eb9..ced57dfe66 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp @@ -13,10 +13,13 @@ namespace Service::Nvidia::Devices { nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_) : nvdevice{system_}, events_interface{events_interface_} { - error_notifier_event = events_interface.CreateNonCtrlEvent("CtrlGpuErrorNotifier"); - unknown_event = events_interface.CreateNonCtrlEvent("CtrlGpuUknownEvent"); + error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier"); + unknown_event = events_interface.CreateEvent("CtrlGpuUknownEvent"); +} +nvhost_ctrl_gpu::~nvhost_ctrl_gpu() { + events_interface.FreeEvent(error_notifier_event); + events_interface.FreeEvent(unknown_event); } -nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default; NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, std::vector& output) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index e7921ade23..cb54ee5a41 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -30,13 +30,17 @@ nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_, channel_fence.id = syncpoint_manager.AllocateSyncpoint(); channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); sm_exception_breakpoint_int_report_event = - events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointInt"); + events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt"); sm_exception_breakpoint_pause_report_event = - events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointPause"); - error_notifier_event = events_interface.CreateNonCtrlEvent("GpuChannelErrorNotifier"); + events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause"); + error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier"); } -nvhost_gpu::~nvhost_gpu() = default; +nvhost_gpu::~nvhost_gpu() { + events_interface.FreeEvent(sm_exception_breakpoint_int_report_event); + events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event); + events_interface.FreeEvent(error_notifier_event); +} 
NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, std::vector& output) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index aa1a00832a..00947ea199 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -10,6 +10,8 @@ namespace Service::Nvidia::Devices { +u32 nvhost_nvdec::next_id{}; + nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core) : nvhost_nvdec_common{system_, core} {} nvhost_nvdec::~nvhost_nvdec() = default; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index fef4b32167..3261ce1d40 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h @@ -24,7 +24,7 @@ public: void OnClose(DeviceFD fd) override; private: - u32 next_id{}; + static u32 next_id; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index e76c9e5eda..77e6a1cd64 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -45,6 +45,8 @@ std::size_t WriteVectors(std::vector& dst, const std::vector& src, std::s } } // Anonymous namespace +std::unordered_map nvhost_nvdec_common::fd_to_id{}; + nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_) : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} {} diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 74231d5c5c..53029af6ad 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -115,7 +115,7 @@ protected: Kernel::KEvent* QueryEvent(u32 event_id) override; - std::unordered_map fd_to_id{}; + static std::unordered_map fd_to_id; s32_le nvmap_fd{}; u32_le submit_timeout{}; NvCore::Container& core; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index 358e89aa8e..c89ff6b27d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -8,6 +8,9 @@ #include "video_core/renderer_base.h" namespace Service::Nvidia::Devices { + +u32 nvhost_vic::next_id{}; + nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core) : nvhost_nvdec_common{system_, core} {} diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index 252b1e6f2a..59e23b41ef 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -23,6 +23,6 @@ public: void OnClose(DeviceFD fd) override; private: - u32 next_id{}; + static u32 next_id; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index f4914d5399..ff8c7c13cc 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -3,7 +3,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 // or any later version Refer to the license.txt file included. 
-#include #include #include @@ -30,101 +29,39 @@ namespace Service::Nvidia { -EventInterface::EventInterface(Module& module_) : module{module_} { - events_mask = 0; - for (u32 i = 0; i < MaxNvEvents; i++) { - status[i] = EventState::Available; - events[i] = nullptr; - registered[i] = false; +EventInterface::EventInterface(Module& module_) : module{module_} {} + +EventInterface::~EventInterface() = default; + +void EventInterface::RegisterForSignal(Devices::nvhost_ctrl* device) { + std::unique_lock lk(guard); + on_signal.push_back(device); +} + +void EventInterface::UnregisterForSignal(Devices::nvhost_ctrl* device) { + std::unique_lock lk(guard); + auto it = std::find(on_signal.begin(), on_signal.end(), device); + if (it != on_signal.end()) { + on_signal.erase(it); } } -EventInterface::~EventInterface() { - auto lk = Lock(); - for (u32 i = 0; i < MaxNvEvents; i++) { - if (registered[i]) { - module.service_context.CloseEvent(events[i]); - events[i] = nullptr; - registered[i] = false; - } - } - for (auto* event : basic_events) { - module.service_context.CloseEvent(event); +void EventInterface::Signal(u32 syncpoint_id, u32 value) { + std::unique_lock lk(guard); + for (auto* device : on_signal) { + device->SignalNvEvent(syncpoint_id, value); } } -std::unique_lock EventInterface::Lock() { - return std::unique_lock(events_mutex); -} - -void EventInterface::Signal(u32 event_id) { - if (status[event_id].exchange(EventState::Signalling, std::memory_order_acq_rel) == - EventState::Waiting) { - events[event_id]->GetWritableEvent().Signal(); - } - status[event_id].store(EventState::Signalled, std::memory_order_release); -} - -void EventInterface::Create(u32 event_id) { - ASSERT(!events[event_id]); - ASSERT(!registered[event_id]); - ASSERT(!IsBeingUsed(event_id)); - events[event_id] = - module.service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", event_id)); - status[event_id] = EventState::Available; - registered[event_id] = true; - const u64 mask = 1ULL << event_id; - fails[event_id] = 0; - events_mask |= mask; - assigned_syncpt[event_id] = 0; -} - -void EventInterface::Free(u32 event_id) { - ASSERT(events[event_id]); - ASSERT(registered[event_id]); - ASSERT(!IsBeingUsed(event_id)); - module.service_context.CloseEvent(events[event_id]); - events[event_id] = nullptr; - status[event_id] = EventState::Available; - registered[event_id] = false; - const u64 mask = ~(1ULL << event_id); - events_mask &= mask; -} - -u32 EventInterface::FindFreeEvent(u32 syncpoint_id) { - u32 slot{MaxNvEvents}; - u32 free_slot{MaxNvEvents}; - for (u32 i = 0; i < MaxNvEvents; i++) { - if (registered[i]) { - if (!IsBeingUsed(i)) { - slot = i; - if (assigned_syncpt[i] == syncpoint_id) { - return slot; - } - } - } else if (free_slot == MaxNvEvents) { - free_slot = i; - } - } - if (free_slot < MaxNvEvents) { - Create(free_slot); - return free_slot; - } - - if (slot < MaxNvEvents) { - return slot; - } - - LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event"); - return 0; -} - -Kernel::KEvent* EventInterface::CreateNonCtrlEvent(std::string name) { +Kernel::KEvent* EventInterface::CreateEvent(std::string name) { Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name)); - basic_events.push_back(new_event); return new_event; } +void EventInterface::FreeEvent(Kernel::KEvent* event) { + module.service_context.CloseEvent(event); +} + void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, Core::System& system) { auto module_ = std::make_shared(system); @@ -138,18 
+75,50 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger Module::Module(Core::System& system) : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} { - devices["/dev/nvhost-as-gpu"] = std::make_shared(system, container); - devices["/dev/nvhost-gpu"] = - std::make_shared(system, events_interface, container); - devices["/dev/nvhost-ctrl-gpu"] = - std::make_shared(system, events_interface); - devices["/dev/nvmap"] = std::make_shared(system, container); - devices["/dev/nvdisp_disp0"] = std::make_shared(system, container); - devices["/dev/nvhost-ctrl"] = - std::make_shared(system, events_interface, container); - devices["/dev/nvhost-nvdec"] = std::make_shared(system, container); - devices["/dev/nvhost-nvjpg"] = std::make_shared(system); - devices["/dev/nvhost-vic"] = std::make_shared(system, container); + builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, events_interface, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, events_interface); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvmap"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, events_interface, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = std::make_shared(system); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) { + std::shared_ptr device = + std::make_shared(system, container); + return open_files.emplace(fd, device).first; + }; } Module::~Module() = default; @@ -169,18 +138,18 @@ NvResult Module::VerifyFD(DeviceFD fd) const { } DeviceFD Module::Open(const std::string& device_name) { - if (devices.find(device_name) == devices.end()) { + auto it = builders.find(device_name); + if (it == builders.end()) { LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name); return INVALID_NVDRV_FD; } - auto device = devices[device_name]; const DeviceFD fd = next_fd++; + auto& builder = it->second; + auto device = builder(fd)->second; device->OnOpen(fd); - open_files[fd] = std::move(device); - return fd; } @@ -256,14 +225,7 @@ NvResult Module::Close(DeviceFD fd) { } void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { - const u32 max = MaxNvEvents - std::countl_zero(events_interface.events_mask); - const u32 min = std::countr_zero(events_interface.events_mask); - for (u32 i = min; i < max; i++) { - if (events_interface.assigned_syncpt[i] == syncpoint_id && - events_interface.assigned_value[i] == value) 
{ - events_interface.Signal(i); - } - } + events_interface.Signal(syncpoint_id, value); } NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) { diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index 96adf2ffbc..3983794bb7 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -5,6 +5,7 @@ #pragma once +#include #include #include #include @@ -38,7 +39,8 @@ class SyncpointManager; namespace Devices { class nvdevice; -} +class nvhost_ctrl; +} // namespace Devices class Module; @@ -47,47 +49,19 @@ public: EventInterface(Module& module_); ~EventInterface(); - // Mask representing registered events - u64 events_mask{}; - // Each kernel event associated to an NV event - std::array events{}; - // The status of the current NVEvent - std::array, MaxNvEvents> status{}; - // Tells if an NVEvent is registered or not - std::array registered{}; - // Tells the NVEvent that it has failed. - std::array fails{}; - // When an NVEvent is waiting on GPU interrupt, this is the sync_point - // associated with it. - std::array assigned_syncpt{}; - // This is the value of the GPU interrupt for which the NVEvent is waiting - // for. - std::array assigned_value{}; - // Constant to denote an unasigned syncpoint. - static constexpr u32 unassigned_syncpt = 0xFFFFFFFF; + void RegisterForSignal(Devices::nvhost_ctrl*); + void UnregisterForSignal(Devices::nvhost_ctrl*); - bool IsBeingUsed(u32 event_id) { - const auto current_status = status[event_id].load(std::memory_order_acquire); - return current_status == EventState::Waiting || current_status == EventState::Cancelling || - current_status == EventState::Signalling; - } + void Signal(u32 syncpoint_id, u32 value); - std::unique_lock Lock(); + Kernel::KEvent* CreateEvent(std::string name); - void Signal(u32 event_id); - - void Create(u32 event_id); - - void Free(u32 event_id); - - u32 FindFreeEvent(u32 syncpoint_id); - - Kernel::KEvent* CreateNonCtrlEvent(std::string name); + void FreeEvent(Kernel::KEvent* event); private: - std::mutex events_mutex; Module& module; - std::vector basic_events; + std::mutex guard; + std::list on_signal; }; class Module final { @@ -97,9 +71,9 @@ public: /// Returns a pointer to one of the available devices, identified by its name. template - std::shared_ptr GetDevice(const std::string& name) { - auto itr = devices.find(name); - if (itr == devices.end()) + std::shared_ptr GetDevice(DeviceFD fd) { + auto itr = open_files.find(fd); + if (itr == open_files.end()) return nullptr; return std::static_pointer_cast(itr->second); } @@ -132,8 +106,9 @@ private: /// Id to use for the next open file descriptor. DeviceFD next_fd = 1; + using FilesContainerType = std::unordered_map>; /// Mapping of file descriptors to the devices they reference. - std::unordered_map> open_files; + FilesContainerType open_files; /// Mapping of device node names to their implementation. std::unordered_map> devices; @@ -147,6 +122,7 @@ private: void CreateEvent(u32 event_id); void FreeEvent(u32 event_id); + std::unordered_map> builders; }; /// Registers all NVDRV services with the specified service manager. 
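The EventInterface above shrinks to a registry: nvhost_ctrl instances register themselves, and
SignalSyncpt fans each (syncpoint, value) pair out to every registered device, which then signals its
own events. A minimal sketch of that fan-out, with a simplified Listener standing in for
Devices::nvhost_ctrl and plain unsigned parameters in place of the real u32 typedefs:

    #include <algorithm>
    #include <list>
    #include <mutex>

    struct Listener {
        void SignalNvEvent(unsigned syncpoint_id, unsigned value) {
            // A real device would signal any NvEvent waiting on this fence here.
        }
    };

    class EventInterface {
    public:
        void RegisterForSignal(Listener* device) {
            std::unique_lock lk(guard);
            on_signal.push_back(device);
        }
        void UnregisterForSignal(Listener* device) {
            std::unique_lock lk(guard);
            const auto it = std::find(on_signal.begin(), on_signal.end(), device);
            if (it != on_signal.end()) {
                on_signal.erase(it);
            }
        }
        void Signal(unsigned syncpoint_id, unsigned value) {
            std::unique_lock lk(guard);
            for (auto* device : on_signal) {
                device->SignalNvEvent(syncpoint_id, value);
            }
        }

    private:
        std::mutex guard;
        std::list<Listener*> on_signal;
    };

The mutex-guarded list keeps registration, unregistration, and GPU-thread signaling from racing; the
per-slot event bookkeeping itself now lives inside each nvhost_ctrl instance rather than in the module.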
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 4246e5e259..8c3013f833 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -105,10 +105,15 @@ NVFlinger::~NVFlinger() { display.GetLayer(layer).Core().NotifyShutdown(); } } + + if (nvdrv) { + nvdrv->Close(disp_fd); + } } void NVFlinger::SetNVDrvInstance(std::shared_ptr instance) { nvdrv = std::move(instance); + disp_fd = nvdrv->Open("/dev/nvdisp_disp0"); } std::optional NVFlinger::OpenDisplay(std::string_view name) { @@ -276,7 +281,7 @@ void NVFlinger::Compose() { // Now send the buffer to the GPU for drawing. // TODO(Subv): Support more than just disp0. The display device selection is probably based // on which display we're drawing (Default, Internal, External, etc) - auto nvdisp = nvdrv->GetDevice("/dev/nvdisp_disp0"); + auto nvdisp = nvdrv->GetDevice(disp_fd); ASSERT(nvdisp); Common::Rectangle crop_rect{ diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 3bbe5d92b9..b62615de27 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h @@ -116,6 +116,7 @@ private: void SplitVSync(std::stop_token stop_token); std::shared_ptr nvdrv; + s32 disp_fd; std::list displays; From 68d9504a046f40ce4ada54b0eba7228e659970de Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 5 Nov 2021 02:11:46 +0100 Subject: [PATCH 09/68] NVMAP: Fix the Free return parameters. --- src/core/hle/service/nvdrv/core/nvmap.cpp | 4 ++- src/core/hle/service/nvdrv/core/nvmap.h | 1 + src/core/hle/service/nvdrv/devices/nvmap.cpp | 28 ++++++++++---------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 281381cc41..1126daeb5e 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -11,7 +11,9 @@ using Core::Memory::YUZU_PAGESIZE; namespace Service::Nvidia::NvCore { -NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) {} +NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) { + flags.raw = 0; +} NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { std::scoped_lock lock(mutex); diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 994c70e6fa..5e6c73589a 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -44,6 +44,7 @@ public: std::optional>::iterator> unmap_queue_entry{}; union Flags { + u32 raw; BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was //!< allocated with a fixed address diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 2aee68f5cb..57f58055d3 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -82,7 +82,7 @@ std::shared_ptr nvmap::GetObject(u32 handle) const { NvResult nvmap::IocCreate(const std::vector& input, std::vector& output) { IocCreateParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "called, size=0x{:08X}", params.size); + LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); 
std::shared_ptr handle_description{}; auto result = @@ -102,7 +102,7 @@ NvResult nvmap::IocCreate(const std::vector& input, std::vector& output) NvResult nvmap::IocAlloc(const std::vector& input, std::vector& output) { IocAllocParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "called, addr={:X}", params.address); + LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); if (!params.handle) { LOG_CRITICAL(Service_NVDRV, "Handle is 0"); @@ -144,7 +144,7 @@ NvResult nvmap::IocGetId(const std::vector& input, std::vector& output) IocGetIdParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "called"); + LOG_DEBUG(Service_NVDRV, "called"); // See the comment in FromId for extra info on this function if (!params.handle) { @@ -168,26 +168,26 @@ NvResult nvmap::IocFromId(const std::vector& input, std::vector& output) IocFromIdParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "called, id:{}"); + LOG_DEBUG(Service_NVDRV, "called, id:{}"); // Handles and IDs are always the same value in nvmap however IDs can be used globally given the // right permissions. // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and // so this function just does simple validation and passes through the handle id. if (!params.id) { - LOG_CRITICAL(Service_NVDRV, "Error!"); + LOG_CRITICAL(Service_NVDRV, "Zero Id is invalid!"); return NvResult::BadValue; } auto handle_description{file.GetHandle(params.id)}; if (!handle_description) { - LOG_CRITICAL(Service_NVDRV, "Error!"); + LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); return NvResult::BadValue; } auto result = handle_description->Duplicate(false); if (result != NvResult::Success) { - LOG_CRITICAL(Service_NVDRV, "Error!"); + LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!"); return result; } params.handle = handle_description->id; @@ -201,16 +201,16 @@ NvResult nvmap::IocParam(const std::vector& input, std::vector& output) IocParamParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "called type={}", params.param); + LOG_DEBUG(Service_NVDRV, "called type={}", params.param); if (!params.handle) { - LOG_CRITICAL(Service_NVDRV, "Error!"); + LOG_CRITICAL(Service_NVDRV, "Invalid handle!"); return NvResult::BadValue; } auto handle_description{file.GetHandle(params.handle)}; if (!handle_description) { - LOG_CRITICAL(Service_NVDRV, "Error!"); + LOG_CRITICAL(Service_NVDRV, "Not registered handle!"); return NvResult::BadValue; } @@ -228,7 +228,7 @@ NvResult nvmap::IocParam(const std::vector& input, std::vector& output) if (handle_description->allocated) params.result = 0x40000000; else - params.result = 0x40000000; + params.result = 0; break; case HandleParameterType::Kind: params.result = handle_description->kind; @@ -248,7 +248,7 @@ NvResult nvmap::IocFree(const std::vector& input, std::vector& output) { IocFreeParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "called"); + LOG_DEBUG(Service_NVDRV, "called"); if (!params.handle) { LOG_CRITICAL(Service_NVDRV, "Handle null freed?"); @@ -258,10 +258,10 @@ NvResult nvmap::IocFree(const std::vector& input, std::vector& output) { if (auto freeInfo{file.FreeHandle(params.handle, false)}) { params.address = freeInfo->address; params.size = static_cast(freeInfo->size); - params.flags = NvCore::NvMap::Handle::Flags{.map_uncached = freeInfo->was_uncached}; + params.flags.raw = 0; + 
params.flags.map_uncached = freeInfo->was_uncached; } else { // This is possible when there's internel dups or other duplicates. - LOG_CRITICAL(Service_NVDRV, "Not freed"); } std::memcpy(output.data(), ¶ms, sizeof(params)); From ad038609c877ad54dde7b9592f0584deb56a27c5 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 5 Nov 2021 02:57:14 +0100 Subject: [PATCH 10/68] NVDRV: Fix clearing when destroying. --- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 13 +++++++------ src/core/hle/service/nvdrv/nvdrv.cpp | 7 ++----- src/core/hle/service/nvdrv/nvdrv.h | 3 --- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 122c1d5e12..abde2a6d35 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -24,12 +24,9 @@ namespace Service::Nvidia::Devices { nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, NvCore::Container& core_) : nvdevice{system_}, events_interface{events_interface_}, core{core_}, - syncpoint_manager{core_.GetSyncpointManager()} { - events_interface.RegisterForSignal(this); -} + syncpoint_manager{core_.GetSyncpointManager()} {} nvhost_ctrl::~nvhost_ctrl() { - events_interface.UnregisterForSignal(this); for (auto& event : events) { if (!event.registered) { continue; @@ -77,8 +74,12 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector& return NvResult::NotImplemented; } -void nvhost_ctrl::OnOpen(DeviceFD fd) {} -void nvhost_ctrl::OnClose(DeviceFD fd) {} +void nvhost_ctrl::OnOpen(DeviceFD fd) { + events_interface.RegisterForSignal(this); +} +void nvhost_ctrl::OnClose(DeviceFD fd) { + events_interface.UnregisterForSignal(this); +} NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector& input, std::vector& output) { IocGetConfigParams params{}; diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index ff8c7c13cc..208de0b756 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -29,7 +29,7 @@ namespace Service::Nvidia { -EventInterface::EventInterface(Module& module_) : module{module_} {} +EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_signal{} {} EventInterface::~EventInterface() = default; @@ -40,10 +40,7 @@ void EventInterface::RegisterForSignal(Devices::nvhost_ctrl* device) { void EventInterface::UnregisterForSignal(Devices::nvhost_ctrl* device) { std::unique_lock lk(guard); - auto it = std::find(on_signal.begin(), on_signal.end(), device); - if (it != on_signal.end()) { - on_signal.erase(it); - } + on_signal.remove(device); } void EventInterface::Signal(u32 syncpoint_id, u32 value) { diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index 3983794bb7..1fe98cf32c 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -110,9 +110,6 @@ private: /// Mapping of file descriptors to the devices they reference. FilesContainerType open_files; - /// Mapping of device node names to their implementation. - std::unordered_map> devices; - KernelHelpers::ServiceContext service_context; EventInterface events_interface; From c77b8df12efab9f36f007ff3b309d43588a71260 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 5 Nov 2021 03:10:20 +0100 Subject: [PATCH 11/68] NVASGPU: Fix Remap. 
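Remap entries encode offsets and lengths in 64 KiB big-page units, and a zero nvmap handle is now
treated as a request to unmap that range rather than being passed through as a mapping. A minimal
sketch of the added rule, with hypothetical Unmap/MapHandle helpers standing in for the memory-manager
calls and a trimmed-down entry struct (the real IoctlRemapEntry carries more fields):

    #include <cstdint>

    struct IoctlRemapEntry {
        std::uint32_t offset;       // in 64 KiB big pages
        std::uint32_t nvmap_handle; // 0 now means "unmap this range"
        std::uint32_t pages;        // length in 64 KiB big pages
    };

    void Unmap(std::uint64_t gpu_addr, std::uint64_t size) {}
    void MapHandle(std::uint32_t handle, std::uint64_t gpu_addr, std::uint64_t size) {}

    void HandleRemapEntry(const IoctlRemapEntry& entry) {
        // Convert big-page units to byte addresses, as the patch does.
        const auto offset = static_cast<std::uint64_t>(entry.offset) << 0x10;
        const auto size = static_cast<std::uint64_t>(entry.pages) << 0x10;
        if (entry.nvmap_handle == 0) {
            // Null handle: the entry is an unmap request, not a mapping.
            Unmap(offset, size);
            return;
        }
        MapHandle(entry.nvmap_handle, offset, size);
    }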
--- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 9283d6aec9..b1c683511d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -144,6 +144,14 @@ NvResult nvhost_as_gpu::Remap(const std::vector& input, std::vector& out LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", entry.offset, entry.nvmap_handle, entry.pages); + if (entry.nvmap_handle == 0) { + // If nvmap handle is null, we should unmap instead. + const auto offset{static_cast(entry.offset) << 0x10}; + const auto size{static_cast(entry.pages) << 0x10}; + system.GPU().MemoryManager().Unmap(offset, size); + continue; + } + const auto object{nvmap.GetHandle(entry.nvmap_handle)}; if (!object) { LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle); From 139ea93512aeead8a4aee3910a3de86eb109a838 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 5 Nov 2021 15:52:31 +0100 Subject: [PATCH 12/68] VideoCore: implement channels on gpu caches. --- .../service/nvdrv/devices/nvhost_as_gpu.cpp | 34 +- .../hle/service/nvdrv/devices/nvhost_as_gpu.h | 14 +- .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 34 +- .../hle/service/nvdrv/devices/nvhost_gpu.h | 9 + src/core/hle/service/nvdrv/devices/nvmap.cpp | 2 +- src/core/hle/service/nvdrv/nvdrv.cpp | 2 +- src/video_core/CMakeLists.txt | 8 + src/video_core/buffer_cache/buffer_cache.h | 103 ++-- src/video_core/control/channel_state.cpp | 44 ++ src/video_core/control/channel_state.h | 69 +++ .../control/channel_state_cache.cpp | 5 + src/video_core/control/channel_state_cache.h | 68 +++ .../control/channel_state_cache.inc | 64 +++ src/video_core/control/scheduler.cpp | 31 ++ src/video_core/control/scheduler.h | 38 ++ src/video_core/dma_pusher.cpp | 23 +- src/video_core/dma_pusher.h | 13 +- src/video_core/engines/puller.cpp | 297 +++++++++++ src/video_core/engines/puller.h | 179 +++++++ src/video_core/fence_manager.h | 28 +- src/video_core/gpu.cpp | 482 ++++-------------- src/video_core/gpu.h | 55 +- src/video_core/gpu_thread.cpp | 14 +- src/video_core/gpu_thread.h | 12 +- src/video_core/memory_manager.cpp | 5 - src/video_core/query_cache.h | 18 +- src/video_core/rasterizer_interface.h | 9 + .../renderer_opengl/gl_fence_manager.cpp | 4 +- .../renderer_opengl/gl_fence_manager.h | 4 +- .../renderer_opengl/gl_query_cache.cpp | 5 +- .../renderer_opengl/gl_query_cache.h | 3 +- .../renderer_opengl/gl_rasterizer.cpp | 14 +- .../renderer_opengl/gl_shader_cache.cpp | 39 +- .../renderer_opengl/gl_shader_cache.h | 9 +- .../renderer_vulkan/renderer_vulkan.cpp | 17 +- .../renderer_vulkan/vk_fence_manager.cpp | 4 +- .../renderer_vulkan/vk_fence_manager.h | 4 +- .../renderer_vulkan/vk_pipeline_cache.cpp | 28 +- .../renderer_vulkan/vk_pipeline_cache.h | 6 +- .../renderer_vulkan/vk_query_cache.cpp | 7 +- .../renderer_vulkan/vk_query_cache.h | 5 +- .../renderer_vulkan/vk_rasterizer.cpp | 87 +++- .../renderer_vulkan/vk_rasterizer.h | 20 +- .../renderer_vulkan/vk_state_tracker.cpp | 13 +- .../renderer_vulkan/vk_state_tracker.h | 22 +- src/video_core/shader_cache.cpp | 33 +- src/video_core/shader_cache.h | 15 +- src/video_core/texture_cache/image_base.h | 3 + src/video_core/texture_cache/texture_cache.h | 209 +++++--- .../texture_cache/texture_cache_base.h | 75 ++- 50 files changed, 1469 insertions(+), 817 deletions(-) 
create mode 100644 src/video_core/control/channel_state.cpp create mode 100644 src/video_core/control/channel_state.h create mode 100644 src/video_core/control/channel_state_cache.cpp create mode 100644 src/video_core/control/channel_state_cache.h create mode 100644 src/video_core/control/channel_state_cache.inc create mode 100644 src/video_core/control/scheduler.cpp create mode 100644 src/video_core/control/scheduler.h create mode 100644 src/video_core/engines/puller.cpp create mode 100644 src/video_core/engines/puller.h diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index b1c683511d..9946ce6246 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -10,13 +10,17 @@ #include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" +#include "core/hle/service/nvdrv/devices/nvhost_gpu.h" +#include "core/hle/service/nvdrv/nvdrv.h" +#include "video_core/control/channel_state.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" namespace Service::Nvidia::Devices { -nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, NvCore::Container& core) - : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {} +nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core) + : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, + gmmu{std::make_shared(system)} {} nvhost_as_gpu::~nvhost_as_gpu() = default; NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -102,9 +106,9 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector& input, std::vector< const auto size{static_cast(params.pages) * static_cast(params.page_size)}; if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) { - params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size); + params.offset = *(gmmu->AllocateFixed(params.offset, size)); } else { - params.offset = system.GPU().MemoryManager().Allocate(size, params.align); + params.offset = gmmu->Allocate(size, params.align); } auto result = NvResult::Success; @@ -124,8 +128,7 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector& input, std::vector& LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, params.pages, params.page_size); - system.GPU().MemoryManager().Unmap(params.offset, - static_cast(params.pages) * params.page_size); + gmmu->Unmap(params.offset, static_cast(params.pages) * params.page_size); std::memcpy(output.data(), ¶ms, output.size()); return NvResult::Success; @@ -148,7 +151,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector& input, std::vector& out // If nvmap handle is null, we should unmap instead. 
const auto offset{static_cast(entry.offset) << 0x10}; const auto size{static_cast(entry.pages) << 0x10}; - system.GPU().MemoryManager().Unmap(offset, size); + gmmu->Unmap(offset, size); continue; } @@ -162,8 +165,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector& input, std::vector& out const auto offset{static_cast(entry.offset) << 0x10}; const auto size{static_cast(entry.pages) << 0x10}; const auto map_offset{static_cast(entry.map_offset) << 0x10}; - const auto addr{ - system.GPU().MemoryManager().Map(object->address + map_offset, offset, size)}; + const auto addr{gmmu->Map(object->address + map_offset, offset, size)}; if (!addr) { LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); @@ -186,13 +188,12 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vector(buffer_map->CpuAddr() + params.buffer_offset)}; const auto gpu_addr{static_cast(params.offset + params.buffer_offset)}; - if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) { + if (!gmmu->Map(cpu_addr, gpu_addr, params.mapping_size)) { LOG_CRITICAL(Service_NVDRV, "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, " "mapping_size = {}, offset={}", @@ -238,9 +239,9 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectorMapAllocate(physical_address, size, page_size); } else { - params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size); + params.offset = gmmu->Map(physical_address, params.offset, size); } auto result = NvResult::Success; @@ -262,7 +263,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector& input, std::vectorUnmap(params.offset, *size); } else { LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset); } @@ -274,9 +275,10 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector& input, std::vector& input, std::vector& output) { IoctlBindChannel params{}; std::memcpy(¶ms, input.data(), input.size()); - LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd); + LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd); - channel = params.fd; + auto gpu_channel_device = module.GetDevice(params.fd); + gpu_channel_device->channel_state->memory_manager = gmmu; return NvResult::Success; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 67d2f1e877..4ecae3caf6 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -13,6 +13,14 @@ #include "common/swap.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" +namespace Tegra { +class MemoryManager; +} // namespace Tegra + +namespace Service::Nvidia { +class Module; +} + namespace Service::Nvidia::NvCore { class Container; class NvMap; @@ -34,7 +42,7 @@ DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags); class nvhost_as_gpu final : public nvdevice { public: - explicit nvhost_as_gpu(Core::System& system_, NvCore::Container& core); + explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core); ~nvhost_as_gpu() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -187,9 +195,13 @@ private: void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); std::optional RemoveBufferMap(GPUVAddr gpu_addr); + Module& module; + NvCore::Container& container; NvCore::NvMap& nvmap; + std::shared_ptr gmmu; + // This is expected to be ordered, therefore we must use a map, not unordered_map std::map buffer_mappings; }; diff --git 
a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index cb54ee5a41..38d45cb79a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -11,12 +11,14 @@ #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" #include "core/hle/service/nvdrv/nvdrv.h" #include "core/memory.h" +#include "video_core/control/channel_state.h" +#include "video_core/engines/puller.h" #include "video_core/gpu.h" namespace Service::Nvidia::Devices { namespace { -Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) { - Tegra::GPU::FenceAction result{}; +Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) { + Tegra::Engines::Puller::FenceAction result{}; result.op.Assign(op); result.syncpoint_id.Assign(syncpoint_id); return {result.raw}; @@ -26,7 +28,8 @@ Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoi nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_, NvCore::Container& core_) : nvdevice{system_}, events_interface{events_interface_}, core{core_}, - syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} { + syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()}, + channel_state{system.GPU().AllocateChannel()} { channel_fence.id = syncpoint_manager.AllocateSyncpoint(); channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); sm_exception_breakpoint_int_report_event = @@ -180,6 +183,12 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector& input, std::vectorinitiated) { + LOG_CRITICAL(Service_NVDRV, "Already allocated!"); + return NvResult::AlreadyAllocated; + } + + system.GPU().InitChannel(*channel_state); channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id); params.fence_out = channel_fence; @@ -206,7 +215,7 @@ static std::vector BuildWaitCommandList(NvFence fence) { {fence.value}, Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, Tegra::SubmissionMode::Increasing), - BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id), + BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id), }; } @@ -220,7 +229,8 @@ static std::vector BuildIncrementCommandList(NvFence fence for (u32 count = 0; count < add_increment; ++count) { result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, Tegra::SubmissionMode::Increasing)); - result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id)); + result.emplace_back( + BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id)); } return result; @@ -247,11 +257,13 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector auto& gpu = system.GPU(); + const auto bind_id = channel_state->bind_id; + params.fence_out.id = channel_fence.id; if (params.flags.add_wait.Value() && !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) { - gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)}); + gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence_out)}); } if (params.flags.add_increment.Value() || params.flags.increment.Value()) { @@ -262,15 +274,15 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id); } - gpu.PushGPUEntries(std::move(entries)); + 
gpu.PushGPUEntries(bind_id, std::move(entries)); if (params.flags.add_increment.Value()) { if (params.flags.suppress_wfi) { - gpu.PushGPUEntries(Tegra::CommandList{ - BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())}); + gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildIncrementCommandList( + params.fence_out, params.AddIncrementValue())}); } else { - gpu.PushGPUEntries(Tegra::CommandList{ - BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())}); + gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildIncrementWithWfiCommandList( + params.fence_out, params.AddIncrementValue())}); } } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 440c0c42d5..3a65ed06db 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -13,6 +13,12 @@ #include "core/hle/service/nvdrv/nvdata.h" #include "video_core/dma_pusher.h" +namespace Tegra { +namespace Control { +struct ChannelState; +} +} // namespace Tegra + namespace Service::Nvidia { namespace NvCore { @@ -26,6 +32,7 @@ class EventInterface; namespace Service::Nvidia::Devices { +class nvhost_as_gpu; class nvmap; class nvhost_gpu final : public nvdevice { public: @@ -46,6 +53,7 @@ public: Kernel::KEvent* QueryEvent(u32 event_id) override; private: + friend class nvhost_as_gpu; enum class CtxObjects : u32_le { Ctx2D = 0x902D, Ctx3D = 0xB197, @@ -204,6 +212,7 @@ private: NvCore::Container& core; NvCore::SyncpointManager& syncpoint_manager; NvCore::NvMap& nvmap; + std::shared_ptr channel_state; NvFence channel_fence; // Events diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 57f58055d3..279997e813 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -168,7 +168,7 @@ NvResult nvmap::IocFromId(const std::vector& input, std::vector& output) IocFromIdParams params; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "called, id:{}"); + LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id); // Handles and IDs are always the same value in nvmap however IDs can be used globally given the // right permissions. 
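The nvhost_gpu changes above make every GPFIFO submission channel-aware: the device allocates a ChannelState at construction, AllocGPFIFOEx2 initializes it exactly once, and SubmitGPFIFOImpl tags each PushGPUEntries call with the channel's bind_id. Below is a minimal C++ sketch of that flow; the types and signatures are simplified stand-ins, not the patch's real interfaces.

#include <cstdint>
#include <memory>
#include <utility>

struct CommandList {};

struct ChannelState {
    std::int32_t bind_id;   // identity the scheduler keys on
    bool initiated = false; // set once the GPU has built the channel's engines
};

class Gpu {
public:
    std::shared_ptr<ChannelState> AllocateChannel() {
        return std::make_shared<ChannelState>(ChannelState{next_id++});
    }
    void InitChannel(ChannelState& state) {
        state.initiated = true; // the real code also builds engines and binds the rasterizer
    }
    void PushGPUEntries(std::int32_t bind_id, CommandList&& entries) {
        // Forwarded to the per-channel pusher by the scheduler in the real code.
        static_cast<void>(bind_id);
        static_cast<void>(entries);
    }

private:
    std::int32_t next_id = 0;
};

// Usage, mirroring AllocGPFIFOEx2 followed by SubmitGPFIFOImpl:
void Submit(Gpu& gpu, ChannelState& channel, CommandList entries) {
    if (!channel.initiated) {
        gpu.InitChannel(channel); // AllocGPFIFOEx2 rejects a second initialization
    }
    gpu.PushGPUEntries(channel.bind_id, std::move(entries));
}

Tagging submissions with bind_id is what later lets the scheduler route each command list to the right per-channel DmaPusher instead of a single global one.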
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 208de0b756..b39a4c6db5 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -74,7 +74,7 @@ Module::Module(Core::System& system) : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} { builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) { std::shared_ptr device = - std::make_shared(system, container); + std::make_shared(system, *this, container); return open_files.emplace(fd, device).first; }; builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) { diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 5b38083516..e216c51a21 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -35,6 +35,12 @@ add_library(video_core STATIC command_classes/vic.h compatible_formats.cpp compatible_formats.h + control/channel_state.cpp + control/channel_state.h + control/channel_state_cache.cpp + control/channel_state_cache.h + control/scheduler.cpp + control/scheduler.h delayed_destruction_ring.h dirty_flags.cpp dirty_flags.h @@ -54,6 +60,8 @@ add_library(video_core STATIC engines/maxwell_3d.h engines/maxwell_dma.cpp engines/maxwell_dma.h + engines/puller.cpp + engines/puller.h framebuffer_config.h macro/macro.cpp macro/macro.h diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index f015dae566..6b6764d721 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -23,6 +22,7 @@ #include "common/settings.h" #include "core/memory.h" #include "video_core/buffer_cache/buffer_base.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/delayed_destruction_ring.h" #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" @@ -56,7 +56,7 @@ using UniformBufferSizes = std::array; template -class BufferCache { +class BufferCache : public VideoCommon::ChannelSetupCaches { // Page size for caching purposes. // This is unrelated to the CPU page size and it can be changed as it seems optimal. @@ -116,10 +116,7 @@ public: static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast(4_KiB); explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, - Runtime& runtime_); + Core::Memory::Memory& cpu_memory_, Runtime& runtime_); void TickFrame(); @@ -367,9 +364,6 @@ private: void ClearDownload(IntervalType subtract_interval); VideoCore::RasterizerInterface& rasterizer; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; - Tegra::MemoryManager& gpu_memory; Core::Memory::Memory& cpu_memory; SlotVector slot_buffers; @@ -444,12 +438,8 @@ private: template BufferCache
<P>
::BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, - Runtime& runtime_) - : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, - kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} { + Core::Memory::Memory& cpu_memory_, Runtime& runtime_) + : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} { // Ensure the first slot is used for the null buffer void(slot_buffers.insert(runtime, NullBufferParams{})); common_ranges.clear(); @@ -552,8 +542,8 @@ void BufferCache
<P>
::ClearDownload(IntervalType subtract_interval) { template bool BufferCache
<P>
::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { - const std::optional cpu_src_address = gpu_memory.GpuToCpuAddress(src_address); - const std::optional cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address); + const std::optional cpu_src_address = gpu_memory->GpuToCpuAddress(src_address); + const std::optional cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address); if (!cpu_src_address || !cpu_dest_address) { return false; } @@ -611,7 +601,7 @@ bool BufferCache
<P>
::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am template bool BufferCache
<P>
::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { - const std::optional cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address); + const std::optional cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address); if (!cpu_dst_address) { return false; } @@ -635,7 +625,7 @@ bool BufferCache
<P>
::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { template void BufferCache
<P>
::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) { - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); const Binding binding{ .cpu_addr = *cpu_addr, .size = size, @@ -673,7 +663,7 @@ void BufferCache
<P>
::BindHostGeometryBuffers(bool is_indexed) { if (is_indexed) { BindHostIndexBuffer(); } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) { runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count); } @@ -733,7 +723,7 @@ void BufferCache
<P>
::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index, enabled_storage_buffers[stage] |= 1U << ssbo_index; written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index; - const auto& cbufs = maxwell3d.state.shader_stages[stage]; + const auto& cbufs = maxwell3d->state.shader_stages[stage]; const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset; storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr); } @@ -770,7 +760,7 @@ void BufferCache
<P>
::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index, enabled_compute_storage_buffers |= 1U << ssbo_index; written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index; - const auto& launch_desc = kepler_compute.launch_description; + const auto& launch_desc = kepler_compute->launch_description; ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0); const auto& cbufs = launch_desc.const_buffer_config; @@ -991,19 +981,19 @@ void BufferCache
<P>
::BindHostIndexBuffer() { const u32 size = index_buffer.size; SynchronizeBuffer(buffer, index_buffer.cpu_addr, size); if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { - const u32 new_offset = offset + maxwell3d.regs.index_array.first * - maxwell3d.regs.index_array.FormatSizeInBytes(); + const u32 new_offset = offset + maxwell3d->regs.index_array.first * + maxwell3d->regs.index_array.FormatSizeInBytes(); runtime.BindIndexBuffer(buffer, new_offset, size); } else { - runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format, - maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count, - buffer, offset, size); + runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_array.format, + maxwell3d->regs.index_array.first, + maxwell3d->regs.index_array.count, buffer, offset, size); } } template void BufferCache
<P>
::BindHostVertexBuffers() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { const Binding& binding = vertex_buffers[index]; Buffer& buffer = slot_buffers[binding.buffer_id]; @@ -1014,7 +1004,7 @@ void BufferCache
<P>
::BindHostVertexBuffers() { } flags[Dirty::VertexBuffer0 + index] = false; - const u32 stride = maxwell3d.regs.vertex_array[index].stride; + const u32 stride = maxwell3d->regs.vertex_array[index].stride; const u32 offset = buffer.Offset(binding.cpu_addr); runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride); } @@ -1154,7 +1144,7 @@ void BufferCache
<P>
::BindHostGraphicsTextureBuffers(size_t stage) { template void BufferCache
<P>
::BindHostTransformFeedbackBuffers() { - if (maxwell3d.regs.tfb_enabled == 0) { + if (maxwell3d->regs.tfb_enabled == 0) { return; } for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { @@ -1262,8 +1252,8 @@ template void BufferCache
<P>
::UpdateIndexBuffer() { // We have to check for the dirty flags and index count // The index count is currently changed without updating the dirty flags - const auto& index_array = maxwell3d.regs.index_array; - auto& flags = maxwell3d.dirty.flags; + const auto& index_array = maxwell3d->regs.index_array; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) { return; } @@ -1272,7 +1262,7 @@ void BufferCache
<P>
::UpdateIndexBuffer() { const GPUVAddr gpu_addr_begin = index_array.StartAddress(); const GPUVAddr gpu_addr_end = index_array.EndAddress(); - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); const u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes(); const u32 size = std::min(address_size, draw_size); @@ -1289,8 +1279,8 @@ void BufferCache
<P>
::UpdateIndexBuffer() { template void BufferCache
<P>
::UpdateVertexBuffers() { - auto& flags = maxwell3d.dirty.flags; - if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) { + auto& flags = maxwell3d->dirty.flags; + if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) { return; } flags[Dirty::VertexBuffers] = false; @@ -1302,28 +1292,15 @@ void BufferCache
<P>
::UpdateVertexBuffers() { template void BufferCache
<P>
::UpdateVertexBuffer(u32 index) { - if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) { + if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) { return; } - const auto& array = maxwell3d.regs.vertex_array[index]; - const auto& limit = maxwell3d.regs.vertex_array_limit[index]; + const auto& array = maxwell3d->regs.vertex_array[index]; + const auto& limit = maxwell3d->regs.vertex_array_limit[index]; const GPUVAddr gpu_addr_begin = array.StartAddress(); const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1; - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); - u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); - if (address_size >= 64_MiB) { - // Reported vertex buffer size is very large, cap to mapped buffer size - GPUVAddr submapped_addr_end = gpu_addr_begin; - - const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)}; - if (ranges.size() > 0) { - const auto& [addr, size] = *ranges.begin(); - submapped_addr_end = addr + size; - } - - address_size = - std::min(address_size, static_cast(submapped_addr_end - gpu_addr_begin)); - } + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); + const u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); const u32 size = address_size; // TODO: Analyze stride and number of vertices if (array.enable == 0 || size == 0 || !cpu_addr) { vertex_buffers[index] = NULL_BINDING; @@ -1382,7 +1359,7 @@ void BufferCache
<P>
::UpdateTextureBuffers(size_t stage) { template void BufferCache
<P>
::UpdateTransformFeedbackBuffers() { - if (maxwell3d.regs.tfb_enabled == 0) { + if (maxwell3d->regs.tfb_enabled == 0) { return; } for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { @@ -1392,10 +1369,10 @@ void BufferCache
<P>
::UpdateTransformFeedbackBuffers() { template void BufferCache
<P>
::UpdateTransformFeedbackBuffer(u32 index) { - const auto& binding = maxwell3d.regs.tfb_bindings[index]; + const auto& binding = maxwell3d->regs.tfb_bindings[index]; const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset; const u32 size = binding.buffer_size; - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) { transform_feedback_buffers[index] = NULL_BINDING; return; @@ -1414,10 +1391,10 @@ void BufferCache
<P>
::UpdateComputeUniformBuffers() { ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) { Binding& binding = compute_uniform_buffers[index]; binding = NULL_BINDING; - const auto& launch_desc = kepler_compute.launch_description; + const auto& launch_desc = kepler_compute->launch_description; if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { const auto& cbuf = launch_desc.const_buffer_config[index]; - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address()); + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address()); if (cpu_addr) { binding.cpu_addr = *cpu_addr; binding.size = cbuf.size; @@ -1831,7 +1808,7 @@ void BufferCache
<P>
::NotifyBufferDeletion() { dirty_uniform_buffers.fill(~u32{0}); uniform_buffer_binding_sizes.fill({}); } - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; flags[Dirty::IndexBuffer] = true; flags[Dirty::VertexBuffers] = true; for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { @@ -1842,9 +1819,9 @@ void BufferCache
<P>
::NotifyBufferDeletion() { template typename BufferCache
<P>
::Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr ssbo_addr) const { - const GPUVAddr gpu_addr = gpu_memory.Read(ssbo_addr); - const u32 size = gpu_memory.Read(ssbo_addr + 8); - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const GPUVAddr gpu_addr = gpu_memory->Read(ssbo_addr); + const u32 size = gpu_memory->Read(ssbo_addr + 8); + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr || size == 0) { return NULL_BINDING; } @@ -1859,7 +1836,7 @@ typename BufferCache
<P>
::Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr s template typename BufferCache
<P>
::TextureBufferBinding BufferCache
<P>
::GetTextureBufferBinding( GPUVAddr gpu_addr, u32 size, PixelFormat format) { - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); TextureBufferBinding binding; if (!cpu_addr || size == 0) { binding.cpu_addr = 0; diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp new file mode 100644 index 0000000000..67803fe94c --- /dev/null +++ b/src/video_core/control/channel_state.cpp @@ -0,0 +1,44 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "video_core/control/channel_state.h" +#include "video_core/dma_pusher.h" +#include "video_core/engines/fermi_2d.h" +#include "video_core/engines/kepler_compute.h" +#include "video_core/engines/kepler_memory.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/maxwell_dma.h" +#include "video_core/engines/puller.h" +#include "video_core/memory_manager.h" + +namespace Tegra::Control { + +ChannelState::ChannelState(s32 bind_id_) { + bind_id = bind_id_; + initiated = false; +} + +void ChannelState::Init(Core::System& system, GPU& gpu) { + ASSERT(memory_manager); + dma_pusher = std::make_unique(system, gpu, *memory_manager, *this); + maxwell_3d = std::make_unique(system, *memory_manager); + fermi_2d = std::make_unique(); + kepler_compute = std::make_unique(system, *memory_manager); + maxwell_dma = std::make_unique(system, *memory_manager); + kepler_memory = std::make_unique(system, *memory_manager); + initiated = true; +} + +void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) { + dma_pusher->BindRasterizer(rasterizer); + memory_manager->BindRasterizer(rasterizer); + maxwell_3d->BindRasterizer(rasterizer); + fermi_2d->BindRasterizer(rasterizer); + kepler_memory->BindRasterizer(rasterizer); + kepler_compute->BindRasterizer(rasterizer); + maxwell_dma->BindRasterizer(rasterizer); +} + +} // namespace Tegra::Control diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h new file mode 100644 index 0000000000..82808a6b82 --- /dev/null +++ b/src/video_core/control/channel_state.h @@ -0,0 +1,69 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
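The channel_state.cpp file added above pins down a construction order: a channel's MemoryManager must be injected (nvhost_as_gpu::BindChannel does this on the service side) before Init builds the engines, which is why Init begins with ASSERT(memory_manager). A condensed, hypothetical sketch of that ownership contract, with most engines omitted:

#include <cassert>
#include <memory>

struct MemoryManager {};
struct DmaPusher { explicit DmaPusher(MemoryManager&) {} };
struct Maxwell3D { explicit Maxwell3D(MemoryManager&) {} };

struct ChannelStateSketch {
    std::shared_ptr<MemoryManager> memory_manager; // injected by the address-space device
    std::unique_ptr<DmaPusher> dma_pusher;
    std::unique_ptr<Maxwell3D> maxwell_3d;
    bool initiated = false;

    void Init() {
        assert(memory_manager && "BindChannel must run before channel initialization");
        dma_pusher = std::make_unique<DmaPusher>(*memory_manager);
        maxwell_3d = std::make_unique<Maxwell3D>(*memory_manager);
        // fermi_2d, kepler_compute, maxwell_dma and kepler_memory follow the same pattern
        initiated = true;
    }
};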
+ +#pragma once + +#include + +#include "common/common_types.h" + +namespace Core { +class System; +} + +namespace VideoCore { +class RasterizerInterface; +} + +namespace Tegra { + +class GPU; + +namespace Engines { +class Puller; +class Fermi2D; +class Maxwell3D; +class MaxwellDMA; +class KeplerCompute; +class KeplerMemory; +} // namespace Engines + +class MemoryManager; +class DmaPusher; + +namespace Control { + +struct ChannelState { + ChannelState(s32 bind_id); + ChannelState(const ChannelState& state) = delete; + ChannelState& operator=(const ChannelState&) = delete; + ChannelState(ChannelState&& other) noexcept = default; + ChannelState& operator=(ChannelState&& other) noexcept = default; + + void Init(Core::System& system, GPU& gpu); + + void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); + + s32 bind_id = -1; + /// 3D engine + std::unique_ptr maxwell_3d; + /// 2D engine + std::unique_ptr fermi_2d; + /// Compute engine + std::unique_ptr kepler_compute; + /// DMA engine + std::unique_ptr maxwell_dma; + /// Inline memory engine + std::unique_ptr kepler_memory; + + std::shared_ptr memory_manager; + + std::unique_ptr dma_pusher; + + bool initiated{}; +}; + +} // namespace Control + +} // namespace Tegra diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp new file mode 100644 index 0000000000..f72a97b2f5 --- /dev/null +++ b/src/video_core/control/channel_state_cache.cpp @@ -0,0 +1,5 @@ +#include "video_core/control/channel_state_cache.inc" + +namespace VideoCommon { +template class VideoCommon::ChannelSetupCaches; +} diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h new file mode 100644 index 0000000000..c8298c0030 --- /dev/null +++ b/src/video_core/control/channel_state_cache.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include + +#include "common/common_types.h" + +namespace Tegra { + +namespace Engines { +class Maxwell3D; +class KeplerCompute; +} // namespace Engines + +class MemoryManager; + +namespace Control { +struct ChannelState; +} + +} // namespace Tegra + +namespace VideoCommon { + +class ChannelInfo { +public: + ChannelInfo() = delete; + ChannelInfo(Tegra::Control::ChannelState& state); + ChannelInfo(const ChannelInfo& state) = delete; + ChannelInfo& operator=(const ChannelInfo&) = delete; + ChannelInfo(ChannelInfo&& other) = default; + ChannelInfo& operator=(ChannelInfo&& other) = default; + + Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::Engines::KeplerCompute& kepler_compute; + Tegra::MemoryManager& gpu_memory; +}; + +template +class ChannelSetupCaches { +public: + /// Operations for seting the channel of execution. + + /// Create channel state. + void CreateChannel(Tegra::Control::ChannelState& channel); + + /// Bind a channel for execution. + void BindToChannel(s32 id); + + /// Erase channel's state. + void EraseChannel(s32 id); + +protected: + static constexpr size_t UNSET_CHANNEL{std::numeric_limits::max()}; + + std::deque
<P>
channel_storage; + std::deque free_channel_ids; + std::unordered_map channel_map; + + P* channel_state; + size_t current_channel_id{UNSET_CHANNEL}; + Tegra::Engines::Maxwell3D* maxwell3d; + Tegra::Engines::KeplerCompute* kepler_compute; + Tegra::MemoryManager* gpu_memory; +}; + +} // namespace VideoCommon diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc new file mode 100644 index 0000000000..3eb73af9f4 --- /dev/null +++ b/src/video_core/control/channel_state_cache.inc @@ -0,0 +1,64 @@ +#include "video_core/control/channel_state.h" +#include "video_core/control/channel_state_cache.h" +#include "video_core/engines/kepler_compute.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/memory_manager.h" + +namespace VideoCommon { + +ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state) + : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute}, + gpu_memory{*channel_state.memory_manager} {} + +template +void ChannelSetupCaches
<P>
::CreateChannel(struct Tegra::Control::ChannelState& channel) { + ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0); + auto new_id = [this, &channel]() { + if (!free_channel_ids.empty()) { + auto id = free_channel_ids.front(); + free_channel_ids.pop_front(); + new (&channel_storage[id]) ChannelInfo(channel); + return id; + } + channel_storage.emplace_back(channel); + return channel_storage.size() - 1; + }(); + channel_map.emplace(channel.bind_id, new_id); + if (current_channel_id != UNSET_CHANNEL) { + channel_state = &channel_storage[current_channel_id]; + } +} + +/// Bind a channel for execution. +template +void ChannelSetupCaches
<P>
::BindToChannel(s32 id) { + auto it = channel_map.find(id); + ASSERT(it != channel_map.end() && id >= 0); + current_channel_id = it->second; + channel_state = &channel_storage[current_channel_id]; + maxwell3d = &channel_state->maxwell3d; + kepler_compute = &channel_state->kepler_compute; + gpu_memory = &channel_state->gpu_memory; +} + +/// Erase channel's channel_state. +template +void ChannelSetupCaches
<P>
::EraseChannel(s32 id) { + const auto it = channel_map.find(id); + ASSERT(it != channel_map.end() && id >= 0); + const auto this_id = it->second; + free_channel_ids.push_back(this_id); + channel_map.erase(it); + if (this_id == current_channel_id) { + current_channel_id = UNSET_CHANNEL; + channel_state = nullptr; + maxwell3d = nullptr; + kepler_compute = nullptr; + gpu_memory = nullptr; + } else if (current_channel_id != UNSET_CHANNEL) { + channel_state = &channel_storage[current_channel_id]; + } +} + + +} // namespace VideoCommon diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp new file mode 100644 index 0000000000..e1abcb188b --- /dev/null +++ b/src/video_core/control/scheduler.cpp @@ -0,0 +1,31 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include + +#include "video_core/control/channel_state.h" +#include "video_core/control/scheduler.h" +#include "video_core/gpu.h" + +namespace Tegra::Control { +Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {} + +Scheduler::~Scheduler() = default; + +void Scheduler::Push(s32 channel, CommandList&& entries) { + std::unique_lock lk(scheduling_guard); + auto it = channels.find(channel); + auto channel_state = it->second; + gpu.BindChannel(channel_state->bind_id); + channel_state->dma_pusher->Push(std::move(entries)); + channel_state->dma_pusher->DispatchCalls(); +} + +void Scheduler::DeclareChannel(std::shared_ptr new_channel) { + s32 channel = new_channel->bind_id; + std::unique_lock lk(scheduling_guard); + channels.emplace(channel, new_channel); +} + +} // namespace Tegra::Control diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h new file mode 100644 index 0000000000..802e9caffb --- /dev/null +++ b/src/video_core/control/scheduler.h @@ -0,0 +1,38 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
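Scheduler::Push above is the serialization point for multi-channel submission: it takes the scheduling lock, binds the target channel on the GPU, then drains that channel's DmaPusher before returning. A minimal sketch of the same shape follows (hypothetical names, no error handling); note that because the lock is held across dispatch, channels take turns rather than running concurrently.

#include <map>
#include <memory>
#include <mutex>
#include <utility>

struct CommandList {};

struct Channel {
    int bind_id;
    void PushAndDispatch(CommandList&&) {} // stands in for dma_pusher Push + DispatchCalls
};

class SchedulerSketch {
public:
    void DeclareChannel(std::shared_ptr<Channel> channel) {
        const int id = channel->bind_id;
        std::scoped_lock lk{guard};
        channels.emplace(id, std::move(channel));
    }

    void Push(int channel_id, CommandList&& entries) {
        std::scoped_lock lk{guard}; // held across dispatch: submissions are serialized
        const auto it = channels.find(channel_id);
        if (it == channels.end()) {
            return; // the real code assumes the channel was declared
        }
        it->second->PushAndDispatch(std::move(entries));
    }

private:
    std::map<int, std::shared_ptr<Channel>> channels;
    std::mutex guard;
};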
+ +#pragma once + +#include +#include +#include + +#include "video_core/dma_pusher.h" + +namespace Tegra { + +class GPU; + +namespace Control { + +struct ChannelState; + +class Scheduler { +public: + Scheduler(GPU& gpu_); + ~Scheduler(); + + void Push(s32 channel, CommandList&& entries); + + void DeclareChannel(std::shared_ptr new_channel); + +private: + std::unordered_map> channels; + std::mutex scheduling_guard; + GPU& gpu; +}; + +} // namespace Control + +} // namespace Tegra diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index 29b8582abd..b01f04d0c9 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -12,7 +12,10 @@ namespace Tegra { -DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {} +DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_, + Control::ChannelState& channel_state_) + : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_, + *this, channel_state_} {} DmaPusher::~DmaPusher() = default; @@ -76,11 +79,11 @@ bool DmaPusher::Step() { // Push buffer non-empty, read a word command_headers.resize(command_list_header.size); if (Settings::IsGPULevelHigh()) { - gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(), - command_list_header.size * sizeof(u32)); + memory_manager.ReadBlock(dma_get, command_headers.data(), + command_list_header.size * sizeof(u32)); } else { - gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(), - command_list_header.size * sizeof(u32)); + memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(), + command_list_header.size * sizeof(u32)); } } for (std::size_t index = 0; index < command_headers.size();) { @@ -154,7 +157,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) { void DmaPusher::CallMethod(u32 argument) const { if (dma_state.method < non_puller_methods) { - gpu.CallMethod(GPU::MethodCall{ + puller.CallPullerMethod(Engines::Puller::MethodCall{ dma_state.method, argument, dma_state.subchannel, @@ -168,12 +171,16 @@ void DmaPusher::CallMethod(u32 argument) const { void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const { if (dma_state.method < non_puller_methods) { - gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, - dma_state.method_count); + puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, + dma_state.method_count); } else { subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start, num_methods, dma_state.method_count); } } +void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) { + puller.BindRasterizer(rasterizer); +} + } // namespace Tegra diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 872fd146ac..fd7c936c40 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -10,6 +10,7 @@ #include "common/bit_field.h" #include "common/common_types.h" #include "video_core/engines/engine_interface.h" +#include "video_core/engines/puller.h" namespace Core { class System; @@ -17,7 +18,12 @@ class System; namespace Tegra { +namespace Control { +struct ChannelState; +} + class GPU; +class MemoryManager; enum class SubmissionMode : u32 { IncreasingOld = 0, @@ -102,7 +108,8 @@ struct CommandList final { */ class DmaPusher final { public: - explicit DmaPusher(Core::System& system_, GPU& gpu_); + explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& 
memory_manager_, + Control::ChannelState& channel_state_); ~DmaPusher(); void Push(CommandList&& entries) { @@ -115,6 +122,8 @@ public: subchannels[subchannel_id] = engine; } + void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); + private: static constexpr u32 non_puller_methods = 0x40; static constexpr u32 max_subchannels = 8; @@ -148,6 +157,8 @@ private: GPU& gpu; Core::System& system; + MemoryManager& memory_manager; + mutable Engines::Puller puller; }; } // namespace Tegra diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp new file mode 100644 index 0000000000..37f2ced18f --- /dev/null +++ b/src/video_core/engines/puller.cpp @@ -0,0 +1,297 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/logging/log.h" +#include "common/settings.h" +#include "core/core.h" +#include "video_core/control/channel_state.h" +#include "video_core/dma_pusher.h" +#include "video_core/engines/fermi_2d.h" +#include "video_core/engines/kepler_compute.h" +#include "video_core/engines/kepler_memory.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/maxwell_dma.h" +#include "video_core/engines/puller.h" +#include "video_core/gpu.h" +#include "video_core/memory_manager.h" +#include "video_core/rasterizer_interface.h" + +namespace Tegra::Engines { + +Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_, + Control::ChannelState& channel_state_) + : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{ + channel_state_} {} + +Puller::~Puller() = default; + +void Puller::ProcessBindMethod(const MethodCall& method_call) { + // Bind the current subchannel to the desired engine id. 
+ LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, + method_call.argument); + const auto engine_id = static_cast(method_call.argument); + bound_engines[method_call.subchannel] = static_cast(engine_id); + switch (engine_id) { + case EngineID::FERMI_TWOD_A: + dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel); + break; + case EngineID::MAXWELL_B: + dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel); + break; + case EngineID::KEPLER_COMPUTE_B: + dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel); + break; + case EngineID::MAXWELL_DMA_COPY_A: + dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel); + break; + case EngineID::KEPLER_INLINE_TO_MEMORY_B: + dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id); + } +} + +void Puller::ProcessFenceActionMethod() { + switch (regs.fence_action.op) { + case Puller::FenceOperation::Acquire: + // UNIMPLEMENTED_MSG("Channel Scheduling pending."); + // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value); + break; + case Puller::FenceOperation::Increment: + rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value()); + } +} + +void Puller::ProcessWaitForInterruptMethod() { + // TODO(bunnei) ImplementMe + LOG_WARNING(HW_GPU, "(STUBBED) called"); +} + +void Puller::ProcessSemaphoreTriggerMethod() { + const auto semaphoreOperationMask = 0xF; + const auto op = + static_cast(regs.semaphore_trigger & semaphoreOperationMask); + if (op == GpuSemaphoreOperation::WriteLong) { + struct Block { + u32 sequence; + u32 zeros = 0; + u64 timestamp; + }; + + Block block{}; + block.sequence = regs.semaphore_sequence; + // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of + // CoreTiming + block.timestamp = gpu.GetTicks(); + memory_manager.WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, sizeof(block)); + } else { + const u32 word{memory_manager.Read(regs.semaphore_address.SemaphoreAddress())}; + if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || + (op == GpuSemaphoreOperation::AcquireGequal && + static_cast(word - regs.semaphore_sequence) > 0) || + (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) { + // Nothing to do in this case + } else { + regs.acquire_source = true; + regs.acquire_value = regs.semaphore_sequence; + if (op == GpuSemaphoreOperation::AcquireEqual) { + regs.acquire_active = true; + regs.acquire_mode = false; + } else if (op == GpuSemaphoreOperation::AcquireGequal) { + regs.acquire_active = true; + regs.acquire_mode = true; + } else if (op == GpuSemaphoreOperation::AcquireMask) { + // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with + // semaphore_sequence, gives a non-0 result + LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented"); + } else { + LOG_ERROR(HW_GPU, "Invalid semaphore operation"); + } + } + } +} + +void Puller::ProcessSemaphoreRelease() { + memory_manager.Write(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release); +} + +void Puller::ProcessSemaphoreAcquire() { + const u32 word = memory_manager.Read(regs.semaphore_address.SemaphoreAddress()); + const auto value = regs.semaphore_acquire; + if (word != value) { + 
regs.acquire_active = true; + regs.acquire_value = value; + // TODO(kemathe73) figure out how to do the acquire_timeout + regs.acquire_mode = false; + regs.acquire_source = false; + } +} + +/// Calls a GPU puller method. +void Puller::CallPullerMethod(const MethodCall& method_call) { + regs.reg_array[method_call.method] = method_call.argument; + const auto method = static_cast(method_call.method); + + switch (method) { + case BufferMethods::BindObject: { + ProcessBindMethod(method_call); + break; + } + case BufferMethods::Nop: + case BufferMethods::SemaphoreAddressHigh: + case BufferMethods::SemaphoreAddressLow: + case BufferMethods::SemaphoreSequence: + case BufferMethods::UnkCacheFlush: + case BufferMethods::WrcacheFlush: + case BufferMethods::FenceValue: + break; + case BufferMethods::RefCnt: + rasterizer->SignalReference(); + break; + case BufferMethods::FenceAction: + ProcessFenceActionMethod(); + break; + case BufferMethods::WaitForInterrupt: + ProcessWaitForInterruptMethod(); + break; + case BufferMethods::SemaphoreTrigger: { + ProcessSemaphoreTriggerMethod(); + break; + } + case BufferMethods::NotifyIntr: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented"); + break; + } + case BufferMethods::Unk28: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented"); + break; + } + case BufferMethods::SemaphoreAcquire: { + ProcessSemaphoreAcquire(); + break; + } + case BufferMethods::SemaphoreRelease: { + ProcessSemaphoreRelease(); + break; + } + case BufferMethods::Yield: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented"); + break; + } + default: + LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method); + break; + } +} + +/// Calls a GPU engine method. +void Puller::CallEngineMethod(const MethodCall& method_call) { + const EngineID engine = bound_engines[method_call.subchannel]; + + switch (engine) { + case EngineID::FERMI_TWOD_A: + channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::MAXWELL_B: + channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::KEPLER_COMPUTE_B: + channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::MAXWELL_DMA_COPY_A: + channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::KEPLER_INLINE_TO_MEMORY_B: + channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented engine"); + } +} + +/// Calls a GPU engine multivalue method. 
+void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending) { + const EngineID engine = bound_engines[subchannel]; + + switch (engine) { + case EngineID::FERMI_TWOD_A: + channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::MAXWELL_B: + channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::KEPLER_COMPUTE_B: + channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::MAXWELL_DMA_COPY_A: + channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::KEPLER_INLINE_TO_MEMORY_B: + channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented engine"); + } +} + +/// Calls a GPU method. +void Puller::CallMethod(const MethodCall& method_call) { + LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, + method_call.subchannel); + + ASSERT(method_call.subchannel < bound_engines.size()); + + if (ExecuteMethodOnEngine(method_call.method)) { + CallEngineMethod(method_call); + } else { + CallPullerMethod(method_call); + } +} + +/// Calls a GPU multivalue method. +void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending) { + LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); + + ASSERT(subchannel < bound_engines.size()); + + if (ExecuteMethodOnEngine(method)) { + CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); + } else { + for (std::size_t i = 0; i < amount; i++) { + CallPullerMethod(MethodCall{ + method, + base_start[i], + subchannel, + methods_pending - static_cast(i), + }); + } + } +} + +void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { + rasterizer = rasterizer_; +} + +/// Determines where the method should be executed. +[[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) { + const auto buffer_method = static_cast(method); + return buffer_method >= BufferMethods::NonPullerMethods; +} + +} // namespace Tegra::Engines diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h new file mode 100644 index 0000000000..d948ec790d --- /dev/null +++ b/src/video_core/engines/puller.h @@ -0,0 +1,179 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
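The Puller code above splits command processing by method ID: IDs below the puller-reserved range (DmaPusher's non_puller_methods, 0x40) touch the puller's own register file (semaphores, fence actions, BindObject), while anything at or above it is forwarded to whichever engine BindObject attached to the subchannel. A stripped-down sketch of that dispatch rule, with hypothetical stubs for the two sinks:

#include <array>
#include <cstdint>

enum class EngineId : std::uint32_t { None = 0, Maxwell3D = 0xB197 };

struct PullerSketch {
    static constexpr std::uint32_t kNonPullerMethods = 0x40; // matches DmaPusher's constant
    std::array<EngineId, 8> bound_engines{};                 // subchannel -> bound engine

    void CallMethod(std::uint32_t method, std::uint32_t subchannel, std::uint32_t argument) {
        if (method >= kNonPullerMethods) {
            CallEngine(bound_engines[subchannel], method, argument);
        } else {
            CallPuller(method, argument); // semaphores, fence actions, BindObject, ...
        }
    }

    void CallEngine(EngineId, std::uint32_t, std::uint32_t) {}
    void CallPuller(std::uint32_t, std::uint32_t) {}
};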
+ +#pragma once + +#include +#include +#include +#include "common/bit_field.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "video_core/engines/engine_interface.h" + +namespace Core { +class System; +} + +namespace Tegra { +class MemoryManager; +class DmaPusher; + +enum class EngineID { + FERMI_TWOD_A = 0x902D, // 2D Engine + MAXWELL_B = 0xB197, // 3D Engine + KEPLER_COMPUTE_B = 0xB1C0, + KEPLER_INLINE_TO_MEMORY_B = 0xA140, + MAXWELL_DMA_COPY_A = 0xB0B5, +}; + +namespace Control { +struct ChannelState; +} +} // namespace Tegra + +namespace VideoCore { +class RasterizerInterface; +} + +namespace Tegra::Engines { + +class Puller final { +public: + struct MethodCall { + u32 method{}; + u32 argument{}; + u32 subchannel{}; + u32 method_count{}; + + explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0) + : method(method_), argument(argument_), subchannel(subchannel_), + method_count(method_count_) {} + + [[nodiscard]] bool IsLastCall() const { + return method_count <= 1; + } + }; + + enum class FenceOperation : u32 { + Acquire = 0, + Increment = 1, + }; + + union FenceAction { + u32 raw; + BitField<0, 1, FenceOperation> op; + BitField<8, 24, u32> syncpoint_id; + }; + + explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher, + Control::ChannelState& channel_state); + ~Puller(); + + void CallMethod(const MethodCall& method_call); + + void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending); + + void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); + + void CallPullerMethod(const MethodCall& method_call); + + void CallEngineMethod(const MethodCall& method_call); + + void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending); + +private: + Tegra::GPU& gpu; + + MemoryManager& memory_manager; + DmaPusher& dma_pusher; + Control::ChannelState& channel_state; + VideoCore::RasterizerInterface* rasterizer = nullptr; + + static constexpr std::size_t NUM_REGS = 0x800; + struct Regs { + static constexpr size_t NUM_REGS = 0x40; + + union { + struct { + INSERT_PADDING_WORDS_NOINIT(0x4); + struct { + u32 address_high; + u32 address_low; + + [[nodiscard]] GPUVAddr SemaphoreAddress() const { + return static_cast((static_cast(address_high) << 32) | + address_low); + } + } semaphore_address; + + u32 semaphore_sequence; + u32 semaphore_trigger; + INSERT_PADDING_WORDS_NOINIT(0xC); + + // The pusher and the puller share the reference counter, the pusher only has read + // access + u32 reference_count; + INSERT_PADDING_WORDS_NOINIT(0x5); + + u32 semaphore_acquire; + u32 semaphore_release; + u32 fence_value; + FenceAction fence_action; + INSERT_PADDING_WORDS_NOINIT(0xE2); + + // Puller state + u32 acquire_mode; + u32 acquire_source; + u32 acquire_active; + u32 acquire_timeout; + u32 acquire_value; + }; + std::array reg_array; + }; + } regs{}; + + void ProcessBindMethod(const MethodCall& method_call); + void ProcessFenceActionMethod(); + void ProcessSemaphoreAcquire(); + void ProcessSemaphoreRelease(); + void ProcessSemaphoreTriggerMethod(); + void ProcessWaitForInterruptMethod(); + [[nodiscard]] bool ExecuteMethodOnEngine(u32 method); + + /// Mapping of command subchannels to their bound engine ids + std::array bound_engines{}; + + enum class GpuSemaphoreOperation { + AcquireEqual = 0x1, + WriteLong = 0x2, + AcquireGequal = 0x4, + AcquireMask = 0x8, + }; + +#define ASSERT_REG_POSITION(field_name, position) \ + 
static_assert(offsetof(Regs, field_name) == position * 4, \ + "Field " #field_name " has invalid position") + + ASSERT_REG_POSITION(semaphore_address, 0x4); + ASSERT_REG_POSITION(semaphore_sequence, 0x6); + ASSERT_REG_POSITION(semaphore_trigger, 0x7); + ASSERT_REG_POSITION(reference_count, 0x14); + ASSERT_REG_POSITION(semaphore_acquire, 0x1A); + ASSERT_REG_POSITION(semaphore_release, 0x1B); + ASSERT_REG_POSITION(fence_value, 0x1C); + ASSERT_REG_POSITION(fence_action, 0x1D); + + ASSERT_REG_POSITION(acquire_mode, 0x100); + ASSERT_REG_POSITION(acquire_source, 0x101); + ASSERT_REG_POSITION(acquire_active, 0x102); + ASSERT_REG_POSITION(acquire_timeout, 0x103); + ASSERT_REG_POSITION(acquire_value, 0x104); + +#undef ASSERT_REG_POSITION +}; + +} // namespace Tegra::Engines diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 1e9832ddd4..d658e038d9 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h @@ -4,12 +4,13 @@ #pragma once #include +#include +#include #include #include "common/common_types.h" #include "video_core/delayed_destruction_ring.h" #include "video_core/gpu.h" -#include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" namespace VideoCommon { @@ -19,10 +20,10 @@ public: explicit FenceBase(u32 payload_, bool is_stubbed_) : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {} - explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_) + explicit FenceBase(u8* address_, u32 payload_, bool is_stubbed_) : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {} - GPUVAddr GetAddress() const { + u8* GetAddress() const { return address; } @@ -35,7 +36,7 @@ public: } private: - GPUVAddr address; + u8* address; u32 payload; bool is_semaphore; @@ -57,7 +58,7 @@ public: buffer_cache.AccumulateFlushes(); } - void SignalSemaphore(GPUVAddr addr, u32 value) { + void SignalSemaphore(u8* addr, u32 value) { TryReleasePendingFences(); const bool should_flush = ShouldFlush(); CommitAsyncFlushes(); @@ -91,8 +92,9 @@ public: } PopAsyncFlushes(); if (current_fence->IsSemaphore()) { - gpu_memory.template Write(current_fence->GetAddress(), - current_fence->GetPayload()); + char* address = reinterpret_cast(current_fence->GetAddress()); + auto payload = current_fence->GetPayload(); + std::memcpy(address, &payload, sizeof(payload)); } else { gpu.IncrementSyncPoint(current_fence->GetPayload()); } @@ -104,8 +106,8 @@ protected: explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, TQueryCache& query_cache_) - : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()}, - texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} + : rasterizer{rasterizer_}, gpu{gpu_}, texture_cache{texture_cache_}, + buffer_cache{buffer_cache_}, query_cache{query_cache_} {} virtual ~FenceManager() = default; @@ -113,7 +115,7 @@ protected: /// true virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true - virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0; + virtual TFence CreateFence(u8* addr, u32 value, bool is_stubbed) = 0; /// Queues a fence into the backend if the fence isn't stubbed. virtual void QueueFence(TFence& fence) = 0; /// Notifies that the backend fence has been signaled/reached in host GPU. 
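The fence_manager.h hunks above change how semaphore fences are released: a fence now stores the host pointer resolved when it was created, so releasing it is a plain 32-bit memcpy instead of a write through the per-channel MemoryManager (which the manager no longer holds). A short sketch under those assumptions:

#include <cstdint>
#include <cstring>

struct FenceSketch {
    std::uint8_t* address; // host address resolved at fence creation
    std::uint32_t payload;
};

// Equivalent of the std::memcpy release path in TryReleasePendingFences/WaitPendingFences.
inline void ReleaseSemaphore(const FenceSketch& fence) {
    std::memcpy(fence.address, &fence.payload, sizeof(fence.payload));
}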
@@ -123,7 +125,6 @@ protected: VideoCore::RasterizerInterface& rasterizer; Tegra::GPU& gpu; - Tegra::MemoryManager& gpu_memory; TTextureCache& texture_cache; TTBufferCache& buffer_cache; TQueryCache& query_cache; @@ -137,8 +138,9 @@ private: } PopAsyncFlushes(); if (current_fence->IsSemaphore()) { - gpu_memory.template Write(current_fence->GetAddress(), - current_fence->GetPayload()); + char* address = reinterpret_cast(current_fence->GetAddress()); + const auto payload = current_fence->GetPayload(); + std::memcpy(address, &payload, sizeof(payload)); } else { gpu.IncrementSyncPoint(current_fence->GetPayload()); } diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 33431f2a0f..80a1c69e00 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -18,6 +18,8 @@ #include "core/hle/service/nvdrv/nvdata.h" #include "core/perf_stats.h" #include "video_core/cdma_pusher.h" +#include "video_core/control/channel_state.h" +#include "video_core/control/scheduler.h" #include "video_core/dma_pusher.h" #include "video_core/engines/fermi_2d.h" #include "video_core/engines/kepler_compute.h" @@ -36,65 +38,58 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); struct GPU::Impl { explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) - : gpu{gpu_}, system{system_}, memory_manager{std::make_unique( - system)}, - dma_pusher{std::make_unique(system, gpu)}, use_nvdec{use_nvdec_}, - maxwell_3d{std::make_unique(system, *memory_manager)}, - fermi_2d{std::make_unique()}, - kepler_compute{std::make_unique(system, *memory_manager)}, - maxwell_dma{std::make_unique(system, *memory_manager)}, - kepler_memory{std::make_unique(system, *memory_manager)}, + : gpu{gpu_}, system{system_}, use_nvdec{use_nvdec_}, shader_notify{std::make_unique()}, is_async{is_async_}, - gpu_thread{system_, is_async_} {} + gpu_thread{system_, is_async_}, scheduler{std::make_unique(gpu)} {} ~Impl() = default; + std::shared_ptr CreateChannel(s32 channel_id) { + auto channel_state = std::make_shared(channel_id); + channels.emplace(channel_id, channel_state); + scheduler->DeclareChannel(channel_state); + return channel_state; + } + + void BindChannel(s32 channel_id) { + if (bound_channel == channel_id) { + return; + } + auto it = channels.find(channel_id); + ASSERT(it != channels.end()); + bound_channel = channel_id; + current_channel = it->second.get(); + + rasterizer->BindChannel(*current_channel); + } + + std::shared_ptr AllocateChannel() { + return CreateChannel(new_channel_id++); + } + + void InitChannel(Control::ChannelState& to_init) { + to_init.Init(system, gpu); + to_init.BindRasterizer(rasterizer); + rasterizer->InitializeChannel(to_init); + } + + void ReleaseChannel(Control::ChannelState& to_release) { + UNIMPLEMENTED(); + } + + void CreateHost1xChannel() { + if (host1x_channel) { + return; + } + host1x_channel = CreateChannel(0); + host1x_channel->memory_manager = std::make_shared(system); + InitChannel(*host1x_channel); + } + /// Binds a renderer to the GPU. void BindRenderer(std::unique_ptr renderer_) { renderer = std::move(renderer_); rasterizer = renderer->ReadRasterizer(); - - memory_manager->BindRasterizer(rasterizer); - maxwell_3d->BindRasterizer(rasterizer); - fermi_2d->BindRasterizer(rasterizer); - kepler_compute->BindRasterizer(rasterizer); - kepler_memory->BindRasterizer(rasterizer); - maxwell_dma->BindRasterizer(rasterizer); - } - - /// Calls a GPU method. 
- void CallMethod(const GPU::MethodCall& method_call) { - LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, - method_call.subchannel); - - ASSERT(method_call.subchannel < bound_engines.size()); - - if (ExecuteMethodOnEngine(method_call.method)) { - CallEngineMethod(method_call); - } else { - CallPullerMethod(method_call); - } - } - - /// Calls a GPU multivalue method. - void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending) { - LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); - - ASSERT(subchannel < bound_engines.size()); - - if (ExecuteMethodOnEngine(method)) { - CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); - } else { - for (std::size_t i = 0; i < amount; i++) { - CallPullerMethod(GPU::MethodCall{ - method, - base_start[i], - subchannel, - methods_pending - static_cast(i), - }); - } - } } /// Flush all current written commands into the host GPU for execution. @@ -146,42 +141,44 @@ struct GPU::Impl { /// Returns a reference to the Maxwell3D GPU engine. [[nodiscard]] Engines::Maxwell3D& Maxwell3D() { - return *maxwell_3d; + ASSERT(current_channel); + return *current_channel->maxwell_3d; } /// Returns a const reference to the Maxwell3D GPU engine. [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const { - return *maxwell_3d; + ASSERT(current_channel); + return *current_channel->maxwell_3d; } /// Returns a reference to the KeplerCompute GPU engine. [[nodiscard]] Engines::KeplerCompute& KeplerCompute() { - return *kepler_compute; + ASSERT(current_channel); + return *current_channel->kepler_compute; } /// Returns a reference to the KeplerCompute GPU engine. [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const { - return *kepler_compute; + ASSERT(current_channel); + return *current_channel->kepler_compute; } /// Returns a reference to the GPU memory manager. [[nodiscard]] Tegra::MemoryManager& MemoryManager() { - return *memory_manager; - } - - /// Returns a const reference to the GPU memory manager. - [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const { - return *memory_manager; + CreateHost1xChannel(); + return *host1x_channel->memory_manager; } /// Returns a reference to the GPU DMA pusher. [[nodiscard]] Tegra::DmaPusher& DmaPusher() { - return *dma_pusher; + ASSERT(current_channel); + return *current_channel->dma_pusher; } /// Returns a const reference to the GPU DMA pusher. [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const { - return *dma_pusher; + ASSERT(current_channel); + return *current_channel->dma_pusher; } /// Returns a reference to the underlying renderer. @@ -306,7 +303,7 @@ struct GPU::Impl { /// This can be used to launch any necessary threads and register any necessary /// core timing events. 
void Start() { - gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher); + gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler); cpu_context = renderer->GetRenderWindow().CreateSharedContext(); cpu_context->MakeCurrent(); } @@ -328,8 +325,8 @@ struct GPU::Impl { } /// Push GPU command entries to be processed - void PushGPUEntries(Tegra::CommandList&& entries) { - gpu_thread.SubmitList(std::move(entries)); + void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) { + gpu_thread.SubmitList(channel, std::move(entries)); } /// Push GPU command buffer entries to be processed @@ -381,303 +378,16 @@ struct GPU::Impl { interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); } - void ProcessBindMethod(const GPU::MethodCall& method_call) { - // Bind the current subchannel to the desired engine id. - LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, - method_call.argument); - const auto engine_id = static_cast(method_call.argument); - bound_engines[method_call.subchannel] = static_cast(engine_id); - switch (engine_id) { - case EngineID::FERMI_TWOD_A: - dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel); - break; - case EngineID::MAXWELL_B: - dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel); - break; - case EngineID::KEPLER_COMPUTE_B: - dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel); - break; - case EngineID::MAXWELL_DMA_COPY_A: - dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel); - break; - case EngineID::KEPLER_INLINE_TO_MEMORY_B: - dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id); - } - } - - void ProcessFenceActionMethod() { - switch (regs.fence_action.op) { - case GPU::FenceOperation::Acquire: - WaitFence(regs.fence_action.syncpoint_id, regs.fence_value); - break; - case GPU::FenceOperation::Increment: - IncrementSyncPoint(regs.fence_action.syncpoint_id); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value()); - } - } - - void ProcessWaitForInterruptMethod() { - // TODO(bunnei) ImplementMe - LOG_WARNING(HW_GPU, "(STUBBED) called"); - } - - void ProcessSemaphoreTriggerMethod() { - const auto semaphoreOperationMask = 0xF; - const auto op = - static_cast(regs.semaphore_trigger & semaphoreOperationMask); - if (op == GpuSemaphoreOperation::WriteLong) { - struct Block { - u32 sequence; - u32 zeros = 0; - u64 timestamp; - }; - - Block block{}; - block.sequence = regs.semaphore_sequence; - // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of - // CoreTiming - block.timestamp = GetTicks(); - memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, - sizeof(block)); - } else { - const u32 word{memory_manager->Read(regs.semaphore_address.SemaphoreAddress())}; - if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || - (op == GpuSemaphoreOperation::AcquireGequal && - static_cast(word - regs.semaphore_sequence) > 0) || - (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) { - // Nothing to do in this case - } else { - regs.acquire_source = true; - regs.acquire_value = regs.semaphore_sequence; - if (op == GpuSemaphoreOperation::AcquireEqual) { - regs.acquire_active = true; - regs.acquire_mode = false; - } else if (op == GpuSemaphoreOperation::AcquireGequal) { - regs.acquire_active = true; - regs.acquire_mode = 
true; - } else if (op == GpuSemaphoreOperation::AcquireMask) { - // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with - // semaphore_sequence, gives a non-0 result - LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented"); - } else { - LOG_ERROR(HW_GPU, "Invalid semaphore operation"); - } - } - } - } - - void ProcessSemaphoreRelease() { - memory_manager->Write(regs.semaphore_address.SemaphoreAddress(), - regs.semaphore_release); - } - - void ProcessSemaphoreAcquire() { - const u32 word = memory_manager->Read(regs.semaphore_address.SemaphoreAddress()); - const auto value = regs.semaphore_acquire; - if (word != value) { - regs.acquire_active = true; - regs.acquire_value = value; - // TODO(kemathe73) figure out how to do the acquire_timeout - regs.acquire_mode = false; - regs.acquire_source = false; - } - } - - /// Calls a GPU puller method. - void CallPullerMethod(const GPU::MethodCall& method_call) { - regs.reg_array[method_call.method] = method_call.argument; - const auto method = static_cast(method_call.method); - - switch (method) { - case BufferMethods::BindObject: { - ProcessBindMethod(method_call); - break; - } - case BufferMethods::Nop: - case BufferMethods::SemaphoreAddressHigh: - case BufferMethods::SemaphoreAddressLow: - case BufferMethods::SemaphoreSequence: - break; - case BufferMethods::UnkCacheFlush: - rasterizer->SyncGuestHost(); - break; - case BufferMethods::WrcacheFlush: - rasterizer->SignalReference(); - break; - case BufferMethods::FenceValue: - break; - case BufferMethods::RefCnt: - rasterizer->SignalReference(); - break; - case BufferMethods::FenceAction: - ProcessFenceActionMethod(); - break; - case BufferMethods::WaitForInterrupt: - rasterizer->WaitForIdle(); - break; - case BufferMethods::SemaphoreTrigger: { - ProcessSemaphoreTriggerMethod(); - break; - } - case BufferMethods::NotifyIntr: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented"); - break; - } - case BufferMethods::Unk28: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented"); - break; - } - case BufferMethods::SemaphoreAcquire: { - ProcessSemaphoreAcquire(); - break; - } - case BufferMethods::SemaphoreRelease: { - ProcessSemaphoreRelease(); - break; - } - case BufferMethods::Yield: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented"); - break; - } - default: - LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method); - break; - } - } - - /// Calls a GPU engine method. 
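A consequence of the fence_manager.h hunk at the top of this patch: semaphore fences no longer write through the GPU memory manager when they retire. The rasterizer instead resolves the semaphore's GPU virtual address to a host pointer up front, and the fence manager commits the payload with a plain memcpy (the matching SignalSemaphore and CreateFence changes appear further down in this same patch). Condensed, the new flow is:

    // Rasterizer side (see gl_rasterizer.cpp / vk_rasterizer.cpp below):
    void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
        auto paddr = gpu_memory.GetPointer(addr);    // u8* into guest memory
        fence_manager.SignalSemaphore(paddr, value); // fences now carry u8*, not GPUVAddr
    }

    // Fence manager side (fence_manager.h hunk above), when the fence is popped:
    char* address = reinterpret_cast<char*>(current_fence->GetAddress());
    const auto payload = current_fence->GetPayload();
    std::memcpy(address, &payload, sizeof(payload));

This trades an address-space walk per signal for a single translation at submission time, on the assumption that the mapping stays valid until the fence retires.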
- void CallEngineMethod(const GPU::MethodCall& method_call) { - const EngineID engine = bound_engines[method_call.subchannel]; - - switch (engine) { - case EngineID::FERMI_TWOD_A: - fermi_2d->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::MAXWELL_B: - maxwell_3d->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::KEPLER_COMPUTE_B: - kepler_compute->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::MAXWELL_DMA_COPY_A: - maxwell_dma->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::KEPLER_INLINE_TO_MEMORY_B: - kepler_memory->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented engine"); - } - } - - /// Calls a GPU engine multivalue method. - void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending) { - const EngineID engine = bound_engines[subchannel]; - - switch (engine) { - case EngineID::FERMI_TWOD_A: - fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::MAXWELL_B: - maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::KEPLER_COMPUTE_B: - kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::MAXWELL_DMA_COPY_A: - maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::KEPLER_INLINE_TO_MEMORY_B: - kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented engine"); - } - } - - /// Determines where the method should be executed. 
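The puller and engine-dispatch code deleted above is not dropped from the codebase: later patches in this series rehome it so that every channel keeps its own subchannel-to-engine bindings instead of the single GPU-wide bound_engines array. The destination (a per-channel puller owned by each channel's DmaPusher) is not shown in this excerpt, so the following is only an assumed outline, not the series' actual code:

    // Hypothetical shape of the relocated dispatch, one instance per channel:
    class Puller {
    public:
        explicit Puller(GPU& gpu, Core::System& system, DmaPusher& dma_pusher,
                        Control::ChannelState& channel_state);
        void CallPullerMethod(const MethodCall& method_call); // former GPU::Impl logic
        void CallEngineMethod(const MethodCall& method_call); // former GPU::Impl logic

    private:
        std::array<EngineID, 8> bound_engines{}; // was GPU::Impl::bound_engines
        Control::ChannelState& channel_state;
    };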
- [[nodiscard]] bool ExecuteMethodOnEngine(u32 method) { - const auto buffer_method = static_cast(method); - return buffer_method >= BufferMethods::NonPullerMethods; - } - - struct Regs { - static constexpr size_t NUM_REGS = 0x40; - - union { - struct { - INSERT_PADDING_WORDS_NOINIT(0x4); - struct { - u32 address_high; - u32 address_low; - - [[nodiscard]] GPUVAddr SemaphoreAddress() const { - return static_cast((static_cast(address_high) << 32) | - address_low); - } - } semaphore_address; - - u32 semaphore_sequence; - u32 semaphore_trigger; - INSERT_PADDING_WORDS_NOINIT(0xC); - - // The pusher and the puller share the reference counter, the pusher only has read - // access - u32 reference_count; - INSERT_PADDING_WORDS_NOINIT(0x5); - - u32 semaphore_acquire; - u32 semaphore_release; - u32 fence_value; - GPU::FenceAction fence_action; - INSERT_PADDING_WORDS_NOINIT(0xE2); - - // Puller state - u32 acquire_mode; - u32 acquire_source; - u32 acquire_active; - u32 acquire_timeout; - u32 acquire_value; - }; - std::array reg_array; - }; - } regs{}; - GPU& gpu; Core::System& system; - std::unique_ptr memory_manager; - std::unique_ptr dma_pusher; + std::map> cdma_pushers; std::unique_ptr renderer; VideoCore::RasterizerInterface* rasterizer = nullptr; const bool use_nvdec; - /// Mapping of command subchannels to their bound engine ids - std::array bound_engines{}; - /// 3D engine - std::unique_ptr maxwell_3d; - /// 2D engine - std::unique_ptr fermi_2d; - /// Compute engine - std::unique_ptr kepler_compute; - /// DMA engine - std::unique_ptr maxwell_dma; - /// Inline memory engine - std::unique_ptr kepler_memory; + std::shared_ptr host1x_channel; + s32 new_channel_id{1}; /// Shader build notifier std::unique_ptr shader_notify; /// When true, we are about to shut down emulation session, so terminate outstanding tasks @@ -710,33 +420,10 @@ struct GPU::Impl { VideoCommon::GPUThread::ThreadManager gpu_thread; std::unique_ptr cpu_context; -#define ASSERT_REG_POSITION(field_name, position) \ - static_assert(offsetof(Regs, field_name) == position * 4, \ - "Field " #field_name " has invalid position") - - ASSERT_REG_POSITION(semaphore_address, 0x4); - ASSERT_REG_POSITION(semaphore_sequence, 0x6); - ASSERT_REG_POSITION(semaphore_trigger, 0x7); - ASSERT_REG_POSITION(reference_count, 0x14); - ASSERT_REG_POSITION(semaphore_acquire, 0x1A); - ASSERT_REG_POSITION(semaphore_release, 0x1B); - ASSERT_REG_POSITION(fence_value, 0x1C); - ASSERT_REG_POSITION(fence_action, 0x1D); - - ASSERT_REG_POSITION(acquire_mode, 0x100); - ASSERT_REG_POSITION(acquire_source, 0x101); - ASSERT_REG_POSITION(acquire_active, 0x102); - ASSERT_REG_POSITION(acquire_timeout, 0x103); - ASSERT_REG_POSITION(acquire_value, 0x104); - -#undef ASSERT_REG_POSITION - - enum class GpuSemaphoreOperation { - AcquireEqual = 0x1, - WriteLong = 0x2, - AcquireGequal = 0x4, - AcquireMask = 0x8, - }; + std::unique_ptr scheduler; + std::unordered_map> channels; + Tegra::Control::ChannelState* current_channel; + s32 bound_channel{-1}; }; GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) @@ -744,19 +431,26 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) GPU::~GPU() = default; +std::shared_ptr GPU::AllocateChannel() { + return impl->AllocateChannel(); +} + +void GPU::InitChannel(Control::ChannelState& to_init) { + impl->InitChannel(to_init); +} + +void GPU::BindChannel(s32 channel_id) { + impl->BindChannel(channel_id); +} + +void GPU::ReleaseChannel(Control::ChannelState& to_release) { + impl->ReleaseChannel(to_release); +} + void 
GPU::BindRenderer(std::unique_ptr renderer) { impl->BindRenderer(std::move(renderer)); } -void GPU::CallMethod(const MethodCall& method_call) { - impl->CallMethod(method_call); -} - -void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending) { - impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending); -} - void GPU::FlushCommands() { impl->FlushCommands(); } @@ -881,8 +575,8 @@ void GPU::ReleaseContext() { impl->ReleaseContext(); } -void GPU::PushGPUEntries(Tegra::CommandList&& entries) { - impl->PushGPUEntries(std::move(entries)); +void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) { + impl->PushGPUEntries(channel, std::move(entries)); } void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) { diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 42c91954f7..74d55e074e 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -89,57 +89,20 @@ class Maxwell3D; class KeplerCompute; } // namespace Engines -enum class EngineID { - FERMI_TWOD_A = 0x902D, // 2D Engine - MAXWELL_B = 0xB197, // 3D Engine - KEPLER_COMPUTE_B = 0xB1C0, - KEPLER_INLINE_TO_MEMORY_B = 0xA140, - MAXWELL_DMA_COPY_A = 0xB0B5, -}; +namespace Control { +struct ChannelState; +} class MemoryManager; class GPU final { public: - struct MethodCall { - u32 method{}; - u32 argument{}; - u32 subchannel{}; - u32 method_count{}; - - explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0) - : method(method_), argument(argument_), subchannel(subchannel_), - method_count(method_count_) {} - - [[nodiscard]] bool IsLastCall() const { - return method_count <= 1; - } - }; - - enum class FenceOperation : u32 { - Acquire = 0, - Increment = 1, - }; - - union FenceAction { - u32 raw; - BitField<0, 1, FenceOperation> op; - BitField<8, 24, u32> syncpoint_id; - }; - explicit GPU(Core::System& system, bool is_async, bool use_nvdec); ~GPU(); /// Binds a renderer to the GPU. void BindRenderer(std::unique_ptr renderer); - /// Calls a GPU method. - void CallMethod(const MethodCall& method_call); - - /// Calls a GPU multivalue method. - void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending); - /// Flush all current written commands into the host GPU for execution. void FlushCommands(); /// Synchronizes CPU writes with Host GPU memory. @@ -147,6 +110,14 @@ public: /// Signal the ending of command list. void OnCommandListEnd(); + std::shared_ptr AllocateChannel(); + + void InitChannel(Control::ChannelState& to_init); + + void BindChannel(s32 channel_id); + + void ReleaseChannel(Control::ChannelState& to_release); + /// Request a host GPU memory flush from the CPU. 
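Taken together, the declarations above are the channel lifecycle that nvdrv gets to drive. A hypothetical service-side caller (the nvhost_gpu device is reworked to do this later in the series; its call sites are not in this excerpt) would use it as:

    auto channel = gpu.AllocateChannel(); // reserve a fresh channel id and state block
    gpu.InitChannel(*channel);            // build engines/pusher, register with rasterizer

    // Submissions are now tagged with the owning channel:
    gpu.PushGPUEntries(channel->bind_id, std::move(entries));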
[[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); @@ -226,7 +197,7 @@ public: void ReleaseContext(); /// Push GPU command entries to be processed - void PushGPUEntries(Tegra::CommandList&& entries); + void PushGPUEntries(s32 channel, Tegra::CommandList&& entries); /// Push GPU command buffer entries to be processed void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries); @@ -248,7 +219,7 @@ public: private: struct Impl; - std::unique_ptr impl; + mutable std::unique_ptr impl; }; } // namespace Tegra diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index f0e48cfbd0..9844cde43a 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -8,6 +8,7 @@ #include "common/thread.h" #include "core/core.h" #include "core/frontend/emu_window.h" +#include "video_core/control/scheduler.h" #include "video_core/dma_pusher.h" #include "video_core/gpu.h" #include "video_core/gpu_thread.h" @@ -18,7 +19,7 @@ namespace VideoCommon::GPUThread { /// Runs the GPU thread static void RunThread(std::stop_token stop_token, Core::System& system, VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, - Tegra::DmaPusher& dma_pusher, SynchState& state) { + Tegra::Control::Scheduler& scheduler, SynchState& state) { std::string name = "GPU"; MicroProfileOnThreadCreate(name.c_str()); SCOPE_EXIT({ MicroProfileOnThreadExit(); }); @@ -36,8 +37,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system, break; } if (auto* submit_list = std::get_if(&next.data)) { - dma_pusher.Push(std::move(submit_list->entries)); - dma_pusher.DispatchCalls(); + scheduler.Push(submit_list->channel, std::move(submit_list->entries)); } else if (const auto* data = std::get_if(&next.data)) { renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr); } else if (std::holds_alternative(next.data)) { @@ -68,14 +68,14 @@ ThreadManager::~ThreadManager() = default; void ThreadManager::StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, - Tegra::DmaPusher& dma_pusher) { + Tegra::Control::Scheduler& scheduler) { rasterizer = renderer.ReadRasterizer(); thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context), - std::ref(dma_pusher), std::ref(state)); + std::ref(scheduler), std::ref(state)); } -void ThreadManager::SubmitList(Tegra::CommandList&& entries) { - PushCommand(SubmitListCommand(std::move(entries))); +void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) { + PushCommand(SubmitListCommand(channel, std::move(entries))); } void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 2f8210cb9a..c5078a2b37 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h @@ -15,7 +15,9 @@ namespace Tegra { struct FramebufferConfig; -class DmaPusher; +namespace Control { +class Scheduler; +} } // namespace Tegra namespace Core { @@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread { /// Command to signal to the GPU thread that a command list is ready for processing struct SubmitListCommand final { - explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {} + explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_) + : channel{channel_}, entries{std::move(entries_)} {} + s32 channel; Tegra::CommandList entries; }; @@ -112,10 +116,10 @@ public: /// Creates and starts the GPU thread. 
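RunThread above now hands each command list to the new Tegra::Control::Scheduler instead of pushing a single global DmaPusher. The scheduler's implementation (video_core/control/scheduler.cpp) is added by this series but not shown in this excerpt; given DeclareChannel and the call shape scheduler.Push(channel, entries), a plausible sketch is:

    void Scheduler::Push(s32 channel, Tegra::CommandList&& entries) {
        std::unique_lock lk(scheduling_guard); // assumed lock; serializes submissions
        auto it = channels.find(channel);      // channels registered via DeclareChannel
        ASSERT(it != channels.end());
        auto& channel_state = it->second;
        gpu.BindChannel(channel_state->bind_id); // swaps engines and rasterizer caches
        channel_state->dma_pusher->Push(std::move(entries));
        channel_state->dma_pusher->DispatchCalls();
    }

In other words, cross-channel scheduling is, at this point in the series, just bind-and-run in submission order.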
void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, - Tegra::DmaPusher& dma_pusher); + Tegra::Control::Scheduler& scheduler); /// Push GPU command entries to be processed - void SubmitList(Tegra::CommandList&& entries); + void SubmitList(s32 channel, Tegra::CommandList&& entries); /// Swap buffers (render frame) void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index bf9eb735d9..a3efd365e3 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -133,11 +133,6 @@ void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::s // TryLockPage(page_entry, size); auto& current_page = page_table[PageEntryIndex(gpu_addr)]; - if ((!current_page.IsValid() && page_entry.IsValid()) || - current_page.ToAddress() != page_entry.ToAddress()) { - rasterizer->ModifyGPUMemory(gpu_addr, size); - } - current_page = page_entry; } diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 889b606b38..eb68ea6388 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -17,6 +17,7 @@ #include "common/assert.h" #include "common/settings.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" @@ -90,13 +91,10 @@ private: }; template -class QueryCacheBase { +class QueryCacheBase : public VideoCommon::ChannelSetupCaches { public: - explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::MemoryManager& gpu_memory_) - : rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, - gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast(*this), + explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_) + : rasterizer{rasterizer_}, streams{{CounterStream{static_cast(*this), VideoCore::QueryType::SamplesPassed}}} {} void InvalidateRegion(VAddr addr, std::size_t size) { @@ -117,13 +115,13 @@ public: */ void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional timestamp) { std::unique_lock lock{mutex}; - const std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); ASSERT(cpu_addr); CachedQuery* query = TryGet(*cpu_addr); if (!query) { ASSERT_OR_EXECUTE(cpu_addr, return;); - u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); + u8* const host_ptr = gpu_memory->GetPointer(gpu_addr); query = Register(type, *cpu_addr, host_ptr, timestamp.has_value()); } @@ -137,7 +135,7 @@ public: /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. 
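QueryCacheBase now reaches maxwell3d and gpu_memory through pointers rather than constructor references; those pointers come from the new ChannelSetupCaches base (video_core/control/channel_state_cache.h, also not part of this excerpt). From the calls made against the caches later in this patch (CreateChannel, BindToChannel, EraseChannel in vk_rasterizer.cpp), the base evidently stores per-channel state and repoints the protected members on every bind; a rough sketch, with the storage layout assumed:

    template <class ChannelInfo>
    class ChannelSetupCaches {
    public:
        virtual void CreateChannel(Tegra::Control::ChannelState& channel);
        void BindToChannel(s32 id); // repoints the pointers below
        void EraseChannel(s32 id);

    protected:
        // Active-channel views used by the caches in place of the old references.
        Tegra::Engines::Maxwell3D* maxwell3d{};
        Tegra::Engines::KeplerCompute* kepler_compute{};
        Tegra::MemoryManager* gpu_memory{};
        std::unordered_map<s32, ChannelInfo> channel_map; // assumed storage
    };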
void UpdateCounters() { std::unique_lock lock{mutex}; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); } @@ -264,8 +262,6 @@ private: static constexpr unsigned YUZU_PAGEBITS = 12; VideoCore::RasterizerInterface& rasterizer; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::MemoryManager& gpu_memory; std::recursive_mutex mutex; diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index a04a764815..8dacb26268 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -16,6 +16,9 @@ class MemoryManager; namespace Engines { class AccelerateDMAInterface; } +namespace Control { +struct ChannelState; +} } // namespace Tegra namespace VideoCore { @@ -137,5 +140,11 @@ public: /// Initialize disk cached resources for the game being emulated virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const DiskResourceLoadCallback& callback) {} + + virtual void InitializeChannel(Tegra::Control::ChannelState& channel) {} + + virtual void BindChannel(Tegra::Control::ChannelState& channel) {} + + virtual void ReleaseChannel(s32 channel_id) {} }; } // namespace VideoCore diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp index 6e82c2e28d..c76446b605 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.cpp +++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp @@ -12,7 +12,7 @@ namespace OpenGL { GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {} -GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_) +GLInnerFence::GLInnerFence(u8* address_, u32 payload_, bool is_stubbed_) : FenceBase{address_, payload_, is_stubbed_} {} GLInnerFence::~GLInnerFence() = default; @@ -52,7 +52,7 @@ Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { return std::make_shared(value, is_stubbed); } -Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { +Fence FenceManagerOpenGL::CreateFence(u8* addr, u32 value, bool is_stubbed) { return std::make_shared(addr, value, is_stubbed); } diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h index 14ff00db21..fced8d0029 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.h +++ b/src/video_core/renderer_opengl/gl_fence_manager.h @@ -17,7 +17,7 @@ namespace OpenGL { class GLInnerFence : public VideoCommon::FenceBase { public: explicit GLInnerFence(u32 payload_, bool is_stubbed_); - explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_); + explicit GLInnerFence(u8* address_, u32 payload_, bool is_stubbed_); ~GLInnerFence(); void Queue(); @@ -41,7 +41,7 @@ public: protected: Fence CreateFence(u32 value, bool is_stubbed) override; - Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; + Fence CreateFence(u8* addr, u32 value, bool is_stubbed) override; void QueueFence(Fence& fence) override; bool IsFenceSignaled(Fence& fence) const override; void WaitFence(Fence& fence) override; diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index ed40f5791d..5070db4417 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -26,9 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { 
} // Anonymous namespace -QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::MemoryManager& gpu_memory_) - : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {} +QueryCache::QueryCache(RasterizerOpenGL& rasterizer_) + : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {} QueryCache::~QueryCache() = default; diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h index 8a49f1ef0d..14ce59990b 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.h +++ b/src/video_core/renderer_opengl/gl_query_cache.h @@ -28,8 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase; class QueryCache final : public VideoCommon::QueryCacheBase { public: - explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::MemoryManager& gpu_memory_); + explicit QueryCache(RasterizerOpenGL& rasterizer_); ~QueryCache(); OGLQuery AllocateQuery(VideoCore::QueryType type); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index a0d048b0be..e8d61bd417 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -60,12 +60,11 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_), texture_cache_runtime(device, program_manager, state_tracker), - texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), - buffer_cache_runtime(device), - buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), - shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache, - buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()), - query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache), + texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device), + buffer_cache(*this, cpu_memory_, buffer_cache_runtime), + shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager, + state_tracker, gpu.ShaderNotify()), + query_cache(*this), accelerate_dma(buffer_cache), fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {} RasterizerOpenGL::~RasterizerOpenGL() = default; @@ -392,7 +391,8 @@ void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { gpu_memory.Write(addr, value); return; } - fence_manager.SignalSemaphore(addr, value); + auto paddr = gpu_memory.GetPointer(addr); + fence_manager.SignalSemaphore(paddr, value); } void RasterizerOpenGL::SignalSyncPoint(u32 value) { diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 0b8d8ec927..494581d0d3 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -151,16 +151,13 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs } // Anonymous namespace ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, const Device& device_, - TextureCache& texture_cache_, BufferCache& 
buffer_cache_, - ProgramManager& program_manager_, StateTracker& state_tracker_, - VideoCore::ShaderNotify& shader_notify_) - : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, - emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_}, - buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_}, - shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()}, + const Device& device_, TextureCache& texture_cache_, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_) + : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_}, + texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_}, + state_tracker{state_tracker_}, shader_notify{shader_notify_}, + use_asynchronous_shaders{device.UseAsynchronousShaders()}, profile{ .supported_spirv = 0x00010000, @@ -310,7 +307,7 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() { current_pipeline = nullptr; return nullptr; } - const auto& regs{maxwell3d.regs}; + const auto& regs{maxwell3d->regs}; graphics_key.raw = 0; graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0); graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0 @@ -351,13 +348,13 @@ GraphicsPipeline* ShaderCache::BuiltPipeline(GraphicsPipeline* pipeline) const n } // If something is using depth, we can assume that games are not rendering anything which // will be used one time. - if (maxwell3d.regs.zeta_enable) { + if (maxwell3d->regs.zeta_enable) { return nullptr; } // If games are using a small index count, we can assume these are full screen quads. // Usually these shaders are only used once for building textures so we can assume they // can't be built async - if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { + if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) { return pipeline; } return nullptr; @@ -368,7 +365,7 @@ ComputePipeline* ShaderCache::CurrentComputePipeline() { if (!shader) { return nullptr; } - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const ComputePipelineKey key{ .unique_hash = shader->unique_hash, .shared_memory_size = qmd.shared_alloc, @@ -481,8 +478,8 @@ std::unique_ptr ShaderCache::CreateGraphicsPipeline( } auto* const thread_worker{build_in_parallel ? 
workers.get() : nullptr}; return std::make_unique( - device, texture_cache, buffer_cache, gpu_memory, maxwell3d, program_manager, state_tracker, - thread_worker, &shader_notify, sources, sources_spirv, infos, key); + device, texture_cache, buffer_cache, *gpu_memory, *maxwell3d, program_manager, + state_tracker, thread_worker, &shader_notify, sources, sources_spirv, infos, key); } catch (Shader::Exception& exception) { LOG_ERROR(Render_OpenGL, "{}", exception.what()); @@ -491,9 +488,9 @@ std::unique_ptr ShaderCache::CreateGraphicsPipeline( std::unique_ptr ShaderCache::CreateComputePipeline( const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) { - const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; - const auto& qmd{kepler_compute.launch_description}; - ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; + const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; + const auto& qmd{kepler_compute->launch_description}; + ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; env.SetCachedSize(shader->size_bytes); main_pools.ReleaseContents(); @@ -536,8 +533,8 @@ std::unique_ptr ShaderCache::CreateComputePipeline( break; } - return std::make_unique(device, texture_cache, buffer_cache, gpu_memory, - kepler_compute, program_manager, program.info, code, + return std::make_unique(device, texture_cache, buffer_cache, *gpu_memory, + *kepler_compute, program_manager, program.info, code, code_spirv); } catch (Shader::Exception& exception) { LOG_ERROR(Render_OpenGL, "{}", exception.what()); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index a14269dea6..89f181fe30 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -30,12 +30,9 @@ using ShaderWorker = Common::StatefulThreadWorker; class ShaderCache : public VideoCommon::ShaderCache { public: explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, const Device& device_, - TextureCache& texture_cache_, BufferCache& buffer_cache_, - ProgramManager& program_manager_, StateTracker& state_tracker_, - VideoCore::ShaderNotify& shader_notify_); + const Device& device_, TextureCache& texture_cache_, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_); ~ShaderCache(); void LoadDiskResources(u64 title_id, std::stop_token stop_loading, diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 7c78d02996..68c2bc34c4 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -95,20 +95,25 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, std::unique_ptr context_) try - : RendererBase(emu_window, std::move(context_)), telemetry_session(telemetry_session_), - cpu_memory(cpu_memory_), gpu(gpu_), library(OpenLibrary()), + : RendererBase(emu_window, std::move(context_)), + telemetry_session(telemetry_session_), + cpu_memory(cpu_memory_), + gpu(gpu_), + library(OpenLibrary()), instance(CreateInstance(library, dld, VK_API_VERSION_1_1, 
render_window.GetWindowInfo().type, true, Settings::values.renderer_debug.GetValue())), debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr), surface(CreateSurface(instance, render_window)), - device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), - state_tracker(gpu), scheduler(device, state_tracker), + device(CreateDevice(instance, dld, *surface)), + memory_allocator(device, false), + state_tracker(gpu), + scheduler(device, state_tracker), swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, render_window.GetFramebufferLayout().height, false), blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, screen_info), - rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device, - memory_allocator, state_tracker, scheduler) { + rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator, + state_tracker, scheduler) { Report(); } catch (const vk::Exception& exception) { LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index c249b34d4c..301cbbabe8 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp @@ -14,7 +14,7 @@ namespace Vulkan { InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {} -InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_) +InnerFence::InnerFence(Scheduler& scheduler_, u8* address_, u32 payload_, bool is_stubbed_) : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {} InnerFence::~InnerFence() = default; @@ -52,7 +52,7 @@ Fence FenceManager::CreateFence(u32 value, bool is_stubbed) { return std::make_shared(scheduler, value, is_stubbed); } -Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { +Fence FenceManager::CreateFence(u8* addr, u32 value, bool is_stubbed) { return std::make_shared(scheduler, addr, value, is_stubbed); } diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index 7c0bbd80a2..ea9e88052d 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h @@ -26,7 +26,7 @@ class Scheduler; class InnerFence : public VideoCommon::FenceBase { public: explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_); - explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_); + explicit InnerFence(Scheduler& scheduler_, u8* address_, u32 payload_, bool is_stubbed_); ~InnerFence(); void Queue(); @@ -51,7 +51,7 @@ public: protected: Fence CreateFence(u32 value, bool is_stubbed) override; - Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; + Fence CreateFence(u8* addr, u32 value, bool is_stubbed) override; void QueueFence(Fence& fence) override; bool IsFenceSignaled(Fence& fence) const override; void WaitFence(Fence& fence) override; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index accbfc8e11..b1e0b96c49 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -259,17 +259,15 @@ bool 
GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c return std::memcmp(&rhs, this, Size()) == 0; } -PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, const Device& device_, +PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_, UpdateDescriptorQueue& update_descriptor_queue_, RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) - : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, - device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, - update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_}, - buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_}, + : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_}, + descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_}, + render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_}, + texture_cache{texture_cache_}, shader_notify{shader_notify_}, use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()}, workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"), serialization_thread(1, "VkPipelineSerialization") { @@ -337,7 +335,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() { current_pipeline = nullptr; return nullptr; } - graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(), + graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(), device.IsExtVertexInputDynamicStateSupported()); if (current_pipeline) { @@ -357,7 +355,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() { if (!shader) { return nullptr; } - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const ComputePipelineCacheKey key{ .unique_hash = shader->unique_hash, .shared_memory_size = qmd.shared_alloc, @@ -486,13 +484,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const } // If something is using depth, we can assume that games are not rendering anything which // will be used one time. - if (maxwell3d.regs.zeta_enable) { + if (maxwell3d->regs.zeta_enable) { return nullptr; } // If games are using a small index count, we can assume these are full screen quads. // Usually these shaders are only used once for building textures so we can assume they // can't be built async - if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { + if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) { return pipeline; } return nullptr; @@ -558,7 +556,7 @@ std::unique_ptr PipelineCache::CreateGraphicsPipeline( } Common::ThreadWorker* const thread_worker{build_in_parallel ? 
&workers : nullptr}; return std::make_unique( - maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, + *maxwell3d, *gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key, std::move(modules), infos); @@ -592,9 +590,9 @@ std::unique_ptr PipelineCache::CreateGraphicsPipeline() { std::unique_ptr PipelineCache::CreateComputePipeline( const ComputePipelineCacheKey& key, const ShaderInfo* shader) { - const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; - const auto& qmd{kepler_compute.launch_description}; - ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; + const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; + const auto& qmd{kepler_compute->launch_description}; + ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; env.SetCachedSize(shader->size_bytes); main_pools.ReleaseContents(); diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 127957dbf5..61f9e9366a 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -100,10 +100,8 @@ struct ShaderPools { class PipelineCache : public VideoCommon::ShaderCache { public: - explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, - Tegra::Engines::KeplerCompute& kepler_compute, - Tegra::MemoryManager& gpu_memory, const Device& device, - Scheduler& scheduler, DescriptorPool& descriptor_pool, + explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler, + DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue, RenderPassCache& render_pass_cache, BufferCache& buffer_cache, TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 2b859c6b8c..393bbdf378 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -65,10 +65,9 @@ void QueryPool::Reserve(std::pair query) { usage[pool_index * GROW_STEP + static_cast(query.second)] = false; } -QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, - const Device& device_, Scheduler& scheduler_) - : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_}, +QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, + Scheduler& scheduler_) + : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_}, query_pools{ QueryPool{device_, scheduler_, QueryType::SamplesPassed}, } {} diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h index b0d86c4f80..26762ee097 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.h +++ b/src/video_core/renderer_vulkan/vk_query_cache.h @@ -52,9 +52,8 @@ private: class QueryCache final : public VideoCommon::QueryCacheBase { public: - explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, - const Device& device_, Scheduler& scheduler_); + explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const 
Device& device_, + Scheduler& scheduler_); ~QueryCache(); std::pair AllocateQuery(VideoCore::QueryType type); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 7e40c2df1a..5d9ff0589d 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -11,6 +11,7 @@ #include "common/microprofile.h" #include "common/scope_exit.h" #include "common/settings.h" +#include "video_core/control/channel_state.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/blit_image.h" @@ -148,14 +149,11 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan } // Anonymous namespace RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, const Device& device_, MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, Scheduler& scheduler_) - : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, - gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, - screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_}, - state_tracker{state_tracker_}, scheduler{scheduler_}, + : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_}, + memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_}, staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), update_descriptor_queue(device, scheduler), blit_image(device, scheduler, state_tracker, descriptor_pool), @@ -165,14 +163,13 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra memory_allocator, staging_pool, blit_image, astc_decoder_pass, render_pass_cache}, - texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), + texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, update_descriptor_queue, descriptor_pool), - buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), - pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler, - descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache, - texture_cache, gpu.ShaderNotify()), - query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache}, + buffer_cache(*this, cpu_memory_, buffer_cache_runtime), + pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue, + render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()), + query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache}, fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), wfi_event(device.GetLogical().CreateEvent()) { scheduler.SetQueryCache(query_cache); @@ -199,8 +196,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { UpdateDynamicStates(); - const auto& regs{maxwell3d.regs}; - const u32 num_instances{maxwell3d.mme_draw.instance_count}; + const auto& regs{maxwell3d->regs}; + const u32 num_instances{maxwell3d->mme_draw.instance_count}; const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { if (draw_params.is_indexed) { @@ -218,14 +215,14 @@ 
void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { void RasterizerVulkan::Clear() { MICROPROFILE_SCOPE(Vulkan_Clearing); - if (!maxwell3d.ShouldExecute()) { + if (!maxwell3d->ShouldExecute()) { return; } FlushWork(); query_cache.UpdateCounters(); - auto& regs = maxwell3d.regs; + auto& regs = maxwell3d->regs; const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || regs.clear_buffers.A; const bool use_depth = regs.clear_buffers.Z; @@ -339,9 +336,9 @@ void RasterizerVulkan::DispatchCompute() { return; } std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; - pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache); + pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache); - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const std::array dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; scheduler.RequestOutsideRenderPassOperationContext(); scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); @@ -451,10 +448,11 @@ void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) { void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { if (!gpu.IsAsync()) { - gpu_memory.Write(addr, value); + gpu_memory->Write(addr, value); return; } - fence_manager.SignalSemaphore(addr, value); + auto paddr = gpu_memory->GetPointer(addr); + fence_manager.SignalSemaphore(paddr, value); } void RasterizerVulkan::SignalSyncPoint(u32 value) { @@ -553,12 +551,12 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA() void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, std::span memory) { - auto cpu_addr = gpu_memory.GpuToCpuAddress(address); + auto cpu_addr = gpu_memory->GpuToCpuAddress(address); if (!cpu_addr) [[unlikely]] { - gpu_memory.WriteBlock(address, memory.data(), copy_size); + gpu_memory->WriteBlock(address, memory.data(), copy_size); return; } - gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); + gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size); { std::unique_lock lock{buffer_cache.mutex}; if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { @@ -627,7 +625,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 } void RasterizerVulkan::UpdateDynamicStates() { - auto& regs = maxwell3d.regs; + auto& regs = maxwell3d->regs; UpdateViewportsState(regs); UpdateScissorsState(regs); UpdateDepthBias(regs); @@ -651,7 +649,7 @@ void RasterizerVulkan::UpdateDynamicStates() { } void RasterizerVulkan::BeginTransformFeedback() { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.tfb_enabled == 0) { return; } @@ -667,7 +665,7 @@ void RasterizerVulkan::BeginTransformFeedback() { } void RasterizerVulkan::EndTransformFeedback() { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.tfb_enabled == 0) { return; } @@ -917,7 +915,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& } void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { - auto& dirty{maxwell3d.dirty.flags}; + auto& dirty{maxwell3d->dirty.flags}; if (!dirty[Dirty::VertexInput]) { return; } @@ -974,4 +972,41 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) }); } +void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) { 
+ CreateChannel(channel); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.CreateChannel(channel); + buffer_cache.CreateChannel(channel); + } + pipeline_cache.CreateChannel(channel); + query_cache.CreateChannel(channel); + state_tracker.SetupTables(channel); +} + +void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) { + const s32 channel_id = channel.bind_id; + BindToChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.BindToChannel(channel_id); + buffer_cache.BindToChannel(channel_id); + } + pipeline_cache.BindToChannel(channel_id); + query_cache.BindToChannel(channel_id); + state_tracker.ChangeChannel(channel); + scheduler.InvalidateState(); +} + +void RasterizerVulkan::ReleaseChannel(s32 channel_id) { + EraseChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.EraseChannel(channel_id); + buffer_cache.EraseChannel(channel_id); + } + pipeline_cache.EraseChannel(channel_id); + query_cache.EraseChannel(channel_id); +} + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 0370ea39be..642fe65769 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -8,6 +8,7 @@ #include #include "common/common_types.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/rasterizer_accelerated.h" #include "video_core/rasterizer_interface.h" @@ -54,13 +55,13 @@ private: BufferCache& buffer_cache; }; -class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { +class RasterizerVulkan final : public VideoCore::RasterizerAccelerated, + protected VideoCommon::ChannelSetupCaches { public: explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, - ScreenInfo& screen_info_, const Device& device_, - MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, - Scheduler& scheduler_); + Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, + const Device& device_, MemoryAllocator& memory_allocator_, + StateTracker& state_tracker_, Scheduler& scheduler_); ~RasterizerVulkan() override; void Draw(bool is_indexed, bool is_instanced) override; @@ -99,6 +100,12 @@ public: void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; + void InitializeChannel(Tegra::Control::ChannelState& channel) override; + + void BindChannel(Tegra::Control::ChannelState& channel) override; + + void ReleaseChannel(s32 channel_id) override; + private: static constexpr size_t MAX_TEXTURES = 192; static constexpr size_t MAX_IMAGES = 48; @@ -134,9 +141,6 @@ private: void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); Tegra::GPU& gpu; - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; ScreenInfo& screen_info; const Device& device; diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp index 9ad0964319..a87bf8dd33 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp +++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp @@ -7,6 +7,7 @@ #include "common/common_types.h" #include "core/core.h" +#include 
"video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/gpu.h" @@ -174,9 +175,8 @@ void SetupDirtyVertexBindings(Tables& tables) { } } // Anonymous namespace -StateTracker::StateTracker(Tegra::GPU& gpu) - : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} { - auto& tables{gpu.Maxwell3D().dirty.tables}; +void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) { + auto& tables{channel_state.maxwell_3d->dirty.tables}; SetupDirtyFlags(tables); SetupDirtyViewports(tables); SetupDirtyScissors(tables); @@ -199,4 +199,11 @@ StateTracker::StateTracker(Tegra::GPU& gpu) SetupDirtyVertexBindings(tables); } +void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { + flags = &channel_state.maxwell_3d->dirty.flags; +} + +StateTracker::StateTracker(Tegra::GPU& gpu) + : flags{}, invalidation_flags{MakeInvalidationFlags()} {} + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h index a85bc1c10d..9f8a887f9c 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.h +++ b/src/video_core/renderer_vulkan/vk_state_tracker.h @@ -10,6 +10,12 @@ #include "video_core/dirty_flags.h" #include "video_core/engines/maxwell_3d.h" +namespace Tegra { +namespace Control { +struct ChannelState; +} +} // namespace Tegra + namespace Vulkan { namespace Dirty { @@ -56,16 +62,16 @@ public: explicit StateTracker(Tegra::GPU& gpu); void InvalidateCommandBufferState() { - flags |= invalidation_flags; + (*flags) |= invalidation_flags; current_topology = INVALID_TOPOLOGY; } void InvalidateViewports() { - flags[Dirty::Viewports] = true; + (*flags)[Dirty::Viewports] = true; } void InvalidateScissors() { - flags[Dirty::Scissors] = true; + (*flags)[Dirty::Scissors] = true; } bool TouchViewports() { @@ -139,16 +145,20 @@ public: return has_changed; } + void SetupTables(Tegra::Control::ChannelState& channel_state); + + void ChangeChannel(Tegra::Control::ChannelState& channel_state); + private: static constexpr auto INVALID_TOPOLOGY = static_cast(~0u); bool Exchange(std::size_t id, bool new_value) const noexcept { - const bool is_dirty = flags[id]; - flags[id] = new_value; + const bool is_dirty = (*flags)[id]; + (*flags)[id] = new_value; return is_dirty; } - Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; }; diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp index 164e4ee0ef..f530665794 100644 --- a/src/video_core/shader_cache.cpp +++ b/src/video_core/shader_cache.cpp @@ -8,6 +8,7 @@ #include "common/assert.h" #include "shader_recompiler/frontend/maxwell/control_flow.h" #include "shader_recompiler/object_pool.h" +#include "video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" @@ -33,29 +34,25 @@ void ShaderCache::SyncGuestHost() { RemovePendingShaders(); } -ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_) - : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_}, - rasterizer{rasterizer_} {} 
+ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {} bool ShaderCache::RefreshStages(std::array& unique_hashes) { - auto& dirty{maxwell3d.dirty.flags}; + auto& dirty{maxwell3d->dirty.flags}; if (!dirty[VideoCommon::Dirty::Shaders]) { return last_shaders_valid; } dirty[VideoCommon::Dirty::Shaders] = false; - const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; + const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()}; for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) { - if (!maxwell3d.regs.IsShaderConfigEnabled(index)) { + if (!maxwell3d->regs.IsShaderConfigEnabled(index)) { unique_hashes[index] = 0; continue; } - const auto& shader_config{maxwell3d.regs.shader_config[index]}; + const auto& shader_config{maxwell3d->regs.shader_config[index]}; const auto program{static_cast(index)}; const GPUVAddr shader_addr{base_addr + shader_config.offset}; - const std::optional cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; + const std::optional cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)}; if (!cpu_shader_addr) { LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); last_shaders_valid = false; @@ -64,7 +61,7 @@ bool ShaderCache::RefreshStages(std::array& unique_hashes) { const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)}; if (!shader_info) { const u32 start_address{shader_config.offset}; - GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address}; + GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address}; shader_info = MakeShaderInfo(env, *cpu_shader_addr); } shader_infos[index] = shader_info; @@ -75,10 +72,10 @@ bool ShaderCache::RefreshStages(std::array& unique_hashes) { } const ShaderInfo* ShaderCache::ComputeShader() { - const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; - const auto& qmd{kepler_compute.launch_description}; + const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; + const auto& qmd{kepler_compute->launch_description}; const GPUVAddr shader_addr{program_base + qmd.program_start}; - const std::optional cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; + const std::optional cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)}; if (!cpu_shader_addr) { LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); return nullptr; @@ -86,22 +83,22 @@ const ShaderInfo* ShaderCache::ComputeShader() { if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) { return shader; } - ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; + ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; return MakeShaderInfo(env, *cpu_shader_addr); } void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result, const std::array& unique_hashes) { size_t env_index{}; - const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; + const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()}; for (size_t index = 0; index < NUM_PROGRAMS; ++index) { if (unique_hashes[index] == 0) { continue; } const auto program{static_cast(index)}; auto& env{result.envs[index]}; - const u32 start_address{maxwell3d.regs.shader_config[index].offset}; - env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address}; + const u32 start_address{maxwell3d->regs.shader_config[index].offset}; + env = GraphicsEnvironment{*maxwell3d, *gpu_memory, 
program, base_addr, start_address}; env.SetCachedSize(shader_infos[index]->size_bytes); result.env_ptrs[env_index++] = &env; } diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h index f67cea8c49..a4391202de 100644 --- a/src/video_core/shader_cache.h +++ b/src/video_core/shader_cache.h @@ -12,6 +12,7 @@ #include #include "common/common_types.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/rasterizer_interface.h" #include "video_core/shader_environment.h" @@ -19,6 +20,10 @@ namespace Tegra { class MemoryManager; } +namespace Tegra::Control { +struct ChannelState; +} + namespace VideoCommon { class GenericEnvironment; @@ -28,7 +33,7 @@ struct ShaderInfo { size_t size_bytes{}; }; -class ShaderCache { +class ShaderCache : public VideoCommon::ChannelSetupCaches { static constexpr u64 YUZU_PAGEBITS = 14; static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; @@ -71,9 +76,7 @@ protected: } }; - explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_); + explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_); /// @brief Update the hashes and information of shader stages /// @param unique_hashes Shader hashes to store into when a stage is enabled @@ -88,10 +91,6 @@ protected: void GetGraphicsEnvironments(GraphicsEnvironments& result, const std::array& unique_hashes); - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; - std::array shader_infos{}; bool last_shaders_valid = false; diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h index 1f85ec9da0..620565684b 100644 --- a/src/video_core/texture_cache/image_base.h +++ b/src/video_core/texture_cache/image_base.h @@ -88,6 +88,9 @@ struct ImageBase { u32 scale_rating = 0; u64 scale_tick = 0; bool has_scaled = false; + + size_t channel = 0; + ImageFlagBits flags = ImageFlagBits::CpuModified; GPUVAddr gpu_addr = 0; diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 1dbe01bc0b..2731aead07 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -7,6 +7,7 @@ #include "common/alignment.h" #include "common/settings.h" +#include "video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" #include "video_core/texture_cache/image_view_base.h" @@ -29,12 +30,8 @@ using VideoCore::Surface::SurfaceType; using namespace Common::Literals; template -TextureCache
<P>
::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_) - : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, - kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_} { +TextureCache
<P>
::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_) + : runtime{runtime_}, rasterizer{rasterizer_} { // Configure null sampler TSCEntry sampler_descriptor{}; sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear); @@ -42,6 +39,13 @@ TextureCache
<P>
::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& sampler_descriptor.mipmap_filter.Assign(Tegra::Texture::TextureMipmapFilter::Linear); sampler_descriptor.cubemap_anisotropy.Assign(1); + // Setup channels + current_channel_id = UNSET_CHANNEL; + state = nullptr; + maxwell3d = nullptr; + kepler_compute = nullptr; + gpu_memory = nullptr; + // Make sure the first index is reserved for the null resources // This way the null resource becomes a compile time constant void(slot_images.insert(NullImageParams{})); @@ -93,7 +97,7 @@ void TextureCache
<P>
::RunGarbageCollector() { const auto copies = FullDownloadCopies(image.info); image.DownloadMemory(map, copies); runtime.Finish(); - SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); + SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); } if (True(image.flags & ImageFlagBits::Tracked)) { UntrackImage(image, image_id); @@ -152,22 +156,23 @@ void TextureCache
<P>
::MarkModification(ImageId id) noexcept { template template void TextureCache
<P>
::FillGraphicsImageViews(std::span views) { - FillImageViews(graphics_image_table, graphics_image_view_ids, views); + FillImageViews(state->graphics_image_table, state->graphics_image_view_ids, + views); } template void TextureCache
<P>
::FillComputeImageViews(std::span views) { - FillImageViews(compute_image_table, compute_image_view_ids, views); + FillImageViews(state->compute_image_table, state->compute_image_view_ids, views); } template typename P::Sampler* TextureCache
<P>
::GetGraphicsSampler(u32 index) { - if (index > graphics_sampler_table.Limit()) { + if (index > state->graphics_sampler_table.Limit()) { LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); return &slot_samplers[NULL_SAMPLER_ID]; } - const auto [descriptor, is_new] = graphics_sampler_table.Read(index); - SamplerId& id = graphics_sampler_ids[index]; + const auto [descriptor, is_new] = state->graphics_sampler_table.Read(index); + SamplerId& id = state->graphics_sampler_ids[index]; if (is_new) { id = FindSampler(descriptor); } @@ -176,12 +181,12 @@ typename P::Sampler* TextureCache
<P>
::GetGraphicsSampler(u32 index) { template typename P::Sampler* TextureCache
<P>
::GetComputeSampler(u32 index) { - if (index > compute_sampler_table.Limit()) { + if (index > state->compute_sampler_table.Limit()) { LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); return &slot_samplers[NULL_SAMPLER_ID]; } - const auto [descriptor, is_new] = compute_sampler_table.Read(index); - SamplerId& id = compute_sampler_ids[index]; + const auto [descriptor, is_new] = state->compute_sampler_table.Read(index); + SamplerId& id = state->compute_sampler_ids[index]; if (is_new) { id = FindSampler(descriptor); } @@ -191,34 +196,34 @@ typename P::Sampler* TextureCache
<P>
::GetComputeSampler(u32 index) { template void TextureCache
<P>
::SynchronizeGraphicsDescriptors() { using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex; - const bool linked_tsc = maxwell3d.regs.sampler_index == SamplerIndex::ViaHeaderIndex; - const u32 tic_limit = maxwell3d.regs.tic.limit; - const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d.regs.tsc.limit; - if (graphics_sampler_table.Synchornize(maxwell3d.regs.tsc.Address(), tsc_limit)) { - graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); + const bool linked_tsc = maxwell3d->regs.sampler_index == SamplerIndex::ViaHeaderIndex; + const u32 tic_limit = maxwell3d->regs.tic.limit; + const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tsc.limit; + if (state->graphics_sampler_table.Synchornize(maxwell3d->regs.tsc.Address(), tsc_limit)) { + state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); } - if (graphics_image_table.Synchornize(maxwell3d.regs.tic.Address(), tic_limit)) { - graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); + if (state->graphics_image_table.Synchornize(maxwell3d->regs.tic.Address(), tic_limit)) { + state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); } } template void TextureCache
<P>
::SynchronizeComputeDescriptors() { - const bool linked_tsc = kepler_compute.launch_description.linked_tsc; - const u32 tic_limit = kepler_compute.regs.tic.limit; - const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute.regs.tsc.limit; - const GPUVAddr tsc_gpu_addr = kepler_compute.regs.tsc.Address(); - if (compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { - compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); + const bool linked_tsc = kepler_compute->launch_description.linked_tsc; + const u32 tic_limit = kepler_compute->regs.tic.limit; + const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit; + const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address(); + if (state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { + state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); } - if (compute_image_table.Synchornize(kepler_compute.regs.tic.Address(), tic_limit)) { - compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); + if (state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(), tic_limit)) { + state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); } } template bool TextureCache
<P>
::RescaleRenderTargets(bool is_clear) { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; u32 scale_rating = 0; bool rescaled = false; std::array tmp_color_images{}; @@ -315,7 +320,7 @@ bool TextureCache
<P>
::RescaleRenderTargets(bool is_clear) { template void TextureCache
<P>
::UpdateRenderTargets(bool is_clear) { using namespace VideoCommon::Dirty; - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::RenderTargets]) { for (size_t index = 0; index < NUM_RT; ++index) { ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index]; @@ -342,7 +347,7 @@ void TextureCache
<P>
::UpdateRenderTargets(bool is_clear) { PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id)); for (size_t index = 0; index < NUM_RT; ++index) { - render_targets.draw_buffers[index] = static_cast(maxwell3d.regs.rt_control.Map(index)); + render_targets.draw_buffers[index] = static_cast(maxwell3d->regs.rt_control.Map(index)); } u32 up_scale = 1; u32 down_shift = 0; @@ -351,8 +356,8 @@ void TextureCache
<P>
::UpdateRenderTargets(bool is_clear) { down_shift = Settings::values.resolution_info.down_shift; } render_targets.size = Extent2D{ - (maxwell3d.regs.render_area.width * up_scale) >> down_shift, - (maxwell3d.regs.render_area.height * up_scale) >> down_shift, + (maxwell3d->regs.render_area.width * up_scale) >> down_shift, + (maxwell3d->regs.render_area.height * up_scale) >> down_shift, }; flags[Dirty::DepthBiasGlobal] = true; @@ -458,7 +463,7 @@ void TextureCache
<P>
::DownloadMemory(VAddr cpu_addr, size_t size) { const auto copies = FullDownloadCopies(image.info); image.DownloadMemory(map, copies); runtime.Finish(); - SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); + SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); } } @@ -655,7 +660,7 @@ void TextureCache
<P>
::PopAsyncFlushes() { for (const ImageId image_id : download_ids) { const ImageBase& image = slot_images[image_id]; const auto copies = FullDownloadCopies(image.info); - SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, download_span); + SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span); download_map.offset += image.unswizzled_size_bytes; download_span = download_span.subspan(image.unswizzled_size_bytes); } @@ -714,26 +719,26 @@ void TextureCache
<P>
::UploadImageContents(Image& image, StagingBuffer& staging) const GPUVAddr gpu_addr = image.gpu_addr; if (True(image.flags & ImageFlagBits::AcceleratedUpload)) { - gpu_memory.ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes()); + gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes()); const auto uploads = FullUploadSwizzles(image.info); runtime.AccelerateImageUpload(image, staging, uploads); } else if (True(image.flags & ImageFlagBits::Converted)) { std::vector unswizzled_data(image.unswizzled_size_bytes); - auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, unswizzled_data); + auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data); ConvertImage(unswizzled_data, image.info, mapped_span, copies); image.UploadMemory(staging, copies); } else { - const auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, mapped_span); + const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span); image.UploadMemory(staging, copies); } } template ImageViewId TextureCache
<P>
::FindImageView(const TICEntry& config) { - if (!IsValidEntry(gpu_memory, config)) { + if (!IsValidEntry(*gpu_memory, config)) { return NULL_IMAGE_VIEW_ID; } - const auto [pair, is_new] = image_views.try_emplace(config); + const auto [pair, is_new] = state->image_views.try_emplace(config); ImageViewId& image_view_id = pair->second; if (is_new) { image_view_id = CreateImageView(config); @@ -777,9 +782,9 @@ ImageId TextureCache
<P>
::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a template ImageId TextureCache
<P>
::FindImage(const ImageInfo& info, GPUVAddr gpu_addr, RelaxedOptions options) { - std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr) { - cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); + cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); if (!cpu_addr) { return ImageId{}; } @@ -860,7 +865,7 @@ void TextureCache
<P>
::InvalidateScale(Image& image) { image.scale_tick = frame_tick + 1; } const std::span image_view_ids = image.image_view_ids; - auto& dirty = maxwell3d.dirty.flags; + auto& dirty = maxwell3d->dirty.flags; dirty[Dirty::RenderTargets] = true; dirty[Dirty::ZetaBuffer] = true; for (size_t rt = 0; rt < NUM_RT; ++rt) { @@ -881,11 +886,11 @@ void TextureCache
<P>
::InvalidateScale(Image& image) { image.image_view_ids.clear(); image.image_view_infos.clear(); if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(compute_image_view_ids, CORRUPT_ID); + std::ranges::fill(state->graphics_image_view_ids, CORRUPT_ID); + std::ranges::fill(state->compute_image_view_ids, CORRUPT_ID); } - graphics_image_table.Invalidate(); - compute_image_table.Invalidate(); + state->graphics_image_table.Invalidate(); + state->compute_image_table.Invalidate(); has_deleted_images = true; } @@ -929,10 +934,10 @@ bool TextureCache
<P>
::ScaleDown(Image& image) { template ImageId TextureCache
<P>
::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr, RelaxedOptions options) { - std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr) { const auto size = CalculateGuestSizeInBytes(info); - cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, size); + cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size); if (!cpu_addr) { const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space; virtual_invalid_space += Common::AlignUp(size, 32); @@ -1050,7 +1055,7 @@ ImageId TextureCache
<P>
::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr); Image& new_image = slot_images[new_image_id]; - if (!gpu_memory.IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) { + if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) { new_image.flags |= ImageFlagBits::Sparse; } @@ -1192,7 +1197,7 @@ SamplerId TextureCache
<P>
::FindSampler(const TSCEntry& config) { if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) { return NULL_SAMPLER_ID; } - const auto [pair, is_new] = samplers.try_emplace(config); + const auto [pair, is_new] = state->samplers.try_emplace(config); if (is_new) { pair->second = slot_samplers.insert(runtime, config); } @@ -1201,7 +1206,7 @@ SamplerId TextureCache
<P>
::FindSampler(const TSCEntry& config) { template ImageViewId TextureCache
<P>
::FindColorBuffer(size_t index, bool is_clear) { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (index >= regs.rt_control.count) { return ImageViewId{}; } @@ -1219,7 +1224,7 @@ ImageViewId TextureCache
<P>
::FindColorBuffer(size_t index, bool is_clear) { template ImageViewId TextureCache
<P>
::FindDepthBuffer(bool is_clear) { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (!regs.zeta_enable) { return ImageViewId{}; } @@ -1321,8 +1326,8 @@ void TextureCache
<P>
::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Fu static constexpr bool BOOL_BREAK = std::is_same_v; boost::container::small_vector images; ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) { - const auto it = gpu_page_table.find(page); - if (it == gpu_page_table.end()) { + const auto it = state->gpu_page_table.find(page); + if (it == state->gpu_page_table.end()) { if constexpr (BOOL_BREAK) { return false; } else { @@ -1403,9 +1408,9 @@ template void TextureCache
<P>
::ForEachSparseSegment(ImageBase& image, Func&& func) { using FuncReturn = typename std::invoke_result::type; static constexpr bool RETURNS_BOOL = std::is_same_v; - const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); + const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); for (const auto& [gpu_addr, size] : segments) { - std::optional cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); ASSERT(cpu_addr); if constexpr (RETURNS_BOOL) { if (func(gpu_addr, *cpu_addr, size)) { @@ -1449,7 +1454,7 @@ void TextureCache
<P>
::RegisterImage(ImageId image_id) { image.lru_index = lru_cache.Insert(image_id, frame_tick); ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, - [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); }); + [this, image_id](u64 page) { state->gpu_page_table[page].push_back(image_id); }); if (False(image.flags & ImageFlagBits::Sparse)) { auto map_id = slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id); @@ -1497,8 +1502,9 @@ void TextureCache
<P>
::UnregisterImage(ImageId image_id) { } image_ids.erase(vector_it); }; - ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, - [this, &clear_page_table](u64 page) { clear_page_table(page, gpu_page_table); }); + ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) { + clear_page_table(page, state->gpu_page_table); + }); if (False(image.flags & ImageFlagBits::Sparse)) { const auto map_id = image.map_view_id; ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) { @@ -1631,7 +1637,7 @@ void TextureCache
<P>
::DeleteImage(ImageId image_id, bool immediate_delete) { ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered"); // Mark render targets as dirty - auto& dirty = maxwell3d.dirty.flags; + auto& dirty = maxwell3d->dirty.flags; dirty[Dirty::RenderTargets] = true; dirty[Dirty::ZetaBuffer] = true; for (size_t rt = 0; rt < NUM_RT; ++rt) { @@ -1681,22 +1687,24 @@ void TextureCache
<P>
::DeleteImage(ImageId image_id, bool immediate_delete) { if (alloc_images.empty()) { image_allocs_table.erase(alloc_it); } - if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(compute_image_view_ids, CORRUPT_ID); + for (auto& this_state : channel_storage) { + if constexpr (ENABLE_VALIDATION) { + std::ranges::fill(this_state.graphics_image_view_ids, CORRUPT_ID); + std::ranges::fill(this_state.compute_image_view_ids, CORRUPT_ID); + } + this_state.graphics_image_table.Invalidate(); + this_state.compute_image_table.Invalidate(); } - graphics_image_table.Invalidate(); - compute_image_table.Invalidate(); has_deleted_images = true; } template void TextureCache
<P>
::RemoveImageViewReferences(std::span removed_views) { - auto it = image_views.begin(); - while (it != image_views.end()) { + auto it = state->image_views.begin(); + while (it != state->image_views.end()) { const auto found = std::ranges::find(removed_views, it->second); if (found != removed_views.end()) { - it = image_views.erase(it); + it = state->image_views.erase(it); } else { ++it; } @@ -1943,7 +1951,7 @@ bool TextureCache
<P>
::IsFullClear(ImageViewId id) { const ImageViewBase& image_view = slot_image_views[id]; const ImageBase& image = slot_images[image_view.image_id]; const Extent3D size = image_view.size; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; const auto& scissor = regs.scissor_test[0]; if (image.info.resources.levels > 1 || image.info.resources.layers > 1) { // Images with multiple resources can't be cleared in a single call @@ -1958,4 +1966,61 @@ bool TextureCache
<P>
::IsFullClear(ImageViewId id) { scissor.max_y >= size.height; } +template +TextureCache
<P>
::ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& state) noexcept + : maxwell3d{*state.maxwell_3d}, kepler_compute{*state.kepler_compute}, + gpu_memory{*state.memory_manager}, graphics_image_table{gpu_memory}, + graphics_sampler_table{gpu_memory}, compute_image_table{gpu_memory}, compute_sampler_table{ + gpu_memory} {} + +template +void TextureCache
<P>
::CreateChannel(struct Tegra::Control::ChannelState& channel) { + ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0); + auto new_id = [this, &channel]() { + if (!free_channel_ids.empty()) { + auto id = free_channel_ids.front(); + free_channel_ids.pop_front(); + new (&channel_storage[id]) ChannelInfo(channel); + return id; + } + channel_storage.emplace_back(channel); + return channel_storage.size() - 1; + }(); + channel_map.emplace(channel.bind_id, new_id); + if (current_channel_id != UNSET_CHANNEL) { + state = &channel_storage[current_channel_id]; + } +} + +/// Bind a channel for execution. +template +void TextureCache
<P>
::BindToChannel(s32 id) { + auto it = channel_map.find(id); + ASSERT(it != channel_map.end() && id >= 0); + current_channel_id = it->second; + state = &channel_storage[current_channel_id]; + maxwell3d = &state->maxwell3d; + kepler_compute = &state->kepler_compute; + gpu_memory = &state->gpu_memory; +} + +/// Erase channel's state. +template +void TextureCache
<P>
::EraseChannel(s32 id) { + const auto it = channel_map.find(id); + ASSERT(it != channel_map.end() && id >= 0); + const auto this_id = it->second; + free_channel_ids.push_back(this_id); + channel_map.erase(it); + if (this_id == current_channel_id) { + current_channel_id = UNSET_CHANNEL; + state = nullptr; + maxwell3d = nullptr; + kepler_compute = nullptr; + gpu_memory = nullptr; + } else if (current_channel_id != UNSET_CHANNEL) { + state = &channel_storage[current_channel_id]; + } +} + } // namespace VideoCommon diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h index 7e6c6cef20..69efcb7182 100644 --- a/src/video_core/texture_cache/texture_cache_base.h +++ b/src/video_core/texture_cache/texture_cache_base.h @@ -3,6 +3,8 @@ #pragma once +#include +#include #include #include #include @@ -26,6 +28,10 @@ #include "video_core/texture_cache/types.h" #include "video_core/textures/texture.h" +namespace Tegra::Control { +struct ChannelState; +} + namespace VideoCommon { using Tegra::Texture::SwizzleSource; @@ -58,6 +64,8 @@ class TextureCache { /// True when the API can provide info about the memory of the device. static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO; + static constexpr size_t UNSET_CHANNEL{std::numeric_limits::max()}; + static constexpr s64 TARGET_THRESHOLD = 4_GiB; static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB; static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB; @@ -85,8 +93,7 @@ class TextureCache { }; public: - explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&, Tegra::Engines::Maxwell3D&, - Tegra::Engines::KeplerCompute&, Tegra::MemoryManager&); + explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&); /// Notify the cache that a new frame has been queued void TickFrame(); @@ -171,6 +178,15 @@ public: [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept; + /// Create channel state. + void CreateChannel(struct Tegra::Control::ChannelState& channel); + + /// Bind a channel for execution. + void BindToChannel(s32 id); + + /// Erase channel's state. 
+ void EraseChannel(s32 id);
+
 std::mutex mutex;

 private:
@@ -338,31 +354,52 @@ private:
 u64 GetScaledImageSizeBytes(ImageBase& image);

 Runtime& runtime;
+
+ struct ChannelInfo {
+ ChannelInfo() = delete;
+ ChannelInfo(struct Tegra::Control::ChannelState& state) noexcept;
+ ChannelInfo(const ChannelInfo& state) = delete;
+ ChannelInfo& operator=(const ChannelInfo&) = delete;
+ ChannelInfo(ChannelInfo&& other) noexcept = default;
+ ChannelInfo& operator=(ChannelInfo&& other) noexcept = default;
+
+ Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::Engines::KeplerCompute& kepler_compute;
+ Tegra::MemoryManager& gpu_memory;
+
+ DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
+ DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
+ std::vector<SamplerId> graphics_sampler_ids;
+ std::vector<ImageViewId> graphics_image_view_ids;
+
+ DescriptorTable<TICEntry> compute_image_table{gpu_memory};
+ DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
+ std::vector<SamplerId> compute_sampler_ids;
+ std::vector<ImageViewId> compute_image_view_ids;
+
+ std::unordered_map<TICEntry, ImageViewId> image_views;
+ std::unordered_map<TSCEntry, SamplerId> samplers;
+
+ std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table;
+ };
+
+ std::deque<ChannelInfo> channel_storage;
+ std::deque<size_t> free_channel_ids;
+ std::unordered_map<s32, size_t> channel_map;
+
+ ChannelInfo* state;
+ size_t current_channel_id{UNSET_CHANNEL};
 VideoCore::RasterizerInterface& rasterizer;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
-
- DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
- DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
- std::vector<SamplerId> graphics_sampler_ids;
- std::vector<ImageViewId> graphics_image_view_ids;
-
- DescriptorTable<TICEntry> compute_image_table{gpu_memory};
- DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
- std::vector<SamplerId> compute_sampler_ids;
- std::vector<ImageViewId> compute_image_view_ids;
+ Tegra::Engines::Maxwell3D* maxwell3d;
+ Tegra::Engines::KeplerCompute* kepler_compute;
+ Tegra::MemoryManager* gpu_memory;

 RenderTargets render_targets;

- std::unordered_map<TICEntry, ImageViewId> image_views;
- std::unordered_map<TSCEntry, SamplerId> samplers;
 std::unordered_map<RenderTargets, FramebufferId> framebuffers;

 std::unordered_map<u64, std::vector<ImageMapId>, IdentityHash<u64>> page_table;
- std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table;
 std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> sparse_page_table;
-
 std::unordered_map<ImageId, boost::container::small_vector<ImageViewId, 16>> sparse_views;

 VAddr virtual_invalid_space{};

From 2c62563ab58a70236a39571149f8370f3fdfb2a3 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow 
Date: Sun, 7 Nov 2021 14:17:32 +0100
Subject: [PATCH 13/68] NVHOST_CTRL: Implement missing method and fix some issues.
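
Two notes on the change below. The batch unregister ioctl (0x21,
IocCtrlEventUnregisterBatch) and the reworked SignalNvEvent both walk a
64-bit event mask one set bit at a time with std::countr_zero instead of
scanning a [min, max) slot range, so sparse masks skip unused slots
entirely. A minimal, self-contained sketch of that pattern (C++20 <bit>;
ForEachSetBit is a hypothetical helper name, the patch inlines the
equivalent loop directly):

    #include <bit>      // std::countr_zero (C++20)
    #include <cstdint>
    #include <cstdio>

    // Visit every set bit of a 64-bit mask, lowest index first.
    template <typename Func>
    void ForEachSetBit(std::uint64_t mask, Func&& func) {
        while (mask != 0) {
            const std::uint64_t bit = std::countr_zero(mask); // lowest set bit
            mask &= ~(std::uint64_t{1} << bit);               // clear it
            func(static_cast<std::uint32_t>(bit));            // visit that slot
        }
    }

    int main() {
        // Prints event slots 0, 5 and 63, in that order.
        ForEachSetBit((1ULL << 0) | (1ULL << 5) | (1ULL << 63),
                      [](std::uint32_t slot) { std::printf("event slot %u\n", slot); });
    }

The -fwrapv flag added for x86_64 builds presumably exists because the
syncpoint expiry checks (such as the new RegisterSyncptInterrupt early-out)
compare wrapping 32-bit counters via a signed difference, which overflows;
signed overflow is undefined behaviour in standard C++, and -fwrapv makes
it well-defined two's-complement wrapping.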
--- src/CMakeLists.txt | 1 + .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 30 ++++++++++++++++--- .../hle/service/nvdrv/devices/nvhost_ctrl.h | 5 ++-- src/video_core/gpu.cpp | 5 ++++ 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 54de1dc94f..3575a3cb3e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -121,6 +121,7 @@ else() if (ARCHITECTURE_x86_64) add_compile_options("-mcx16") + add_compile_options("-fwrapv") endif() if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index abde2a6d35..0e22d92730 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -52,6 +52,8 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& return IocCtrlEventRegister(input, output); case 0x20: return IocCtrlEventUnregister(input, output); + case 0x21: + return IocCtrlEventUnregisterBatch(input, output); } break; default: @@ -249,6 +251,25 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector& input, return FreeEvent(event_id); } +NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector& input, + std::vector& output) { + IocCtrlEventUnregisterBatchParams params{}; + std::memcpy(¶ms, input.data(), sizeof(params)); + u64 event_mask = params.user_events; + LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask); + + auto lock = NvEventsLock(); + while (event_mask != 0) { + const u64 event_id = std::countr_zero(event_mask); + event_mask &= ~(1ULL << event_id); + const auto result = FreeEvent(static_cast(event_id)); + if (result != NvResult::Success) { + return result; + } + } + return NvResult::Success; +} + NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector& input, std::vector& output) { IocCtrlEventClearParams params{}; std::memcpy(¶ms, input.data(), sizeof(params)); @@ -362,10 +383,11 @@ u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) { } void nvhost_ctrl::SignalNvEvent(u32 syncpoint_id, u32 value) { - const u32 max = MaxNvEvents - std::countl_zero(events_mask); - const u32 min = std::countr_zero(events_mask); - for (u32 i = min; i < max; i++) { - auto& event = events[i]; + u64 signal_mask = events_mask; + while (signal_mask != 0) { + const u64 event_id = std::countr_zero(signal_mask); + signal_mask &= ~(1ULL << event_id); + auto& event = events[event_id]; if (event.assigned_syncpt != syncpoint_id || event.assigned_value != value) { continue; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index f2fc5d0472..9bd10257bb 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -177,16 +177,17 @@ private: static_assert(sizeof(IocCtrlEventUnregisterParams) == 4, "IocCtrlEventUnregisterParams is incorrect size"); - struct IocCtrlEventKill { + struct IocCtrlEventUnregisterBatchParams { u64_le user_events{}; }; - static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size"); + static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8, "IocCtrlEventKill is incorrect size"); NvResult NvOsGetConfigU32(const std::vector& input, std::vector& output); NvResult IocCtrlEventWait(const std::vector& input, std::vector& output, bool is_allocation); NvResult IocCtrlEventRegister(const std::vector& input, std::vector& output); NvResult 
IocCtrlEventUnregister(const std::vector& input, std::vector& output); + NvResult IocCtrlEventUnregisterBatch(const std::vector& input, std::vector& output); NvResult IocCtrlClearEventWait(const std::vector& input, std::vector& output); NvResult FreeEvent(u32 slot); diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 80a1c69e00..8c0ff00941 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -249,6 +249,11 @@ struct GPU::Impl { void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) { std::scoped_lock lock{sync_mutex}; + u32 current_value = syncpoints.at(syncpoint_id).load(); + if ((static_cast(current_value) - static_cast(value)) >= 0) { + TriggerCpuInterrupt(syncpoint_id, value); + return; + } auto& interrupt = syncpt_interrupts.at(syncpoint_id); bool contains = std::any_of(interrupt.begin(), interrupt.end(), [value](u32 in_value) { return in_value == value; }); From d7990c159e956e5431c501fa94405dd04496197c Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 7 Nov 2021 17:15:28 +0100 Subject: [PATCH 14/68] OpenGl: Implement Channels. --- .../renderer_opengl/gl_rasterizer.cpp | 174 +++++++++++------- .../renderer_opengl/gl_rasterizer.h | 13 +- .../renderer_opengl/gl_state_tracker.cpp | 17 +- .../renderer_opengl/gl_state_tracker.h | 82 +++++---- .../renderer_opengl/renderer_opengl.cpp | 2 +- .../renderer_vulkan/renderer_vulkan.cpp | 2 +- .../renderer_vulkan/vk_rasterizer.cpp | 2 +- .../renderer_vulkan/vk_state_tracker.cpp | 8 +- .../renderer_vulkan/vk_state_tracker.h | 4 +- 9 files changed, 186 insertions(+), 118 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index e8d61bd417..8cfa0013fb 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -16,7 +16,7 @@ #include "common/microprofile.h" #include "common/scope_exit.h" #include "common/settings.h" - +#include "video_core/control/channel_state.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/memory_manager.h" @@ -56,9 +56,8 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra Core::Memory::Memory& cpu_memory_, const Device& device_, ScreenInfo& screen_info_, ProgramManager& program_manager_, StateTracker& state_tracker_) - : RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()), - kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), - screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_), + : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_), + program_manager(program_manager_), state_tracker(state_tracker_), texture_cache_runtime(device, program_manager, state_tracker), texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device), buffer_cache(*this, cpu_memory_, buffer_cache_runtime), @@ -70,7 +69,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra RasterizerOpenGL::~RasterizerOpenGL() = default; void RasterizerOpenGL::SyncVertexFormats() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::VertexFormats]) { return; } @@ -88,7 +87,7 @@ void RasterizerOpenGL::SyncVertexFormats() { } flags[Dirty::VertexFormat0 + index] = false; - const auto attrib = maxwell3d.regs.vertex_attrib_format[index]; + const auto attrib = 
maxwell3d->regs.vertex_attrib_format[index]; const auto gl_index = static_cast(index); // Disable constant attributes. @@ -112,13 +111,13 @@ void RasterizerOpenGL::SyncVertexFormats() { } void RasterizerOpenGL::SyncVertexInstances() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::VertexInstances]) { return; } flags[Dirty::VertexInstances] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) { if (!flags[Dirty::VertexInstance0 + index]) { continue; @@ -139,11 +138,11 @@ void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_load void RasterizerOpenGL::Clear() { MICROPROFILE_SCOPE(OpenGL_Clears); - if (!maxwell3d.ShouldExecute()) { + if (!maxwell3d->ShouldExecute()) { return; } - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; bool use_color{}; bool use_depth{}; bool use_stencil{}; @@ -221,17 +220,17 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { SyncState(); - const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology); + const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology); BeginTransformFeedback(pipeline, primitive_mode); - const GLuint base_instance = static_cast(maxwell3d.regs.vb_base_instance); + const GLuint base_instance = static_cast(maxwell3d->regs.vb_base_instance); const GLsizei num_instances = - static_cast(is_instanced ? maxwell3d.mme_draw.instance_count : 1); + static_cast(is_instanced ? maxwell3d->mme_draw.instance_count : 1); if (is_indexed) { - const GLint base_vertex = static_cast(maxwell3d.regs.vb_element_base); - const GLsizei num_vertices = static_cast(maxwell3d.regs.index_array.count); + const GLint base_vertex = static_cast(maxwell3d->regs.vb_element_base); + const GLsizei num_vertices = static_cast(maxwell3d->regs.index_array.count); const GLvoid* const offset = buffer_cache_runtime.IndexOffset(); - const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format); + const GLenum format = MaxwellToGL::IndexFormat(maxwell3d->regs.index_array.format); if (num_instances == 1 && base_instance == 0 && base_vertex == 0) { glDrawElements(primitive_mode, num_vertices, format, offset); } else if (num_instances == 1 && base_instance == 0) { @@ -250,8 +249,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { base_instance); } } else { - const GLint base_vertex = static_cast(maxwell3d.regs.vertex_buffer.first); - const GLsizei num_vertices = static_cast(maxwell3d.regs.vertex_buffer.count); + const GLint base_vertex = static_cast(maxwell3d->regs.vertex_buffer.first); + const GLsizei num_vertices = static_cast(maxwell3d->regs.vertex_buffer.count); if (num_instances == 1 && base_instance == 0) { glDrawArrays(primitive_mode, base_vertex, num_vertices); } else if (base_instance == 0) { @@ -273,7 +272,7 @@ void RasterizerOpenGL::DispatchCompute() { return; } pipeline->Configure(); - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z); ++num_queued_commands; has_written_global_memory |= pipeline->WritesGlobalMemory(); @@ -388,10 +387,10 @@ void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) { void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { if (!gpu.IsAsync()) { - gpu_memory.Write(addr, value); + 
gpu_memory->Write(addr, value); return; } - auto paddr = gpu_memory.GetPointer(addr); + auto paddr = gpu_memory->GetPointer(addr); fence_manager.SignalSemaphore(paddr, value); } @@ -483,12 +482,12 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA() void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, std::span memory) { - auto cpu_addr = gpu_memory.GpuToCpuAddress(address); + auto cpu_addr = gpu_memory->GpuToCpuAddress(address); if (!cpu_addr) [[unlikely]] { - gpu_memory.WriteBlock(address, memory.data(), copy_size); + gpu_memory->WriteBlock(address, memory.data(), copy_size); return; } - gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); + gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size); { std::unique_lock lock{buffer_cache.mutex}; if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { @@ -551,8 +550,8 @@ void RasterizerOpenGL::SyncState() { } void RasterizerOpenGL::SyncViewport() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports]; const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports; @@ -657,23 +656,23 @@ void RasterizerOpenGL::SyncViewport() { } void RasterizerOpenGL::SyncDepthClamp() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::DepthClampEnabled]) { return; } flags[Dirty::DepthClampEnabled] = false; - oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0); + oglEnable(GL_DEPTH_CLAMP, maxwell3d->regs.view_volume_clip_control.depth_clamp_disabled == 0); } void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) { return; } flags[Dirty::ClipDistances] = false; - clip_mask &= maxwell3d.regs.clip_distance_enabled; + clip_mask &= maxwell3d->regs.clip_distance_enabled; if (clip_mask == last_clip_distance_mask) { return; } @@ -689,8 +688,8 @@ void RasterizerOpenGL::SyncClipCoef() { } void RasterizerOpenGL::SyncCullMode() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; if (flags[Dirty::CullTest]) { flags[Dirty::CullTest] = false; @@ -705,23 +704,23 @@ void RasterizerOpenGL::SyncCullMode() { } void RasterizerOpenGL::SyncPrimitiveRestart() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PrimitiveRestart]) { return; } flags[Dirty::PrimitiveRestart] = false; - if (maxwell3d.regs.primitive_restart.enabled) { + if (maxwell3d->regs.primitive_restart.enabled) { glEnable(GL_PRIMITIVE_RESTART); - glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index); + glPrimitiveRestartIndex(maxwell3d->regs.primitive_restart.index); } else { glDisable(GL_PRIMITIVE_RESTART); } } void RasterizerOpenGL::SyncDepthTestState() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; if (flags[Dirty::DepthMask]) { flags[Dirty::DepthMask] = false; @@ -740,13 +739,13 @@ void RasterizerOpenGL::SyncDepthTestState() { } void RasterizerOpenGL::SyncStencilTestState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = 
maxwell3d->dirty.flags; if (!flags[Dirty::StencilTest]) { return; } flags[Dirty::StencilTest] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; oglEnable(GL_STENCIL_TEST, regs.stencil_enable); glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func), @@ -771,23 +770,23 @@ void RasterizerOpenGL::SyncStencilTestState() { } void RasterizerOpenGL::SyncRasterizeEnable() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::RasterizeEnable]) { return; } flags[Dirty::RasterizeEnable] = false; - oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0); + oglEnable(GL_RASTERIZER_DISCARD, maxwell3d->regs.rasterize_enable == 0); } void RasterizerOpenGL::SyncPolygonModes() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PolygonModes]) { return; } flags[Dirty::PolygonModes] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.fill_rectangle) { if (!GLAD_GL_NV_fill_rectangle) { LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported"); @@ -820,7 +819,7 @@ void RasterizerOpenGL::SyncPolygonModes() { } void RasterizerOpenGL::SyncColorMask() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::ColorMasks]) { return; } @@ -829,7 +828,7 @@ void RasterizerOpenGL::SyncColorMask() { const bool force = flags[Dirty::ColorMaskCommon]; flags[Dirty::ColorMaskCommon] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.color_mask_common) { if (!force && !flags[Dirty::ColorMask0]) { return; @@ -854,30 +853,30 @@ void RasterizerOpenGL::SyncColorMask() { } void RasterizerOpenGL::SyncMultiSampleState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::MultisampleControl]) { return; } flags[Dirty::MultisampleControl] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage); oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one); } void RasterizerOpenGL::SyncFragmentColorClampState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::FragmentClampColor]) { return; } flags[Dirty::FragmentClampColor] = false; - glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE); + glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d->regs.frag_color_clamp ? 
GL_TRUE : GL_FALSE); } void RasterizerOpenGL::SyncBlendState() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; if (flags[Dirty::BlendColor]) { flags[Dirty::BlendColor] = false; @@ -934,13 +933,13 @@ void RasterizerOpenGL::SyncBlendState() { } void RasterizerOpenGL::SyncLogicOpState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::LogicOp]) { return; } flags[Dirty::LogicOp] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.logic_op.enable) { glEnable(GL_COLOR_LOGIC_OP); glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation)); @@ -950,7 +949,7 @@ void RasterizerOpenGL::SyncLogicOpState() { } void RasterizerOpenGL::SyncScissorTest() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) { return; } @@ -959,7 +958,7 @@ void RasterizerOpenGL::SyncScissorTest() { const bool force = flags[VideoCommon::Dirty::RescaleScissors]; flags[VideoCommon::Dirty::RescaleScissors] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; const auto& resolution = Settings::values.resolution_info; const bool is_rescaling{texture_cache.IsRescaling()}; @@ -995,39 +994,39 @@ void RasterizerOpenGL::SyncScissorTest() { } void RasterizerOpenGL::SyncPointState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PointSize]) { return; } flags[Dirty::PointSize] = false; - oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable); - oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d.regs.vp_point_size.enable); + oglEnable(GL_POINT_SPRITE, maxwell3d->regs.point_sprite_enable); + oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.vp_point_size.enable); const bool is_rescaling{texture_cache.IsRescaling()}; const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f; - glPointSize(std::max(1.0f, maxwell3d.regs.point_size * scale)); + glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale)); } void RasterizerOpenGL::SyncLineState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::LineWidth]) { return; } flags[Dirty::LineWidth] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable); glLineWidth(regs.line_smooth_enable ? 
regs.line_width_smooth : regs.line_width_aliased); } void RasterizerOpenGL::SyncPolygonOffset() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PolygonOffset]) { return; } flags[Dirty::PolygonOffset] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable); oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable); oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable); @@ -1041,13 +1040,13 @@ void RasterizerOpenGL::SyncPolygonOffset() { } void RasterizerOpenGL::SyncAlphaTest() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::AlphaTest]) { return; } flags[Dirty::AlphaTest] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.alpha_test_enabled) { glEnable(GL_ALPHA_TEST); glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref); @@ -1057,17 +1056,17 @@ void RasterizerOpenGL::SyncAlphaTest() { } void RasterizerOpenGL::SyncFramebufferSRGB() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::FramebufferSRGB]) { return; } flags[Dirty::FramebufferSRGB] = false; - oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb); + oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d->regs.framebuffer_srgb); } void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.tfb_enabled == 0) { return; } @@ -1086,11 +1085,48 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum } void RasterizerOpenGL::EndTransformFeedback() { - if (maxwell3d.regs.tfb_enabled != 0) { + if (maxwell3d->regs.tfb_enabled != 0) { glEndTransformFeedback(); } } +void RasterizerOpenGL::InitializeChannel(Tegra::Control::ChannelState& channel) { + CreateChannel(channel); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.CreateChannel(channel); + buffer_cache.CreateChannel(channel); + } + shader_cache.CreateChannel(channel); + query_cache.CreateChannel(channel); + state_tracker.SetupTables(channel); +} + +void RasterizerOpenGL::BindChannel(Tegra::Control::ChannelState& channel) { + const s32 channel_id = channel.bind_id; + BindToChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.BindToChannel(channel_id); + buffer_cache.BindToChannel(channel_id); + } + shader_cache.BindToChannel(channel_id); + query_cache.BindToChannel(channel_id); + state_tracker.ChangeChannel(channel); + state_tracker.InvalidateState(); +} + +void RasterizerOpenGL::ReleaseChannel(s32 channel_id) { + EraseChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.EraseChannel(channel_id); + buffer_cache.EraseChannel(channel_id); + } + shader_cache.EraseChannel(channel_id); + query_cache.EraseChannel(channel_id); +} + AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 31a16fcbaf..39b20cfb20 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -12,6 +12,7 @@ #include 
#include "common/common_types.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/rasterizer_accelerated.h" #include "video_core/rasterizer_interface.h" @@ -58,7 +59,8 @@ private: BufferCache& buffer_cache; }; -class RasterizerOpenGL : public VideoCore::RasterizerAccelerated { +class RasterizerOpenGL : public VideoCore::RasterizerAccelerated, + protected VideoCommon::ChannelSetupCaches { public: explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, Core::Memory::Memory& cpu_memory_, const Device& device_, @@ -107,6 +109,12 @@ public: return num_queued_commands > 0; } + void InitializeChannel(Tegra::Control::ChannelState& channel) override; + + void BindChannel(Tegra::Control::ChannelState& channel) override; + + void ReleaseChannel(s32 channel_id) override; + private: static constexpr size_t MAX_TEXTURES = 192; static constexpr size_t MAX_IMAGES = 48; @@ -191,9 +199,6 @@ private: void EndTransformFeedback(); Tegra::GPU& gpu; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; - Tegra::MemoryManager& gpu_memory; const Device& device; ScreenInfo& screen_info; diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp index 912725ef72..3657f867d3 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.cpp +++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp @@ -7,8 +7,8 @@ #include "common/common_types.h" #include "core/core.h" +#include "video_core/control/channel_state.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/gpu.h" #include "video_core/renderer_opengl/gl_state_tracker.h" #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) @@ -202,9 +202,8 @@ void SetupDirtyMisc(Tables& tables) { } // Anonymous namespace -StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} { - auto& dirty = gpu.Maxwell3D().dirty; - auto& tables = dirty.tables; +void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) { + auto& tables{channel_state.maxwell_3d->dirty.tables}; SetupDirtyFlags(tables); SetupDirtyColorMasks(tables); SetupDirtyViewports(tables); @@ -230,4 +229,14 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} SetupDirtyMisc(tables); } +void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { + flags = &channel_state.maxwell_3d->dirty.flags; +} + +void StateTracker::InvalidateState() { + flags->set(); +} + +StateTracker::StateTracker() : flags{} {} + } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h index 04e024f085..97d32768b5 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.h +++ b/src/video_core/renderer_opengl/gl_state_tracker.h @@ -12,8 +12,10 @@ #include "video_core/engines/maxwell_3d.h" namespace Tegra { -class GPU; +namespace Control { +struct ChannelState; } +} // namespace Tegra namespace OpenGL { @@ -83,7 +85,7 @@ static_assert(Last <= std::numeric_limits::max()); class StateTracker { public: - explicit StateTracker(Tegra::GPU& gpu); + explicit StateTracker(); void BindIndexBuffer(GLuint new_index_buffer) { if (index_buffer == new_index_buffer) { @@ -121,94 +123,106 @@ public: } void NotifyScreenDrawVertexArray() { - flags[OpenGL::Dirty::VertexFormats] = true; - flags[OpenGL::Dirty::VertexFormat0 + 0] = true; - flags[OpenGL::Dirty::VertexFormat0 + 1] = true; + 
(*flags)[OpenGL::Dirty::VertexFormats] = true; + (*flags)[OpenGL::Dirty::VertexFormat0 + 0] = true; + (*flags)[OpenGL::Dirty::VertexFormat0 + 1] = true; - flags[VideoCommon::Dirty::VertexBuffers] = true; - flags[VideoCommon::Dirty::VertexBuffer0] = true; + (*flags)[VideoCommon::Dirty::VertexBuffers] = true; + (*flags)[VideoCommon::Dirty::VertexBuffer0] = true; - flags[OpenGL::Dirty::VertexInstances] = true; - flags[OpenGL::Dirty::VertexInstance0 + 0] = true; - flags[OpenGL::Dirty::VertexInstance0 + 1] = true; + (*flags)[OpenGL::Dirty::VertexInstances] = true; + (*flags)[OpenGL::Dirty::VertexInstance0 + 0] = true; + (*flags)[OpenGL::Dirty::VertexInstance0 + 1] = true; } void NotifyPolygonModes() { - flags[OpenGL::Dirty::PolygonModes] = true; - flags[OpenGL::Dirty::PolygonModeFront] = true; - flags[OpenGL::Dirty::PolygonModeBack] = true; + (*flags)[OpenGL::Dirty::PolygonModes] = true; + (*flags)[OpenGL::Dirty::PolygonModeFront] = true; + (*flags)[OpenGL::Dirty::PolygonModeBack] = true; } void NotifyViewport0() { - flags[OpenGL::Dirty::Viewports] = true; - flags[OpenGL::Dirty::Viewport0] = true; + (*flags)[OpenGL::Dirty::Viewports] = true; + (*flags)[OpenGL::Dirty::Viewport0] = true; } void NotifyScissor0() { - flags[OpenGL::Dirty::Scissors] = true; - flags[OpenGL::Dirty::Scissor0] = true; + (*flags)[OpenGL::Dirty::Scissors] = true; + (*flags)[OpenGL::Dirty::Scissor0] = true; } void NotifyColorMask(size_t index) { - flags[OpenGL::Dirty::ColorMasks] = true; - flags[OpenGL::Dirty::ColorMask0 + index] = true; + (*flags)[OpenGL::Dirty::ColorMasks] = true; + (*flags)[OpenGL::Dirty::ColorMask0 + index] = true; } void NotifyBlend0() { - flags[OpenGL::Dirty::BlendStates] = true; - flags[OpenGL::Dirty::BlendState0] = true; + (*flags)[OpenGL::Dirty::BlendStates] = true; + (*flags)[OpenGL::Dirty::BlendState0] = true; } void NotifyFramebuffer() { - flags[VideoCommon::Dirty::RenderTargets] = true; + (*flags)[VideoCommon::Dirty::RenderTargets] = true; } void NotifyFrontFace() { - flags[OpenGL::Dirty::FrontFace] = true; + (*flags)[OpenGL::Dirty::FrontFace] = true; } void NotifyCullTest() { - flags[OpenGL::Dirty::CullTest] = true; + (*flags)[OpenGL::Dirty::CullTest] = true; } void NotifyDepthMask() { - flags[OpenGL::Dirty::DepthMask] = true; + (*flags)[OpenGL::Dirty::DepthMask] = true; } void NotifyDepthTest() { - flags[OpenGL::Dirty::DepthTest] = true; + (*flags)[OpenGL::Dirty::DepthTest] = true; } void NotifyStencilTest() { - flags[OpenGL::Dirty::StencilTest] = true; + (*flags)[OpenGL::Dirty::StencilTest] = true; } void NotifyPolygonOffset() { - flags[OpenGL::Dirty::PolygonOffset] = true; + (*flags)[OpenGL::Dirty::PolygonOffset] = true; } void NotifyRasterizeEnable() { - flags[OpenGL::Dirty::RasterizeEnable] = true; + (*flags)[OpenGL::Dirty::RasterizeEnable] = true; } void NotifyFramebufferSRGB() { - flags[OpenGL::Dirty::FramebufferSRGB] = true; + (*flags)[OpenGL::Dirty::FramebufferSRGB] = true; } void NotifyLogicOp() { - flags[OpenGL::Dirty::LogicOp] = true; + (*flags)[OpenGL::Dirty::LogicOp] = true; } void NotifyClipControl() { - flags[OpenGL::Dirty::ClipControl] = true; + (*flags)[OpenGL::Dirty::ClipControl] = true; } void NotifyAlphaTest() { - flags[OpenGL::Dirty::AlphaTest] = true; + (*flags)[OpenGL::Dirty::AlphaTest] = true; } + void NotifyRange(u8 start, u8 end) { + for (auto flag = start; flag <= end; flag++) { + (*flags)[flag] = true; + } + } + + void SetupTables(Tegra::Control::ChannelState& channel_state); + + void ChangeChannel(Tegra::Control::ChannelState& channel_state); + + void 
InvalidateState(); + private: - Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; GLuint framebuffer = 0; GLuint index_buffer = 0; diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 34f3f7a679..8bd5eba7e2 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -131,7 +131,7 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_, Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, std::unique_ptr context_) : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_}, - emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu}, + emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{}, program_manager{device}, rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) { if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) { diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 68c2bc34c4..d12669c9d4 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -106,7 +106,7 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, surface(CreateSurface(instance, render_window)), device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), - state_tracker(gpu), + state_tracker(), scheduler(device, state_tracker), swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, render_window.GetFramebufferLayout().height, false), diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 5d9ff0589d..bf750452f4 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -995,7 +995,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) { pipeline_cache.BindToChannel(channel_id); query_cache.BindToChannel(channel_id); state_tracker.ChangeChannel(channel); - scheduler.InvalidateState(); + state_tracker.InvalidateState(); } void RasterizerVulkan::ReleaseChannel(s32 channel_id) { diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp index a87bf8dd33..5a11d3267b 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp +++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp @@ -10,7 +10,6 @@ #include "video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/gpu.h" #include "video_core/renderer_vulkan/vk_state_tracker.h" #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) @@ -203,7 +202,10 @@ void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { flags = &channel_state.maxwell_3d->dirty.flags; } -StateTracker::StateTracker(Tegra::GPU& gpu) - : flags{}, invalidation_flags{MakeInvalidationFlags()} {} +void StateTracker::InvalidateState() { + flags->set(); +} + +StateTracker::StateTracker() : flags{}, invalidation_flags{MakeInvalidationFlags()} {} } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h index 9f8a887f9c..c107d9c246 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.h 
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h @@ -59,7 +59,7 @@ class StateTracker { using Maxwell = Tegra::Engines::Maxwell3D::Regs; public: - explicit StateTracker(Tegra::GPU& gpu); + explicit StateTracker(); void InvalidateCommandBufferState() { (*flags) |= invalidation_flags; @@ -149,6 +149,8 @@ public: void ChangeChannel(Tegra::Control::ChannelState& channel_state); + void InvalidateState(); + private: static constexpr auto INVALID_TOPOLOGY = static_cast(~0u); From 3f8e7a55851a613becf715cbf3016a8e9f63d65f Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 7 Nov 2021 17:52:45 +0100 Subject: [PATCH 15/68] VideoCore: Fix channels with disk pipeline/shader cache. --- .../renderer_opengl/gl_compute_pipeline.cpp | 19 ++++++------- .../renderer_opengl/gl_compute_pipeline.h | 16 +++++++---- .../renderer_opengl/gl_graphics_pipeline.cpp | 28 +++++++++---------- .../renderer_opengl/gl_graphics_pipeline.h | 16 +++++++---- .../renderer_opengl/gl_rasterizer.cpp | 1 + .../renderer_opengl/gl_shader_cache.cpp | 11 ++++---- .../renderer_vulkan/vk_graphics_pipeline.cpp | 17 ++++++----- .../renderer_vulkan/vk_graphics_pipeline.h | 28 +++++++++++-------- .../renderer_vulkan/vk_pipeline_cache.cpp | 8 +++--- .../renderer_vulkan/vk_rasterizer.cpp | 2 ++ src/video_core/texture_cache/texture_cache.h | 12 ++++---- 11 files changed, 87 insertions(+), 71 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp index 1f0f156ede..26b51f442b 100644 --- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp @@ -28,12 +28,11 @@ bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcep } ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_, - BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, - Tegra::Engines::KeplerCompute& kepler_compute_, - ProgramManager& program_manager_, const Shader::Info& info_, - std::string code, std::vector code_v) - : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_}, - kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} { + BufferCache& buffer_cache_, ProgramManager& program_manager_, + const Shader::Info& info_, std::string code, + std::vector code_v) + : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, + program_manager{program_manager_}, info{info_} { switch (device.GetShaderBackend()) { case Settings::ShaderBackend::GLSL: source_program = CreateProgram(code, GL_COMPUTE_SHADER); @@ -86,7 +85,7 @@ void ComputePipeline::Configure() { GLsizei texture_binding{}; GLsizei image_binding{}; - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const auto& cbufs{qmd.const_buffer_config}; const bool via_header_index{qmd.linked_tsc != 0}; const auto read_handle{[&](const auto& desc, u32 index) { @@ -101,12 +100,12 @@ void ComputePipeline::Configure() { const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + secondary_offset}; - const u32 lhs_raw{gpu_memory.Read(addr)}; - const u32 rhs_raw{gpu_memory.Read(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read(addr)}; + const u32 rhs_raw{gpu_memory->Read(separate_addr)}; return TexturePair(lhs_raw | rhs_raw, via_header_index); } } - return TexturePair(gpu_memory.Read(addr), via_header_index); + return 
TexturePair(gpu_memory->Read(addr), via_header_index); }}; const auto add_image{[&](const auto& desc, bool blacklist) { for (u32 index = 0; index < desc.count; ++index) { diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.h b/src/video_core/renderer_opengl/gl_compute_pipeline.h index 723f27f114..6534dec325 100644 --- a/src/video_core/renderer_opengl/gl_compute_pipeline.h +++ b/src/video_core/renderer_opengl/gl_compute_pipeline.h @@ -49,10 +49,8 @@ static_assert(std::is_trivially_constructible_v); class ComputePipeline { public: explicit ComputePipeline(const Device& device, TextureCache& texture_cache_, - BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, - Tegra::Engines::KeplerCompute& kepler_compute_, - ProgramManager& program_manager_, const Shader::Info& info_, - std::string code, std::vector code_v); + BufferCache& buffer_cache_, ProgramManager& program_manager_, + const Shader::Info& info_, std::string code, std::vector code_v); void Configure(); @@ -60,11 +58,17 @@ public: return writes_global_memory; } + void SetEngine(Tegra::Engines::KeplerCompute* kepler_compute_, + Tegra::MemoryManager* gpu_memory_) { + kepler_compute = kepler_compute_; + gpu_memory = gpu_memory_; + } + private: TextureCache& texture_cache; BufferCache& buffer_cache; - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::KeplerCompute& kepler_compute; + Tegra::MemoryManager* gpu_memory; + Tegra::Engines::KeplerCompute* kepler_compute; ProgramManager& program_manager; Shader::Info info; diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp index 67eae369dd..c877d77926 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp @@ -169,15 +169,15 @@ ConfigureFuncPtr ConfigureFunc(const std::array& infos, u32 ena } } // Anonymous namespace -GraphicsPipeline::GraphicsPipeline( - const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_, - Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, - ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker, - VideoCore::ShaderNotify* shader_notify, std::array sources, - std::array, 5> sources_spirv, const std::array& infos, - const GraphicsPipelineKey& key_) - : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, - gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_}, +GraphicsPipeline::GraphicsPipeline(const Device& device, TextureCache& texture_cache_, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, ShaderWorker* thread_worker, + VideoCore::ShaderNotify* shader_notify, + std::array sources, + std::array, 5> sources_spirv, + const std::array& infos, + const GraphicsPipelineKey& key_) + : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_}, key{key_} { if (shader_notify) { shader_notify->MarkShaderBuilding(); @@ -285,7 +285,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings); buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers); - const auto& regs{maxwell3d.regs}; + const auto& regs{maxwell3d->regs}; const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { const Shader::Info& 
info{stage_infos[stage]}; @@ -299,7 +299,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { ++ssbo_index; } } - const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; + const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers}; const auto read_handle{[&](const auto& desc, u32 index) { ASSERT(cbufs[desc.cbuf_index].enabled); const u32 index_offset{index << desc.size_shift}; @@ -312,13 +312,13 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + second_offset}; - const u32 lhs_raw{gpu_memory.Read(addr)}; - const u32 rhs_raw{gpu_memory.Read(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read(addr)}; + const u32 rhs_raw{gpu_memory->Read(separate_addr)}; const u32 raw{lhs_raw | rhs_raw}; return TexturePair(raw, via_header_index); } } - return TexturePair(gpu_memory.Read(addr), via_header_index); + return TexturePair(gpu_memory->Read(addr), via_header_index); }}; const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { for (u32 index = 0; index < desc.count; ++index) { diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h index 4ec15b9666..a0f0e63cba 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h @@ -71,10 +71,9 @@ static_assert(std::is_trivially_constructible_v); class GraphicsPipeline { public: explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_, - BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, - Tegra::Engines::Maxwell3D& maxwell3d_, - ProgramManager& program_manager_, StateTracker& state_tracker_, - ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, ShaderWorker* thread_worker, + VideoCore::ShaderNotify* shader_notify, std::array sources, std::array, 5> sources_spirv, const std::array& infos, @@ -107,6 +106,11 @@ public: }; } + void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) { + maxwell3d = maxwell3d_; + gpu_memory = gpu_memory_; + } + private: template void ConfigureImpl(bool is_indexed); @@ -119,8 +123,8 @@ private: TextureCache& texture_cache; BufferCache& buffer_cache; - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::MemoryManager* gpu_memory; + Tegra::Engines::Maxwell3D* maxwell3d; ProgramManager& program_manager; StateTracker& state_tracker; const GraphicsPipelineKey key; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 8cfa0013fb..4eb9bd1160 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -216,6 +216,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { return; } std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + pipeline->SetEngine(maxwell3d, gpu_memory); pipeline->Configure(is_indexed); SyncState(); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 494581d0d3..5a29a41d21 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -477,9 +477,9 @@ std::unique_ptr ShaderCache::CreateGraphicsPipeline( 
previous_program = &program; } auto* const thread_worker{build_in_parallel ? workers.get() : nullptr}; - return std::make_unique( - device, texture_cache, buffer_cache, *gpu_memory, *maxwell3d, program_manager, - state_tracker, thread_worker, &shader_notify, sources, sources_spirv, infos, key); + return std::make_unique(device, texture_cache, buffer_cache, program_manager, + state_tracker, thread_worker, &shader_notify, sources, + sources_spirv, infos, key); } catch (Shader::Exception& exception) { LOG_ERROR(Render_OpenGL, "{}", exception.what()); @@ -533,9 +533,8 @@ std::unique_ptr ShaderCache::CreateComputePipeline( break; } - return std::make_unique(device, texture_cache, buffer_cache, *gpu_memory, - *kepler_compute, program_manager, program.info, code, - code_spirv); + return std::make_unique(device, texture_cache, buffer_cache, program_manager, + program.info, code, code_spirv); } catch (Shader::Exception& exception) { LOG_ERROR(Render_OpenGL, "{}", exception.what()); return nullptr; diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 5aca8f0385..1e993185f9 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -215,15 +215,14 @@ ConfigureFuncPtr ConfigureFunc(const std::array& m } // Anonymous namespace GraphicsPipeline::GraphicsPipeline( - Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_, - BufferCache& buffer_cache_, TextureCache& texture_cache_, + Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_, VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key_, std::array stages, const std::array& infos) - : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_}, - texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_}, + : key{key_}, device{device_}, texture_cache{texture_cache_}, + buffer_cache{buffer_cache_}, scheduler{scheduler_}, update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} { if (shader_notify) { shader_notify->MarkShaderBuilding(); @@ -288,7 +287,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes); - const auto& regs{maxwell3d.regs}; + const auto& regs{maxwell3d->regs}; const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { const Shader::Info& info{stage_infos[stage]}; @@ -302,7 +301,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { ++ssbo_index; } } - const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; + const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers}; const auto read_handle{[&](const auto& desc, u32 index) { ASSERT(cbufs[desc.cbuf_index].enabled); const u32 index_offset{index << desc.size_shift}; @@ -315,13 +314,13 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + second_offset}; - const u32 lhs_raw{gpu_memory.Read(addr)}; - const u32 
rhs_raw{gpu_memory.Read(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read(addr)}; + const u32 rhs_raw{gpu_memory->Read(separate_addr)}; const u32 raw{lhs_raw | rhs_raw}; return TexturePair(raw, via_header_index); } } - return TexturePair(gpu_memory.Read(addr), via_header_index); + return TexturePair(gpu_memory->Read(addr), via_header_index); }}; const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { for (u32 index = 0; index < desc.count; ++index) { diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h index e8949a9ab0..85602592b3 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h @@ -69,15 +69,16 @@ class GraphicsPipeline { static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage; public: - explicit GraphicsPipeline( - Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, - Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache, - VideoCore::ShaderNotify* shader_notify, const Device& device, - DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue, - Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics, - RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key, - std::array stages, - const std::array& infos); + explicit GraphicsPipeline(Scheduler& scheduler, BufferCache& buffer_cache, + TextureCache& texture_cache, VideoCore::ShaderNotify* shader_notify, + const Device& device, DescriptorPool& descriptor_pool, + UpdateDescriptorQueue& update_descriptor_queue, + Common::ThreadWorker* worker_thread, + PipelineStatistics* pipeline_statistics, + RenderPassCache& render_pass_cache, + const GraphicsPipelineCacheKey& key, + std::array stages, + const std::array& infos); GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete; GraphicsPipeline(GraphicsPipeline&&) noexcept = delete; @@ -109,6 +110,11 @@ public: return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl(is_indexed); }; } + void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) { + maxwell3d = maxwell3d_; + gpu_memory = gpu_memory_; + } + private: template void ConfigureImpl(bool is_indexed); @@ -120,8 +126,8 @@ private: void Validate(); const GraphicsPipelineCacheKey key; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::MemoryManager& gpu_memory; + Tegra::Engines::Maxwell3D* maxwell3d; + Tegra::MemoryManager* gpu_memory; const Device& device; TextureCache& texture_cache; BufferCache& buffer_cache; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index b1e0b96c49..732e7b6f24 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -555,10 +555,10 @@ std::unique_ptr PipelineCache::CreateGraphicsPipeline( previous_stage = &program; } Common::ThreadWorker* const thread_worker{build_in_parallel ? 
&workers : nullptr}; - return std::make_unique( - *maxwell3d, *gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, - descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key, - std::move(modules), infos); + return std::make_unique(scheduler, buffer_cache, texture_cache, + &shader_notify, device, descriptor_pool, + update_descriptor_queue, thread_worker, statistics, + render_pass_cache, key, std::move(modules), infos); } catch (const Shader::Exception& exception) { LOG_ERROR(Render_Vulkan, "{}", exception.what()); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index bf750452f4..7e08055442 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -190,6 +190,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { return; } std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + // update engine as channel may be different. + pipeline->SetEngine(maxwell3d, gpu_memory); pipeline->Configure(is_indexed); BeginTransformFeedback(); diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 2731aead07..e8b0b0a3ba 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -885,12 +885,14 @@ void TextureCache
<P>
::InvalidateScale(Image& image) { } image.image_view_ids.clear(); image.image_view_infos.clear(); - if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(state->graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(state->compute_image_view_ids, CORRUPT_ID); + for (auto& this_state : channel_storage) { + if constexpr (ENABLE_VALIDATION) { + std::ranges::fill(this_state.graphics_image_view_ids, CORRUPT_ID); + std::ranges::fill(this_state.compute_image_view_ids, CORRUPT_ID); + } + this_state.graphics_image_table.Invalidate(); + this_state.compute_image_table.Invalidate(); } - state->graphics_image_table.Invalidate(); - state->compute_image_table.Invalidate(); has_deleted_images = true; } From b617874724c461cba270a00c0f8e67fc4a6d553a Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Wed, 10 Nov 2021 17:37:17 +0100 Subject: [PATCH 16/68] Common: implement MultiLevelPageTable. --- src/common/CMakeLists.txt | 2 + src/common/multi_level_page_table.cpp | 7 +++ src/common/multi_level_page_table.h | 79 +++++++++++++++++++++++++ src/common/multi_level_page_table.inc | 83 +++++++++++++++++++++++++++ 4 files changed, 171 insertions(+) create mode 100644 src/common/multi_level_page_table.cpp create mode 100644 src/common/multi_level_page_table.h create mode 100644 src/common/multi_level_page_table.inc diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 3447fabd8f..2db4148194 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -81,6 +81,8 @@ add_library(common STATIC microprofile.cpp microprofile.h microprofileui.h + multi_level_page_table.cpp + multi_level_page_table.h nvidia_flags.cpp nvidia_flags.h page_table.cpp diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp new file mode 100644 index 0000000000..561785ca74 --- /dev/null +++ b/src/common/multi_level_page_table.cpp @@ -0,0 +1,7 @@ +#include "common/multi_level_page_table.inc" + +namespace Common { +template class Common::MultiLevelPageTable; +template class Common::MultiLevelPageTable; +template class Common::MultiLevelPageTable; +} // namespace Common diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h new file mode 100644 index 0000000000..dde1cc962e --- /dev/null +++ b/src/common/multi_level_page_table.h @@ -0,0 +1,79 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
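For context, the header below backs the whole address space with one flat array that is reserved up front and committed one first-level chunk at a time. The following standalone sketch shows the indexing this implies; the bit widths are illustrative assumptions, since the real values arrive as constructor arguments.

    #include <cstddef>
    #include <cstdint>

    // Illustrative widths only; MultiLevelPageTable receives these at runtime.
    constexpr std::size_t address_space_bits = 40; // total GPU VA bits
    constexpr std::size_t first_level_bits = 14;   // 1 << 14 commit chunks
    constexpr std::size_t page_bits = 16;          // 64 KiB pages

    constexpr std::size_t first_level_shift = address_space_bits - first_level_bits;

    // Which on-demand commit chunk a GPU address falls into.
    constexpr std::size_t FirstLevelSlot(std::uint64_t gpu_addr) {
        return gpu_addr >> first_level_shift;
    }

    // Index into the flat page-table array spanning the whole space.
    constexpr std::size_t PageIndex(std::uint64_t gpu_addr) {
        return gpu_addr >> page_bits;
    }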
+ +#pragma once + +#include +#include +#include + +#include "common/common_types.h" + +namespace Common { + +template +class MultiLevelPageTable final { +public: + constexpr MultiLevelPageTable() = default; + explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits, + std::size_t page_bits); + + ~MultiLevelPageTable() noexcept; + + MultiLevelPageTable(const MultiLevelPageTable&) = delete; + MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete; + + MultiLevelPageTable(MultiLevelPageTable&& other) noexcept + : address_space_bits{std::exchange(other.address_space_bits, 0)}, + first_level_bits{std::exchange(other.first_level_bits, 0)}, page_bits{std::exchange( + other.page_bits, 0)}, + first_level_shift{std::exchange(other.first_level_shift, 0)}, + first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)}, + first_level_map{std::move(other.first_level_map)}, base_ptr{std::exchange(other.base_ptr, + nullptr)} {} + + MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept { + address_space_bits = std::exchange(other.address_space_bits, 0); + first_level_bits = std::exchange(other.first_level_bits, 0); + page_bits = std::exchange(other.page_bits, 0); + first_level_shift = std::exchange(other.first_level_shift, 0); + first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0); + alloc_size = std::exchange(other.alloc_size, 0); + first_level_map = std::move(other.first_level_map); + base_ptr = std::exchange(other.base_ptr, nullptr); + return *this; + } + + void ReserveRange(u64 start, std::size_t size); + + [[nodiscard]] constexpr const BaseAddr& operator[](std::size_t index) const { + return base_ptr[index]; + } + + [[nodiscard]] constexpr BaseAddr& operator[](std::size_t index) { + return base_ptr[index]; + } + + [[nodiscard]] constexpr BaseAddr* data() { + return base_ptr; + } + + [[nodiscard]] constexpr const BaseAddr* data() const { + return base_ptr; + } + +private: + void AllocateLevel(u64 level); + + std::size_t address_space_bits{}; + std::size_t first_level_bits{}; + std::size_t page_bits{}; + std::size_t first_level_shift{}; + std::size_t first_level_chunk_size{}; + std::size_t alloc_size{}; + std::vector first_level_map{}; + BaseAddr* base_ptr{}; +}; + +} // namespace Common diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc new file mode 100644 index 0000000000..a75e61f9d7 --- /dev/null +++ b/src/common/multi_level_page_table.inc @@ -0,0 +1,83 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
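The implementation file below commits chunks on demand inside a range reserved at construction: VirtualAlloc with MEM_RESERVE then MEM_COMMIT on Windows, mmap on POSIX. A minimal POSIX-only sketch of that reserve/commit pattern follows; it passes MAP_FIXED for clarity, whereas the code below supplies the address only as a placement hint.

    #include <sys/mman.h>

    #include <cstddef>

    // Reserve `total` bytes of address space with no backing memory;
    // any access faults until the containing chunk is committed.
    void* ReserveRange(std::size_t total) {
        void* base = mmap(nullptr, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return base == MAP_FAILED ? nullptr : base;
    }

    // Commit `chunk` bytes at `offset` inside that reservation.
    bool CommitChunk(void* base, std::size_t offset, std::size_t chunk) {
        void* target = static_cast<char*>(base) + offset;
        void* got = mmap(target, chunk, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        return got == target;
    }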
+ +#ifdef _WIN32 +#include +#else +#include +#endif + +#include "common/assert.h" +#include "common/multi_level_page_table.h" + +namespace Common { + +template +MultiLevelPageTable::MultiLevelPageTable(std::size_t address_space_bits_, + std::size_t first_level_bits_, + std::size_t page_bits_) + : address_space_bits{address_space_bits_}, + first_level_bits{first_level_bits_}, page_bits{page_bits_} { + first_level_shift = address_space_bits - first_level_bits; + first_level_chunk_size = 1ULL << (first_level_shift - page_bits); + alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr); + std::size_t first_level_size = 1ULL << first_level_bits; + first_level_map.resize(first_level_size, nullptr); +#ifdef _WIN32 + void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)}; +#else + void* base{mmap(nullptr, alloc_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; + + if (base == MAP_FAILED) { + base = nullptr; + } +#endif + + ASSERT(base); + base_ptr = reinterpret_cast(base); +} + +template +MultiLevelPageTable::~MultiLevelPageTable() noexcept { + if (!base_ptr) { + return; + } +#ifdef _WIN32 + ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE)); +#else + ASSERT(munmap(base_ptr, alloc_size) == 0); +#endif +} + +template +void MultiLevelPageTable::ReserveRange(u64 start, std::size_t size) { + const u64 new_start = start >> first_level_shift; + const u64 new_end = + (start + size + (first_level_chunk_size << page_bits) - 1) >> first_level_shift; + for (u64 i = new_start; i <= new_end; i++) { + if (!first_level_map[i]) { + AllocateLevel(i); + } + } +} + +template +void MultiLevelPageTable::AllocateLevel(u64 level) { + void* ptr = reinterpret_cast(base_ptr) + level * first_level_chunk_size; +#ifdef _WIN32 + void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)}; +#else + void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)}; + + if (base == MAP_FAILED) { + base = nullptr; + } +#endif + ASSERT(base); + + first_level_map[level] = base; +} + +} // namespace Common From cbaf3fb433a351f7d9509f17f88d4896ba66afd1 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 11 Nov 2021 21:24:40 +0100 Subject: [PATCH 17/68] VideoCore: Update MemoryManager --- src/common/multi_level_page_table.cpp | 1 + src/common/multi_level_page_table.inc | 7 +- src/video_core/memory_manager.cpp | 147 ++++++++++---------------- src/video_core/memory_manager.h | 98 +++++------------ 4 files changed, 86 insertions(+), 167 deletions(-) diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp index 561785ca74..aed04d0b59 100644 --- a/src/common/multi_level_page_table.cpp +++ b/src/common/multi_level_page_table.cpp @@ -4,4 +4,5 @@ namespace Common { template class Common::MultiLevelPageTable; template class Common::MultiLevelPageTable; template class Common::MultiLevelPageTable; +template class Common::MultiLevelPageTable; } // namespace Common diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc index a75e61f9d7..7fbcb908ac 100644 --- a/src/common/multi_level_page_table.inc +++ b/src/common/multi_level_page_table.inc @@ -20,7 +20,7 @@ MultiLevelPageTable::MultiLevelPageTable(std::size_t address_space_bit : address_space_bits{address_space_bits_}, first_level_bits{first_level_bits_}, page_bits{page_bits_} { first_level_shift = address_space_bits - first_level_bits; - first_level_chunk_size = 1ULL << (first_level_shift - page_bits); + 
first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr); alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr); std::size_t first_level_size = 1ULL << first_level_bits; first_level_map.resize(first_level_size, nullptr); @@ -53,8 +53,7 @@ MultiLevelPageTable::~MultiLevelPageTable() noexcept { template void MultiLevelPageTable::ReserveRange(u64 start, std::size_t size) { const u64 new_start = start >> first_level_shift; - const u64 new_end = - (start + size + (first_level_chunk_size << page_bits) - 1) >> first_level_shift; + const u64 new_end = (start + size) >> first_level_shift; for (u64 i = new_start; i <= new_end; i++) { if (!first_level_map[i]) { AllocateLevel(i); @@ -64,7 +63,7 @@ void MultiLevelPageTable::ReserveRange(u64 start, std::size_t size) { template void MultiLevelPageTable::AllocateLevel(u64 level) { - void* ptr = reinterpret_cast(base_ptr) + level * first_level_chunk_size; + void* ptr = reinterpret_cast(base_ptr) + level * first_level_chunk_size; #ifdef _WIN32 void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)}; #else diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index a3efd365e3..1e090279f0 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -16,36 +16,63 @@ namespace Tegra { -MemoryManager::MemoryManager(Core::System& system_) - : system{system_}, page_table(page_table_size) {} +MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 page_bits_) + : system{system_}, address_space_bits{address_space_bits_}, page_bits{page_bits_}, entries{}, + page_table{address_space_bits, address_space_bits + page_bits - 38, page_bits} { + address_space_size = 1ULL << address_space_bits; + allocate_start = address_space_bits > 32 ? 
1ULL << 32 : 0; + page_size = 1ULL << page_bits; + page_mask = page_size - 1ULL; + const u64 page_table_bits = address_space_bits - cpu_page_bits; + const u64 page_table_size = 1ULL << page_table_bits; + page_table_mask = page_table_size - 1; + + entries.resize(page_table_size / 32, 0); +} MemoryManager::~MemoryManager() = default; -void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { - rasterizer = rasterizer_; +MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const { + position = position >> page_bits; + const u64 entry_mask = entries[position / 32]; + const size_t sub_index = position % 32; + return static_cast((entry_mask >> (2 * sub_index)) & 0x03ULL); } -GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { +void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) { + position = position >> page_bits; + const u64 entry_mask = entries[position / 32]; + const size_t sub_index = position % 32; + entries[position / 32] = + (~(3ULL << sub_index * 2) & entry_mask) | (static_cast(entry) << sub_index * 2); +} + +template +GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, + size_t size) { u64 remaining_size{size}; + if constexpr (entry_type == EntryType::Mapped) { + page_table.ReserveRange(gpu_addr, size); + } for (u64 offset{}; offset < size; offset += page_size) { - if (remaining_size < page_size) { - SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size); - } else { - SetPageEntry(gpu_addr + offset, page_entry + offset); + const GPUVAddr current_gpu_addr = gpu_addr + offset; + SetEntry(current_gpu_addr, entry_type); + if constexpr (entry_type == EntryType::Mapped) { + const VAddr current_cpu_addr = cpu_addr + offset; + const auto index = PageEntryIndex(current_gpu_addr); + page_table[index] = static_cast(current_cpu_addr >> 12ULL); } remaining_size -= page_size; } return gpu_addr; } +void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { + rasterizer = rasterizer_; +} + GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { - const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); - if (it != map_ranges.end() && it->first == gpu_addr) { - it->second = size; - } else { - map_ranges.insert(it, MapRange{gpu_addr, size}); - } - return UpdateRange(gpu_addr, cpu_addr, size); + return PageTableOp(gpu_addr, cpu_addr, size); } GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) { @@ -62,13 +89,6 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { if (size == 0) { return; } - const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); - if (it != map_ranges.end()) { - ASSERT(it->first == gpu_addr); - map_ranges.erase(it); - } else { - ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr); - } const auto submapped_ranges = GetSubmappedRange(gpu_addr, size); for (const auto& [map_addr, map_size] : submapped_ranges) { @@ -79,63 +99,23 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { rasterizer->UnmapMemory(*cpu_addr, map_size); } - UpdateRange(gpu_addr, PageEntry::State::Unmapped, size); + PageTableOp(gpu_addr, 0, size); } std::optional MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) { for (u64 offset{}; offset < size; offset += page_size) { - if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) { + if (GetEntry(gpu_addr + 
offset) != EntryType::Free) { return std::nullopt; } } - return UpdateRange(gpu_addr, PageEntry::State::Allocated, size); + return PageTableOp(gpu_addr, 0, size); } GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) { return *AllocateFixed(*FindFreeRange(size, align), size); } -void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) { - if (!page_entry.IsValid()) { - return; - } - - ASSERT(system.CurrentProcess() - ->PageTable() - .LockForDeviceAddressSpace(page_entry.ToAddress(), size) - .IsSuccess()); -} - -void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) { - if (!page_entry.IsValid()) { - return; - } - - ASSERT(system.CurrentProcess() - ->PageTable() - .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size) - .IsSuccess()); -} - -PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const { - return page_table[PageEntryIndex(gpu_addr)]; -} - -void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { - // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to - // improper tracking, but should be fixed in the future. - - //// Unlock the old page - // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size); - - //// Lock the new page - // TryLockPage(page_entry, size); - auto& current_page = page_table[PageEntryIndex(gpu_addr)]; - - current_page = page_entry; -} - std::optional MemoryManager::FindFreeRange(std::size_t size, std::size_t align, bool start_32bit_address) const { if (!align) { @@ -145,9 +125,9 @@ std::optional MemoryManager::FindFreeRange(std::size_t size, std::size } u64 available_size{}; - GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start}; + GPUVAddr gpu_addr{allocate_start}; while (gpu_addr + available_size < address_space_size) { - if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) { + if (GetEntry(gpu_addr + available_size) == EntryType::Free) { available_size += page_size; if (available_size >= size) { @@ -168,15 +148,12 @@ std::optional MemoryManager::FindFreeRange(std::size_t size, std::size } std::optional MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { - if (gpu_addr == 0) { - return std::nullopt; - } - const auto page_entry{GetPageEntry(gpu_addr)}; - if (!page_entry.IsValid()) { + if (GetEntry(gpu_addr) != EntryType::Mapped) { return std::nullopt; } - return page_entry.ToAddress() + (gpu_addr & page_mask); + const VAddr cpu_addr_base = static_cast(page_table[PageEntryIndex(gpu_addr)]) << 12ULL; + return cpu_addr_base + (gpu_addr & page_mask); } std::optional MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { @@ -227,10 +204,6 @@ template void MemoryManager::Write(GPUVAddr addr, u32 data); template void MemoryManager::Write(GPUVAddr addr, u64 data); u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { - if (!GetPageEntry(gpu_addr).IsValid()) { - return {}; - } - const auto address{GpuToCpuAddress(gpu_addr)}; if (!address) { return {}; @@ -240,10 +213,6 @@ u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { } const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { - if (!GetPageEntry(gpu_addr).IsValid()) { - return {}; - } - const auto address{GpuToCpuAddress(gpu_addr)}; if (!address) { return {}; @@ -252,12 +221,6 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { return system.Memory().GetPointer(*address); } -size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept { - auto it = std::ranges::upper_bound(map_ranges, gpu_addr, 
{}, &MapRange::first); - --it; - return it->second - (gpu_addr - it->first); -} - void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, bool is_safe) const { std::size_t remaining_size{size}; @@ -268,7 +231,7 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std: const std::size_t copy_amount{ std::min(static_cast(page_size) - page_offset, remaining_size)}; const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (page_addr && *page_addr != 0) { + if (page_addr) { const auto src_addr{*page_addr + page_offset}; if (is_safe) { // Flush must happen on the rasterizer interface, such that memory is always @@ -307,7 +270,7 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe const std::size_t copy_amount{ std::min(static_cast(page_size) - page_offset, remaining_size)}; const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (page_addr && *page_addr != 0) { + if (page_addr) { const auto dest_addr{*page_addr + page_offset}; if (is_safe) { @@ -392,7 +355,7 @@ bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) cons size_t page_index{gpu_addr >> page_bits}; const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; while (page_index < page_last) { - if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) { + if (GetEntry(page_index << page_bits) == EntryType::Free) { return false; } ++page_index; @@ -408,7 +371,7 @@ std::vector> MemoryManager::GetSubmappedRange( size_t page_offset{gpu_addr & page_mask}; std::optional> last_segment{}; std::optional old_page_addr{}; - const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) { + const auto extend_size = [this, &last_segment, &page_index, &page_offset](std::size_t bytes) { if (!last_segment) { const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset; last_segment = {new_base_addr, bytes}; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 74f9ce1754..0a763fd198 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -8,6 +8,7 @@ #include #include "common/common_types.h" +#include "common/multi_level_page_table.h" namespace VideoCore { class RasterizerInterface; @@ -19,55 +20,10 @@ class System; namespace Tegra { -class PageEntry final { -public: - enum class State : u32 { - Unmapped = static_cast(-1), - Allocated = static_cast(-2), - }; - - constexpr PageEntry() = default; - constexpr PageEntry(State state_) : state{state_} {} - constexpr PageEntry(VAddr addr) : state{static_cast(addr >> ShiftBits)} {} - - [[nodiscard]] constexpr bool IsUnmapped() const { - return state == State::Unmapped; - } - - [[nodiscard]] constexpr bool IsAllocated() const { - return state == State::Allocated; - } - - [[nodiscard]] constexpr bool IsValid() const { - return !IsUnmapped() && !IsAllocated(); - } - - [[nodiscard]] constexpr VAddr ToAddress() const { - if (!IsValid()) { - return {}; - } - - return static_cast(state) << ShiftBits; - } - - [[nodiscard]] constexpr PageEntry operator+(u64 offset) const { - // If this is a reserved value, offsets do not apply - if (!IsValid()) { - return *this; - } - return PageEntry{(static_cast(state) << ShiftBits) + offset}; - } - -private: - static constexpr std::size_t ShiftBits{12}; - - State state{State::Unmapped}; -}; -static_assert(sizeof(PageEntry) == 4, "PageEntry is too large"); - class MemoryManager final { public: - explicit 
MemoryManager(Core::System& system_); + explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40, + u64 page_bits_ = 16); ~MemoryManager(); /// Binds a renderer to the memory manager. @@ -86,9 +42,6 @@ public: [[nodiscard]] u8* GetPointer(GPUVAddr addr); [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const; - /// Returns the number of bytes until the end of the memory map containing the given GPU address - [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept; - /** * ReadBlock and WriteBlock are full read and write operations over virtual * GPU Memory. It's important to use these when GPU memory may not be continuous @@ -145,44 +98,47 @@ public: void FlushRegion(GPUVAddr gpu_addr, size_t size) const; private: - [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const; - void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size); - GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size); [[nodiscard]] std::optional FindFreeRange(std::size_t size, std::size_t align, bool start_32bit_address = false) const; - void TryLockPage(PageEntry page_entry, std::size_t size); - void TryUnlockPage(PageEntry page_entry, std::size_t size); - void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, bool is_safe) const; void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, bool is_safe); - [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) { + [[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const { return (gpu_addr >> page_bits) & page_table_mask; } - static constexpr u64 address_space_size = 1ULL << 40; - static constexpr u64 address_space_start = 1ULL << 32; - static constexpr u64 address_space_start_low = 1ULL << 16; - static constexpr u64 page_bits{16}; - static constexpr u64 page_size{1 << page_bits}; - static constexpr u64 page_mask{page_size - 1}; - static constexpr u64 page_table_bits{24}; - static constexpr u64 page_table_size{1 << page_table_bits}; - static constexpr u64 page_table_mask{page_table_size - 1}; - Core::System& system; + const u64 address_space_bits; + const u64 page_bits; + u64 address_space_size; + u64 allocate_start; + u64 page_size; + u64 page_mask; + u64 page_table_mask; + static constexpr u64 cpu_page_bits{12}; + VideoCore::RasterizerInterface* rasterizer = nullptr; - std::vector page_table; + enum class EntryType : u64 { + Free = 0, + Reserved = 1, + Mapped = 2, + }; - using MapRange = std::pair; - std::vector map_ranges; + std::vector entries; - std::vector> cache_invalidate_queue; + template + GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); + + EntryType GetEntry(size_t position) const; + + void SetEntry(size_t position, EntryType entry); + + Common::MultiLevelPageTable page_table; }; } // namespace Tegra From 835b950f7ec300920b920b9b45f82ceaa4b33813 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 13 Nov 2021 02:29:54 +0100 Subject: [PATCH 18/68] NvHostCtrl: Fix merge of nvflinger. 
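Context for the MemoryManager rewrite in the previous patch: page states (Free, Reserved, Mapped) are packed two bits apiece into u64 words, 32 states per word. A standalone sketch of that packing follows; it takes a page-granular index directly, where the patch first shifts the GPU address right by page_bits.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class EntryType : std::uint64_t { Free = 0, Reserved = 1, Mapped = 2 };

    EntryType GetEntry(const std::vector<std::uint64_t>& entries, std::size_t page) {
        const std::uint64_t word = entries[page / 32];
        const std::size_t sub = page % 32; // position within the word
        return static_cast<EntryType>((word >> (2 * sub)) & 0x3);
    }

    void SetEntry(std::vector<std::uint64_t>& entries, std::size_t page, EntryType e) {
        const std::size_t sub = page % 32;
        std::uint64_t& word = entries[page / 32];
        word = (word & ~(std::uint64_t{3} << (2 * sub))) |
               (static_cast<std::uint64_t>(e) << (2 * sub));
    }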
--- src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 9bd10257bb..594d206007 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -180,7 +180,8 @@ private: struct IocCtrlEventUnregisterBatchParams { u64_le user_events{}; }; - static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8, "IocCtrlEventKill is incorrect size"); + static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8, + "IocCtrlEventKill is incorrect size"); NvResult NvOsGetConfigU32(const std::vector& input, std::vector& output); NvResult IocCtrlEventWait(const std::vector& input, std::vector& output, From 0f4ae3cc52a740bac836f8f8c2ff5d560b8ca46e Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 13 Nov 2021 03:05:32 +0100 Subject: [PATCH 19/68] MemoryManager: Temporary Fix for NVDEC. --- src/video_core/memory_manager.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 1e090279f0..9e946d448a 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -125,7 +125,7 @@ std::optional MemoryManager::FindFreeRange(std::size_t size, std::size } u64 available_size{}; - GPUVAddr gpu_addr{allocate_start}; + GPUVAddr gpu_addr{start_32bit_address ? 0 : allocate_start}; while (gpu_addr + available_size < address_space_size) { if (GetEntry(gpu_addr + available_size) == EntryType::Free) { available_size += page_size; From c6ea0c650e38b7c475fea221b9655c754f7aaccc Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 13 Nov 2021 03:25:35 +0100 Subject: [PATCH 20/68] NVDRV: Update copyright notices. --- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 6 ++++-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 6 ++++-- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 6 ++++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 9946ce6246..5c70c9a57b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #include #include diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 4ecae3caf6..f5fb33ba7d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. 
#pragma once diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 0e22d92730..a859a7abdc 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -1,4 +1,4 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors +// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors // (https://github.com/skyline-emu/) // SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 // or any later version Refer to the license.txt file included. diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 594d206007..d56aea4059 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #pragma once From feb49c822d9cabc5bc7be9eab1f2bf4ba460176a Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 14 Nov 2021 20:55:52 +0100 Subject: [PATCH 21/68] NVDRV: Remake ASGPU --- src/common/CMakeLists.txt | 2 + src/common/address_space.cpp | 11 + src/common/address_space.h | 134 +++++ src/common/address_space.inc | 338 +++++++++++++ .../service/nvdrv/devices/nvhost_as_gpu.cpp | 460 ++++++++++++------ .../hle/service/nvdrv/devices/nvhost_as_gpu.h | 163 ++++--- src/video_core/memory_manager.cpp | 10 +- src/video_core/memory_manager.h | 3 +- 8 files changed, 882 insertions(+), 239 deletions(-) create mode 100644 src/common/address_space.cpp create mode 100644 src/common/address_space.h create mode 100644 src/common/address_space.inc diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 2db4148194..a026968734 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -17,6 +17,8 @@ endif () include(GenerateSCMRev) add_library(common STATIC + address_space.cpp + address_space.h algorithm.h alignment.h announce_multiplayer_room.h diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp new file mode 100644 index 0000000000..6db85be87b --- /dev/null +++ b/src/common/address_space.cpp @@ -0,0 +1,11 @@ +// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. + +#include "common/address_space.inc" + +namespace Common { + +template class Common::FlatAllocator; + +} diff --git a/src/common/address_space.h b/src/common/address_space.h new file mode 100644 index 0000000000..fd2f32b7d3 --- /dev/null +++ b/src/common/address_space.h @@ -0,0 +1,134 @@ +// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. 
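The FlatAllocator declared in the new header below is what nvhost_as_gpu uses to hand out GPU VA regions. A hypothetical usage sketch; the template arguments (VA type, unmapped sentinel, address-space bits) are reconstructed assumptions matching the explicit instantiation in address_space.cpp above.

    #include "common/address_space.h"

    // Assumed instantiation: 32-bit VA space with 0 as the unmapped sentinel.
    using SmallAllocator = Common::FlatAllocator<u32, 0, 32>;

    void Example() {
        // Nothing is handed out below vaStart (0x1000 here).
        SmallAllocator allocator{/*vaStart=*/0x1000};

        const u32 va = allocator.Allocate(/*size=*/0x100);       // find a free region
        allocator.AllocateFixed(/*virt=*/0x8000, /*size=*/0x80); // claim a fixed one
        allocator.Free(va, /*size=*/0x100);                      // return it for reuse
    }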
+ +#pragma once + +#include +#include +#include +#include + +#include "common/common_types.h" + +namespace Common { +template +concept AddressSpaceValid = std::is_unsigned_v && sizeof(VaType) * 8 >= AddressSpaceBits; + +struct EmptyStruct {}; + +/** + * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector + */ +template +requires AddressSpaceValid class FlatAddressSpaceMap { +private: + std::function + unmapCallback{}; //!< Callback called when the mappings in an region have changed + +protected: + /** + * @brief Represents a block of memory in the AS, the physical mapping is contiguous until + * another block with a different phys address is hit + */ + struct Block { + VaType virt{UnmappedVa}; //!< VA of the block + PaType phys{UnmappedPa}; //!< PA of the block, will increase 1-1 with VA until a new block + //!< is encountered + [[no_unique_address]] ExtraBlockInfo extraInfo; + + Block() = default; + + Block(VaType virt, PaType phys, ExtraBlockInfo extraInfo) + : virt(virt), phys(phys), extraInfo(extraInfo) {} + + constexpr bool Valid() { + return virt != UnmappedVa; + } + + constexpr bool Mapped() { + return phys != UnmappedPa; + } + + constexpr bool Unmapped() { + return phys == UnmappedPa; + } + + bool operator<(const VaType& pVirt) const { + return virt < pVirt; + } + }; + + std::mutex blockMutex; + std::vector blocks{Block{}}; + + /** + * @brief Maps a PA range into the given AS region + * @note blockMutex MUST be locked when calling this + */ + void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo); + + /** + * @brief Unmaps the given range and merges it with other unmapped regions + * @note blockMutex MUST be locked when calling this + */ + void UnmapLocked(VaType virt, VaType size); + +public: + static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + + ((1ULL << (AddressSpaceBits - 1)) - + 1)}; //!< The maximum VA that this AS can technically reach + + VaType vaLimit{VaMaximum}; //!< A soft limit on the maximum VA of the AS + + FlatAddressSpaceMap(VaType vaLimit, std::function unmapCallback = {}); + + FlatAddressSpaceMap() = default; + + void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo = {}) { + std::scoped_lock lock(blockMutex); + MapLocked(virt, phys, size, extraInfo); + } + + void Unmap(VaType virt, VaType size) { + std::scoped_lock lock(blockMutex); + UnmapLocked(virt, size); + } +}; + +/** + * @brief FlatMemoryManager specialises FlatAddressSpaceMap to work as an allocator, with an + * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block + */ +template +requires AddressSpaceValid class FlatAllocator + : public FlatAddressSpaceMap { +private: + using Base = FlatAddressSpaceMap; + + VaType currentLinearAllocEnd; //!< The end address for the initial linear allocation pass, once + //!< this reaches the AS limit the slower allocation path will be + //!< used + +public: + VaType vaStart; //!< The base VA of the allocator, no allocations will be below this + + FlatAllocator(VaType vaStart, VaType vaLimit = Base::VaMaximum); + + /** + * @brief Allocates a region in the AS of the given size and returns its address + */ + VaType Allocate(VaType size); + + /** + * @brief Marks the given region in the AS as allocated + */ + void AllocateFixed(VaType virt, VaType size); + + /** + * @brief Frees an AS region so it can be used again + */ + void Free(VaType virt, VaType size); +}; +} // namespace Common diff --git 
a/src/common/address_space.inc b/src/common/address_space.inc new file mode 100644 index 0000000000..907c55d88e --- /dev/null +++ b/src/common/address_space.inc @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPLv3 or later +// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) + +#include "common/address_space.h" +#include "common/assert.h" + +#define MAP_MEMBER(returnType) \ + template \ + requires AddressSpaceValid returnType FlatAddressSpaceMap< \ + VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo> +#define MAP_MEMBER_CONST() \ + template \ + requires AddressSpaceValid FlatAddressSpaceMap< \ + VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo> + +#define MM_MEMBER(returnType) \ + template \ + requires AddressSpaceValid returnType \ + FlatMemoryManager + +#define ALLOC_MEMBER(returnType) \ + template \ + requires AddressSpaceValid returnType \ + FlatAllocator +#define ALLOC_MEMBER_CONST() \ + template \ + requires AddressSpaceValid \ + FlatAllocator + +namespace Common { +MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit, + std::function unmapCallback) + : unmapCallback(std::move(unmapCallback)), vaLimit(vaLimit) { + if (vaLimit > VaMaximum) + UNREACHABLE_MSG("Invalid VA limit!"); +} + +MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo) { + VaType virtEnd{virt + size}; + + if (virtEnd > vaLimit) + UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", + virtEnd, vaLimit); + + auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)}; + if (blockEndSuccessor == blocks.begin()) + UNREACHABLE_MSG("Trying to map a block before the VA start: virtEnd: 0x{:X}", virtEnd); + + auto blockEndPredecessor{std::prev(blockEndSuccessor)}; + + if (blockEndSuccessor != blocks.end()) { + // We have blocks in front of us, if one is directly in front then we don't have to add a + // tail + if (blockEndSuccessor->virt != virtEnd) { + PaType tailPhys{[&]() -> PaType { + if constexpr (!PaContigSplit) { + return blockEndPredecessor + ->phys; // Always propagate unmapped regions rather than calculating offset + } else { + if (blockEndPredecessor->Unmapped()) + return blockEndPredecessor->phys; // Always propagate unmapped regions + // rather than calculating offset + else + return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt; + } + }()}; + + if (blockEndPredecessor->virt >= virt) { + // If this block's start would be overlapped by the map then reuse it as a tail + // block + blockEndPredecessor->virt = virtEnd; + blockEndPredecessor->phys = tailPhys; + blockEndPredecessor->extraInfo = blockEndPredecessor->extraInfo; + + // No longer predecessor anymore + blockEndSuccessor = blockEndPredecessor--; + } else { + // Else insert a new one and we're done + blocks.insert(blockEndSuccessor, + {Block(virt, phys, extraInfo), + Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)}); + if (unmapCallback) + unmapCallback(virt, size); + + return; + } + } + } else { + // blockEndPredecessor will always be unmapped as blocks has to be terminated by an unmapped + // chunk + if (blockEndPredecessor != blocks.begin() && blockEndPredecessor->virt >= virt) { + // Move the unmapped block start backwards + blockEndPredecessor->virt = virtEnd; + + // No longer predecessor anymore + blockEndSuccessor = blockEndPredecessor--; + } else { + // Else insert a new one and we're done + 
blocks.insert(blockEndSuccessor, + {Block(virt, phys, extraInfo), Block(virtEnd, UnmappedPa, {})}); + if (unmapCallback) + unmapCallback(virt, size); + + return; + } + } + + auto blockStartSuccessor{blockEndSuccessor}; + + // Walk the block vector to find the start successor as this is more efficient than another + // binary search in most scenarios + while (std::prev(blockStartSuccessor)->virt >= virt) + blockStartSuccessor--; + + // Check that the start successor is either the end block or something in between + if (blockStartSuccessor->virt > virtEnd) { + UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt); + } else if (blockStartSuccessor->virt == virtEnd) { + // We need to create a new block as there are none spare that we would overwrite + blocks.insert(blockStartSuccessor, Block(virt, phys, extraInfo)); + } else { + // Erase overwritten blocks + if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor) + blocks.erase(eraseStart, blockEndSuccessor); + + // Reuse a block that would otherwise be overwritten as a start block + blockStartSuccessor->virt = virt; + blockStartSuccessor->phys = phys; + blockStartSuccessor->extraInfo = extraInfo; + } + + if (unmapCallback) + unmapCallback(virt, size); +} + +MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { + VaType virtEnd{virt + size}; + + if (virtEnd > vaLimit) + UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", + virtEnd, vaLimit); + + auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)}; + if (blockEndSuccessor == blocks.begin()) + UNREACHABLE_MSG("Trying to unmap a block before the VA start: virtEnd: 0x{:X}", virtEnd); + + auto blockEndPredecessor{std::prev(blockEndSuccessor)}; + + auto walkBackToPredecessor{[&](auto iter) { + while (iter->virt >= virt) + iter--; + + return iter; + }}; + + auto eraseBlocksWithEndUnmapped{[&](auto unmappedEnd) { + auto blockStartPredecessor{walkBackToPredecessor(unmappedEnd)}; + auto blockStartSuccessor{std::next(blockStartPredecessor)}; + + auto eraseEnd{[&]() { + if (blockStartPredecessor->Unmapped()) { + // If the start predecessor is unmapped then we can erase everything in our region + // and be done + return std::next(unmappedEnd); + } else { + // Else reuse the end predecessor as the start of our unmapped region then erase all + // up to it + unmappedEnd->virt = virt; + return unmappedEnd; + } + }()}; + + // We can't have two unmapped regions after each other + if (eraseEnd != blocks.end() && + (eraseEnd == blockStartSuccessor || + (blockStartPredecessor->Unmapped() && eraseEnd->Unmapped()))) + UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!"); + + blocks.erase(blockStartSuccessor, eraseEnd); + }}; + + // We can avoid any splitting logic if these are the case + if (blockEndPredecessor->Unmapped()) { + if (blockEndPredecessor->virt > virt) + eraseBlocksWithEndUnmapped(blockEndPredecessor); + + if (unmapCallback) + unmapCallback(virt, size); + + return; // The region is unmapped, bail out early + } else if (blockEndSuccessor->virt == virtEnd && blockEndSuccessor->Unmapped()) { + eraseBlocksWithEndUnmapped(blockEndSuccessor); + + if (unmapCallback) + unmapCallback(virt, size); + + return; // The region is unmapped here and doesn't need splitting, bail out early + } else if (blockEndSuccessor == blocks.end()) { + // This should never happen as the end should always follow an unmapped block + UNREACHABLE_MSG("Unexpected Memory Manager state!"); 
+ } else if (blockEndSuccessor->virt != virtEnd) { + // If one block is directly in front then we don't have to add a tail + + // The previous block is mapped so we will need to add a tail with an offset + PaType tailPhys{[&]() { + if constexpr (PaContigSplit) + return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt; + else + return blockEndPredecessor->phys; + }()}; + + if (blockEndPredecessor->virt >= virt) { + // If this block's start would be overlapped by the unmap then reuse it as a tail block + blockEndPredecessor->virt = virtEnd; + blockEndPredecessor->phys = tailPhys; + + // No longer predecessor anymore + blockEndSuccessor = blockEndPredecessor--; + } else { + blocks.insert(blockEndSuccessor, + {Block(virt, UnmappedPa, {}), + Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)}); + if (unmapCallback) + unmapCallback(virt, size); + + return; // The previous block is mapped and ends before + } + } + + // Walk the block vector to find the start predecessor as this is more efficient than another + // binary search in most scenarios + auto blockStartPredecessor{walkBackToPredecessor(blockEndSuccessor)}; + auto blockStartSuccessor{std::next(blockStartPredecessor)}; + + if (blockStartSuccessor->virt > virtEnd) { + UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt); + } else if (blockStartSuccessor->virt == virtEnd) { + // There are no blocks between the start and the end that would let us skip inserting a new + // one for head + + // The previous block is may be unmapped, if so we don't need to insert any unmaps after it + if (blockStartPredecessor->Mapped()) + blocks.insert(blockStartSuccessor, Block(virt, UnmappedPa, {})); + } else if (blockStartPredecessor->Unmapped()) { + // If the previous block is unmapped + blocks.erase(blockStartSuccessor, blockEndPredecessor); + } else { + // Erase overwritten blocks, skipping the first one as we have written the unmapped start + // block there + if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor) + blocks.erase(eraseStart, blockEndSuccessor); + + // Add in the unmapped block header + blockStartSuccessor->virt = virt; + blockStartSuccessor->phys = UnmappedPa; + } + + if (unmapCallback) + unmapCallback(virt, size); +} + +ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart, VaType vaLimit) + : Base(vaLimit), currentLinearAllocEnd(vaStart), vaStart(vaStart) {} + +ALLOC_MEMBER(VaType)::Allocate(VaType size) { + std::scoped_lock lock(this->blockMutex); + + VaType allocStart{UnmappedVa}; + VaType allocEnd{currentLinearAllocEnd + size}; + + // Avoid searching backwards in the address space if possible + if (allocEnd >= currentLinearAllocEnd && allocEnd <= this->vaLimit) { + auto allocEndSuccessor{ + std::lower_bound(this->blocks.begin(), this->blocks.end(), allocEnd)}; + if (allocEndSuccessor == this->blocks.begin()) + UNREACHABLE_MSG("First block in AS map is invalid!"); + + auto allocEndPredecessor{std::prev(allocEndSuccessor)}; + if (allocEndPredecessor->virt <= currentLinearAllocEnd) { + allocStart = currentLinearAllocEnd; + } else { + // Skip over fixed any mappings in front of us + while (allocEndSuccessor != this->blocks.end()) { + if (allocEndSuccessor->virt - allocEndPredecessor->virt < size || + allocEndPredecessor->Mapped()) { + allocStart = allocEndPredecessor->virt; + break; + } + + allocEndPredecessor = allocEndSuccessor++; + + // Use the VA limit to calculate if we can fit in the final block since it has no + // successor + if (allocEndSuccessor == 
this->blocks.end()) { + allocEnd = allocEndPredecessor->virt + size; + + if (allocEnd >= allocEndPredecessor->virt && allocEnd <= this->vaLimit) + allocStart = allocEndPredecessor->virt; + } + } + } + } + + if (allocStart != UnmappedVa) { + currentLinearAllocEnd = allocStart + size; + } else { // If linear allocation overflows the AS then find a gap + if (this->blocks.size() <= 2) + UNREACHABLE_MSG("Unexpected allocator state!"); + + auto searchPredecessor{this->blocks.begin()}; + auto searchSuccessor{std::next(searchPredecessor)}; + + while (searchSuccessor != this->blocks.end() && + (searchSuccessor->virt - searchPredecessor->virt < size || + searchPredecessor->Mapped())) { + searchPredecessor = searchSuccessor++; + } + + if (searchSuccessor != this->blocks.end()) + allocStart = searchPredecessor->virt; + else + return {}; // AS is full + } + + this->MapLocked(allocStart, true, size, {}); + return allocStart; +} + +ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) { + this->Map(virt, true, size); +} + +ALLOC_MEMBER(void)::Free(VaType virt, VaType size) { + this->Unmap(virt, size); +} +} // namespace Common diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 5c70c9a57b..344ddfc900 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -6,6 +6,7 @@ #include #include +#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" @@ -21,8 +22,8 @@ namespace Service::Nvidia::Devices { nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core) - : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, - gmmu{std::make_shared(system)} {} + : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{}, + gmmu{} {} nvhost_as_gpu::~nvhost_as_gpu() = default; NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, @@ -89,12 +90,49 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector& input, std::vector& IoctlAllocAsEx params{}; std::memcpy(¶ms, input.data(), input.size()); - LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size); - if (params.big_page_size == 0) { - params.big_page_size = DEFAULT_BIG_PAGE_SIZE; + LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size); + + std::scoped_lock lock(mutex); + + if (vm.initialised) { + UNREACHABLE_MSG("Cannot initialise an address space twice!"); + return NvResult::InvalidState; } - big_page_size = params.big_page_size; + if (params.big_page_size) { + if (!std::has_single_bit(params.big_page_size)) { + LOG_ERROR(Service_NVDRV, "Non power-of-2 big page size: 0x{:X}!", params.big_page_size); + return NvResult::BadValue; + } + + if (!(params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES)) { + LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size); + return NvResult::BadValue; + } + + vm.big_page_size = params.big_page_size; + vm.big_page_size_bits = static_cast(std::countr_zero(params.big_page_size)); + + vm.va_range_start = params.big_page_size << VM::VA_START_SHIFT; + } + + // If this is unspecified then default values should be used + if (params.va_range_start) { + vm.va_range_start = params.va_range_start; + vm.va_range_split = params.va_range_split; + vm.va_range_end = params.va_range_end; + } + + const u64 start_pages{vm.va_range_start >> 
VM::PAGE_SIZE_BITS}; + const u64 end_pages{vm.va_range_split >> VM::PAGE_SIZE_BITS}; + vm.small_page_allocator = std::make_shared(start_pages, end_pages); + + const u64 start_big_pages{vm.va_range_split >> vm.big_page_size_bits}; + const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits}; + vm.big_page_allocator = std::make_unique(start_big_pages, end_big_pages); + + gmmu = std::make_shared(system, 40, VM::PAGE_SIZE_BITS); + vm.initialised = true; return NvResult::Success; } @@ -106,21 +144,73 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector& input, std::vector< LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, params.page_size, params.flags); - const auto size{static_cast(params.pages) * static_cast(params.page_size)}; - if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) { - params.offset = *(gmmu->AllocateFixed(params.offset, size)); - } else { - params.offset = gmmu->Allocate(size, params.align); + std::scoped_lock lock(mutex); + + if (!vm.initialised) { + return NvResult::BadValue; } - auto result = NvResult::Success; - if (!params.offset) { - LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size); - result = NvResult::InsufficientMemory; + if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) { + return NvResult::BadValue; } + if (params.page_size != vm.big_page_size && + ((params.flags & MappingFlags::Sparse) != MappingFlags::None)) { + UNIMPLEMENTED_MSG("Sparse small pages are not implemented!"); + return NvResult::NotImplemented; + } + + const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS + : vm.big_page_size_bits}; + + auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator + : *vm.big_page_allocator}; + + if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) { + allocator.AllocateFixed(static_cast(params.offset >> page_size_bits), params.pages); + } else { + params.offset = static_cast(allocator.Allocate(params.pages)) << page_size_bits; + if (!params.offset) { + UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!"); + return NvResult::InsufficientMemory; + } + } + + u64 size{static_cast(params.pages) * params.page_size}; + + if ((params.flags & MappingFlags::Sparse) != MappingFlags::None) { + gmmu->MapSparse(params.offset, size); + } + + allocation_map[params.offset] = { + .size = size, + .page_size = params.page_size, + .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None, + }; + std::memcpy(output.data(), ¶ms, output.size()); - return result; + return NvResult::Success; +} + +void nvhost_as_gpu::FreeMappingLocked(u64 offset) { + auto mapping{mapping_map.at(offset)}; + + if (!mapping->fixed) { + auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; + u32 page_size_bits{mapping->big_page ? 
vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; + + allocator.Free(static_cast(mapping->offset >> page_size_bits), + static_cast(mapping->size >> page_size_bits)); + } + + // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state + // Only FreeSpace can unmap them fully + if (mapping->sparse_alloc) + gmmu->MapSparse(offset, mapping->size); + else + gmmu->Unmap(offset, mapping->size); + + mapping_map.erase(offset); } NvResult nvhost_as_gpu::FreeSpace(const std::vector& input, std::vector& output) { @@ -130,7 +220,40 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector& input, std::vector& LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, params.pages, params.page_size); - gmmu->Unmap(params.offset, static_cast(params.pages) * params.page_size); + std::scoped_lock lock(mutex); + + if (!vm.initialised) { + return NvResult::BadValue; + } + + try { + auto allocation{allocation_map[params.offset]}; + + if (allocation.page_size != params.page_size || + allocation.size != (static_cast(params.pages) * params.page_size)) { + return NvResult::BadValue; + } + + for (const auto& mapping : allocation.mappings) { + FreeMappingLocked(mapping->offset); + } + + // Unset sparse flag if required + if (allocation.sparse) { + gmmu->Unmap(params.offset, allocation.size); + } + + auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator + : *vm.big_page_allocator}; + u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS + : vm.big_page_size_bits}; + + allocator.Free(static_cast(params.offset >> page_size_bits), + static_cast(allocation.size >> page_size_bits)); + allocation_map.erase(params.offset); + } catch ([[maybe_unused]] const std::out_of_range& e) { + return NvResult::BadValue; + } std::memcpy(output.data(), ¶ms, output.size()); return NvResult::Success; @@ -141,43 +264,51 @@ NvResult nvhost_as_gpu::Remap(const std::vector& input, std::vector& out LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries); - auto result = NvResult::Success; std::vector entries(num_entries); std::memcpy(entries.data(), input.data(), input.size()); + std::scoped_lock lock(mutex); + + if (!vm.initialised) { + return NvResult::BadValue; + } + for (const auto& entry : entries) { - LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", - entry.offset, entry.nvmap_handle, entry.pages); + GPUVAddr virtual_address{static_cast(entry.as_offset_big_pages) + << vm.big_page_size_bits}; + u64 size{static_cast(entry.big_pages) << vm.big_page_size_bits}; - if (entry.nvmap_handle == 0) { - // If nvmap handle is null, we should unmap instead. 
- const auto offset{static_cast(entry.offset) << 0x10}; - const auto size{static_cast(entry.pages) << 0x10}; - gmmu->Unmap(offset, size); - continue; + auto alloc{allocation_map.upper_bound(virtual_address)}; + + if (alloc-- == allocation_map.begin() || + (virtual_address - alloc->first) + size > alloc->second.size) { + LOG_WARNING(Service_NVDRV, "Cannot remap into an unallocated region!"); + return NvResult::BadValue; } - const auto object{nvmap.GetHandle(entry.nvmap_handle)}; - if (!object) { - LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle); - result = NvResult::InvalidState; - break; + if (!alloc->second.sparse) { + LOG_WARNING(Service_NVDRV, "Cannot remap a non-sparse mapping!"); + return NvResult::BadValue; } - const auto offset{static_cast(entry.offset) << 0x10}; - const auto size{static_cast(entry.pages) << 0x10}; - const auto map_offset{static_cast(entry.map_offset) << 0x10}; - const auto addr{gmmu->Map(object->address + map_offset, offset, size)}; + if (!entry.handle) { + gmmu->MapSparse(virtual_address, size); + } else { + auto handle{nvmap.GetHandle(entry.handle)}; + if (!handle) { + return NvResult::BadValue; + } - if (!addr) { - LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); - result = NvResult::InvalidState; - break; + VAddr cpu_address{static_cast( + handle->address + + (static_cast(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; + + gmmu->Map(virtual_address, cpu_address, size); } } std::memcpy(output.data(), entries.data(), output.size()); - return result; + return NvResult::Success; } NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vector& output) { @@ -187,75 +318,96 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vector(buffer_map->CpuAddr() + params.buffer_offset)}; - const auto gpu_addr{static_cast(params.offset + params.buffer_offset)}; + std::scoped_lock lock(mutex); - if (!gmmu->Map(cpu_addr, gpu_addr, params.mapping_size)) { - LOG_CRITICAL(Service_NVDRV, - "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, " - "mapping_size = {}, offset={}", - params.flags, params.nvmap_handle, params.buffer_offset, - params.mapping_size, params.offset); + if (!vm.initialised) { + return NvResult::BadValue; + } - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::InvalidState; + // Remaps a subregion of an existing mapping to a different PA + if ((params.flags & MappingFlags::Remap) != MappingFlags::None) { + try { + auto mapping{mapping_map.at(params.offset)}; + + if (mapping->size < params.mapping_size) { + LOG_WARNING(Service_NVDRV, + "Cannot remap a partially mapped GPU address space region: 0x{:X}", + params.offset); + return NvResult::BadValue; } - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::Success; - } else { - LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset); + u64 gpu_address{static_cast(params.offset + params.buffer_offset)}; + VAddr cpu_address{mapping->ptr + params.buffer_offset}; - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::InvalidState; + gmmu->Map(gpu_address, cpu_address, params.mapping_size); + + return NvResult::Success; + } catch ([[maybe_unused]] const std::out_of_range& e) { + LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}", + params.offset); + return NvResult::BadValue; } } - const auto object{nvmap.GetHandle(params.nvmap_handle)}; - if (!object) { - LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", 
params.nvmap_handle); - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::InvalidState; + auto handle{nvmap.GetHandle(params.handle)}; + if (!handle) { + return NvResult::BadValue; } - // The real nvservices doesn't make a distinction between handles and ids, and - // object can only have one handle and it will be the same as its id. Assert that this is the - // case to prevent unexpected behavior. - ASSERT(object->id == params.nvmap_handle); + VAddr cpu_address{static_cast(handle->address + params.buffer_offset)}; + u64 size{params.mapping_size ? params.mapping_size : handle->orig_size}; - u64 page_size{params.page_size}; - if (!page_size) { - page_size = object->align; - } + if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) { + auto alloc{allocation_map.upper_bound(params.offset)}; - const auto physical_address{object->address + params.buffer_offset}; - u64 size{params.mapping_size}; - if (!size) { - size = object->size; - } + if (alloc-- == allocation_map.begin() || + (params.offset - alloc->first) + size > alloc->second.size) { + UNREACHABLE_MSG("Cannot perform a fixed mapping into an unallocated region!"); + return NvResult::BadValue; + } - const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None}; - if (is_alloc) { - params.offset = gmmu->MapAllocate(physical_address, size, page_size); + gmmu->Map(params.offset, cpu_address, size); + + auto mapping{std::make_shared(cpu_address, params.offset, size, true, false, + alloc->second.sparse)}; + alloc->second.mappings.push_back(mapping); + mapping_map[params.offset] = mapping; } else { - params.offset = gmmu->Map(physical_address, params.offset, size); - } + bool big_page{[&]() { + if (Common::IsAligned(handle->align, vm.big_page_size)) + return true; + else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) + return false; + else { + UNREACHABLE(); + return false; + } + }()}; - auto result = NvResult::Success; - if (!params.offset) { - LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size); - result = NvResult::InvalidState; - } else { - AddBufferMap(params.offset, size, physical_address, is_alloc); + auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; + u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE}; + u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; + + params.offset = static_cast(allocator.Allocate( + static_cast(Common::AlignUp(size, page_size) >> page_size_bits))) + << page_size_bits; + if (!params.offset) { + UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!"); + return NvResult::InsufficientMemory; + } + + gmmu->Map(params.offset, cpu_address, size); + + auto mapping{ + std::make_shared(cpu_address, params.offset, size, false, big_page, false)}; + mapping_map[params.offset] = mapping; } std::memcpy(output.data(), ¶ms, output.size()); - return result; + return NvResult::Success; } NvResult nvhost_as_gpu::UnmapBuffer(const std::vector& input, std::vector& output) { @@ -264,13 +416,36 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector& input, std::vectorUnmap(params.offset, *size); - } else { - LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset); + std::scoped_lock lock(mutex); + + if (!vm.initialised) { + return NvResult::BadValue; + } + + try { + auto mapping{mapping_map.at(params.offset)}; + + if (!mapping->fixed) { + auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; + u32 page_size_bits{mapping->big_page ? 
vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; + + allocator.Free(static_cast(mapping->offset >> page_size_bits), + static_cast(mapping->size >> page_size_bits)); + } + + // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state + // Only FreeSpace can unmap them fully + if (mapping->sparse_alloc) { + gmmu->MapSparse(params.offset, mapping->size); + } else { + gmmu->Unmap(params.offset, mapping->size); + } + + mapping_map.erase(params.offset); + } catch ([[maybe_unused]] const std::out_of_range& e) { + LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset); } - std::memcpy(output.data(), ¶ms, output.size()); return NvResult::Success; } @@ -284,28 +459,37 @@ NvResult nvhost_as_gpu::BindChannel(const std::vector& input, std::vector{ + VaRegion{ + .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS, + .page_size = VM::YUZU_PAGESIZE, + .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart, + }, + VaRegion{ + .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits, + .page_size = vm.big_page_size, + .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart, + }, + }; +} + NvResult nvhost_as_gpu::GetVARegions(const std::vector& input, std::vector& output) { IoctlGetVaRegions params{}; std::memcpy(¶ms, input.data(), input.size()); - LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr, - params.buf_size); + LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr, + params.buf_size); - params.buf_size = 0x30; + std::scoped_lock lock(mutex); - params.small = IoctlVaRegion{ - .offset = 0x04000000, - .page_size = DEFAULT_SMALL_PAGE_SIZE, - .pages = 0x3fbfff, - }; + if (!vm.initialised) { + return NvResult::BadValue; + } - params.big = IoctlVaRegion{ - .offset = 0x04000000, - .page_size = big_page_size, - .pages = 0x1bffff, - }; - - // TODO(ogniK): This probably can stay stubbed but should add support way way later + GetVARegionsImpl(params); std::memcpy(output.data(), ¶ms, output.size()); return NvResult::Success; @@ -316,64 +500,24 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector& input, std::vector nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const { - const auto end{buffer_mappings.upper_bound(gpu_addr)}; - for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) { - if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) { - return iter->second; - } - } - - return std::nullopt; -} - -void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, - bool is_allocated) { - buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated}; -} - -std::optional nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) { - if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) { - std::size_t size{}; - - if (iter->second.IsAllocated()) { - size = iter->second.Size(); - } - - buffer_mappings.erase(iter); - - return size; - } - - return std::nullopt; -} - Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) { LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id); return nullptr; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index f5fb33ba7d..1d27739e26 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -5,14 +5,19 @@ #pragma once +#include +#include #include #include +#include #include 
#include +#include "common/address_space.h" #include "common/common_funcs.h" #include "common/common_types.h" #include "common/swap.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" namespace Tegra { @@ -30,17 +35,13 @@ class NvMap; namespace Service::Nvidia::Devices { -constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16; -constexpr u32 DEFAULT_SMALL_PAGE_SIZE = 1 << 12; - -class nvmap; - -enum class AddressSpaceFlags : u32 { - None = 0x0, - FixedOffset = 0x1, - Remap = 0x100, +enum class MappingFlags : u32 { + None = 0, + Fixed = 1 << 0, + Sparse = 1 << 1, + Remap = 1 << 8, }; -DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags); +DECLARE_ENUM_FLAG_OPERATORS(MappingFlags); class nvhost_as_gpu final : public nvdevice { public: @@ -59,46 +60,15 @@ public: Kernel::KEvent* QueryEvent(u32 event_id) override; -private: - class BufferMap final { - public: - constexpr BufferMap() = default; - - constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_) - : start_addr{start_addr_}, end_addr{start_addr_ + size_} {} - - constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_, VAddr cpu_addr_, - bool is_allocated_) - : start_addr{start_addr_}, end_addr{start_addr_ + size_}, cpu_addr{cpu_addr_}, - is_allocated{is_allocated_} {} - - constexpr VAddr StartAddr() const { - return start_addr; - } - - constexpr VAddr EndAddr() const { - return end_addr; - } - - constexpr std::size_t Size() const { - return end_addr - start_addr; - } - - constexpr VAddr CpuAddr() const { - return cpu_addr; - } - - constexpr bool IsAllocated() const { - return is_allocated; - } - - private: - GPUVAddr start_addr{}; - GPUVAddr end_addr{}; - VAddr cpu_addr{}; - bool is_allocated{}; + struct VaRegion { + u64 offset; + u32 page_size; + u32 _pad0_; + u64 pages; }; + static_assert(sizeof(VaRegion) == 0x18); +private: struct IoctlAllocAsEx { u32_le flags{}; // usually passes 1 s32_le as_fd{}; // ignored; passes 0 @@ -113,7 +83,7 @@ private: struct IoctlAllocSpace { u32_le pages{}; u32_le page_size{}; - AddressSpaceFlags flags{}; + MappingFlags flags{}; INSERT_PADDING_WORDS(1); union { u64_le offset; @@ -130,19 +100,19 @@ private: static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size"); struct IoctlRemapEntry { - u16_le flags{}; - u16_le kind{}; - u32_le nvmap_handle{}; - u32_le map_offset{}; - u32_le offset{}; - u32_le pages{}; + u16 flags; + u16 kind; + NvCore::NvMap::Handle::Id handle; + u32 handle_offset_big_pages; + u32 as_offset_big_pages; + u32 big_pages; }; static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size"); struct IoctlMapBufferEx { - AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable - u32_le kind{}; // -1 is default - u32_le nvmap_handle{}; + MappingFlags flags{}; // bit0: fixed_offset, bit2: cacheable + u32_le kind{}; // -1 is default + NvCore::NvMap::Handle::Id handle; u32_le page_size{}; // 0 means don't care s64_le buffer_offset{}; u64_le mapping_size{}; @@ -160,27 +130,15 @@ private: }; static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size"); - struct IoctlVaRegion { - u64_le offset{}; - u32_le page_size{}; - INSERT_PADDING_WORDS(1); - u64_le pages{}; - }; - static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size"); - struct IoctlGetVaRegions { u64_le buf_addr{}; // (contained output user ptr on linux, ignored) u32_le buf_size{}; // forced to 2*sizeof(struct va_region) u32_le reserved{}; - IoctlVaRegion small{}; - IoctlVaRegion big{}; + std::array 
regions{}; }; - static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2, + static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2, "IoctlGetVaRegions is incorrect size"); - s32 channel{}; - u32 big_page_size{DEFAULT_BIG_PAGE_SIZE}; - NvResult AllocAsEx(const std::vector& input, std::vector& output); NvResult AllocateSpace(const std::vector& input, std::vector& output); NvResult Remap(const std::vector& input, std::vector& output); @@ -189,23 +147,74 @@ private: NvResult FreeSpace(const std::vector& input, std::vector& output); NvResult BindChannel(const std::vector& input, std::vector& output); + void GetVARegionsImpl(IoctlGetVaRegions& params); NvResult GetVARegions(const std::vector& input, std::vector& output); NvResult GetVARegions(const std::vector& input, std::vector& output, std::vector& inline_output); - std::optional FindBufferMap(GPUVAddr gpu_addr) const; - void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); - std::optional RemoveBufferMap(GPUVAddr gpu_addr); + void FreeMappingLocked(u64 offset); Module& module; NvCore::Container& container; NvCore::NvMap& nvmap; + struct Mapping { + VAddr ptr; + u64 offset; + u64 size; + bool fixed; + bool big_page; // Only valid if fixed == false + bool sparse_alloc; + + Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_) + : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_), + sparse_alloc(sparse_alloc_) {} + }; + + struct Allocation { + u64 size; + std::list> mappings; + u32 page_size; + bool sparse; + }; + + std::map> + mapping_map; //!< This maps the base addresses of mapped buffers to their total sizes and + //!< mapping type, this is needed as what was originally a single buffer may + //!< have been split into multiple GPU side buffers with the remap flag. + std::map allocation_map; //!< Holds allocations created by AllocSpace from + //!< which fixed buffers can be mapped into + std::mutex mutex; //!< Locks all AS operations + + struct VM { + static constexpr u32 YUZU_PAGESIZE{0x1000}; + static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)}; + + static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000}; + static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000}; + u32 big_page_size{DEFAULT_BIG_PAGE_SIZE}; + u32 big_page_size_bits{std::countr_zero(DEFAULT_BIG_PAGE_SIZE)}; + + static constexpr u32 VA_START_SHIFT{10}; + static constexpr u64 DEFAULT_VA_SPLIT{1ULL << 34}; + static constexpr u64 DEFAULT_VA_RANGE{1ULL << 37}; + u64 va_range_start{DEFAULT_BIG_PAGE_SIZE << VA_START_SHIFT}; + u64 va_range_split{DEFAULT_VA_SPLIT}; + u64 va_range_end{DEFAULT_VA_RANGE}; + + using Allocator = Common::FlatAllocator; + + std::unique_ptr big_page_allocator; + std::shared_ptr + small_page_allocator; //! 
Shared as this is also used by nvhost::GpuChannel + + bool initialised{}; + } vm; std::shared_ptr gmmu; - // This is expected to be ordered, therefore we must use a map, not unordered_map - std::map buffer_mappings; + // s32 channel{}; + // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE}; }; } // namespace Service::Nvidia::Devices diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 9e946d448a..fc68bcc730 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -71,18 +71,22 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) rasterizer = rasterizer_; } -GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { +GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size) { return PageTableOp(gpu_addr, cpu_addr, size); } +GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size) { + return PageTableOp(gpu_addr, 0, size); +} + GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) { - return Map(cpu_addr, *FindFreeRange(size, align), size); + return Map(*FindFreeRange(size, align), cpu_addr, size); } GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) { const std::optional gpu_addr = FindFreeRange(size, 1, true); ASSERT(gpu_addr); - return Map(cpu_addr, *gpu_addr, size); + return Map(*gpu_addr, cpu_addr, size); } void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 0a763fd198..b8878476aa 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -88,7 +88,8 @@ public: std::vector> GetSubmappedRange(GPUVAddr gpu_addr, std::size_t size) const; - [[nodiscard]] GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size); + GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size); + GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size); [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align); [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size); [[nodiscard]] std::optional AllocateFixed(GPUVAddr gpu_addr, std::size_t size); From 6fc4012396e98a1a6ac455791b314d2280a12a51 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Tue, 16 Nov 2021 00:01:40 +0100 Subject: [PATCH 22/68] VideoCore: Extra Fixes. 
--- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 4 +++- src/video_core/engines/puller.cpp | 2 +- src/video_core/texture_cache/util.cpp | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index a859a7abdc..54074af75c 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -143,7 +143,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector } }(); - must_unmark_fail = true; + must_unmark_fail = false; const auto check_failing = [&]() { if (events[slot].fails > 2) { @@ -164,6 +164,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector if (params.timeout == 0) { if (check_failing()) { + events[slot].fails = 0; return NvResult::Success; } return NvResult::Timeout; @@ -180,6 +181,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector } if (check_failing()) { + event.fails = 0; return NvResult::Success; } diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp index 37f2ced18f..3866c8746d 100644 --- a/src/video_core/engines/puller.cpp +++ b/src/video_core/engines/puller.cpp @@ -118,7 +118,7 @@ void Puller::ProcessSemaphoreTriggerMethod() { } void Puller::ProcessSemaphoreRelease() { - memory_manager.Write(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release); + rasterizer->SignalSemaphore(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release); } void Puller::ProcessSemaphoreAcquire() { diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index 1820823b22..bea1c27d06 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp @@ -755,7 +755,7 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config if (address == 0) { return false; } - if (address > (1ULL << 48)) { + if (address >= (1ULL << 40)) { return false; } if (gpu_memory.GpuToCpuAddress(address).has_value()) { From bb74973bba6005cee5f1409b3b5a6c572da3cf66 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Wed, 17 Nov 2021 05:44:01 +0100 Subject: [PATCH 23/68] General: Rebase fixes. --- src/video_core/texture_cache/texture_cache.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index e8b0b0a3ba..0ec999d63f 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -885,14 +885,13 @@ void TextureCache

::InvalidateScale(Image& image) { } image.image_view_ids.clear(); image.image_view_infos.clear(); - for (auto& this_state : channel_storage) { - if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(this_state.graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(this_state.compute_image_view_ids, CORRUPT_ID); - } - this_state.graphics_image_table.Invalidate(); - this_state.compute_image_table.Invalidate(); + auto& channel_info = channel_storage[image.channel]; + if constexpr (ENABLE_VALIDATION) { + std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID); + std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID); } + channel_info.graphics_image_table.Invalidate(); + channel_info.compute_image_table.Invalidate(); has_deleted_images = true; } From e462191482c6507daed67802c6c1d2c50f10c96e Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 17 Dec 2021 16:45:06 +0100 Subject: [PATCH 24/68] Refactor VideoCore to use AS sepparate from Channel. --- src/common/hash.h | 7 + src/video_core/CMakeLists.txt | 1 + .../control/channel_state_cache.cpp | 8 +- src/video_core/control/channel_state_cache.h | 30 +++- .../control/channel_state_cache.inc | 23 +++- src/video_core/memory_manager.cpp | 5 +- src/video_core/memory_manager.h | 9 ++ .../texture_cache/texture_cache.cpp | 16 +++ src/video_core/texture_cache/texture_cache.h | 130 ++++++------------ .../texture_cache/texture_cache_base.h | 96 ++++++------- 10 files changed, 172 insertions(+), 153 deletions(-) create mode 100644 src/video_core/texture_cache/texture_cache.cpp diff --git a/src/common/hash.h b/src/common/hash.h index b6f3e6d6fc..e8fe78b075 100644 --- a/src/common/hash.h +++ b/src/common/hash.h @@ -18,4 +18,11 @@ struct PairHash { } }; +template +struct IdentityHash { + [[nodiscard]] size_t operator()(T value) const noexcept { + return static_cast(value); + } +}; + } // namespace Common diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index e216c51a21..35faa70a0a 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -203,6 +203,7 @@ add_library(video_core STATIC texture_cache/render_targets.h texture_cache/samples_helper.h texture_cache/slot_vector.h + texture_cache/texture_cache.cpp texture_cache/texture_cache.h texture_cache/texture_cache_base.h texture_cache/types.h diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp index f72a97b2f5..ec7ba907ca 100644 --- a/src/video_core/control/channel_state_cache.cpp +++ b/src/video_core/control/channel_state_cache.cpp @@ -1,5 +1,11 @@ #include "video_core/control/channel_state_cache.inc" namespace VideoCommon { + +ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state) + : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute}, + gpu_memory{*channel_state.memory_manager} {} + template class VideoCommon::ChannelSetupCaches; -} + +} // namespace VideoCommon diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index c8298c0030..c51040c831 100644 --- a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -2,6 +2,7 @@ #include #include +#include #include #include "common/common_types.h" @@ -41,9 +42,10 @@ template class ChannelSetupCaches { public: /// Operations for seting the channel of execution. + virtual ~ChannelSetupCaches(); /// Create channel state. 
- void CreateChannel(Tegra::Control::ChannelState& channel); + virtual void CreateChannel(Tegra::Control::ChannelState& channel); /// Bind a channel for execution. void BindToChannel(s32 id); @@ -51,18 +53,34 @@ public: /// Erase channel's state. void EraseChannel(s32 id); + Tegra::MemoryManager* GetFromID(size_t id) const { + std::unique_lock lk(config_mutex); + const auto ref = address_spaces.find(id); + return ref->second.gpu_memory; + } + protected: static constexpr size_t UNSET_CHANNEL{std::numeric_limits::max()}; + P* channel_state; + size_t current_channel_id{UNSET_CHANNEL}; + size_t current_address_space{}; + Tegra::Engines::Maxwell3D* maxwell3d; + Tegra::Engines::KeplerCompute* kepler_compute; + Tegra::MemoryManager* gpu_memory; + std::deque

channel_storage; std::deque free_channel_ids; std::unordered_map channel_map; + struct AddresSpaceRef { + size_t ref_count; + size_t storage_id; + Tegra::MemoryManager* gpu_memory; + }; + std::unordered_map address_spaces; + mutable std::mutex config_mutex; - P* channel_state; - size_t current_channel_id{UNSET_CHANNEL}; - Tegra::Engines::Maxwell3D* maxwell3d; - Tegra::Engines::KeplerCompute* kepler_compute; - Tegra::MemoryManager* gpu_memory; + virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {} }; } // namespace VideoCommon diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc index 3eb73af9f4..185eabc35f 100644 --- a/src/video_core/control/channel_state_cache.inc +++ b/src/video_core/control/channel_state_cache.inc @@ -6,18 +6,18 @@ namespace VideoCommon { -ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state) - : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute}, - gpu_memory{*channel_state.memory_manager} {} +template +ChannelSetupCaches

::~ChannelSetupCaches() = default; template void ChannelSetupCaches

::CreateChannel(struct Tegra::Control::ChannelState& channel) { + std::unique_lock lk(config_mutex); ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0); auto new_id = [this, &channel]() { if (!free_channel_ids.empty()) { auto id = free_channel_ids.front(); free_channel_ids.pop_front(); - new (&channel_storage[id]) ChannelInfo(channel); + new (&channel_storage[id]) P(channel); return id; } channel_storage.emplace_back(channel); @@ -27,11 +27,24 @@ void ChannelSetupCaches

::CreateChannel(struct Tegra::Control::ChannelState& c if (current_channel_id != UNSET_CHANNEL) { channel_state = &channel_storage[current_channel_id]; } + auto as_it = address_spaces.find(channel.memory_manager->GetID()); + if (as_it != address_spaces.end()) { + as_it->second.ref_count++; + return; + } + AddresSpaceRef new_gpu_mem_ref{ + .ref_count = 1, + .storage_id = address_spaces.size(), + .gpu_memory = channel.memory_manager.get(), + }; + address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref); + OnGPUASRegister(channel.memory_manager->GetID()); } /// Bind a channel for execution. template void ChannelSetupCaches

::BindToChannel(s32 id) { + std::unique_lock lk(config_mutex); auto it = channel_map.find(id); ASSERT(it != channel_map.end() && id >= 0); current_channel_id = it->second; @@ -39,11 +52,13 @@ void ChannelSetupCaches

::BindToChannel(s32 id) { maxwell3d = &channel_state->maxwell3d; kepler_compute = &channel_state->kepler_compute; gpu_memory = &channel_state->gpu_memory; + current_address_space = gpu_memory->GetID(); } /// Erase channel's channel_state. template void ChannelSetupCaches

::EraseChannel(s32 id) { + std::unique_lock lk(config_mutex); const auto it = channel_map.find(id); ASSERT(it != channel_map.end() && id >= 0); const auto this_id = it->second; diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index fc68bcc730..d4c0dca78e 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -16,9 +16,12 @@ namespace Tegra { +std::atomic MemoryManager::unique_identifier_generator{}; + MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 page_bits_) : system{system_}, address_space_bits{address_space_bits_}, page_bits{page_bits_}, entries{}, - page_table{address_space_bits, address_space_bits + page_bits - 38, page_bits} { + page_table{address_space_bits, address_space_bits + page_bits - 38, page_bits}, + unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} { address_space_size = 1ULL << address_space_bits; allocate_start = address_space_bits > 32 ? 1ULL << 32 : 0; page_size = 1ULL << page_bits; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index b8878476aa..56604ef3e8 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -3,6 +3,7 @@ #pragma once +#include #include #include #include @@ -26,6 +27,10 @@ public: u64 page_bits_ = 16); ~MemoryManager(); + size_t GetID() const { + return unique_identifier; + } + /// Binds a renderer to the memory manager. void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); @@ -140,6 +145,10 @@ private: void SetEntry(size_t position, EntryType entry); Common::MultiLevelPageTable page_table; + + const size_t unique_identifier; + + static std::atomic unique_identifier_generator; }; } // namespace Tegra diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp new file mode 100644 index 0000000000..bc905a1a4f --- /dev/null +++ b/src/video_core/texture_cache/texture_cache.cpp @@ -0,0 +1,16 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. + +#include "video_core/control/channel_state_cache.inc" +#include "video_core/texture_cache/texture_cache_base.h" + +namespace VideoCommon { + +TextureCacheChannelInfo::TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept + : ChannelInfo(state), graphics_image_table{gpu_memory}, graphics_sampler_table{gpu_memory}, + compute_image_table{gpu_memory}, compute_sampler_table{gpu_memory} {} + +template class VideoCommon::ChannelSetupCaches; + +} // namespace VideoCommon diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 0ec999d63f..89c5faf88e 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #pragma once @@ -41,10 +43,6 @@ TextureCache

::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& // Setup channels current_channel_id = UNSET_CHANNEL; - state = nullptr; - maxwell3d = nullptr; - kepler_compute = nullptr; - gpu_memory = nullptr; // Make sure the first index is reserved for the null resources // This way the null resource becomes a compile time constant @@ -156,23 +154,24 @@ void TextureCache

::MarkModification(ImageId id) noexcept { template template void TextureCache

::FillGraphicsImageViews(std::span views) { - FillImageViews(state->graphics_image_table, state->graphics_image_view_ids, - views); + FillImageViews(channel_state->graphics_image_table, + channel_state->graphics_image_view_ids, views); } template void TextureCache

::FillComputeImageViews(std::span views) { - FillImageViews(state->compute_image_table, state->compute_image_view_ids, views); + FillImageViews(channel_state->compute_image_table, channel_state->compute_image_view_ids, + views); } template typename P::Sampler* TextureCache

::GetGraphicsSampler(u32 index) { - if (index > state->graphics_sampler_table.Limit()) { + if (index > channel_state->graphics_sampler_table.Limit()) { LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); return &slot_samplers[NULL_SAMPLER_ID]; } - const auto [descriptor, is_new] = state->graphics_sampler_table.Read(index); - SamplerId& id = state->graphics_sampler_ids[index]; + const auto [descriptor, is_new] = channel_state->graphics_sampler_table.Read(index); + SamplerId& id = channel_state->graphics_sampler_ids[index]; if (is_new) { id = FindSampler(descriptor); } @@ -181,12 +180,12 @@ typename P::Sampler* TextureCache

::GetGraphicsSampler(u32 index) { template typename P::Sampler* TextureCache

::GetComputeSampler(u32 index) { - if (index > state->compute_sampler_table.Limit()) { + if (index > channel_state->compute_sampler_table.Limit()) { LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); return &slot_samplers[NULL_SAMPLER_ID]; } - const auto [descriptor, is_new] = state->compute_sampler_table.Read(index); - SamplerId& id = state->compute_sampler_ids[index]; + const auto [descriptor, is_new] = channel_state->compute_sampler_table.Read(index); + SamplerId& id = channel_state->compute_sampler_ids[index]; if (is_new) { id = FindSampler(descriptor); } @@ -199,11 +198,12 @@ void TextureCache

::SynchronizeGraphicsDescriptors() { const bool linked_tsc = maxwell3d->regs.sampler_index == SamplerIndex::ViaHeaderIndex; const u32 tic_limit = maxwell3d->regs.tic.limit; const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tsc.limit; - if (state->graphics_sampler_table.Synchornize(maxwell3d->regs.tsc.Address(), tsc_limit)) { - state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); + if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tsc.Address(), + tsc_limit)) { + channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); } - if (state->graphics_image_table.Synchornize(maxwell3d->regs.tic.Address(), tic_limit)) { - state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); + if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tic.Address(), tic_limit)) { + channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); } } @@ -213,11 +213,12 @@ void TextureCache

::SynchronizeComputeDescriptors() { const u32 tic_limit = kepler_compute->regs.tic.limit; const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit; const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address(); - if (state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { - state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); + if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { + channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); } - if (state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(), tic_limit)) { - state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); + if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(), + tic_limit)) { + channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); } } @@ -738,7 +739,7 @@ ImageViewId TextureCache

::FindImageView(const TICEntry& config) { if (!IsValidEntry(*gpu_memory, config)) { return NULL_IMAGE_VIEW_ID; } - const auto [pair, is_new] = state->image_views.try_emplace(config); + const auto [pair, is_new] = channel_state->image_views.try_emplace(config); ImageViewId& image_view_id = pair->second; if (is_new) { image_view_id = CreateImageView(config); @@ -1198,7 +1199,7 @@ SamplerId TextureCache

::FindSampler(const TSCEntry& config) { if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) { return NULL_SAMPLER_ID; } - const auto [pair, is_new] = state->samplers.try_emplace(config); + const auto [pair, is_new] = channel_state->samplers.try_emplace(config); if (is_new) { pair->second = slot_samplers.insert(runtime, config); } @@ -1327,8 +1328,8 @@ void TextureCache

::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Fu static constexpr bool BOOL_BREAK = std::is_same_v; boost::container::small_vector images; ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) { - const auto it = state->gpu_page_table.find(page); - if (it == state->gpu_page_table.end()) { + const auto it = channel_state->gpu_page_table->find(page); + if (it == channel_state->gpu_page_table->end()) { if constexpr (BOOL_BREAK) { return false; } else { @@ -1454,8 +1455,9 @@ void TextureCache

::RegisterImage(ImageId image_id) { } image.lru_index = lru_cache.Insert(image_id, frame_tick); - ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, - [this, image_id](u64 page) { state->gpu_page_table[page].push_back(image_id); }); + ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) { + (*channel_state->gpu_page_table)[page].push_back(image_id); + }); if (False(image.flags & ImageFlagBits::Sparse)) { auto map_id = slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id); @@ -1486,9 +1488,9 @@ void TextureCache

::UnregisterImage(ImageId image_id) { image.flags &= ~ImageFlagBits::BadOverlap; lru_cache.Free(image.lru_index); const auto& clear_page_table = - [this, image_id]( - u64 page, - std::unordered_map, IdentityHash>& selected_page_table) { + [this, image_id](u64 page, + std::unordered_map, Common::IdentityHash>& + selected_page_table) { const auto page_it = selected_page_table.find(page); if (page_it == selected_page_table.end()) { ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); @@ -1504,7 +1506,7 @@ void TextureCache

::UnregisterImage(ImageId image_id) { image_ids.erase(vector_it); }; ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) { - clear_page_table(page, state->gpu_page_table); + clear_page_table(page, (*channel_state->gpu_page_table)); }); if (False(image.flags & ImageFlagBits::Sparse)) { const auto map_id = image.map_view_id; @@ -1701,11 +1703,11 @@ void TextureCache

::DeleteImage(ImageId image_id, bool immediate_delete) { template void TextureCache

::RemoveImageViewReferences(std::span removed_views) { - auto it = state->image_views.begin(); - while (it != state->image_views.end()) { + auto it = channel_state->image_views.begin(); + while (it != channel_state->image_views.end()) { const auto found = std::ranges::find(removed_views, it->second); if (found != removed_views.end()) { - it = state->image_views.erase(it); + it = channel_state->image_views.erase(it); } else { ++it; } @@ -1967,61 +1969,19 @@ bool TextureCache

::IsFullClear(ImageViewId id) { scissor.max_y >= size.height; } -template -TextureCache

::ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& state) noexcept - : maxwell3d{*state.maxwell_3d}, kepler_compute{*state.kepler_compute}, - gpu_memory{*state.memory_manager}, graphics_image_table{gpu_memory}, - graphics_sampler_table{gpu_memory}, compute_image_table{gpu_memory}, compute_sampler_table{ - gpu_memory} {} - template void TextureCache

::CreateChannel(struct Tegra::Control::ChannelState& channel) { - ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0); - auto new_id = [this, &channel]() { - if (!free_channel_ids.empty()) { - auto id = free_channel_ids.front(); - free_channel_ids.pop_front(); - new (&channel_storage[id]) ChannelInfo(channel); - return id; - } - channel_storage.emplace_back(channel); - return channel_storage.size() - 1; - }(); - channel_map.emplace(channel.bind_id, new_id); - if (current_channel_id != UNSET_CHANNEL) { - state = &channel_storage[current_channel_id]; - } + VideoCommon::ChannelSetupCaches::CreateChannel(channel); + const auto it = channel_map.find(channel.bind_id); + auto* this_state = &channel_storage[it->second]; + const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()]; + this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id]; } /// Bind a channel for execution. template -void TextureCache
<P>
::BindToChannel(s32 id) { - auto it = channel_map.find(id); - ASSERT(it != channel_map.end() && id >= 0); - current_channel_id = it->second; - state = &channel_storage[current_channel_id]; - maxwell3d = &state->maxwell3d; - kepler_compute = &state->kepler_compute; - gpu_memory = &state->gpu_memory; -} - -/// Erase channel's state. -template -void TextureCache
<P>
::EraseChannel(s32 id) { - const auto it = channel_map.find(id); - ASSERT(it != channel_map.end() && id >= 0); - const auto this_id = it->second; - free_channel_ids.push_back(this_id); - channel_map.erase(it); - if (this_id == current_channel_id) { - current_channel_id = UNSET_CHANNEL; - state = nullptr; - maxwell3d = nullptr; - kepler_compute = nullptr; - gpu_memory = nullptr; - } else if (current_channel_id != UNSET_CHANNEL) { - state = &channel_storage[current_channel_id]; - } +void TextureCache
<P>
::OnGPUASRegister([[maybe_unused]] size_t map_id) { + gpu_page_table_storage.emplace_back(); } } // namespace VideoCommon diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h index 69efcb7182..b24968b03f 100644 --- a/src/video_core/texture_cache/texture_cache_base.h +++ b/src/video_core/texture_cache/texture_cache_base.h @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu emulator team +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #pragma once @@ -13,9 +15,11 @@ #include #include "common/common_types.h" +#include "common/hash.h" #include "common/literals.h" #include "common/lru_cache.h" #include "video_core/compatible_formats.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/delayed_destruction_ring.h" #include "video_core/engines/fermi_2d.h" #include "video_core/surface.h" @@ -50,8 +54,35 @@ struct ImageViewInOut { ImageViewId id{}; }; +using TextureCacheGPUMap = std::unordered_map, Common::IdentityHash>; + +class TextureCacheChannelInfo : public ChannelInfo { +public: + TextureCacheChannelInfo() = delete; + TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept; + TextureCacheChannelInfo(const TextureCacheChannelInfo& state) = delete; + TextureCacheChannelInfo& operator=(const TextureCacheChannelInfo&) = delete; + TextureCacheChannelInfo(TextureCacheChannelInfo&& other) noexcept = default; + TextureCacheChannelInfo& operator=(TextureCacheChannelInfo&& other) noexcept = default; + + DescriptorTable graphics_image_table{gpu_memory}; + DescriptorTable graphics_sampler_table{gpu_memory}; + std::vector graphics_sampler_ids; + std::vector graphics_image_view_ids; + + DescriptorTable compute_image_table{gpu_memory}; + DescriptorTable compute_sampler_table{gpu_memory}; + std::vector compute_sampler_ids; + std::vector compute_image_view_ids; + + std::unordered_map image_views; + std::unordered_map samplers; + + TextureCacheGPUMap* gpu_page_table; +}; + template -class TextureCache { +class TextureCache : public VideoCommon::ChannelSetupCaches { /// Address shift for caching images into a hash table static constexpr u64 YUZU_PAGEBITS = 20; @@ -85,13 +116,6 @@ class TextureCache { PixelFormat src_format; }; - template - struct IdentityHash { - [[nodiscard]] size_t operator()(T value) const noexcept { - return static_cast(value); - } - }; - public: explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&); @@ -179,13 +203,7 @@ public: [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept; /// Create channel state. - void CreateChannel(struct Tegra::Control::ChannelState& channel); - - /// Bind a channel for execution. - void BindToChannel(s32 id); - - /// Erase channel's state. - void EraseChannel(s32 id); + void CreateChannel(Tegra::Control::ChannelState& channel) final override; std::mutex mutex; @@ -221,6 +239,8 @@ private: } } + void OnGPUASRegister(size_t map_id) final override; + /// Runs the Garbage Collector. 
void RunGarbageCollector(); @@ -355,51 +375,15 @@ private: Runtime& runtime; - struct ChannelInfo { - ChannelInfo() = delete; - ChannelInfo(struct Tegra::Control::ChannelState& state) noexcept; - ChannelInfo(const ChannelInfo& state) = delete; - ChannelInfo& operator=(const ChannelInfo&) = delete; - ChannelInfo(ChannelInfo&& other) noexcept = default; - ChannelInfo& operator=(ChannelInfo&& other) noexcept = default; - - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; - Tegra::MemoryManager& gpu_memory; - - DescriptorTable graphics_image_table{gpu_memory}; - DescriptorTable graphics_sampler_table{gpu_memory}; - std::vector graphics_sampler_ids; - std::vector graphics_image_view_ids; - - DescriptorTable compute_image_table{gpu_memory}; - DescriptorTable compute_sampler_table{gpu_memory}; - std::vector compute_sampler_ids; - std::vector compute_image_view_ids; - - std::unordered_map image_views; - std::unordered_map samplers; - - std::unordered_map, IdentityHash> gpu_page_table; - }; - - std::deque channel_storage; - std::deque free_channel_ids; - std::unordered_map channel_map; - - ChannelInfo* state; - size_t current_channel_id{UNSET_CHANNEL}; VideoCore::RasterizerInterface& rasterizer; - Tegra::Engines::Maxwell3D* maxwell3d; - Tegra::Engines::KeplerCompute* kepler_compute; - Tegra::MemoryManager* gpu_memory; + std::deque gpu_page_table_storage; RenderTargets render_targets; std::unordered_map framebuffers; - std::unordered_map, IdentityHash> page_table; - std::unordered_map, IdentityHash> sparse_page_table; + std::unordered_map, Common::IdentityHash> page_table; + std::unordered_map, Common::IdentityHash> sparse_page_table; std::unordered_map> sparse_views; VAddr virtual_invalid_space{}; From 9cf4c8831d6a8b0d9c6e2a48e89b667fd73accee Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 23 Dec 2021 01:40:45 +0100 Subject: [PATCH 25/68] Texture cache: Fix dangling references on multichannel. --- src/video_core/control/channel_state_cache.h | 1 + .../control/channel_state_cache.inc | 17 ++++--- src/video_core/texture_cache/texture_cache.h | 45 ++++++++++--------- 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index c51040c831..9b1d7362bb 100644 --- a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -72,6 +72,7 @@ protected: std::deque
<P>
channel_storage; std::deque free_channel_ids; std::unordered_map channel_map; + std::vector active_channel_ids; struct AddresSpaceRef { size_t ref_count; size_t storage_id; diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc index 185eabc35f..d3ae758b2c 100644 --- a/src/video_core/control/channel_state_cache.inc +++ b/src/video_core/control/channel_state_cache.inc @@ -1,3 +1,6 @@ + +#include + #include "video_core/control/channel_state.h" #include "video_core/control/channel_state_cache.h" #include "video_core/engines/kepler_compute.h" @@ -27,15 +30,16 @@ void ChannelSetupCaches
<P>
::CreateChannel(struct Tegra::Control::ChannelState& c if (current_channel_id != UNSET_CHANNEL) { channel_state = &channel_storage[current_channel_id]; } + active_channel_ids.push_back(new_id); auto as_it = address_spaces.find(channel.memory_manager->GetID()); if (as_it != address_spaces.end()) { - as_it->second.ref_count++; - return; + as_it->second.ref_count++; + return; } AddresSpaceRef new_gpu_mem_ref{ - .ref_count = 1, - .storage_id = address_spaces.size(), - .gpu_memory = channel.memory_manager.get(), + .ref_count = 1, + .storage_id = address_spaces.size(), + .gpu_memory = channel.memory_manager.get(), }; address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref); OnGPUASRegister(channel.memory_manager->GetID()); @@ -73,7 +77,8 @@ void ChannelSetupCaches
<P>
::EraseChannel(s32 id) { } else if (current_channel_id != UNSET_CHANNEL) { channel_state = &channel_storage[current_channel_id]; } + active_channel_ids.erase( + std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id)); } - } // namespace VideoCommon diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 89c5faf88e..5ef07d18f6 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -41,9 +41,6 @@ TextureCache
<P>
::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& sampler_descriptor.mipmap_filter.Assign(Tegra::Texture::TextureMipmapFilter::Linear); sampler_descriptor.cubemap_anisotropy.Assign(1); - // Setup channels - current_channel_id = UNSET_CHANNEL; - // Make sure the first index is reserved for the null resources // This way the null resource becomes a compile time constant void(slot_images.insert(NullImageParams{})); @@ -886,13 +883,15 @@ void TextureCache
<P>
::InvalidateScale(Image& image) { } image.image_view_ids.clear(); image.image_view_infos.clear(); - auto& channel_info = channel_storage[image.channel]; - if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID); + for (size_t c : active_channel_ids) { + auto& channel_info = channel_storage[c]; + if constexpr (ENABLE_VALIDATION) { + std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID); + std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID); + } + channel_info.graphics_image_table.Invalidate(); + channel_info.compute_image_table.Invalidate(); } - channel_info.graphics_image_table.Invalidate(); - channel_info.compute_image_table.Invalidate(); has_deleted_images = true; } @@ -1690,26 +1689,30 @@ void TextureCache
<P>
::DeleteImage(ImageId image_id, bool immediate_delete) { if (alloc_images.empty()) { image_allocs_table.erase(alloc_it); } - for (auto& this_state : channel_storage) { + for (size_t c : active_channel_ids) { + auto& channel_info = channel_storage[c]; if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(this_state.graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(this_state.compute_image_view_ids, CORRUPT_ID); + std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID); + std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID); } - this_state.graphics_image_table.Invalidate(); - this_state.compute_image_table.Invalidate(); + channel_info.graphics_image_table.Invalidate(); + channel_info.compute_image_table.Invalidate(); } has_deleted_images = true; } template void TextureCache
<P>
::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) { - auto it = channel_state->image_views.begin(); - while (it != channel_state->image_views.end()) { - const auto found = std::ranges::find(removed_views, it->second); - if (found != removed_views.end()) { - it = channel_state->image_views.erase(it); - } else { - ++it; + for (size_t c : active_channel_ids) { + auto& channel_info = channel_storage[c]; + auto it = channel_info.image_views.begin(); + while (it != channel_info.image_views.end()) { + const auto found = std::ranges::find(removed_views, it->second); + if (found != removed_views.end()) { + it = channel_info.image_views.erase(it); + } else { + ++it; + } } } } From f350c3d74ea7880fc6d21f7f638b0d4a70a3246b Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 1 Jan 2022 22:03:37 +0100 Subject: [PATCH 26/68] Texture cache: Fix the remaining issues with memory management and unmapping. --- .../service/nvdrv/devices/nvhost_as_gpu.cpp | 3 +++ src/video_core/control/channel_state_cache.h | 10 +++++++ src/video_core/gpu.cpp | 8 ++++++ src/video_core/gpu.h | 2 ++ src/video_core/memory_manager.cpp | 11 +++++++- src/video_core/rasterizer_interface.h | 2 +- .../renderer_opengl/gl_rasterizer.cpp | 4 +-- .../renderer_opengl/gl_rasterizer.h | 2 +- .../renderer_vulkan/vk_rasterizer.cpp | 4 +-- .../renderer_vulkan/vk_rasterizer.h | 2 +- src/video_core/texture_cache/texture_cache.h | 27 ++++++++++++++----- .../texture_cache/texture_cache_base.h | 4 +-- 12 files changed, 63 insertions(+), 16 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 344ddfc900..db2a6c3b2e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -16,6 +16,7 @@ #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" #include "core/hle/service/nvdrv/nvdrv.h" #include "video_core/control/channel_state.h" +#include "video_core/gpu.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" @@ -24,6 +25,7 @@ namespace Service::Nvidia::Devices { nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core) : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{}, gmmu{} {} + nvhost_as_gpu::~nvhost_as_gpu() = default; NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, @@ -132,6 +134,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector& vm.big_page_allocator = std::make_unique(start_big_pages, end_big_pages); gmmu = std::make_shared(system, 40, VM::PAGE_SIZE_BITS); + system.GPU().InitAddressSpace(*gmmu); vm.initialised = true; return NvResult::Success; diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index 9b1d7362bb..31d80e8b71 100644 --- a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -3,6 +3,7 @@ #include <deque> #include <limits> #include <mutex> +#include <optional> #include <unordered_map> #include "common/common_types.h" @@ -59,6 +60,15 @@ public: return ref->second.gpu_memory; } + std::optional<size_t> getStorageID(size_t id) const { + std::unique_lock lk(config_mutex); + const auto ref = address_spaces.find(id); + if (ref == address_spaces.end()) { + return std::nullopt; + } + return ref->second.storage_id; + } + protected: static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()}; diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index
8c0ff00941..eebd7f3ff0 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -73,6 +73,10 @@ struct GPU::Impl { rasterizer->InitializeChannel(to_init); } + void InitAddressSpace(Tegra::MemoryManager& memory_manager) { + memory_manager.BindRasterizer(rasterizer); + } + void ReleaseChannel(Control::ChannelState& to_release) { UNIMPLEMENTED(); } @@ -452,6 +456,10 @@ void GPU::ReleaseChannel(Control::ChannelState& to_release) { impl->ReleaseChannel(to_release); } +void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) { + impl->InitAddressSpace(memory_manager); +} + void GPU::BindRenderer(std::unique_ptr renderer) { impl->BindRenderer(std::move(renderer)); } diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 74d55e074e..7e84b0d2f4 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -118,6 +118,8 @@ public: void ReleaseChannel(Control::ChannelState& to_release); + void InitAddressSpace(Tegra::MemoryManager& memory_manager); + /// Request a host GPU memory flush from the CPU. [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index d4c0dca78e..b36067613f 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -59,10 +59,19 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp } for (u64 offset{}; offset < size; offset += page_size) { const GPUVAddr current_gpu_addr = gpu_addr + offset; + [[maybe_unused]] const auto current_entry_type = GetEntry(current_gpu_addr); SetEntry(current_gpu_addr, entry_type); + if (current_entry_type != entry_type) { + rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size); + } if constexpr (entry_type == EntryType::Mapped) { const VAddr current_cpu_addr = cpu_addr + offset; const auto index = PageEntryIndex(current_gpu_addr); + const u32 sub_value = static_cast(current_cpu_addr >> 12ULL); + if (current_entry_type == entry_type && sub_value != page_table[index]) { + rasterizer->InvalidateRegion(static_cast(page_table[index]) << 12ULL, + page_size); + } page_table[index] = static_cast(current_cpu_addr >> 12ULL); } remaining_size -= page_size; @@ -168,7 +177,7 @@ std::optional MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t s const size_t page_last{(addr + size + page_size - 1) >> page_bits}; while (page_index < page_last) { const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (page_addr && *page_addr != 0) { + if (page_addr) { return page_addr; } ++page_index; diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 8dacb26268..5362aafb67 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -95,7 +95,7 @@ public: virtual void UnmapMemory(VAddr addr, u64 size) = 0; /// Remap GPU memory range. 
This means underneath backing memory changed - virtual void ModifyGPUMemory(GPUVAddr addr, u64 size) = 0; + virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0; /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory /// and invalidated diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 4eb9bd1160..f745fbf562 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -379,10 +379,10 @@ void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) { shader_cache.OnCPUWrite(addr, size); } -void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) { +void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { { std::scoped_lock lock{texture_cache.mutex}; - texture_cache.UnmapGPUMemory(addr, size); + texture_cache.UnmapGPUMemory(as_id, addr, size); } } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 39b20cfb20..d469075a13 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -82,7 +82,7 @@ public: void OnCPUWrite(VAddr addr, u64 size) override; void SyncGuestHost() override; void UnmapMemory(VAddr addr, u64 size) override; - void ModifyGPUMemory(GPUVAddr addr, u64 size) override; + void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; void SignalSemaphore(GPUVAddr addr, u32 value) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 7e08055442..50fdf5e18b 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -441,10 +441,10 @@ void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) { pipeline_cache.OnCPUWrite(addr, size); } -void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) { +void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { { std::scoped_lock lock{texture_cache.mutex}; - texture_cache.UnmapGPUMemory(addr, size); + texture_cache.UnmapGPUMemory(as_id, addr, size); } } diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 642fe65769..c836158b87 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -78,7 +78,7 @@ public: void OnCPUWrite(VAddr addr, u64 size) override; void SyncGuestHost() override; void UnmapMemory(VAddr addr, u64 size) override; - void ModifyGPUMemory(GPUVAddr addr, u64 size) override; + void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; void SignalSemaphore(GPUVAddr addr, u32 value) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 5ef07d18f6..a483892e43 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -480,12 +480,20 @@ void TextureCache
<P>
::UnmapMemory(VAddr cpu_addr, size_t size) { } template -void TextureCache
<P>
::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) { +void TextureCache
<P>
::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size) { std::vector<ImageId> deleted_images; - ForEachImageInRegionGPU(gpu_addr, size, + ForEachImageInRegionGPU(as_id, gpu_addr, size, [&](ImageId id, Image&) { deleted_images.push_back(id); }); for (const ImageId id : deleted_images) { Image& image = slot_images[id]; + if (True(image.flags & ImageFlagBits::CpuModified)) { + continue; + } + image.flags |= ImageFlagBits::CpuModified; + if (True(image.flags & ImageFlagBits::Tracked)) { + UntrackImage(image, id); + } + /* if (True(image.flags & ImageFlagBits::Remapped)) { continue; } @@ -493,6 +501,7 @@ void TextureCache
<P>
::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) { if (True(image.flags & ImageFlagBits::Tracked)) { UntrackImage(image, id); } + */ } } @@ -1322,13 +1331,19 @@ void TextureCache
<P>
::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& f template template -void TextureCache
<P>
::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func) { +void TextureCache
<P>
::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, + Func&& func) { using FuncReturn = typename std::invoke_result::type; static constexpr bool BOOL_BREAK = std::is_same_v; boost::container::small_vector images; - ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) { - const auto it = channel_state->gpu_page_table->find(page); - if (it == channel_state->gpu_page_table->end()) { + auto storage_id = getStorageID(as_id); + if (!storage_id) { + return; + } + auto& gpu_page_table = gpu_page_table_storage[*storage_id]; + ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) { + const auto it = gpu_page_table.find(page); + if (it == gpu_page_table.end()) { if constexpr (BOOL_BREAK) { return false; } else { diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h index b24968b03f..2f4db50478 100644 --- a/src/video_core/texture_cache/texture_cache_base.h +++ b/src/video_core/texture_cache/texture_cache_base.h @@ -173,7 +173,7 @@ public: void UnmapMemory(VAddr cpu_addr, size_t size); /// Remove images in a region - void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size); + void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size); /// Blit an image with the given parameters void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst, @@ -309,7 +309,7 @@ private: void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func); template - void ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func); + void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func); template void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func); From e44ac8b821833672f7c99ad22dbe57ac9403279d Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 6 Jan 2022 19:47:15 +0100 Subject: [PATCH 27/68] Texture Cache: Fix GC and GPU Modified on Joins. --- src/video_core/texture_cache/texture_cache.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index a483892e43..66de41f047 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -1758,6 +1758,7 @@ void TextureCache
<P>
::SynchronizeAliases(ImageId image_id) { boost::container::small_vector aliased_images; Image& image = slot_images[image_id]; bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled); + bool any_modified = True(image.flags & ImageFlagBits::GpuModified); u64 most_recent_tick = image.modification_tick; for (const AliasedImage& aliased : image.aliased_images) { ImageBase& aliased_image = slot_images[aliased.id]; @@ -1765,9 +1766,7 @@ void TextureCache
<P>
::SynchronizeAliases(ImageId image_id) { most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick); aliased_images.push_back(&aliased); any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled); - if (True(aliased_image.flags & ImageFlagBits::GpuModified)) { - image.flags |= ImageFlagBits::GpuModified; - } + any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified); } } if (aliased_images.empty()) { @@ -1782,6 +1781,9 @@ void TextureCache
<P>
::SynchronizeAliases(ImageId image_id) { } } image.modification_tick = most_recent_tick; + if (any_modified) { + image.flags |= ImageFlagBits::GpuModified; + } std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) { const ImageBase& lhs_image = slot_images[lhs->id]; const ImageBase& rhs_image = slot_images[rhs->id]; From 668e80a9f42fb4ce0e16f6381d05bcbd286b2da1 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 30 Jan 2022 10:31:13 +0100 Subject: [PATCH 28/68] VideoCore: Refactor syncing. --- src/core/core.cpp | 12 ++ src/core/core.h | 9 + .../service/nvdrv/devices/nvdisp_disp0.cpp | 2 +- .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 19 +- .../hle/service/nvdrv/devices/nvhost_ctrl.h | 4 + .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 10 +- src/core/hle/service/nvflinger/nvflinger.cpp | 9 +- src/video_core/CMakeLists.txt | 40 ++-- src/video_core/cdma_pusher.cpp | 25 ++- src/video_core/cdma_pusher.h | 15 +- src/video_core/control/channel_state.cpp | 2 +- src/video_core/control/channel_state.h | 2 +- src/video_core/control/channel_state_cache.h | 4 + src/video_core/control/scheduler.cpp | 2 +- src/video_core/control/scheduler.h | 2 +- src/video_core/dma_pusher.h | 26 ++- src/video_core/engines/puller.cpp | 65 +++--- src/video_core/engines/puller.h | 1 - src/video_core/fence_manager.h | 12 +- src/video_core/gpu.cpp | 197 +++++++++--------- src/video_core/gpu.h | 19 +- src/video_core/gpu_thread.cpp | 6 +- src/video_core/gpu_thread.h | 2 + .../codecs/codec.cpp | 36 ++-- .../codecs/codec.h | 14 +- .../codecs/h264.cpp | 4 +- .../{command_classes => host1x}/codecs/h264.h | 6 +- .../codecs/vp8.cpp | 4 +- .../{command_classes => host1x}/codecs/vp8.h | 5 +- .../codecs/vp9.cpp | 8 +- .../{command_classes => host1x}/codecs/vp9.h | 12 +- .../codecs/vp9_types.h | 0 src/video_core/host1x/control.cpp | 35 ++++ .../host1x.h => host1x/control.h} | 17 +- src/video_core/host1x/host1x.h | 33 +++ .../{command_classes => host1x}/nvdec.cpp | 6 +- .../{command_classes => host1x}/nvdec.h | 7 +- .../nvdec_common.h | 4 +- .../sync_manager.cpp | 10 +- .../sync_manager.h | 6 + src/video_core/host1x/syncpoint_manager.cpp | 93 +++++++++ src/video_core/host1x/syncpoint_manager.h | 99 +++++++++ .../{command_classes => host1x}/vic.cpp | 9 +- .../{command_classes => host1x}/vic.h | 7 +- 44 files changed, 648 insertions(+), 252 deletions(-) rename src/video_core/{command_classes => host1x}/codecs/codec.cpp (91%) rename src/video_core/{command_classes => host1x}/codecs/codec.h (78%) rename src/video_core/{command_classes => host1x}/codecs/h264.cpp (98%) rename src/video_core/{command_classes => host1x}/codecs/h264.h (96%) rename src/video_core/{command_classes => host1x}/codecs/vp8.cpp (93%) rename src/video_core/{command_classes => host1x}/codecs/vp8.h (93%) rename src/video_core/{command_classes => host1x}/codecs/vp9.cpp (99%) rename src/video_core/{command_classes => host1x}/codecs/vp9.h (93%) rename src/video_core/{command_classes => host1x}/codecs/vp9_types.h (100%) create mode 100644 src/video_core/host1x/control.cpp rename src/video_core/{command_classes/host1x.h => host1x/control.h} (60%) create mode 100644 src/video_core/host1x/host1x.h rename src/video_core/{command_classes => host1x}/nvdec.cpp (92%) rename src/video_core/{command_classes => host1x}/nvdec.h (88%) rename src/video_core/{command_classes => host1x}/nvdec_common.h (98%) rename src/video_core/{command_classes => host1x}/sync_manager.cpp (77%) rename src/video_core/{command_classes => host1x}/sync_manager.h 
(95%) create mode 100644 src/video_core/host1x/syncpoint_manager.cpp create mode 100644 src/video_core/host1x/syncpoint_manager.h rename src/video_core/{command_classes => host1x}/vic.cpp (98%) rename src/video_core/{command_classes => host1x}/vic.h (93%) diff --git a/src/core/core.cpp b/src/core/core.cpp index 1210928682..fa059a3948 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -51,6 +51,7 @@ #include "core/telemetry_session.h" #include "core/tools/freezer.h" #include "network/network.h" +#include "video_core/host1x/host1x.h" #include "video_core/renderer_base.h" #include "video_core/video_core.h" @@ -215,6 +216,7 @@ struct System::Impl { telemetry_session = std::make_unique(); + host1x_core = std::make_unique(); gpu_core = VideoCore::CreateGPU(emu_window, system); if (!gpu_core) { return SystemResultStatus::ErrorVideoCore; @@ -373,6 +375,7 @@ struct System::Impl { app_loader.reset(); audio_core.reset(); gpu_core.reset(); + host1x_core.reset(); perf_stats.reset(); kernel.Shutdown(); memory.Reset(); @@ -450,6 +453,7 @@ struct System::Impl { /// AppLoader used to load the current executing application std::unique_ptr app_loader; std::unique_ptr gpu_core; + std::unique_ptr host1x_core; std::unique_ptr interrupt_manager; std::unique_ptr device_memory; std::unique_ptr audio_core; @@ -668,6 +672,14 @@ const Tegra::GPU& System::GPU() const { return *impl->gpu_core; } +Tegra::Host1x::Host1x& System::Host1x() { + return *impl->host1x_core; +} + +const Tegra::Host1x::Host1x& System::Host1x() const { + return *impl->host1x_core; +} + Core::Hardware::InterruptManager& System::InterruptManager() { return *impl->interrupt_manager; } diff --git a/src/core/core.h b/src/core/core.h index 0ce3b1d60c..e4168a921f 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -74,6 +74,9 @@ class TimeManager; namespace Tegra { class DebugContext; class GPU; +namespace Host1x { +class Host1x; +} // namespace Host1x } // namespace Tegra namespace VideoCore { @@ -260,6 +263,12 @@ public: /// Gets an immutable reference to the GPU interface. [[nodiscard]] const Tegra::GPU& GPU() const; + /// Gets a mutable reference to the Host1x interface + [[nodiscard]] Tegra::Host1x::Host1x& Host1x(); + + /// Gets an immutable reference to the Host1x interface. + [[nodiscard]] const Tegra::Host1x::Host1x& Host1x() const; + /// Gets a mutable reference to the renderer. 
[[nodiscard]] VideoCore::RendererBase& Renderer(); diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index b1c0e9eb2d..e6a976714f 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -50,7 +50,7 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form stride, format, transform, crop_rect}; system.GetPerfStats().EndSystemFrame(); - system.GPU().SwapBuffers(&framebuffer); + system.GPU().RequestSwapBuffers(&framebuffer, nullptr, 0); system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs()); system.GetPerfStats().BeginSystemFrame(); } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 54074af75c..ffe42d4235 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -18,6 +18,7 @@ #include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" #include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" namespace Service::Nvidia::Devices { @@ -129,7 +130,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector return NvResult::Success; } - auto& gpu = system.GPU(); + auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager(); const u32 target_value = params.fence.value; auto lock = NvEventsLock(); @@ -149,7 +150,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector if (events[slot].fails > 2) { { auto lk = system.StallProcesses(); - gpu.WaitFence(fence_id, target_value); + host1x_syncpoint_manager.WaitHost(fence_id, target_value); system.UnstallProcesses(); } params.value.raw = target_value; @@ -198,7 +199,15 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector } params.value.raw |= slot; - gpu.RegisterSyncptInterrupt(fence_id, target_value); + event.wait_handle = + host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() { + auto& event = events[slot]; + if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) == + EventState::Waiting) { + event.kevent->GetWritableEvent().Signal(); + } + event.status.store(EventState::Signalled, std::memory_order_release); + }); return NvResult::Timeout; } @@ -288,8 +297,10 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector& input, std::v auto& event = events[event_id]; if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) == EventState::Waiting) { - system.GPU().CancelSyncptInterrupt(event.assigned_syncpt, event.assigned_value); + auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager(); + host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle); syncpoint_manager.RefreshSyncpoint(event.assigned_syncpt); + event.wait_handle = {}; } event.fails++; event.status.store(EventState::Cancelled, std::memory_order_release); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index d56aea4059..136a1e925e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -11,6 +11,7 @@ #include "common/common_types.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" #include "core/hle/service/nvdrv/nvdrv.h" +#include "video_core/host1x/syncpoint_manager.h" 
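For orientation while reading the nvhost_ctrl changes: the Host1x syncpoint manager introduced here replaces the GPU's ad-hoc syncpoint interrupt lists with actions registered against a target value, and RegisterHostAction hands back an ActionHandle so a pending wait can later be cancelled with DeregisterHostAction. The following is a minimal sketch of that registration idea, not the patch's implementation; it assumes hypothetical members registered_actions (one std::list per syncpoint, which is what keeps the returned iterator handle stable), atomic host_counters, and a mutex named guard:

    // Sketch only: illustrates the action-handle scheme used by nvhost_ctrl above.
    struct RegisteredAction {
        u32 expected_value;
        std::function<void()> action;
    };
    using ActionHandle = std::list<RegisteredAction>::iterator;

    ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value,
                                    std::function<void()>&& action) {
        std::scoped_lock lk{guard};
        auto& actions = registered_actions[syncpoint_id];
        if (host_counters[syncpoint_id].load() >= expected_value) {
            action(); // Target already reached: fire immediately, nothing to track.
            return actions.end();
        }
        // Keep the list ordered by target so increments only ever inspect the front.
        auto it = actions.begin();
        while (it != actions.end() && it->expected_value <= expected_value) {
            ++it;
        }
        return actions.insert(it, RegisteredAction{expected_value, std::move(action)});
    }

Deregistering then amounts to erasing that iterator from the same list under the lock, which is what the cancellation path in IocCtrlClearEventWait below relies on.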
namespace Service::Nvidia::NvCore { class Container; @@ -78,6 +79,9 @@ private: // Tells if an NVEvent is registered or not bool registered{}; + // Used for waiting on a syncpoint & canceling it. + Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{}; + bool IsBeingUsed() { const auto current_status = status.load(std::memory_order_acquire); return current_status == EventState::Waiting || diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 38d45cb79a..db3e266ad6 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -210,10 +210,10 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector& input, std::ve static std::vector BuildWaitCommandList(NvFence fence) { return { - Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1, + Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1, Tegra::SubmissionMode::Increasing), {fence.value}, - Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, + Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1, Tegra::SubmissionMode::Increasing), BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id), }; @@ -222,12 +222,12 @@ static std::vector BuildWaitCommandList(NvFence fence) { static std::vector BuildIncrementCommandList(NvFence fence, u32 add_increment) { std::vector result{ - Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1, + Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1, Tegra::SubmissionMode::Increasing), {}}; for (u32 count = 0; count < add_increment; ++count) { - result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, + result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1, Tegra::SubmissionMode::Increasing)); result.emplace_back( BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id)); @@ -239,7 +239,7 @@ static std::vector BuildIncrementCommandList(NvFence fence static std::vector BuildIncrementWithWfiCommandList(NvFence fence, u32 add_increment) { std::vector result{ - Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1, + Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1, Tegra::SubmissionMode::Increasing), {}}; const std::vector increment{ diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 8c3013f833..aa112021d2 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -24,6 +24,8 @@ #include "core/hle/service/vi/layer/vi_layer.h" #include "core/hle/service/vi/vi_results.h" #include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" namespace Service::NVFlinger { @@ -267,12 +269,12 @@ void NVFlinger::Compose() { return; // We are likely shutting down } - auto& gpu = system.GPU(); + auto& syncpoint_manager = system.Host1x().GetSyncpointManager(); const auto& multi_fence = buffer.fence; guard->unlock(); for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) { const auto& fence = multi_fence.fences[fence_id]; - gpu.WaitFence(fence.id, fence.value); + syncpoint_manager.WaitGuest(fence.id, fence.value); } guard->lock(); @@ -284,6 +286,7 @@ void NVFlinger::Compose() { auto nvdisp = nvdrv->GetDevice(disp_fd); ASSERT(nvdisp); + guard->unlock(); Common::Rectangle crop_rect{ static_cast(buffer.crop.Left()), 
static_cast(buffer.crop.Top()), static_cast(buffer.crop.Right()), static_cast(buffer.crop.Bottom())}; @@ -292,6 +295,8 @@ void NVFlinger::Compose() { igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(), static_cast(buffer.transform), crop_rect); + guard->lock(); + swap_interval = buffer.swap_interval; auto fence = android::Fence::NoFence(); diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 35faa70a0a..723f9b67ca 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -4,7 +4,7 @@ add_subdirectory(host_shaders) if(LIBVA_FOUND) - set_source_files_properties(command_classes/codecs/codec.cpp + set_source_files_properties(host1x/codecs/codec.cpp PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1) list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES}) endif() @@ -15,24 +15,6 @@ add_library(video_core STATIC buffer_cache/buffer_cache.h cdma_pusher.cpp cdma_pusher.h - command_classes/codecs/codec.cpp - command_classes/codecs/codec.h - command_classes/codecs/h264.cpp - command_classes/codecs/h264.h - command_classes/codecs/vp8.cpp - command_classes/codecs/vp8.h - command_classes/codecs/vp9.cpp - command_classes/codecs/vp9.h - command_classes/codecs/vp9_types.h - command_classes/host1x.cpp - command_classes/host1x.h - command_classes/nvdec.cpp - command_classes/nvdec.h - command_classes/nvdec_common.h - command_classes/sync_manager.cpp - command_classes/sync_manager.h - command_classes/vic.cpp - command_classes/vic.h compatible_formats.cpp compatible_formats.h control/channel_state.cpp @@ -63,6 +45,26 @@ add_library(video_core STATIC engines/puller.cpp engines/puller.h framebuffer_config.h + host1x/codecs/codec.cpp + host1x/codecs/codec.h + host1x/codecs/h264.cpp + host1x/codecs/h264.h + host1x/codecs/vp8.cpp + host1x/codecs/vp8.h + host1x/codecs/vp9.cpp + host1x/codecs/vp9.h + host1x/codecs/vp9_types.h + host1x/control.cpp + host1x/control.h + host1x/nvdec.cpp + host1x/nvdec.h + host1x/nvdec_common.h + host1x/sync_manager.cpp + host1x/sync_manager.h + host1x/syncpoint_manager.cpp + host1x/syncpoint_manager.h + host1x/vic.cpp + host1x/vic.h macro/macro.cpp macro/macro.h macro/macro_hle.cpp diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp index 8e890a85ed..148126347a 100644 --- a/src/video_core/cdma_pusher.cpp +++ b/src/video_core/cdma_pusher.cpp @@ -2,20 +2,22 @@ // SPDX-License-Identifier: MIT #include -#include "command_classes/host1x.h" -#include "command_classes/nvdec.h" -#include "command_classes/vic.h" #include "video_core/cdma_pusher.h" -#include "video_core/command_classes/sync_manager.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/gpu.h" +#include "video_core/host1x/control.h" +#include "video_core/host1x/nvdec.h" +#include "video_core/host1x/nvdec_common.h" +#include "video_core/host1x/sync_manager.h" +#include "video_core/host1x/vic.h" +#include "video_core/memory_manager.h" namespace Tegra { CDmaPusher::CDmaPusher(GPU& gpu_) - : gpu{gpu_}, nvdec_processor(std::make_shared(gpu)), - vic_processor(std::make_unique(gpu, nvdec_processor)), - host1x_processor(std::make_unique(gpu)), - sync_manager(std::make_unique(gpu)) {} + : gpu{gpu_}, nvdec_processor(std::make_shared(gpu)), + vic_processor(std::make_unique(gpu, nvdec_processor)), + host1x_processor(std::make_unique(gpu)), + sync_manager(std::make_unique(gpu)) {} CDmaPusher::~CDmaPusher() = default; @@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) { case ThiMethod::SetMethod1: LOG_DEBUG(Service_NVDRV, "VIC 
method 0x{:X}, Args=({})", static_cast(vic_thi_state.method_0), data); - vic_processor->ProcessMethod(static_cast(vic_thi_state.method_0), data); + vic_processor->ProcessMethod(static_cast(vic_thi_state.method_0), + data); break; default: break; } break; - case ChClassId::Host1x: + case ChClassId::Control: // This device is mainly for syncpoint synchronization LOG_DEBUG(Service_NVDRV, "Host1X Class Method"); - host1x_processor->ProcessMethod(static_cast(offset), data); + host1x_processor->ProcessMethod(static_cast(offset), data); break; default: UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast(current_class)); diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h index d6ffef95f7..de17c20821 100644 --- a/src/video_core/cdma_pusher.h +++ b/src/video_core/cdma_pusher.h @@ -13,10 +13,13 @@ namespace Tegra { class GPU; -class Host1x; + +namespace Host1x { +class Control; class Nvdec; class SyncptIncrManager; class Vic; +} // namespace Host1x enum class ChSubmissionMode : u32 { SetClass = 0, @@ -30,7 +33,7 @@ enum class ChSubmissionMode : u32 { enum class ChClassId : u32 { NoClass = 0x0, - Host1x = 0x1, + Control = 0x1, VideoEncodeMpeg = 0x20, VideoEncodeNvEnc = 0x21, VideoStreamingVi = 0x30, @@ -102,10 +105,10 @@ private: void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument); GPU& gpu; - std::shared_ptr nvdec_processor; - std::unique_ptr vic_processor; - std::unique_ptr host1x_processor; - std::unique_ptr sync_manager; + std::shared_ptr nvdec_processor; + std::unique_ptr vic_processor; + std::unique_ptr host1x_processor; + std::unique_ptr sync_manager; ChClassId current_class{}; ThiRegisters vic_thi_state{}; ThiRegisters nvdec_thi_state{}; diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp index 67803fe94c..3613c49927 100644 --- a/src/video_core/control/channel_state.cpp +++ b/src/video_core/control/channel_state.cpp @@ -1,5 +1,5 @@ // Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version +// Licensed under GPLv3 or any later version // Refer to the license.txt file included. #include "common/assert.h" diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h index 82808a6b82..08a7591e17 100644 --- a/src/video_core/control/channel_state.h +++ b/src/video_core/control/channel_state.h @@ -1,5 +1,5 @@ // Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version +// Licensed under GPLv3 or any later version // Refer to the license.txt file included. #pragma once diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index 31d80e8b71..dbf833de74 100644 --- a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -1,3 +1,7 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. + #pragma once #include diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp index e1abcb188b..a9bb00aa77 100644 --- a/src/video_core/control/scheduler.cpp +++ b/src/video_core/control/scheduler.cpp @@ -1,5 +1,5 @@ // Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version +// Licensed under GPLv3 or any later version // Refer to the license.txt file included. 
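A note on the WaitGuest/WaitHost split that runs through this patch: the rework keeps two counters per syncpoint. The guest counter advances as soon as the fence manager processes the signal (IncrementGuest in SignalSyncPoint, visible in the fence_manager.h hunk below), which is why NvFlinger can wait on it without stalling on the renderer, while the host counter (IncrementHost) only advances once the backend actually reaches the fence. A minimal mental model, with hypothetical member names and the per-chip syncpoint count of 192 assumed from the Tegra Host1x:

    // Mental model only; the real class also keeps per-counter action lists.
    class SyncpointManager {
    public:
        void IncrementGuest(u32 id);            // bump guest_counters[id], run due actions
        void IncrementHost(u32 id);             // bump host_counters[id], run due actions
        void WaitGuest(u32 id, u32 threshold);  // block until guest_counters[id] >= threshold
        void WaitHost(u32 id, u32 threshold);   // block until host_counters[id] >= threshold
    private:
        std::array<std::atomic<u32>, 192> guest_counters{};
        std::array<std::atomic<u32>, 192> host_counters{};
    };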
#include diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h index 802e9caffb..c1a7739467 100644 --- a/src/video_core/control/scheduler.h +++ b/src/video_core/control/scheduler.h @@ -1,5 +1,5 @@ // Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version +// Licensed under GPLv3 or any later version // Refer to the license.txt file included. #pragma once diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index fd7c936c40..938f0f11cf 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -37,24 +37,32 @@ enum class SubmissionMode : u32 { // Note that, traditionally, methods are treated as 4-byte addressable locations, and hence // their numbers are written down multiplied by 4 in Docs. Here we are not multiply by 4. // So the values you see in docs might be multiplied by 4. +// Register documentation: +// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h +// +// Register Description (approx): +// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt enum class BufferMethods : u32 { BindObject = 0x0, + Illegal = 0x1, Nop = 0x2, SemaphoreAddressHigh = 0x4, SemaphoreAddressLow = 0x5, - SemaphoreSequence = 0x6, - SemaphoreTrigger = 0x7, - NotifyIntr = 0x8, + SemaphoreSequencePayload = 0x6, + SemaphoreOperation = 0x7, + NonStallInterrupt = 0x8, WrcacheFlush = 0x9, - Unk28 = 0xA, - UnkCacheFlush = 0xB, + MemOpA = 0xA, + MemOpB = 0xB, + MemOpC = 0xC, + MemOpD = 0xD, RefCnt = 0x14, SemaphoreAcquire = 0x1A, SemaphoreRelease = 0x1B, - FenceValue = 0x1C, - FenceAction = 0x1D, - WaitForInterrupt = 0x1E, - Unk7c = 0x1F, + SyncpointPayload = 0x1C, + SyncpointOperation = 0x1D, + WaitForIdle = 0x1E, + CRCCheck = 0x1F, Yield = 0x20, NonPullerMethods = 0x40, }; diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp index 3866c8746d..8c17639e43 100644 --- a/src/video_core/engines/puller.cpp +++ b/src/video_core/engines/puller.cpp @@ -68,11 +68,6 @@ void Puller::ProcessFenceActionMethod() { } } -void Puller::ProcessWaitForInterruptMethod() { - // TODO(bunnei) ImplementMe - LOG_WARNING(HW_GPU, "(STUBBED) called"); -} - void Puller::ProcessSemaphoreTriggerMethod() { const auto semaphoreOperationMask = 0xF; const auto op = @@ -91,29 +86,33 @@ void Puller::ProcessSemaphoreTriggerMethod() { block.timestamp = gpu.GetTicks(); memory_manager.WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, sizeof(block)); } else { - const u32 word{memory_manager.Read(regs.semaphore_address.SemaphoreAddress())}; - if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || - (op == GpuSemaphoreOperation::AcquireGequal && - static_cast(word - regs.semaphore_sequence) > 0) || - (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) { - // Nothing to do in this case - } else { + do { + const u32 word{memory_manager.Read(regs.semaphore_address.SemaphoreAddress())}; regs.acquire_source = true; regs.acquire_value = regs.semaphore_sequence; if (op == GpuSemaphoreOperation::AcquireEqual) { regs.acquire_active = true; regs.acquire_mode = false; + if (word != regs.acquire_value) { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + continue; + } } else if (op == GpuSemaphoreOperation::AcquireGequal) { regs.acquire_active = true; regs.acquire_mode = true; + if (word < regs.acquire_value) { + 
std::this_thread::sleep_for(std::chrono::milliseconds(1)); + continue; + } } else if (op == GpuSemaphoreOperation::AcquireMask) { - // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with - // semaphore_sequence, gives a non-0 result - LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented"); + if ((word & regs.semaphore_sequence) == 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + continue; + } } else { LOG_ERROR(HW_GPU, "Invalid semaphore operation"); } - } + } while (false); } } @@ -124,6 +123,7 @@ void Puller::ProcessSemaphoreRelease() { } void Puller::ProcessSemaphoreAcquire() { const u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress()); const auto value = regs.semaphore_acquire; + std::this_thread::sleep_for(std::chrono::milliseconds(5)); if (word != value) { regs.acquire_active = true; regs.acquire_value = value; @@ -146,32 +146,39 @@ void Puller::CallPullerMethod(const MethodCall& method_call) { case BufferMethods::Nop: case BufferMethods::SemaphoreAddressHigh: case BufferMethods::SemaphoreAddressLow: - case BufferMethods::SemaphoreSequence: - case BufferMethods::UnkCacheFlush: + case BufferMethods::SemaphoreSequencePayload: case BufferMethods::WrcacheFlush: - case BufferMethods::FenceValue: + case BufferMethods::SyncpointPayload: break; case BufferMethods::RefCnt: rasterizer->SignalReference(); break; - case BufferMethods::FenceAction: + case BufferMethods::SyncpointOperation: ProcessFenceActionMethod(); break; - case BufferMethods::WaitForInterrupt: + case BufferMethods::WaitForIdle: rasterizer->WaitForIdle(); break; - case BufferMethods::SemaphoreTrigger: { + case BufferMethods::SemaphoreOperation: { ProcessSemaphoreTriggerMethod(); break; } - case BufferMethods::NotifyIntr: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented"); + case BufferMethods::NonStallInterrupt: { + LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented"); break; } - case BufferMethods::Unk28: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented"); + case BufferMethods::MemOpA: { + LOG_ERROR(HW_GPU, "Memory Operation A"); break; } + case BufferMethods::MemOpB: { + // Implement this better.
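// (A likely interpretation, not confirmed by this patch: MemOpA..D correspond
// to the PBDMA MEM_OP_A..D registers in the dev_pbdma.ref.txt reference linked
// above, and the operations they encode include MMU TLB invalidation, so a
// full guest/host synchronization is a conservative stand-in for the real effect.)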
+ rasterizer->SyncGuestHost(); + break; + } + case BufferMethods::MemOpC: + case BufferMethods::MemOpD: { + LOG_ERROR(HW_GPU, "Memory Operation C,D"); break; } case BufferMethods::SemaphoreAcquire: { diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h index d948ec790d..b4619e9a88 100644 --- a/src/video_core/engines/puller.h +++ b/src/video_core/engines/puller.h @@ -141,7 +141,6 @@ private: void ProcessSemaphoreAcquire(); void ProcessSemaphoreRelease(); void ProcessSemaphoreTriggerMethod(); - void ProcessWaitForInterruptMethod(); [[nodiscard]] bool ExecuteMethodOnEngine(u32 method); /// Mapping of command subchannels to their bound engine ids diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index d658e038d9..03a70e5e01 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h @@ -11,6 +11,8 @@ #include "common/common_types.h" #include "video_core/delayed_destruction_ring.h" #include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" #include "video_core/rasterizer_interface.h" namespace VideoCommon { @@ -72,6 +74,7 @@ public: } void SignalSyncPoint(u32 value) { + syncpoint_manager.IncrementGuest(value); TryReleasePendingFences(); const bool should_flush = ShouldFlush(); CommitAsyncFlushes(); @@ -96,7 +99,7 @@ public: auto payload = current_fence->GetPayload(); std::memcpy(address, &payload, sizeof(payload)); } else { - gpu.IncrementSyncPoint(current_fence->GetPayload()); + syncpoint_manager.IncrementHost(current_fence->GetPayload()); } PopFence(); } @@ -106,8 +109,8 @@ protected: explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, TQueryCache& query_cache_) - : rasterizer{rasterizer_}, gpu{gpu_}, texture_cache{texture_cache_}, - buffer_cache{buffer_cache_}, query_cache{query_cache_} {} + : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()}, + texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} virtual ~FenceManager() = default; @@ -125,6 +128,7 @@ protected: VideoCore::RasterizerInterface& rasterizer; Tegra::GPU& gpu; + Tegra::Host1x::SyncpointManager& syncpoint_manager; TTextureCache& texture_cache; TTBufferCache& buffer_cache; TQueryCache& query_cache; @@ -142,7 +146,7 @@ private: const auto payload = current_fence->GetPayload(); std::memcpy(address, &payload, sizeof(payload)); } else { - gpu.IncrementSyncPoint(current_fence->GetPayload()); + syncpoint_manager.IncrementHost(current_fence->GetPayload()); } PopFence(); } diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index eebd7f3ff0..1097db08ae 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -28,6 +28,8 @@ #include "video_core/engines/maxwell_dma.h" #include "video_core/gpu.h" #include "video_core/gpu_thread.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" #include "video_core/memory_manager.h" #include "video_core/renderer_base.h" #include "video_core/shader_notify.h" @@ -38,7 +40,7 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); struct GPU::Impl { explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) - : gpu{gpu_}, system{system_}, use_nvdec{use_nvdec_}, + : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_}, shader_notify{std::make_unique()}, is_async{is_async_}, 
gpu_thread{system_, is_async_}, scheduler{std::make_unique(gpu)} {} @@ -115,31 +117,35 @@ struct GPU::Impl { } /// Request a host GPU memory flush from the CPU. - [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) { - std::unique_lock lck{flush_request_mutex}; - const u64 fence = ++last_flush_fence; - flush_requests.emplace_back(fence, addr, size); + template + [[nodiscard]] u64 RequestSyncOperation(Func&& action) { + std::unique_lock lck{sync_request_mutex}; + const u64 fence = ++last_sync_fence; + sync_requests.emplace_back(action); return fence; } /// Obtains current flush request fence id. - [[nodiscard]] u64 CurrentFlushRequestFence() const { - return current_flush_fence.load(std::memory_order_relaxed); + [[nodiscard]] u64 CurrentSyncRequestFence() const { + return current_sync_fence.load(std::memory_order_relaxed); + } + + void WaitForSyncOperation(const u64 fence) { + std::unique_lock lck{sync_request_mutex}; + sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; }); } /// Tick pending requests within the GPU. void TickWork() { - std::unique_lock lck{flush_request_mutex}; - while (!flush_requests.empty()) { - auto& request = flush_requests.front(); - const u64 fence = request.fence; - const VAddr addr = request.addr; - const std::size_t size = request.size; - flush_requests.pop_front(); - flush_request_mutex.unlock(); - rasterizer->FlushRegion(addr, size); - current_flush_fence.store(fence); - flush_request_mutex.lock(); + std::unique_lock lck{sync_request_mutex}; + while (!sync_requests.empty()) { + auto request = std::move(sync_requests.front()); + sync_requests.pop_front(); + sync_request_mutex.unlock(); + request(); + current_sync_fence.fetch_add(1, std::memory_order_release); + sync_request_mutex.lock(); + sync_request_cv.notify_all(); } } @@ -207,78 +213,26 @@ struct GPU::Impl { /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame. void WaitFence(u32 syncpoint_id, u32 value) { - // Synced GPU, is always in sync - if (!is_async) { - return; - } if (syncpoint_id == UINT32_MAX) { - // TODO: Research what this does. 
- LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented"); return; } MICROPROFILE_SCOPE(GPU_wait); - std::unique_lock lock{sync_mutex}; - sync_cv.wait(lock, [=, this] { - if (shutting_down.load(std::memory_order_relaxed)) { - // We're shutting down, ensure no threads continue to wait for the next syncpoint - return true; - } - return syncpoints.at(syncpoint_id).load() >= value; - }); + host1x.GetSyncpointManager().WaitHost(syncpoint_id, value); } void IncrementSyncPoint(u32 syncpoint_id) { - auto& syncpoint = syncpoints.at(syncpoint_id); - syncpoint++; - std::scoped_lock lock{sync_mutex}; - sync_cv.notify_all(); - auto& interrupt = syncpt_interrupts.at(syncpoint_id); - if (!interrupt.empty()) { - u32 value = syncpoint.load(); - auto it = interrupt.begin(); - while (it != interrupt.end()) { - if (value >= *it) { - TriggerCpuInterrupt(syncpoint_id, *it); - it = interrupt.erase(it); - continue; - } - it++; - } - } + host1x.GetSyncpointManager().IncrementHost(syncpoint_id); } [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const { - return syncpoints.at(syncpoint_id).load(); + return host1x.GetSyncpointManager().GetHostSyncpointValue(syncpoint_id); } void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) { - std::scoped_lock lock{sync_mutex}; - u32 current_value = syncpoints.at(syncpoint_id).load(); - if ((static_cast(current_value) - static_cast(value)) >= 0) { + auto& syncpoint_manager = host1x.GetSyncpointManager(); + syncpoint_manager.RegisterHostAction(syncpoint_id, value, [this, syncpoint_id, value]() { TriggerCpuInterrupt(syncpoint_id, value); - return; - } - auto& interrupt = syncpt_interrupts.at(syncpoint_id); - bool contains = std::any_of(interrupt.begin(), interrupt.end(), - [value](u32 in_value) { return in_value == value; }); - if (contains) { - return; - } - interrupt.emplace_back(value); - } - - [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) { - std::scoped_lock lock{sync_mutex}; - auto& interrupt = syncpt_interrupts.at(syncpoint_id); - const auto iter = - std::find_if(interrupt.begin(), interrupt.end(), - [value](u32 interrupt_value) { return value == interrupt_value; }); - - if (iter == interrupt.end()) { - return false; - } - interrupt.erase(iter); - return true; + }); } [[nodiscard]] u64 GetTicks() const { @@ -387,8 +341,48 @@ struct GPU::Impl { interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); } + void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, + Service::Nvidia::NvFence* fences, size_t num_fences) { + size_t current_request_counter{}; + { + std::unique_lock lk(request_swap_mutex); + if (free_swap_counters.empty()) { + current_request_counter = request_swap_counters.size(); + request_swap_counters.emplace_back(num_fences); + } else { + current_request_counter = free_swap_counters.front(); + request_swap_counters[current_request_counter] = num_fences; + free_swap_counters.pop_front(); + } + } + const auto wait_fence = + RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] { + auto& syncpoint_manager = host1x.GetSyncpointManager(); + if (num_fences == 0) { + renderer->SwapBuffers(framebuffer); + } + const auto executer = [this, current_request_counter, + framebuffer_copy = *framebuffer]() { + { + std::unique_lock lk(request_swap_mutex); + if (--request_swap_counters[current_request_counter] != 0) { + return; + } + free_swap_counters.push_back(current_request_counter); + } + renderer->SwapBuffers(&framebuffer_copy); + }; + for (size_t i = 0; i < num_fences; i++) { + 
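// Each fence below registers one guest action; request_swap_counters was
// seeded with num_fences above, so the executer runs once per fence and only
// the final decrement (the counter reaching zero) performs the deferred
// SwapBuffers for this request.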
+                syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer);
+            }
+        });
+        gpu_thread.TickGPU();
+        WaitForSyncOperation(wait_fence);
+    }
+
     GPU& gpu;
     Core::System& system;
+    Host1x::Host1x& host1x;
 
     std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
     std::unique_ptr<VideoCore::RendererBase> renderer;
@@ -411,18 +405,11 @@ struct GPU::Impl {
     std::condition_variable sync_cv;
 
-    struct FlushRequest {
-        explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
-            : fence{fence_}, addr{addr_}, size{size_} {}
-        u64 fence;
-        VAddr addr;
-        std::size_t size;
-    };
-
-    std::list<FlushRequest> flush_requests;
-    std::atomic<u64> current_flush_fence{};
-    u64 last_flush_fence{};
-    std::mutex flush_request_mutex;
+    std::list<std::function<void()>> sync_requests;
+    std::atomic<u64> current_sync_fence{};
+    u64 last_sync_fence{};
+    std::mutex sync_request_mutex;
+    std::condition_variable sync_request_cv;
 
     const bool is_async;
 
@@ -433,6 +420,10 @@ struct GPU::Impl {
     std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels;
     Tegra::Control::ChannelState* current_channel;
     s32 bound_channel{-1};
+
+    std::deque<size_t> free_swap_counters;
+    std::deque<size_t> request_swap_counters;
+    std::mutex request_swap_mutex;
 };
 
 GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
@@ -477,17 +468,32 @@ void GPU::OnCommandListEnd() {
 }
 
 u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
-    return impl->RequestFlush(addr, size);
+    return impl->RequestSyncOperation(
+        [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
 }
 
-u64 GPU::CurrentFlushRequestFence() const {
-    return impl->CurrentFlushRequestFence();
+u64 GPU::CurrentSyncRequestFence() const {
+    return impl->CurrentSyncRequestFence();
+}
+
+void GPU::WaitForSyncOperation(u64 fence) {
+    return impl->WaitForSyncOperation(fence);
 }
 
 void GPU::TickWork() {
     impl->TickWork();
 }
 
+/// Gets a mutable reference to the Host1x interface
+Host1x::Host1x& GPU::Host1x() {
+    return impl->host1x;
+}
+
+/// Gets an immutable reference to the Host1x interface.
+const Host1x::Host1x& GPU::Host1x() const {
+    return impl->host1x;
+}
+
 Engines::Maxwell3D& GPU::Maxwell3D() {
     return impl->Maxwell3D();
 }
@@ -536,6 +542,11 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
     return impl->ShaderNotify();
 }
 
+void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+                             Service::Nvidia::NvFence* fences, size_t num_fences) {
+    impl->RequestSwapBuffers(framebuffer, fences, num_fences);
+}
+
 void GPU::WaitFence(u32 syncpoint_id, u32 value) {
     impl->WaitFence(syncpoint_id, value);
 }
@@ -552,10 +563,6 @@ void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
     impl->RegisterSyncptInterrupt(syncpoint_id, value);
 }
 
-bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
-    return impl->CancelSyncptInterrupt(syncpoint_id, value);
-}
-
 u64 GPU::GetTicks() const {
     return impl->GetTicks();
 }
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 7e84b0d2f4..c1a5382572 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -93,6 +93,10 @@ namespace Control {
 struct ChannelState;
 }
 
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
 class MemoryManager;
 
 class GPU final {
@@ -124,11 +128,19 @@ public:
     [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
 
     /// Obtains current flush request fence id.
-    [[nodiscard]] u64 CurrentFlushRequestFence() const;
+    [[nodiscard]] u64 CurrentSyncRequestFence() const;
+
+    void WaitForSyncOperation(u64 fence);
 
     /// Tick pending requests within the GPU.
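    /// A usage sketch of the fence pairing (the same sequence ThreadManager::FlushRegion
    /// adopts in gpu_thread.cpp further below; `addr` and `size` are stand-in values):
    ///
    ///     const u64 fence = gpu.RequestFlush(addr, size); // enqueue a sync operation
    ///     // ...have the GPU thread tick, e.g. via ThreadManager::TickGPU()...
    ///     gpu.WaitForSyncOperation(fence);                // block until the fence passes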
void TickWork(); + /// Gets a mutable reference to the Host1x interface + [[nodiscard]] Host1x::Host1x& Host1x(); + + /// Gets an immutable reference to the Host1x interface. + [[nodiscard]] const Host1x::Host1x& Host1x() const; + /// Returns a reference to the Maxwell3D GPU engine. [[nodiscard]] Engines::Maxwell3D& Maxwell3D(); @@ -174,8 +186,6 @@ public: void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value); - bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value); - [[nodiscard]] u64 GetTicks() const; [[nodiscard]] bool IsAsync() const; @@ -184,6 +194,9 @@ public: void RendererFrameEndNotify(); + void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, + Service::Nvidia::NvFence* fences, size_t num_fences); + /// Performs any additional setup necessary in order to begin GPU emulation. /// This can be used to launch any necessary threads and register any necessary /// core timing events. diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 9844cde43a..2c03545bf1 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -93,8 +93,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) { } auto& gpu = system.GPU(); u64 fence = gpu.RequestFlush(addr, size); + TickGPU(); + gpu.WaitForSyncOperation(fence); +} + +void ThreadManager::TickGPU() { PushCommand(GPUTickCommand(), true); - ASSERT(fence <= gpu.CurrentFlushRequestFence()); } void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index c5078a2b37..64628d3e38 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h @@ -135,6 +135,8 @@ public: void OnCommandListEnd(); + void TickGPU(); + private: /// Pushes a command to be executed by the GPU thread u64 PushCommand(CommandData&& command_data, bool block = false); diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp similarity index 91% rename from src/video_core/command_classes/codecs/codec.cpp rename to src/video_core/host1x/codecs/codec.cpp index a5eb97b7f5..70c47ae03d 100644 --- a/src/video_core/command_classes/codecs/codec.cpp +++ b/src/video_core/host1x/codecs/codec.cpp @@ -6,11 +6,11 @@ #include #include "common/assert.h" #include "common/settings.h" -#include "video_core/command_classes/codecs/codec.h" -#include "video_core/command_classes/codecs/h264.h" -#include "video_core/command_classes/codecs/vp8.h" -#include "video_core/command_classes/codecs/vp9.h" #include "video_core/gpu.h" +#include "video_core/host1x/codecs/codec.h" +#include "video_core/host1x/codecs/h264.h" +#include "video_core/host1x/codecs/vp8.h" +#include "video_core/host1x/codecs/vp9.h" #include "video_core/memory_manager.h" extern "C" { @@ -73,7 +73,7 @@ void AVFrameDeleter(AVFrame* ptr) { av_frame_free(&ptr); } -Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs) +Codec::Codec(GPU& gpu_, const Host1x::NvdecCommon::NvdecRegisters& regs) : gpu(gpu_), state{regs}, h264_decoder(std::make_unique(gpu)), vp8_decoder(std::make_unique(gpu)), vp9_decoder(std::make_unique(gpu)) {} @@ -168,11 +168,11 @@ void Codec::InitializeGpuDecoder() { void Codec::Initialize() { const AVCodecID codec = [&] { switch (current_codec) { - case NvdecCommon::VideoCodec::H264: + case Host1x::NvdecCommon::VideoCodec::H264: return AV_CODEC_ID_H264; - case NvdecCommon::VideoCodec::VP8: + case Host1x::NvdecCommon::VideoCodec::VP8: return AV_CODEC_ID_VP8; - case NvdecCommon::VideoCodec::VP9: + case 
Host1x::NvdecCommon::VideoCodec::VP9: return AV_CODEC_ID_VP9; default: UNIMPLEMENTED_MSG("Unknown codec {}", current_codec); @@ -197,7 +197,7 @@ void Codec::Initialize() { initialized = true; } -void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) { +void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) { if (current_codec != codec) { current_codec = codec; LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName()); @@ -215,11 +215,11 @@ void Codec::Decode() { bool vp9_hidden_frame = false; const auto& frame_data = [&]() { switch (current_codec) { - case Tegra::NvdecCommon::VideoCodec::H264: + case Tegra::Host1x::NvdecCommon::VideoCodec::H264: return h264_decoder->ComposeFrame(state, is_first_frame); - case Tegra::NvdecCommon::VideoCodec::VP8: + case Tegra::Host1x::NvdecCommon::VideoCodec::VP8: return vp8_decoder->ComposeFrame(state); - case Tegra::NvdecCommon::VideoCodec::VP9: + case Tegra::Host1x::NvdecCommon::VideoCodec::VP9: vp9_decoder->ComposeFrame(state); vp9_hidden_frame = vp9_decoder->WasFrameHidden(); return vp9_decoder->GetFrameBytes(); @@ -287,21 +287,21 @@ AVFramePtr Codec::GetCurrentFrame() { return frame; } -NvdecCommon::VideoCodec Codec::GetCurrentCodec() const { +Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const { return current_codec; } std::string_view Codec::GetCurrentCodecName() const { switch (current_codec) { - case NvdecCommon::VideoCodec::None: + case Host1x::NvdecCommon::VideoCodec::None: return "None"; - case NvdecCommon::VideoCodec::H264: + case Host1x::NvdecCommon::VideoCodec::H264: return "H264"; - case NvdecCommon::VideoCodec::VP8: + case Host1x::NvdecCommon::VideoCodec::VP8: return "VP8"; - case NvdecCommon::VideoCodec::H265: + case Host1x::NvdecCommon::VideoCodec::H265: return "H265"; - case NvdecCommon::VideoCodec::VP9: + case Host1x::NvdecCommon::VideoCodec::VP9: return "VP9"; default: return "Unknown"; diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/host1x/codecs/codec.h similarity index 78% rename from src/video_core/command_classes/codecs/codec.h rename to src/video_core/host1x/codecs/codec.h index 0c2405465c..117cb3ccd6 100644 --- a/src/video_core/command_classes/codecs/codec.h +++ b/src/video_core/host1x/codecs/codec.h @@ -6,8 +6,8 @@ #include #include #include - -#include "video_core/command_classes/nvdec_common.h" +#include "common/common_types.h" +#include "video_core/host1x/nvdec_common.h" extern "C" { #if defined(__GNUC__) || defined(__clang__) @@ -34,14 +34,14 @@ class VP9; class Codec { public: - explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs); + explicit Codec(GPU& gpu, const Host1x::NvdecCommon::NvdecRegisters& regs); ~Codec(); /// Initialize the codec, returning success or failure void Initialize(); /// Sets NVDEC video stream codec - void SetTargetCodec(NvdecCommon::VideoCodec codec); + void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec); /// Call decoders to construct headers, decode AVFrame with ffmpeg void Decode(); @@ -50,7 +50,7 @@ public: [[nodiscard]] AVFramePtr GetCurrentFrame(); /// Returns the value of current_codec - [[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const; + [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const; /// Return name of the current codec [[nodiscard]] std::string_view GetCurrentCodecName() const; @@ -63,14 +63,14 @@ private: bool CreateGpuAvDevice(); bool initialized{}; - NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None}; + 
Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None}; const AVCodec* av_codec{nullptr}; AVCodecContext* av_codec_ctx{nullptr}; AVBufferRef* av_gpu_decoder{nullptr}; GPU& gpu; - const NvdecCommon::NvdecRegisters& state; + const Host1x::NvdecCommon::NvdecRegisters& state; std::unique_ptr h264_decoder; std::unique_ptr vp8_decoder; std::unique_ptr vp9_decoder; diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp similarity index 98% rename from src/video_core/command_classes/codecs/h264.cpp rename to src/video_core/host1x/codecs/h264.cpp index e2acd54d4f..95534bc85f 100644 --- a/src/video_core/command_classes/codecs/h264.cpp +++ b/src/video_core/host1x/codecs/h264.cpp @@ -5,8 +5,8 @@ #include #include "common/settings.h" -#include "video_core/command_classes/codecs/h264.h" #include "video_core/gpu.h" +#include "video_core/host1x/codecs/h264.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { @@ -28,7 +28,7 @@ H264::H264(GPU& gpu_) : gpu(gpu_) {} H264::~H264() = default; -const std::vector& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state, +const std::vector& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame) { H264DecoderContext context; gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext)); diff --git a/src/video_core/command_classes/codecs/h264.h b/src/video_core/host1x/codecs/h264.h similarity index 96% rename from src/video_core/command_classes/codecs/h264.h rename to src/video_core/host1x/codecs/h264.h index 261574364e..a98730474a 100644 --- a/src/video_core/command_classes/codecs/h264.h +++ b/src/video_core/host1x/codecs/h264.h @@ -8,7 +8,7 @@ #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" -#include "video_core/command_classes/nvdec_common.h" +#include "video_core/host1x/nvdec_common.h" namespace Tegra { class GPU; @@ -59,8 +59,8 @@ public: ~H264(); /// Compose the H264 frame for FFmpeg decoding - [[nodiscard]] const std::vector& ComposeFrame(const NvdecCommon::NvdecRegisters& state, - bool is_first_frame = false); + [[nodiscard]] const std::vector& ComposeFrame( + const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false); private: std::vector frame; diff --git a/src/video_core/command_classes/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp similarity index 93% rename from src/video_core/command_classes/codecs/vp8.cpp rename to src/video_core/host1x/codecs/vp8.cpp index c83b9bbc27..aac026e17c 100644 --- a/src/video_core/command_classes/codecs/vp8.cpp +++ b/src/video_core/host1x/codecs/vp8.cpp @@ -3,8 +3,8 @@ #include -#include "video_core/command_classes/codecs/vp8.h" #include "video_core/gpu.h" +#include "video_core/host1x/codecs/vp8.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { @@ -12,7 +12,7 @@ VP8::VP8(GPU& gpu_) : gpu(gpu_) {} VP8::~VP8() = default; -const std::vector& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { +const std::vector& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { VP8PictureInfo info; gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); diff --git a/src/video_core/command_classes/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h similarity index 93% rename from src/video_core/command_classes/codecs/vp8.h rename to src/video_core/host1x/codecs/vp8.h index 3357667b08..a1dfa5f03d 100644 --- 
a/src/video_core/command_classes/codecs/vp8.h +++ b/src/video_core/host1x/codecs/vp8.h @@ -8,7 +8,7 @@ #include "common/common_funcs.h" #include "common/common_types.h" -#include "video_core/command_classes/nvdec_common.h" +#include "video_core/host1x/nvdec_common.h" namespace Tegra { class GPU; @@ -20,7 +20,8 @@ public: ~VP8(); /// Compose the VP8 frame for FFmpeg decoding - [[nodiscard]] const std::vector& ComposeFrame(const NvdecCommon::NvdecRegisters& state); + [[nodiscard]] const std::vector& ComposeFrame( + const Host1x::NvdecCommon::NvdecRegisters& state); private: std::vector frame; diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp similarity index 99% rename from src/video_core/command_classes/codecs/vp9.cpp rename to src/video_core/host1x/codecs/vp9.cpp index c01431441d..bc50c6ba41 100644 --- a/src/video_core/command_classes/codecs/vp9.cpp +++ b/src/video_core/host1x/codecs/vp9.cpp @@ -4,8 +4,8 @@ #include // for std::copy #include #include "common/assert.h" -#include "video_core/command_classes/codecs/vp9.h" #include "video_core/gpu.h" +#include "video_core/host1x/codecs/vp9.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { @@ -355,7 +355,7 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_ } } -Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) { +Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) { PictureInfo picture_info; gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); Vp9PictureInfo vp9_info = picture_info.Convert(); @@ -376,7 +376,7 @@ void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) { entropy.Convert(dst); } -Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) { +Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { Vp9FrameContainer current_frame{}; { gpu.SyncGuestHost(); @@ -769,7 +769,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() { return uncomp_writer; } -void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { +void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { std::vector bitstream; { Vp9FrameContainer curr_frame = GetCurrentFrame(state); diff --git a/src/video_core/command_classes/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h similarity index 93% rename from src/video_core/command_classes/codecs/vp9.h rename to src/video_core/host1x/codecs/vp9.h index ecc40e8b1b..a425c0fa40 100644 --- a/src/video_core/command_classes/codecs/vp9.h +++ b/src/video_core/host1x/codecs/vp9.h @@ -8,8 +8,8 @@ #include "common/common_types.h" #include "common/stream.h" -#include "video_core/command_classes/codecs/vp9_types.h" -#include "video_core/command_classes/nvdec_common.h" +#include "video_core/host1x/codecs/vp9_types.h" +#include "video_core/host1x/nvdec_common.h" namespace Tegra { class GPU; @@ -117,7 +117,7 @@ public: /// Composes the VP9 frame from the GPU state information. /// Based on the official VP9 spec documentation - void ComposeFrame(const NvdecCommon::NvdecRegisters& state); + void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state); /// Returns true if the most recent frame was a hidden frame. 
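    /// (VP9 streams contain "hidden" frames that are decoded only to serve as references
    /// and are never presented; callers are expected to gate presentation on this flag,
    /// roughly: if (!vp9_decoder->WasFrameHidden()) { Present(std::move(frame)); }
    /// where Present is a hypothetical sink, not part of this patch.)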
    [[nodiscard]] bool WasFrameHidden() const {
@@ -162,13 +162,15 @@ private:
     void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
 
     /// Returns VP9 information from NVDEC provided offset and size
-    [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state);
+    [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
+        const Host1x::NvdecCommon::NvdecRegisters& state);
 
     /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
     void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);
 
     /// Returns frame to be decoded after buffering
-    [[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state);
+    [[nodiscard]] Vp9FrameContainer GetCurrentFrame(
+        const Host1x::NvdecCommon::NvdecRegisters& state);
 
     /// Use NVDEC provided information to compose the headers for the current frame
     [[nodiscard]] std::vector<u8> ComposeCompressedHeader();
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h
similarity index 100%
rename from src/video_core/command_classes/codecs/vp9_types.h
rename to src/video_core/host1x/codecs/vp9_types.h
diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp
new file mode 100644
index 0000000000..b72b01aa33
--- /dev/null
+++ b/src/video_core/host1x/control.cpp
@@ -0,0 +1,35 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "video_core/gpu.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Tegra::Host1x {
+
+Control::Control(GPU& gpu_) : gpu(gpu_) {}
+
+Control::~Control() = default;
+
+void Control::ProcessMethod(Method method, u32 argument) {
+    switch (method) {
+    case Method::LoadSyncptPayload32:
+        syncpoint_value = argument;
+        break;
+    case Method::WaitSyncpt:
+    case Method::WaitSyncpt32:
+        Execute(argument);
+        break;
+    default:
+        UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method));
+        break;
+    }
+}
+
+void Control::Execute(u32 data) {
+    gpu.Host1x().GetSyncpointManager().WaitHost(data, syncpoint_value);
+}
+
+} // namespace Tegra::Host1x
diff --git a/src/video_core/command_classes/host1x.h b/src/video_core/host1x/control.h
similarity index 60%
rename from src/video_core/command_classes/host1x.h
rename to src/video_core/host1x/control.h
index bb48a4381e..04dac7d514 100644
--- a/src/video_core/command_classes/host1x.h
+++ b/src/video_core/host1x/control.h
@@ -1,5 +1,7 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.
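// A minimal sketch of how the renamed Control class below is driven (hypothetical
// caller; the method IDs and the latch-then-wait behaviour come from control.cpp above):
//
//     Control control{gpu};
//     control.ProcessMethod(Control::Method::LoadSyncptPayload32, fence.value); // latch
//     control.ProcessMethod(Control::Method::WaitSyncpt32, fence.id);           // blocks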
#pragma once @@ -7,9 +9,12 @@ namespace Tegra { class GPU; + +namespace Host1x { + class Nvdec; -class Host1x { +class Control { public: enum class Method : u32 { WaitSyncpt = 0x8, @@ -17,8 +22,8 @@ public: WaitSyncpt32 = 0x50, }; - explicit Host1x(GPU& gpu); - ~Host1x(); + explicit Control(GPU& gpu); + ~Control(); /// Writes the method into the state, Invoke Execute() if encountered void ProcessMethod(Method method, u32 argument); @@ -31,4 +36,6 @@ private: GPU& gpu; }; +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h new file mode 100644 index 0000000000..2971be2861 --- /dev/null +++ b/src/video_core/host1x/host1x.h @@ -0,0 +1,33 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" + +#include "video_core/host1x/syncpoint_manager.h" + +namespace Tegra { + +namespace Host1x { + +class Host1x { +public: + Host1x() : syncpoint_manager{} {} + + SyncpointManager& GetSyncpointManager() { + return syncpoint_manager; + } + + const SyncpointManager& GetSyncpointManager() const { + return syncpoint_manager; + } + +private: + SyncpointManager syncpoint_manager; +}; + +} // namespace Host1x + +} // namespace Tegra diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/host1x/nvdec.cpp similarity index 92% rename from src/video_core/command_classes/nvdec.cpp rename to src/video_core/host1x/nvdec.cpp index 4fbbe3da62..5f6decd0de 100644 --- a/src/video_core/command_classes/nvdec.cpp +++ b/src/video_core/host1x/nvdec.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "common/assert.h" -#include "video_core/command_classes/nvdec.h" #include "video_core/gpu.h" +#include "video_core/host1x/nvdec.h" -namespace Tegra { +namespace Tegra::Host1x { #define NVDEC_REG_INDEX(field_name) \ (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64)) @@ -44,4 +44,4 @@ void Nvdec::Execute() { } } -} // namespace Tegra +} // namespace Tegra::Host1x diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/host1x/nvdec.h similarity index 88% rename from src/video_core/command_classes/nvdec.h rename to src/video_core/host1x/nvdec.h index 488531fc6b..41ba1f7a04 100644 --- a/src/video_core/command_classes/nvdec.h +++ b/src/video_core/host1x/nvdec.h @@ -6,11 +6,13 @@ #include #include #include "common/common_types.h" -#include "video_core/command_classes/codecs/codec.h" +#include "video_core/host1x/codecs/codec.h" namespace Tegra { class GPU; +namespace Host1x { + class Nvdec { public: explicit Nvdec(GPU& gpu); @@ -30,4 +32,7 @@ private: NvdecCommon::NvdecRegisters state; std::unique_ptr codec; }; + +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/command_classes/nvdec_common.h b/src/video_core/host1x/nvdec_common.h similarity index 98% rename from src/video_core/command_classes/nvdec_common.h rename to src/video_core/host1x/nvdec_common.h index 521e5b52b7..49d67ebbe5 100644 --- a/src/video_core/command_classes/nvdec_common.h +++ b/src/video_core/host1x/nvdec_common.h @@ -7,7 +7,7 @@ #include "common/common_funcs.h" #include "common/common_types.h" -namespace Tegra::NvdecCommon { +namespace Tegra::Host1x::NvdecCommon { enum class VideoCodec : u64 { None = 0x0, @@ -94,4 +94,4 @@ ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176); #undef ASSERT_REG_POSITION -} // namespace Tegra::NvdecCommon +} // namespace Tegra::Host1x::NvdecCommon diff 
--git a/src/video_core/command_classes/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp similarity index 77% rename from src/video_core/command_classes/sync_manager.cpp rename to src/video_core/host1x/sync_manager.cpp index 67e58046fa..8694f77e2a 100644 --- a/src/video_core/command_classes/sync_manager.cpp +++ b/src/video_core/host1x/sync_manager.cpp @@ -4,8 +4,12 @@ #include #include "sync_manager.h" #include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" namespace Tegra { +namespace Host1x { + SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {} SyncptIncrManager::~SyncptIncrManager() = default; @@ -36,8 +40,12 @@ void SyncptIncrManager::IncrementAllDone() { if (!increments[done_count].complete) { break; } - gpu.IncrementSyncPoint(increments[done_count].syncpt_id); + auto& syncpoint_manager = gpu.Host1x().GetSyncpointManager(); + syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id); + syncpoint_manager.IncrementHost(increments[done_count].syncpt_id); } increments.erase(increments.begin(), increments.begin() + done_count); } + +} // namespace Host1x } // namespace Tegra diff --git a/src/video_core/command_classes/sync_manager.h b/src/video_core/host1x/sync_manager.h similarity index 95% rename from src/video_core/command_classes/sync_manager.h rename to src/video_core/host1x/sync_manager.h index 6dfaae0808..aba72d5c58 100644 --- a/src/video_core/command_classes/sync_manager.h +++ b/src/video_core/host1x/sync_manager.h @@ -8,7 +8,11 @@ #include "common/common_types.h" namespace Tegra { + class GPU; + +namespace Host1x { + struct SyncptIncr { u32 id; u32 class_id; @@ -44,4 +48,6 @@ private: GPU& gpu; }; +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp new file mode 100644 index 0000000000..c606b8bd0d --- /dev/null +++ b/src/video_core/host1x/syncpoint_manager.cpp @@ -0,0 +1,93 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. 
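// Context for the new SyncpointManager (a summary, not diff content): every syncpoint
// carries two monotonic counters, "guest" for what the emulated command stream has
// submitted and "host" for what the real backend has completed, plus a per-syncpoint
// list of callbacks ordered by expected_value. A hypothetical registration:
//
//     auto handle = manager.RegisterHostAction(id, target, [] {
//         // runs inline if the host counter already reached `target`,
//         // otherwise fires from inside IncrementHost(id) once it does
//     });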
+ +#include "video_core/host1x/syncpoint_manager.h" + +namespace Tegra { + +namespace Host1x { + +SyncpointManager::ActionHandle SyncpointManager::RegisterAction( + std::atomic& syncpoint, std::list& action_storage, u32 expected_value, + std::function& action) { + if (syncpoint.load(std::memory_order_acquire) >= expected_value) { + action(); + return {}; + } + + std::unique_lock lk(guard); + if (syncpoint.load(std::memory_order_relaxed) >= expected_value) { + action(); + return {}; + } + auto it = action_storage.begin(); + while (it != action_storage.end()) { + if (it->expected_value >= expected_value) { + break; + } + ++it; + } + return action_storage.emplace(it, expected_value, action); +} + +void SyncpointManager::DeregisterAction(std::list& action_storage, + ActionHandle& handle) { + std::unique_lock lk(guard); + action_storage.erase(handle); +} + +void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) { + DeregisterAction(guest_action_storage[syncpoint_id], handle); +} + +void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) { + DeregisterAction(host_action_storage[syncpoint_id], handle); +} + +void SyncpointManager::IncrementGuest(u32 syncpoint_id) { + Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]); +} + +void SyncpointManager::IncrementHost(u32 syncpoint_id) { + Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]); +} + +void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) { + Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value); +} + +void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) { + Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value); +} + +void SyncpointManager::Increment(std::atomic& syncpoint, std::condition_variable& wait_cv, + std::list& action_storage) { + auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1}; + + std::unique_lock lk(guard); + auto it = action_storage.begin(); + while (it != action_storage.end()) { + if (it->expected_value > new_value) { + break; + } + it->action(); + it = action_storage.erase(it); + } + wait_cv.notify_all(); +} + +void SyncpointManager::Wait(std::atomic& syncpoint, std::condition_variable& wait_cv, + u32 expected_value) { + const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; }; + if (pred()) { + return; + } + + std::unique_lock lk(guard); + wait_cv.wait(lk, pred); +} + +} // namespace Host1x + +} // namespace Tegra diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h new file mode 100644 index 0000000000..0ecc040ab8 --- /dev/null +++ b/src/video_core/host1x/syncpoint_manager.h @@ -0,0 +1,99 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. 
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <list>
+#include <mutex>
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+class SyncpointManager {
+public:
+    u32 GetGuestSyncpointValue(u32 id) {
+        return syncpoints_guest[id].load(std::memory_order_acquire);
+    }
+
+    u32 GetHostSyncpointValue(u32 id) {
+        return syncpoints_host[id].load(std::memory_order_acquire);
+    }
+
+    struct RegisteredAction {
+        RegisteredAction(u32 expected_value_, std::function<void()>& action_)
+            : expected_value{expected_value_}, action{action_} {}
+        u32 expected_value;
+        std::function<void()> action;
+    };
+    using ActionHandle = std::list<RegisteredAction>::iterator;
+
+    template <typename Func>
+    ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+        std::function<void()> func(action);
+        return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
+                              expected_value, func);
+    }
+
+    template <typename Func>
+    ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+        std::function<void()> func(action);
+        return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
+                              expected_value, func);
+    }
+
+    void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
+
+    void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle);
+
+    void IncrementGuest(u32 syncpoint_id);
+
+    void IncrementHost(u32 syncpoint_id);
+
+    void WaitGuest(u32 syncpoint_id, u32 expected_value);
+
+    void WaitHost(u32 syncpoint_id, u32 expected_value);
+
+    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) {
+        return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+    }
+
+    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) {
+        return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+    }
+
+private:
+    void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+                   std::list<RegisteredAction>& action_storage);
+
+    ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
+                                std::list<RegisteredAction>& action_storage, u32 expected_value,
+                                std::function<void()>& action);
+
+    void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);
+
+    void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value);
+
+    static constexpr size_t NUM_MAX_SYNCPOINTS = 192;
+
+    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{};
+    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{};
+
+    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage;
+    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage;
+
+    std::mutex guard;
+    std::condition_variable wait_guest_cv;
+    std::condition_variable wait_host_cv;
+};
+
+} // namespace Host1x
+
+} // namespace Tegra
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/host1x/vic.cpp
similarity index 98%
rename from src/video_core/command_classes/vic.cpp
rename to src/video_core/host1x/vic.cpp
index 7c17df353a..a9422670a1 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -18,14 +18,17 @@ extern "C" {
 #include "common/bit_field.h"
 #include "common/logging/log.h"
-#include "video_core/command_classes/nvdec.h"
-#include "video_core/command_classes/vic.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/vic.h"
 #include "video_core/memory_manager.h"
 #include "video_core/textures/decoders.h"
 
 namespace Tegra {
+
+namespace Host1x {
+
 namespace {
 enum class VideoPixelFormat : u64_le {
     RGBA8 = 0x1f,
@@ -235,4 +238,6 @@ void Vic::WriteYUVFrame(const AVFrame*
frame, const VicConfig& config) { chroma_buffer.size()); } +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/command_classes/vic.h b/src/video_core/host1x/vic.h similarity index 93% rename from src/video_core/command_classes/vic.h rename to src/video_core/host1x/vic.h index 010daa6b64..c51f8af7e9 100644 --- a/src/video_core/command_classes/vic.h +++ b/src/video_core/host1x/vic.h @@ -11,6 +11,9 @@ struct SwsContext; namespace Tegra { class GPU; + +namespace Host1x { + class Nvdec; union VicConfig; @@ -40,7 +43,7 @@ private: void WriteYUVFrame(const AVFrame* frame, const VicConfig& config); GPU& gpu; - std::shared_ptr nvdec_processor; + std::shared_ptr nvdec_processor; /// Avoid reallocation of the following buffers every frame, as their /// size does not change during a stream @@ -58,4 +61,6 @@ private: s32 scaler_height{}; }; +} // namespace Host1x + } // namespace Tegra From 2931101e6f5aa755566ef40f6e6dc71909fd3e92 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 30 Jan 2022 22:26:01 +0100 Subject: [PATCH 29/68] NVDRV: Refactor Host1x --- src/core/core.cpp | 2 +- src/core/hle/service/nvdrv/core/container.cpp | 8 ++-- src/core/hle/service/nvdrv/core/container.h | 10 +++-- src/core/hle/service/nvdrv/core/nvmap.cpp | 33 +++++++---------- src/core/hle/service/nvdrv/core/nvmap.h | 18 +++++++-- .../service/nvdrv/core/syncpoint_manager.cpp | 6 +-- .../service/nvdrv/core/syncpoint_manager.h | 12 ++++-- .../nvdrv/devices/nvhost_nvdec_common.cpp | 37 ++++++------------- src/core/hle/service/nvdrv/nvdrv.cpp | 2 +- src/video_core/CMakeLists.txt | 2 + src/video_core/cdma_pusher.cpp | 12 +++--- src/video_core/cdma_pusher.h | 7 ++-- src/video_core/gpu.cpp | 27 +------------- src/video_core/gpu.h | 6 --- src/video_core/host1x/codecs/codec.cpp | 10 ++--- src/video_core/host1x/codecs/codec.h | 9 +++-- src/video_core/host1x/codecs/h264.cpp | 13 ++++--- src/video_core/host1x/codecs/h264.h | 10 +++-- src/video_core/host1x/codecs/vp8.cpp | 8 ++-- src/video_core/host1x/codecs/vp8.h | 10 +++-- src/video_core/host1x/codecs/vp9.cpp | 12 +++--- src/video_core/host1x/codecs/vp9.h | 10 +++-- src/video_core/host1x/codecs/vp9_types.h | 1 - src/video_core/host1x/control.cpp | 5 +-- src/video_core/host1x/control.h | 6 +-- src/video_core/host1x/host1x.cpp | 18 +++++++++ src/video_core/host1x/host1x.h | 27 +++++++++++++- src/video_core/host1x/nvdec.cpp | 5 ++- src/video_core/host1x/nvdec.h | 7 ++-- src/video_core/host1x/sync_manager.cpp | 5 +-- src/video_core/host1x/sync_manager.h | 8 ++-- src/video_core/host1x/vic.cpp | 22 +++++------ src/video_core/host1x/vic.h | 6 +-- 33 files changed, 201 insertions(+), 173 deletions(-) create mode 100644 src/video_core/host1x/host1x.cpp diff --git a/src/core/core.cpp b/src/core/core.cpp index fa059a3948..13d02e75f5 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -216,7 +216,7 @@ struct System::Impl { telemetry_session = std::make_unique(); - host1x_core = std::make_unique(); + host1x_core = std::make_unique(system); gpu_core = VideoCore::CreateGPU(emu_window, system); if (!gpu_core) { return SystemResultStatus::ErrorVideoCore; diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index 97b5b2c86f..fbd66f0016 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -6,18 +6,18 @@ #include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" 
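// (Sketch of the ownership this container.cpp hunk sets up; construction order only,
// assuming the surrounding objects exist, not a complete program:)
//
//     Tegra::Host1x::Host1x host1x{system};                 // owns SMMU + syncpoints
//     Service::Nvidia::NvCore::Container container{host1x}; // NvMap + SyncpointManager
//     auto& nvmap = container.GetNvMapFile();               // pin/unpin goes via host1x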
-#include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" namespace Service::Nvidia::NvCore { struct ContainerImpl { - ContainerImpl(Tegra::GPU& gpu_) : file{}, manager{gpu_} {} + ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {} NvMap file; SyncpointManager manager; }; -Container::Container(Tegra::GPU& gpu_) { - impl = std::make_unique(gpu_); +Container::Container(Tegra::Host1x::Host1x& host1x_) { + impl = std::make_unique(host1x_); } Container::~Container() = default; diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index 91ac2305ab..da75d74ff7 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -8,8 +8,12 @@ #include namespace Tegra { -class GPU; -} + +namespace Host1x { +class Host1x; +} // namespace Host1x + +} // namespace Tegra namespace Service::Nvidia::NvCore { @@ -20,7 +24,7 @@ struct ContainerImpl; class Container { public: - Container(Tegra::GPU& gpu_); + Container(Tegra::Host1x::Host1x& host1x); ~Container(); NvMap& GetNvMapFile(); diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 1126daeb5e..9acec7ba66 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -7,6 +7,7 @@ #include "common/logging/log.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/memory.h" +#include "video_core/host1x/host1x.h" using Core::Memory::YUZU_PAGESIZE; @@ -61,7 +62,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) { return NvResult::Success; } -NvMap::NvMap() = default; +NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {} void NvMap::AddHandle(std::shared_ptr handle_description) { std::scoped_lock lock(handles_lock); @@ -77,12 +78,11 @@ void NvMap::UnmapHandle(Handle& handle_description) { } // Free and unmap the handle from the SMMU - /* - state.soc->smmu.Unmap(handle_description.pin_virt_address, - static_cast(handle_description.aligned_size)); - smmuAllocator.Free(handle_description.pin_virt_address, - static_cast(handle_description.aligned_size)); handle_description.pin_virt_address = 0; - */ + host1x.MemoryManager().Unmap(static_cast(handle_description.pin_virt_address), + handle_description.aligned_size); + host1x.Allocator().Free(handle_description.pin_virt_address, + static_cast(handle_description.aligned_size)); + handle_description.pin_virt_address = 0; } bool NvMap::TryRemoveHandle(const Handle& handle_description) { @@ -131,12 +131,9 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) { } u32 NvMap::PinHandle(NvMap::Handle::Id handle) { - UNIMPLEMENTED_MSG("pinning"); - return 0; - /* auto handle_description{GetHandle(handle)}; - if (!handle_description) - [[unlikely]] return 0; + if (!handle_description) [[unlikely]] + return 0; std::scoped_lock lock(handle_description->mutex); if (!handle_description->pins) { @@ -157,8 +154,10 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) { // If not then allocate some space and map it u32 address{}; + auto& smmu_allocator = host1x.Allocator(); + auto& smmu_memory_manager = host1x.MemoryManager(); while (!(address = - smmuAllocator.Allocate(static_cast(handle_description->aligned_size)))) { + smmu_allocator.Allocate(static_cast(handle_description->aligned_size)))) { // Free handles until the allocation succeeds std::scoped_lock queueLock(unmap_queue_lock); if (auto freeHandleDesc{unmap_queue.front()}) { @@ -172,19 +171,16 @@ u32 
NvMap::PinHandle(NvMap::Handle::Id handle) { } } - state.soc->smmu.Map(address, handle_description->GetPointer(), - static_cast(handle_description->aligned_size)); + smmu_memory_manager.Map(static_cast(address), handle_description->address, + handle_description->aligned_size); handle_description->pin_virt_address = address; } handle_description->pins++; return handle_description->pin_virt_address; - */ } void NvMap::UnpinHandle(Handle::Id handle) { - UNIMPLEMENTED_MSG("Unpinning"); - /* auto handle_description{GetHandle(handle)}; if (!handle_description) return; @@ -199,7 +195,6 @@ void NvMap::UnpinHandle(Handle::Id handle) { unmap_queue.push_back(handle_description); handle_description->unmap_queue_entry = std::prev(unmap_queue.end()); } - */ } std::optional NvMap::FreeHandle(Handle::Id handle, bool internal_session) { diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 5e6c73589a..5acdc961e6 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -15,6 +15,14 @@ #include "common/common_types.h" #include "core/hle/service/nvdrv/nvdata.h" +namespace Tegra { + +namespace Host1x { +class Host1x; +} // namespace Host1x + +} // namespace Tegra + namespace Service::Nvidia::NvCore { /** * @brief The nvmap core class holds the global state for nvmap and provides methods to manage @@ -90,15 +98,17 @@ public: }; private: - std::list> unmap_queue; - std::mutex unmap_queue_lock; //!< Protects access to `unmap_queue` + std::list> unmap_queue{}; + std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue` - std::unordered_map> handles; //!< Main owning map of handles + std::unordered_map> + handles{}; //!< Main owning map of handles std::mutex handles_lock; //!< Protects access to `handles` static constexpr u32 HandleIdIncrement{ 4}; //!< Each new handle ID is an increment of 4 from the previous std::atomic next_handle_id{HandleIdIncrement}; + Tegra::Host1x::Host1x& host1x; void AddHandle(std::shared_ptr handle); @@ -125,7 +135,7 @@ public: bool was_uncached; //!< If the handle was allocated as uncached }; - NvMap(); + NvMap(Tegra::Host1x::Host1x& host1x); /** * @brief Creates an unallocated handle of the given size diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index ff6cbb37e5..61e00448c9 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -3,16 +3,16 @@ #include "common/assert.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" -#include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" namespace Service::Nvidia::NvCore { -SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {} +SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {} SyncpointManager::~SyncpointManager() = default; u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) { - syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id); + syncpoints[syncpoint_id].min = host1x.GetSyncpointManager().GetHostSyncpointValue(syncpoint_id); return GetSyncpointMin(syncpoint_id); } diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h index cf7f0b4bee..f332edc6e4 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h @@ -10,14 +10,18 @@ #include "core/hle/service/nvdrv/nvdata.h" namespace 
Tegra { -class GPU; -} + +namespace Host1x { +class Host1x; +} // namespace Host1x + +} // namespace Tegra namespace Service::Nvidia::NvCore { class SyncpointManager final { public: - explicit SyncpointManager(Tegra::GPU& gpu_); + explicit SyncpointManager(Tegra::Host1x::Host1x& host1x); ~SyncpointManager(); /** @@ -78,7 +82,7 @@ private: std::array syncpoints{}; - Tegra::GPU& gpu; + Tegra::Host1x::Host1x& host1x; }; } // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 77e6a1cd64..b17589aa3e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -13,6 +13,7 @@ #include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" #include "core/memory.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" #include "video_core/renderer_base.h" @@ -140,29 +141,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector& input, std::vecto SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); - auto& gpu = system.GPU(); - for (auto& cmd_buffer : cmd_buffer_handles) { - auto object{nvmap.GetHandle(cmd_buffer.map_handle)}; - if (!object) { - LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle); - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::InvalidState; - } - if (object->dma_map_addr == 0) { - // NVDEC and VIC memory is in the 32-bit address space - // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space - const GPUVAddr low_addr = - gpu.MemoryManager().MapAllocate32(object->address, object->size); - object->dma_map_addr = static_cast(low_addr); - // Ensure that the dma_map_addr is indeed in the lower 32-bit address space. - ASSERT(object->dma_map_addr == low_addr); - } - if (!object->dma_map_addr) { - LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size); - } else { - cmd_buffer.map_address = static_cast(object->dma_map_addr); - } + cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle); } std::memcpy(output.data(), ¶ms, sizeof(IoctlMapBuffer)); std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(), @@ -172,11 +152,16 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector& input, std::vecto } NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector& input, std::vector& output) { - // This is intntionally stubbed. 
- // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame - // addresses, and risk invalidating data before the async GPU thread is done with it + IoctlMapBuffer params{}; + std::memcpy(¶ms, input.data(), sizeof(IoctlMapBuffer)); + std::vector cmd_buffer_handles(params.num_entries); + + SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); + for (auto& cmd_buffer : cmd_buffer_handles) { + nvmap.UnpinHandle(cmd_buffer.map_handle); + } + std::memset(output.data(), 0, output.size()); - LOG_DEBUG(Service_NVDRV, "(STUBBED) called"); return NvResult::Success; } diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index b39a4c6db5..8a9f3c7174 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -71,7 +71,7 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger } Module::Module(Core::System& system) - : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} { + : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} { builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) { std::shared_ptr device = std::make_shared(system, *this, container); diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 723f9b67ca..40e6d1ec48 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -56,6 +56,8 @@ add_library(video_core STATIC host1x/codecs/vp9_types.h host1x/control.cpp host1x/control.h + host1x/host1x.cpp + host1x/host1x.h host1x/nvdec.cpp host1x/nvdec.h host1x/nvdec_common.h diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp index 148126347a..28a2d2090d 100644 --- a/src/video_core/cdma_pusher.cpp +++ b/src/video_core/cdma_pusher.cpp @@ -4,8 +4,8 @@ #include #include "video_core/cdma_pusher.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/gpu.h" #include "video_core/host1x/control.h" +#include "video_core/host1x/host1x.h" #include "video_core/host1x/nvdec.h" #include "video_core/host1x/nvdec_common.h" #include "video_core/host1x/sync_manager.h" @@ -13,11 +13,11 @@ #include "video_core/memory_manager.h" namespace Tegra { -CDmaPusher::CDmaPusher(GPU& gpu_) - : gpu{gpu_}, nvdec_processor(std::make_shared(gpu)), - vic_processor(std::make_unique(gpu, nvdec_processor)), - host1x_processor(std::make_unique(gpu)), - sync_manager(std::make_unique(gpu)) {} +CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_) + : host1x{host1x_}, nvdec_processor(std::make_shared(host1x)), + vic_processor(std::make_unique(host1x, nvdec_processor)), + host1x_processor(std::make_unique(host1x)), + sync_manager(std::make_unique(host1x)) {} CDmaPusher::~CDmaPusher() = default; diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h index de17c20821..83112dfce4 100644 --- a/src/video_core/cdma_pusher.h +++ b/src/video_core/cdma_pusher.h @@ -12,10 +12,9 @@ namespace Tegra { -class GPU; - namespace Host1x { class Control; +class Host1x; class Nvdec; class SyncptIncrManager; class Vic; @@ -91,7 +90,7 @@ enum class ThiMethod : u32 { class CDmaPusher { public: - explicit CDmaPusher(GPU& gpu_); + explicit CDmaPusher(Host1x::Host1x& host1x); ~CDmaPusher(); /// Process the command entry @@ -104,7 +103,7 @@ private: /// Write arguments value to the ThiRegisters member at the specified offset void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument); - GPU& gpu; + Host1x::Host1x& host1x; 
std::shared_ptr nvdec_processor; std::unique_ptr vic_processor; std::unique_ptr host1x_processor; diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 1097db08ae..e05c9a3573 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -83,19 +83,11 @@ struct GPU::Impl { UNIMPLEMENTED(); } - void CreateHost1xChannel() { - if (host1x_channel) { - return; - } - host1x_channel = CreateChannel(0); - host1x_channel->memory_manager = std::make_shared(system); - InitChannel(*host1x_channel); - } - /// Binds a renderer to the GPU. void BindRenderer(std::unique_ptr renderer_) { renderer = std::move(renderer_); rasterizer = renderer->ReadRasterizer(); + host1x.MemoryManager().BindRasterizer(rasterizer); } /// Flush all current written commands into the host GPU for execution. @@ -173,12 +165,6 @@ struct GPU::Impl { return *current_channel->kepler_compute; } - /// Returns a reference to the GPU memory manager. - [[nodiscard]] Tegra::MemoryManager& MemoryManager() { - CreateHost1xChannel(); - return *host1x_channel->memory_manager; - } - /// Returns a reference to the GPU DMA pusher. [[nodiscard]] Tegra::DmaPusher& DmaPusher() { ASSERT(current_channel); @@ -299,7 +285,7 @@ struct GPU::Impl { } if (!cdma_pushers.contains(id)) { - cdma_pushers.insert_or_assign(id, std::make_unique(gpu)); + cdma_pushers.insert_or_assign(id, std::make_unique(host1x)); } // SubmitCommandBuffer would make the nvdec operations async, this is not currently working @@ -389,7 +375,6 @@ struct GPU::Impl { VideoCore::RasterizerInterface* rasterizer = nullptr; const bool use_nvdec; - std::shared_ptr host1x_channel; s32 new_channel_id{1}; /// Shader build notifier std::unique_ptr shader_notify; @@ -510,14 +495,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const { return impl->KeplerCompute(); } -Tegra::MemoryManager& GPU::MemoryManager() { - return impl->MemoryManager(); -} - -const Tegra::MemoryManager& GPU::MemoryManager() const { - return impl->MemoryManager(); -} - Tegra::DmaPusher& GPU::DmaPusher() { return impl->DmaPusher(); } diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index c1a5382572..f04edf5c49 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -153,12 +153,6 @@ public: /// Returns a reference to the KeplerCompute GPU engine. [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const; - /// Returns a reference to the GPU memory manager. - [[nodiscard]] Tegra::MemoryManager& MemoryManager(); - - /// Returns a const reference to the GPU memory manager. - [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const; - /// Returns a reference to the GPU DMA pusher. 
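    /// (The MemoryManager accessors removed above now live on Host1x; NVDEC/VIC code
    /// reaches guest memory the way the codec hunks below do, e.g.:
    ///     host1x.MemoryManager().ReadBlock(offset, &dst, sizeof(dst));
    /// where offset and dst stand in for the caller's own values.)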
[[nodiscard]] Tegra::DmaPusher& DmaPusher(); diff --git a/src/video_core/host1x/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp index 70c47ae03d..42e7d6e4fb 100644 --- a/src/video_core/host1x/codecs/codec.cpp +++ b/src/video_core/host1x/codecs/codec.cpp @@ -6,11 +6,11 @@ #include #include "common/assert.h" #include "common/settings.h" -#include "video_core/gpu.h" #include "video_core/host1x/codecs/codec.h" #include "video_core/host1x/codecs/h264.h" #include "video_core/host1x/codecs/vp8.h" #include "video_core/host1x/codecs/vp9.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" extern "C" { @@ -73,10 +73,10 @@ void AVFrameDeleter(AVFrame* ptr) { av_frame_free(&ptr); } -Codec::Codec(GPU& gpu_, const Host1x::NvdecCommon::NvdecRegisters& regs) - : gpu(gpu_), state{regs}, h264_decoder(std::make_unique(gpu)), - vp8_decoder(std::make_unique(gpu)), - vp9_decoder(std::make_unique(gpu)) {} +Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs) + : host1x(host1x_), state{regs}, h264_decoder(std::make_unique(host1x)), + vp8_decoder(std::make_unique(host1x)), + vp9_decoder(std::make_unique(host1x)) {} Codec::~Codec() { if (!initialized) { diff --git a/src/video_core/host1x/codecs/codec.h b/src/video_core/host1x/codecs/codec.h index 117cb3ccd6..0d45fb7fe7 100644 --- a/src/video_core/host1x/codecs/codec.h +++ b/src/video_core/host1x/codecs/codec.h @@ -21,7 +21,6 @@ extern "C" { } namespace Tegra { -class GPU; void AVFrameDeleter(AVFrame* ptr); using AVFramePtr = std::unique_ptr; @@ -32,9 +31,13 @@ class VP8; class VP9; } // namespace Decoder +namespace Host1x { +class Host1x; +} // namespace Host1x + class Codec { public: - explicit Codec(GPU& gpu, const Host1x::NvdecCommon::NvdecRegisters& regs); + explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs); ~Codec(); /// Initialize the codec, returning success or failure @@ -69,7 +72,7 @@ private: AVCodecContext* av_codec_ctx{nullptr}; AVBufferRef* av_gpu_decoder{nullptr}; - GPU& gpu; + Host1x::Host1x& host1x; const Host1x::NvdecCommon::NvdecRegisters& state; std::unique_ptr h264_decoder; std::unique_ptr vp8_decoder; diff --git a/src/video_core/host1x/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp index 95534bc85f..e87bd65fa5 100644 --- a/src/video_core/host1x/codecs/h264.cpp +++ b/src/video_core/host1x/codecs/h264.cpp @@ -5,8 +5,8 @@ #include #include "common/settings.h" -#include "video_core/gpu.h" #include "video_core/host1x/codecs/h264.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { @@ -24,19 +24,20 @@ constexpr std::array zig_zag_scan{ }; } // Anonymous namespace -H264::H264(GPU& gpu_) : gpu(gpu_) {} +H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {} H264::~H264() = default; const std::vector& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame) { H264DecoderContext context; - gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext)); + host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context, + sizeof(H264DecoderContext)); const s64 frame_number = context.h264_parameter_set.frame_number.Value(); if (!is_first_frame && frame_number != 0) { frame.resize(context.stream_len); - gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); + host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); return frame; } @@ -155,8 +156,8 
@@ const std::vector& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegist frame.resize(encoded_header.size() + context.stream_len); std::memcpy(frame.data(), encoded_header.data(), encoded_header.size()); - gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, - frame.data() + encoded_header.size(), context.stream_len); + host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, + frame.data() + encoded_header.size(), context.stream_len); return frame; } diff --git a/src/video_core/host1x/codecs/h264.h b/src/video_core/host1x/codecs/h264.h index a98730474a..5cc86454e4 100644 --- a/src/video_core/host1x/codecs/h264.h +++ b/src/video_core/host1x/codecs/h264.h @@ -11,7 +11,11 @@ #include "video_core/host1x/nvdec_common.h" namespace Tegra { -class GPU; + +namespace Host1x { +class Host1x; +} // namespace Host1x + namespace Decoder { class H264BitWriter { @@ -55,7 +59,7 @@ private: class H264 { public: - explicit H264(GPU& gpu); + explicit H264(Host1x::Host1x& host1x); ~H264(); /// Compose the H264 frame for FFmpeg decoding @@ -64,7 +68,7 @@ public: private: std::vector frame; - GPU& gpu; + Host1x::Host1x& host1x; struct H264ParameterSet { s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00 diff --git a/src/video_core/host1x/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp index aac026e17c..28fb12cb8e 100644 --- a/src/video_core/host1x/codecs/vp8.cpp +++ b/src/video_core/host1x/codecs/vp8.cpp @@ -3,18 +3,18 @@ #include -#include "video_core/gpu.h" #include "video_core/host1x/codecs/vp8.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { -VP8::VP8(GPU& gpu_) : gpu(gpu_) {} +VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {} VP8::~VP8() = default; const std::vector& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { VP8PictureInfo info; - gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); + host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); const bool is_key_frame = info.key_frame == 1u; const auto bitstream_size = static_cast(info.vld_buffer_size); @@ -45,7 +45,7 @@ const std::vector& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegiste frame[9] = static_cast(((info.frame_height >> 8) & 0x3f)); } const u64 bitstream_offset = state.frame_bitstream_offset; - gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); + host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); return frame; } diff --git a/src/video_core/host1x/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h index a1dfa5f03d..5bf07ecab1 100644 --- a/src/video_core/host1x/codecs/vp8.h +++ b/src/video_core/host1x/codecs/vp8.h @@ -11,12 +11,16 @@ #include "video_core/host1x/nvdec_common.h" namespace Tegra { -class GPU; + +namespace Host1x { +class Host1x; +} // namespace Host1x + namespace Decoder { class VP8 { public: - explicit VP8(GPU& gpu); + explicit VP8(Host1x::Host1x& host1x); ~VP8(); /// Compose the VP8 frame for FFmpeg decoding @@ -25,7 +29,7 @@ public: private: std::vector frame; - GPU& gpu; + Host1x::Host1x& host1x; struct VP8PictureInfo { INSERT_PADDING_WORDS_NOINIT(14); diff --git a/src/video_core/host1x/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp index bc50c6ba41..667aadc6a9 100644 --- a/src/video_core/host1x/codecs/vp9.cpp +++ b/src/video_core/host1x/codecs/vp9.cpp @@ -4,8 +4,8 @@ #include // for std::copy #include #include "common/assert.h" -#include 
"video_core/gpu.h" #include "video_core/host1x/codecs/vp9.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { @@ -236,7 +236,7 @@ constexpr std::array map_lut{ } } // Anonymous namespace -VP9::VP9(GPU& gpu_) : gpu{gpu_} {} +VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {} VP9::~VP9() = default; @@ -357,7 +357,7 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_ Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) { PictureInfo picture_info; - gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); + host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); Vp9PictureInfo vp9_info = picture_info.Convert(); InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy); @@ -372,17 +372,17 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) { EntropyProbs entropy; - gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); + host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); entropy.Convert(dst); } Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { Vp9FrameContainer current_frame{}; { - gpu.SyncGuestHost(); + // gpu.SyncGuestHost(); epic, why? current_frame.info = GetVp9PictureInfo(state); current_frame.bit_stream.resize(current_frame.info.bitstream_size); - gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), + host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), current_frame.info.bitstream_size); } if (!next_frame.bit_stream.empty()) { diff --git a/src/video_core/host1x/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h index a425c0fa40..d4083e8d3e 100644 --- a/src/video_core/host1x/codecs/vp9.h +++ b/src/video_core/host1x/codecs/vp9.h @@ -12,7 +12,11 @@ #include "video_core/host1x/nvdec_common.h" namespace Tegra { -class GPU; + +namespace Host1x { +class Host1x; +} // namespace Host1x + namespace Decoder { /// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the @@ -106,7 +110,7 @@ private: class VP9 { public: - explicit VP9(GPU& gpu_); + explicit VP9(Host1x::Host1x& host1x); ~VP9(); VP9(const VP9&) = delete; @@ -176,7 +180,7 @@ private: [[nodiscard]] std::vector ComposeCompressedHeader(); [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader(); - GPU& gpu; + Host1x::Host1x& host1x; std::vector frame; std::array loop_filter_ref_deltas{}; diff --git a/src/video_core/host1x/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h index bb3d8df6e1..adad8ed7ed 100644 --- a/src/video_core/host1x/codecs/vp9_types.h +++ b/src/video_core/host1x/codecs/vp9_types.h @@ -9,7 +9,6 @@ #include "common/common_types.h" namespace Tegra { -class GPU; namespace Decoder { struct Vp9FrameDimensions { diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp index b72b01aa33..a81c635aec 100644 --- a/src/video_core/host1x/control.cpp +++ b/src/video_core/host1x/control.cpp @@ -3,13 +3,12 @@ // Refer to the license.txt file included. 
#include "common/assert.h" -#include "video_core/gpu.h" #include "video_core/host1x/control.h" #include "video_core/host1x/host1x.h" namespace Tegra::Host1x { -Control::Control(GPU& gpu_) : gpu(gpu_) {} +Control::Control(Host1x& host1x_) : host1x(host1x_) {} Control::~Control() = default; @@ -29,7 +28,7 @@ void Control::ProcessMethod(Method method, u32 argument) { } void Control::Execute(u32 data) { - gpu.Host1x().GetSyncpointManager().WaitHost(data, syncpoint_value); + host1x.GetSyncpointManager().WaitHost(data, syncpoint_value); } } // namespace Tegra::Host1x diff --git a/src/video_core/host1x/control.h b/src/video_core/host1x/control.h index 04dac7d514..18a9b56c0e 100644 --- a/src/video_core/host1x/control.h +++ b/src/video_core/host1x/control.h @@ -8,10 +8,10 @@ #include "common/common_types.h" namespace Tegra { -class GPU; namespace Host1x { +class Host1x; class Nvdec; class Control { @@ -22,7 +22,7 @@ public: WaitSyncpt32 = 0x50, }; - explicit Control(GPU& gpu); + explicit Control(Host1x& host1x); ~Control(); /// Writes the method into the state, Invoke Execute() if encountered @@ -33,7 +33,7 @@ private: void Execute(u32 data); u32 syncpoint_value{}; - GPU& gpu; + Host1x& host1x; }; } // namespace Host1x diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp new file mode 100644 index 0000000000..eb00f48557 --- /dev/null +++ b/src/video_core/host1x/host1x.cpp @@ -0,0 +1,18 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv3 or any later version +// Refer to the license.txt file included. + +#include "core/core.h" +#include "video_core/host1x/host1x.h" + +namespace Tegra { + +namespace Host1x { + +Host1x::Host1x(Core::System& system_) + : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12}, + allocator{std::make_unique>(1 << 12)} {} + +} // namespace Host1x + +} // namespace Tegra diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h index 2971be2861..e4b69d75aa 100644 --- a/src/video_core/host1x/host1x.h +++ b/src/video_core/host1x/host1x.h @@ -6,7 +6,13 @@ #include "common/common_types.h" +#include "common/address_space.h" #include "video_core/host1x/syncpoint_manager.h" +#include "video_core/memory_manager.h" + +namespace Core { +class System; +} // namespace Core namespace Tegra { @@ -14,7 +20,7 @@ namespace Host1x { class Host1x { public: - Host1x() : syncpoint_manager{} {} + Host1x(Core::System& system); SyncpointManager& GetSyncpointManager() { return syncpoint_manager; @@ -24,8 +30,27 @@ public: return syncpoint_manager; } + Tegra::MemoryManager& MemoryManager() { + return memory_manager; + } + + const Tegra::MemoryManager& MemoryManager() const { + return memory_manager; + } + + Common::FlatAllocator& Allocator() { + return *allocator; + } + + const Common::FlatAllocator& Allocator() const { + return *allocator; + } + private: + Core::System& system; SyncpointManager syncpoint_manager; + Tegra::MemoryManager memory_manager; + std::unique_ptr> allocator; }; } // namespace Host1x diff --git a/src/video_core/host1x/nvdec.cpp b/src/video_core/host1x/nvdec.cpp index 5f6decd0de..a4bd5b79ff 100644 --- a/src/video_core/host1x/nvdec.cpp +++ b/src/video_core/host1x/nvdec.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "common/assert.h" -#include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" #include "video_core/host1x/nvdec.h" namespace Tegra::Host1x { @@ -10,7 +10,8 @@ namespace Tegra::Host1x { #define NVDEC_REG_INDEX(field_name) \ 
     (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))
 
-Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {}
+Nvdec::Nvdec(Host1x& host1x_)
+    : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}
 
 Nvdec::~Nvdec() = default;
 
diff --git a/src/video_core/host1x/nvdec.h b/src/video_core/host1x/nvdec.h
index 41ba1f7a04..3949d51815 100644
--- a/src/video_core/host1x/nvdec.h
+++ b/src/video_core/host1x/nvdec.h
@@ -9,13 +9,14 @@
 #include "video_core/host1x/codecs/codec.h"
 
 namespace Tegra {
-class GPU;
 
 namespace Host1x {
 
+class Host1x;
+
 class Nvdec {
 public:
-    explicit Nvdec(GPU& gpu);
+    explicit Nvdec(Host1x& host1x);
     ~Nvdec();
 
     /// Writes the method into the state, Invoke Execute() if encountered
@@ -28,7 +29,7 @@ private:
     /// Invoke codec to decode a frame
     void Execute();
 
-    GPU& gpu;
+    Host1x& host1x;
     NvdecCommon::NvdecRegisters state;
     std::unique_ptr<Codec> codec;
 };
diff --git a/src/video_core/host1x/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp
index 8694f77e2a..5ef9ea217b 100644
--- a/src/video_core/host1x/sync_manager.cpp
+++ b/src/video_core/host1x/sync_manager.cpp
@@ -3,14 +3,13 @@
 
 #include
 #include "sync_manager.h"
-#include "video_core/gpu.h"
 #include "video_core/host1x/host1x.h"
 #include "video_core/host1x/syncpoint_manager.h"
 
 namespace Tegra {
 namespace Host1x {
 
-SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {}
+SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
 SyncptIncrManager::~SyncptIncrManager() = default;
 
 void SyncptIncrManager::Increment(u32 id) {
@@ -40,7 +39,7 @@ void SyncptIncrManager::IncrementAllDone() {
             if (!increments[done_count].complete) {
                 break;
             }
-            auto& syncpoint_manager = gpu.Host1x().GetSyncpointManager();
+            auto& syncpoint_manager = host1x.GetSyncpointManager();
             syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
             syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
         }
diff --git a/src/video_core/host1x/sync_manager.h b/src/video_core/host1x/sync_manager.h
index aba72d5c58..7bb77fa278 100644
--- a/src/video_core/host1x/sync_manager.h
+++ b/src/video_core/host1x/sync_manager.h
@@ -9,10 +9,10 @@
 
 namespace Tegra {
 
-class GPU;
-
 namespace Host1x {
 
+class Host1x;
+
 struct SyncptIncr {
     u32 id;
     u32 class_id;
@@ -25,7 +25,7 @@ struct SyncptIncr {
 
 class SyncptIncrManager {
 public:
-    explicit SyncptIncrManager(GPU& gpu);
+    explicit SyncptIncrManager(Host1x& host1x);
     ~SyncptIncrManager();
 
     /// Add syncpoint id and increment all
@@ -45,7 +45,7 @@ private:
     std::mutex increment_lock;
     u32 current_id{};
 
-    GPU& gpu;
+    Host1x& host1x;
 };
 
 } // namespace Host1x
diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp
index a9422670a1..5d80398412 100644
--- a/src/video_core/host1x/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -19,7 +19,7 @@ extern "C" {
 
 #include "common/logging/log.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/host1x/nvdec.h"
 #include "video_core/host1x/vic.h"
 #include "video_core/memory_manager.h"
@@ -49,8 +49,8 @@ union VicConfig {
     BitField<46, 14, u64_le> surface_height_minus1;
 };
 
-Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
-    : gpu(gpu_),
-      nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
+Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
+    : host1x(host1x_),
+      nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
 
 Vic::~Vic() = default;
 
@@ -81,7 +81,7 @@ void Vic::Execute() {
         LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
         return;
     }
-    const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
+    const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
     const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
     const auto* frame = frame_ptr.get();
     if (!frame) {
@@ -159,12 +159,12 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
         Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
                                 converted_frame_buf_addr, block_height, 0, 0);
 
-        gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+        host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
     } else {
         // send pitch linear frame
         const size_t linear_size = width * height * 4;
-        gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
-                                       linear_size);
+        host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+                                          linear_size);
     }
 }
 
@@ -192,8 +192,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
             luma_buffer[dst + x] = luma_src[src + x];
         }
     }
-    gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
-                                   luma_buffer.size());
+    host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
+                                      luma_buffer.size());
 
     // Chroma
     const std::size_t half_height = frame_height / 2;
@@ -234,8 +234,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
         ASSERT(false);
         break;
     }
-    gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
-                                   chroma_buffer.size());
+    host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+                                      chroma_buffer.size());
 }
 
 } // namespace Host1x
diff --git a/src/video_core/host1x/vic.h b/src/video_core/host1x/vic.h
index c51f8af7e9..2b78786e85 100644
--- a/src/video_core/host1x/vic.h
+++ b/src/video_core/host1x/vic.h
@@ -10,10 +10,10 @@ struct SwsContext;
 
 namespace Tegra {
 
-class GPU;
 
 namespace Host1x {
 
+class Host1x;
 class Nvdec;
 union VicConfig;
 
@@ -28,7 +28,7 @@ public:
         SetOutputSurfaceChromaUnusedOffset = 0x1ca
     };
 
-    explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor);
+    explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor);
 
     ~Vic();
 
@@ -42,7 +42,7 @@ private:
     void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
 
-    GPU& gpu;
+    Host1x& host1x;
     std::shared_ptr<Nvdec> nvdec_processor;
 
     /// Avoid reallocation of the following buffers every frame, as their

From 920429fde745b3bf6d33b6ca991628f64988f754 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 30 Jan 2022 23:13:46 +0100
Subject: [PATCH 30/68] NVDRV: Further refactors and eliminate old code.
---
 src/core/CMakeLists.txt                       |  2 -
 src/core/core.cpp                             | 11 -----
 src/core/core.h                               | 10 ----
 src/core/hardware_interrupt_manager.cpp       | 32 -------------
 src/core/hardware_interrupt_manager.h         | 32 -------------
 .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 26 ++--------
 .../hle/service/nvdrv/devices/nvhost_ctrl.h   |  2 -
 .../hle/service/nvdrv/devices/nvhost_gpu.cpp  |  7 ++-
 src/core/hle/service/nvdrv/nvdrv.cpp          | 21 --------
 src/core/hle/service/nvdrv/nvdrv.h            |  7 ---
 .../hle/service/nvdrv/nvdrv_interface.cpp     |  4 --
 src/core/hle/service/nvdrv/nvdrv_interface.h  |  2 -
 src/video_core/command_classes/host1x.cpp     | 29 -----------
 src/video_core/gpu.cpp                        | 48 -------------------
 src/video_core/gpu.h                          |  9 ----
 src/video_core/host1x/syncpoint_manager.cpp   |  4 ++
 .../renderer_opengl/gl_rasterizer.cpp         |  4 --
 .../renderer_vulkan/vk_rasterizer.cpp         |  4 --
 18 files changed, 12 insertions(+), 242 deletions(-)
 delete mode 100644 src/core/hardware_interrupt_manager.cpp
 delete mode 100644 src/core/hardware_interrupt_manager.h
 delete mode 100644 src/video_core/command_classes/host1x.cpp

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 3ef19f9c26..95302c419a 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -138,8 +138,6 @@ add_library(core STATIC
     frontend/emu_window.h
     frontend/framebuffer_layout.cpp
     frontend/framebuffer_layout.h
-    hardware_interrupt_manager.cpp
-    hardware_interrupt_manager.h
     hid/emulated_console.cpp
     hid/emulated_console.h
     hid/emulated_controller.cpp
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 13d02e75f5..1deeee1545 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -27,7 +27,6 @@
 #include "core/file_sys/savedata_factory.h"
 #include "core/file_sys/vfs_concat.h"
 #include "core/file_sys/vfs_real.h"
-#include "core/hardware_interrupt_manager.h"
 #include "core/hid/hid_core.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_process.h"
@@ -226,7 +225,6 @@ struct System::Impl {
 
         service_manager = std::make_shared<Service::SM::ServiceManager>(kernel);
         services = std::make_unique<Service::Services>(service_manager, system);
-        interrupt_manager = std::make_unique<Hardware::InterruptManager>(system);
 
         // Initialize time manager, which must happen after kernel is created
         time_manager.Initialize();
@@ -454,7 +452,6 @@ struct System::Impl {
     std::unique_ptr<Loader::AppLoader> app_loader;
     std::unique_ptr<Tegra::GPU> gpu_core;
     std::unique_ptr<Tegra::Host1x::Host1x> host1x_core;
-    std::unique_ptr<Core::Hardware::InterruptManager> interrupt_manager;
    std::unique_ptr<Core::DeviceMemory> device_memory;
     std::unique_ptr<AudioCore::AudioCore> audio_core;
     Core::Memory::Memory memory;
@@ -680,14 +677,6 @@ const Tegra::Host1x::Host1x& System::Host1x() const {
     return *impl->host1x_core;
 }
 
-Core::Hardware::InterruptManager& System::InterruptManager() {
-    return *impl->interrupt_manager;
-}
-
-const Core::Hardware::InterruptManager& System::InterruptManager() const {
-    return *impl->interrupt_manager;
-}
-
 VideoCore::RendererBase& System::Renderer() {
     return impl->gpu_core->Renderer();
 }
diff --git a/src/core/core.h b/src/core/core.h
index e4168a921f..7843cc8ad9 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -91,10 +91,6 @@ namespace Core::Timing {
 class CoreTiming;
 }
 
-namespace Core::Hardware {
-class InterruptManager;
-}
-
 namespace Core::HID {
 class HIDCore;
 }
@@ -305,12 +301,6 @@ public:
     /// Provides a constant reference to the core timing instance.
     [[nodiscard]] const Timing::CoreTiming& CoreTiming() const;
 
-    /// Provides a reference to the interrupt manager instance.
-    [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager();
-
-    /// Provides a constant reference to the interrupt manager instance.
-    [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const;
-
     /// Provides a reference to the kernel instance.
     [[nodiscard]] Kernel::KernelCore& Kernel();
 
diff --git a/src/core/hardware_interrupt_manager.cpp b/src/core/hardware_interrupt_manager.cpp
deleted file mode 100644
index d08cc33159..0000000000
--- a/src/core/hardware_interrupt_manager.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/hardware_interrupt_manager.h"
-#include "core/hle/service/nvdrv/nvdrv_interface.h"
-#include "core/hle/service/sm/sm.h"
-
-namespace Core::Hardware {
-
-InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
-    gpu_interrupt_event = Core::Timing::CreateEvent(
-        "GPUInterrupt",
-        [this](std::uintptr_t message, u64 time,
-               std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
-            auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
-            const u32 syncpt = static_cast<u32>(message >> 32);
-            const u32 value = static_cast<u32>(message);
-            nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
-            return std::nullopt;
-        });
-}
-
-InterruptManager::~InterruptManager() = default;
-
-void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
-    const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value;
-    system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg);
-}
-
-} // namespace Core::Hardware
diff --git a/src/core/hardware_interrupt_manager.h b/src/core/hardware_interrupt_manager.h
deleted file mode 100644
index 5665c59186..0000000000
--- a/src/core/hardware_interrupt_manager.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <memory>
-
-#include "common/common_types.h"
-
-namespace Core {
-class System;
-}
-
-namespace Core::Timing {
-struct EventType;
-}
-
-namespace Core::Hardware {
-
-class InterruptManager {
-public:
-    explicit InterruptManager(Core::System& system);
-    ~InterruptManager();
-
-    void GPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
-private:
-    Core::System& system;
-    std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event;
-};
-
-} // namespace Core::Hardware
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index ffe42d4235..076edb02f7 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -77,12 +77,9 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
     return NvResult::NotImplemented;
 }
 
-void nvhost_ctrl::OnOpen(DeviceFD fd) {
-    events_interface.RegisterForSignal(this);
-}
-void nvhost_ctrl::OnClose(DeviceFD fd) {
-    events_interface.UnregisterForSignal(this);
-}
+void nvhost_ctrl::OnOpen(DeviceFD fd) {}
+
+void nvhost_ctrl::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
     IocGetConfigParams params{};
@@ -395,21 +392,4 @@ u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
     return 0;
 }
 
-void nvhost_ctrl::SignalNvEvent(u32 syncpoint_id, u32 value) {
-    u64 signal_mask = events_mask;
-    while (signal_mask != 0) {
-        const u64 event_id = std::countr_zero(signal_mask);
-        signal_mask &= ~(1ULL << event_id);
-        auto& event = events[event_id];
-        if (event.assigned_syncpt != syncpoint_id || event.assigned_value != value) {
-            continue;
-        }
-        if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
-            EventState::Waiting) {
-            event.kevent->GetWritableEvent().Signal();
-        }
-        event.status.store(EventState::Signalled, std::memory_order_release);
-    }
-}
-
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 136a1e925e..f511c02961 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -56,8 +56,6 @@ public:
     };
     static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
 
-    void SignalNvEvent(u32 syncpoint_id, u32 value);
-
 private:
     struct InternalEvent {
         // Mask representing registered events
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index db3e266ad6..3f981af5a1 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -14,6 +14,7 @@
 #include "video_core/control/channel_state.h"
 #include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::Devices {
 namespace {
@@ -31,7 +32,8 @@ nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
       syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
       channel_state{system.GPU().AllocateChannel()} {
     channel_fence.id = syncpoint_manager.AllocateSyncpoint();
-    channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
+    channel_fence.value =
+        system_.Host1x().GetSyncpointManager().GetGuestSyncpointValue(channel_fence.id);
     sm_exception_breakpoint_int_report_event =
         events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
     sm_exception_breakpoint_pause_report_event =
@@ -189,7 +191,8 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
-void EventInterface::RegisterForSignal(Devices::nvhost_ctrl* device) {
-    std::unique_lock lk(guard);
-    on_signal.push_back(device);
-}
-
-void EventInterface::UnregisterForSignal(Devices::nvhost_ctrl* device) {
-    std::unique_lock lk(guard);
-    on_signal.remove(device);
-}
-
-void EventInterface::Signal(u32 syncpoint_id, u32 value) {
-    std::unique_lock lk(guard);
-    for (auto* device : on_signal) {
-        device->SignalNvEvent(syncpoint_id, value);
-    }
-}
-
 Kernel::KEvent* EventInterface::CreateEvent(std::string name) {
     Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name));
     return new_event;
@@ -221,10 +204,6 @@ NvResult Module::Close(DeviceFD fd) {
     return NvResult::Success;
 }
 
-void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
-    events_interface.Signal(syncpoint_id, value);
-}
-
 NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) {
     if (fd < 0) {
         LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index 1fe98cf32c..31c45236e6 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -49,11 +49,6 @@ public:
     EventInterface(Module& module_);
     ~EventInterface();
 
-    void RegisterForSignal(Devices::nvhost_ctrl*);
-    void UnregisterForSignal(Devices::nvhost_ctrl*);
-
-    void Signal(u32 syncpoint_id, u32 value);
-
     Kernel::KEvent* CreateEvent(std::string name);
 
     void FreeEvent(Kernel::KEvent* event);
@@ -96,8 +91,6 @@ public:
     /// Closes a device file descriptor and returns operation success.
     NvResult Close(DeviceFD fd);
 
-    void SignalSyncpt(const u32 syncpoint_id, const u32 value);
-
     NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);
 
 private:
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index bd41205b88..5e50a04e8a 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -15,10 +15,6 @@
 
 namespace Service::Nvidia {
 
-void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
-    nvdrv->SignalSyncpt(syncpoint_id, value);
-}
-
 void NVDRV::Open(Kernel::HLERequestContext& ctx) {
     LOG_DEBUG(Service_NVDRV, "called");
     IPC::ResponseBuilder rb{ctx, 4};
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h
index cbd37b52b6..cd58a4f35c 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.h
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.h
@@ -18,8 +18,6 @@ public:
     explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name);
     ~NVDRV() override;
 
-    void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
 private:
     void Open(Kernel::HLERequestContext& ctx);
     void Ioctl1(Kernel::HLERequestContext& ctx);
diff --git a/src/video_core/command_classes/host1x.cpp b/src/video_core/command_classes/host1x.cpp
deleted file mode 100644
index 11855fe103..0000000000
--- a/src/video_core/command_classes/host1x.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/assert.h"
-#include "video_core/command_classes/host1x.h"
-#include "video_core/gpu.h"
-
-Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {}
-
-Tegra::Host1x::~Host1x() = default;
-
-void Tegra::Host1x::ProcessMethod(Method method, u32 argument) {
-    switch (method) {
-    case Method::LoadSyncptPayload32:
-        syncpoint_value = argument;
-        break;
-    case Method::WaitSyncpt:
-    case Method::WaitSyncpt32:
-        Execute(argument);
-        break;
-    default:
-        UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method));
-        break;
-    }
-}
-
-void Tegra::Host1x::Execute(u32 data) {
-    gpu.WaitFence(data, syncpoint_value);
-}
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index e05c9a3573..a1d19b1c8a 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -14,7 +14,6 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/frontend/emu_window.h"
-#include "core/hardware_interrupt_manager.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/perf_stats.h"
 #include "video_core/cdma_pusher.h"
@@ -36,8 +35,6 @@
 
 namespace Tegra {
 
-MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
-
 struct GPU::Impl {
     explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
         : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
@@ -197,30 +194,6 @@ struct GPU::Impl {
         return *shader_notify;
     }
 
-    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value) {
-        if (syncpoint_id == UINT32_MAX) {
-            return;
-        }
-        MICROPROFILE_SCOPE(GPU_wait);
-        host1x.GetSyncpointManager().WaitHost(syncpoint_id, value);
-    }
-
-    void IncrementSyncPoint(u32 syncpoint_id) {
-        host1x.GetSyncpointManager().IncrementHost(syncpoint_id);
-    }
-
-    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const {
-        return host1x.GetSyncpointManager().GetHostSyncpointValue(syncpoint_id);
-    }
-
-    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
-        auto& syncpoint_manager = host1x.GetSyncpointManager();
-        syncpoint_manager.RegisterHostAction(syncpoint_id, value, [this, syncpoint_id, value]() {
-            TriggerCpuInterrupt(syncpoint_id, value);
-        });
-    }
-
     [[nodiscard]] u64 GetTicks() const {
         // This values were reversed engineered by fincs from NVN
         // The gpu clock is reported in units of 385/625 nanoseconds
@@ -322,11 +295,6 @@ struct GPU::Impl {
         gpu_thread.FlushAndInvalidateRegion(addr, size);
     }
 
-    void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const {
-        auto& interrupt_manager = system.InterruptManager();
-        interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
-    }
-
     void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
                             Service::Nvidia::NvFence* fences, size_t num_fences) {
         size_t current_request_counter{};
@@ -524,22 +492,6 @@ void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
     impl->RequestSwapBuffers(framebuffer, fences, num_fences);
 }
 
-void GPU::WaitFence(u32 syncpoint_id, u32 value) {
-    impl->WaitFence(syncpoint_id, value);
-}
-
-void GPU::IncrementSyncPoint(u32 syncpoint_id) {
-    impl->IncrementSyncPoint(syncpoint_id);
-}
-
-u32 GPU::GetSyncpointValue(u32 syncpoint_id) const {
-    return impl->GetSyncpointValue(syncpoint_id);
-}
-
-void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
-    impl->RegisterSyncptInterrupt(syncpoint_id, value);
-}
-
 u64 GPU::GetTicks() const {
     return impl->GetTicks();
 }
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index f04edf5c49..655373b33c 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -171,15 +171,6 @@ public:
     /// Returns a const reference to the shader notifier.
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
-    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value);
-
-    void IncrementSyncPoint(u32 syncpoint_id);
-
-    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
-
-    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
-
     [[nodiscard]] u64 GetTicks() const;
 
     [[nodiscard]] bool IsAsync() const;
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
index c606b8bd0d..825bd551e7 100644
--- a/src/video_core/host1x/syncpoint_manager.cpp
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -2,12 +2,15 @@
 // Licensed under GPLv3 or any later version
 // Refer to the license.txt file included.
+#include "common/microprofile.h" #include "video_core/host1x/syncpoint_manager.h" namespace Tegra { namespace Host1x { +MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); + SyncpointManager::ActionHandle SyncpointManager::RegisterAction( std::atomic& syncpoint, std::list& action_storage, u32 expected_value, std::function& action) { @@ -58,6 +61,7 @@ void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) { } void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) { + MICROPROFILE_SCOPE(GPU_wait); Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value); } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index f745fbf562..b572950a67 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -396,10 +396,6 @@ void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { } void RasterizerOpenGL::SignalSyncPoint(u32 value) { - if (!gpu.IsAsync()) { - gpu.IncrementSyncPoint(value); - return; - } fence_manager.SignalSyncPoint(value); } diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 50fdf5e18b..a95f682314 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -458,10 +458,6 @@ void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { } void RasterizerVulkan::SignalSyncPoint(u32 value) { - if (!gpu.IsAsync()) { - gpu.IncrementSyncPoint(value); - return; - } fence_manager.SignalSyncPoint(value); } From 98b5e236d4049d1a2a4c6413486b5015b7efe2c8 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 5 Feb 2022 12:30:19 +0100 Subject: [PATCH 31/68] Vulkan: Fix Scissor on Clears --- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index a95f682314..d7b57e0f3d 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -247,8 +247,15 @@ void RasterizerVulkan::Clear() { } UpdateViewportsState(regs); + VkRect2D default_scissor; + default_scissor.offset.x = 0; + default_scissor.offset.y = 0; + default_scissor.extent.width = std::numeric_limits::max(); + default_scissor.extent.height = std::numeric_limits::max(); + VkClearRect clear_rect{ - .rect = GetScissorState(regs, 0, up_scale, down_shift), + .rect = regs.clear_flags.scissor ? GetScissorState(regs, 0, up_scale, down_shift) + : default_scissor, .baseArrayLayer = regs.clear_buffers.layer, .layerCount = 1, }; From 4d60410dd979fb688de7735d2b4b25a557bdeac7 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 5 Feb 2022 18:15:26 +0100 Subject: [PATCH 32/68] MemoryManager: initial multi paging system implementation. 
---
 src/common/multi_level_page_table.inc         |   3 +
 .../service/nvdrv/devices/nvhost_as_gpu.cpp   |  45 +-
 .../hle/service/nvdrv/devices/nvhost_as_gpu.h |   1 +
 src/core/hle/service/nvdrv/devices/nvmap.cpp  |  10 +
 src/video_core/memory_manager.cpp             | 442 +++++++++++-------
 src/video_core/memory_manager.h               |  57 ++-
 6 files changed, 346 insertions(+), 212 deletions(-)

diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
index 7fbcb908ac..9a68cad936 100644
--- a/src/common/multi_level_page_table.inc
+++ b/src/common/multi_level_page_table.inc
@@ -19,6 +19,9 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
                                                    std::size_t page_bits_)
     : address_space_bits{address_space_bits_}, first_level_bits{first_level_bits_},
       page_bits{page_bits_} {
+    if (page_bits == 0) {
+        return;
+    }
     first_level_shift = address_space_bits - first_level_bits;
     first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
     alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index db2a6c3b2e..d95a883931 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -133,7 +133,8 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
     const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits};
     vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);
 
-    gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, VM::PAGE_SIZE_BITS);
+    gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
+                                                  VM::PAGE_SIZE_BITS);
     system.GPU().InitAddressSpace(*gmmu);
     vm.initialised = true;
 
@@ -189,6 +190,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
         .size = size,
         .page_size = params.page_size,
         .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
+        .big_pages = params.page_size != VM::YUZU_PAGESIZE,
     };
 
     std::memcpy(output.data(), &params, output.size());
@@ -209,7 +211,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
     // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
     // Only FreeSpace can unmap them fully
     if (mapping->sparse_alloc)
-        gmmu->MapSparse(offset, mapping->size);
+        gmmu->MapSparse(offset, mapping->size, mapping->big_page);
     else
         gmmu->Unmap(offset, mapping->size);
 
@@ -294,8 +296,9 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
             return NvResult::BadValue;
         }
 
+        const bool use_big_pages = alloc->second.big_pages;
         if (!entry.handle) {
-            gmmu->MapSparse(virtual_address, size);
+            gmmu->MapSparse(virtual_address, size, use_big_pages);
         } else {
             auto handle{nvmap.GetHandle(entry.handle)};
             if (!handle) {
@@ -306,7 +309,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
                 handle->address +
                 (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
 
-            gmmu->Map(virtual_address, cpu_address, size);
+            gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
         }
     }
 
@@ -345,7 +348,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
             u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
             VAddr cpu_address{mapping->ptr + params.buffer_offset};
 
-            gmmu->Map(gpu_address, cpu_address, params.mapping_size);
+            gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
 
             return NvResult::Success;
         } catch ([[maybe_unused]] const std::out_of_range& e) {
@@ -363,6 +366,17 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
     VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
     u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
 
+    bool big_page{[&]() {
+        if (Common::IsAligned(handle->align, vm.big_page_size))
+            return true;
+        else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
+            return false;
+        else {
+            UNREACHABLE();
+            return false;
+        }
+    }()};
+
     if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
         auto alloc{allocation_map.upper_bound(params.offset)};
 
@@ -372,23 +386,14 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
             return NvResult::BadValue;
         }
 
-        gmmu->Map(params.offset, cpu_address, size);
+        const bool use_big_pages = alloc->second.big_pages && big_page;
+        gmmu->Map(params.offset, cpu_address, size, use_big_pages);
 
-        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true, false,
-                                               alloc->second.sparse)};
+        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
+                                               use_big_pages, alloc->second.sparse)};
         alloc->second.mappings.push_back(mapping);
         mapping_map[params.offset] = mapping;
     } else {
-        bool big_page{[&]() {
-            if (Common::IsAligned(handle->align, vm.big_page_size))
-                return true;
-            else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
-                return false;
-            else {
-                UNREACHABLE();
-                return false;
-            }
-        }()};
 
         auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
         u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
@@ -402,7 +407,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
             return NvResult::InsufficientMemory;
         }
 
-        gmmu->Map(params.offset, cpu_address, size);
+        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
 
         auto mapping{
             std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
@@ -439,7 +444,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
 
         if (mapping->sparse_alloc) {
-            gmmu->MapSparse(params.offset, mapping->size);
+            gmmu->MapSparse(params.offset, mapping->size, mapping->big_page);
         } else {
             gmmu->Unmap(params.offset, mapping->size);
         }
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 1d27739e26..12e881f0db 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -177,6 +177,7 @@ private:
         std::list<std::shared_ptr<Mapping>> mappings;
         u32 page_size;
         bool sparse;
+        bool big_pages;
     };
 
     std::map<u64, std::shared_ptr<Mapping>>
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 279997e813..992c117f1a 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -9,6 +9,8 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/devices/nvmap.h"
@@ -136,6 +138,10 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
         LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
         return result;
     }
+    ASSERT(system.CurrentProcess()
+               ->PageTable()
+               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+               .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
     return result;
 }
@@ -256,6 +262,10 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
     }
 
     if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+        ASSERT(system.CurrentProcess()
+                   ->PageTable()
+                   .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
+                   .IsSuccess());
         params.address = freeInfo->address;
         params.size = static_cast<u32>(freeInfo->size);
         params.flags.raw = 0;
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index b36067613f..836ece136d 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -7,6 +7,7 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/device_memory.h"
 #include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/memory.h"
@@ -14,40 +15,69 @@
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_base.h"
 
+#pragma optimize("", off)
+
 namespace Tegra {
 
 std::atomic<size_t> MemoryManager::unique_identifier_generator{};
 
-MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 page_bits_)
-    : system{system_}, address_space_bits{address_space_bits_}, page_bits{page_bits_}, entries{},
-      page_table{address_space_bits, address_space_bits + page_bits - 38, page_bits},
+MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
+                             u64 page_bits_)
+    : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()},
+      address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
+      entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
+                                           page_bits != big_page_bits ? page_bits : 0},
       unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} {
     address_space_size = 1ULL << address_space_bits;
-    allocate_start = address_space_bits > 32 ? 1ULL << 32 : 0;
     page_size = 1ULL << page_bits;
     page_mask = page_size - 1ULL;
-    const u64 page_table_bits = address_space_bits - cpu_page_bits;
+    big_page_size = 1ULL << big_page_bits;
+    big_page_mask = big_page_size - 1ULL;
+    const u64 page_table_bits = address_space_bits - page_bits;
+    const u64 big_page_table_bits = address_space_bits - big_page_bits;
     const u64 page_table_size = 1ULL << page_table_bits;
+    const u64 big_page_table_size = 1ULL << big_page_table_bits;
     page_table_mask = page_table_size - 1;
+    big_page_table_mask = big_page_table_size - 1;
 
+    big_entries.resize(big_page_table_size / 32, 0);
+    big_page_table_cpu.resize(big_page_table_size);
+    big_page_table_physical.resize(big_page_table_size);
     entries.resize(page_table_size / 32, 0);
 }
 
 MemoryManager::~MemoryManager() = default;
 
+template <bool is_big_page>
 MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const {
-    position = position >> page_bits;
-    const u64 entry_mask = entries[position / 32];
-    const size_t sub_index = position % 32;
-    return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
+    if constexpr (is_big_page) {
+        position = position >> big_page_bits;
+        const u64 entry_mask = big_entries[position / 32];
+        const size_t sub_index = position % 32;
+        return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
+    } else {
+        position = position >> page_bits;
+        const u64 entry_mask = entries[position / 32];
+        const size_t sub_index = position % 32;
+        return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
+    }
 }
 
+template <bool is_big_page>
 void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
-    position = position >> page_bits;
-    const u64 entry_mask = entries[position / 32];
-    const size_t sub_index = position % 32;
-    entries[position / 32] =
-        (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
+    if constexpr (is_big_page) {
+        position = position >> big_page_bits;
+        const u64 entry_mask = big_entries[position / 32];
+        const size_t sub_index = position % 32;
+        big_entries[position / 32] =
+            (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
+    } else {
+        position = position >> page_bits;
+        const u64 entry_mask = entries[position / 32];
+        const size_t sub_index = position % 32;
+        entries[position / 32] =
+            (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
+    }
 }
 
 template <MemoryManager::EntryType entry_type>
@@ -59,48 +89,66 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
     }
     for (u64 offset{}; offset < size; offset += page_size) {
         const GPUVAddr current_gpu_addr = gpu_addr + offset;
-        [[maybe_unused]] const auto current_entry_type = GetEntry(current_gpu_addr);
-        SetEntry(current_gpu_addr, entry_type);
+        [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr);
+        SetEntry<false>(current_gpu_addr, entry_type);
         if (current_entry_type != entry_type) {
             rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size);
         }
         if constexpr (entry_type == EntryType::Mapped) {
             const VAddr current_cpu_addr = cpu_addr + offset;
-            const auto index = PageEntryIndex(current_gpu_addr);
-            const u32 sub_value = static_cast<u32>(current_cpu_addr >> 12ULL);
-            if (current_entry_type == entry_type && sub_value != page_table[index]) {
-                rasterizer->InvalidateRegion(static_cast<VAddr>(page_table[index]) << 12ULL,
-                                             page_size);
-            }
-            page_table[index] = static_cast<u32>(current_cpu_addr >> 12ULL);
+            const auto index = PageEntryIndex<false>(current_gpu_addr);
+            const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+            page_table[index] = sub_value;
         }
         remaining_size -= page_size;
     }
     return gpu_addr;
 }
 
+template <MemoryManager::EntryType entry_type>
+GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+                                       size_t size) {
+    u64 remaining_size{size};
+    for (u64 offset{}; offset < size; offset += big_page_size) {
+        const GPUVAddr current_gpu_addr = gpu_addr + offset;
+        [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr);
+        SetEntry<true>(current_gpu_addr, entry_type);
+        if (current_entry_type != entry_type) {
+            rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size);
+        }
+        if constexpr (entry_type == EntryType::Mapped) {
+            const VAddr current_cpu_addr = cpu_addr + offset;
+            const auto index = PageEntryIndex<true>(current_gpu_addr);
+            const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+            big_page_table_cpu[index] = sub_value;
+            const PAddr phys_address =
+                device_memory.GetPhysicalAddr(memory.GetPointer(current_cpu_addr));
+            big_page_table_physical[index] = static_cast<u32>(phys_address);
+        }
+        remaining_size -= big_page_size;
+    }
+    return gpu_addr;
+}
+
 void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
     rasterizer = rasterizer_;
 }
 
-GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size) {
+GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
+                            bool is_big_pages) {
+    if (is_big_pages) [[likely]] {
+        return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
+    }
     return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
 }
 
-GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size) {
+GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
+    if (is_big_pages) [[likely]] {
+        return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
+    }
     return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
 }
 
-GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
-    return Map(*FindFreeRange(size, align), cpu_addr, size);
-}
-
-GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) {
-    const std::optional<GPUVAddr> gpu_addr = FindFreeRange(size, 1, true);
-    ASSERT(gpu_addr);
-    return Map(*gpu_addr, cpu_addr, size);
-}
-
 void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     if (size == 0) {
         return;
@@ -115,61 +163,24 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
         rasterizer->UnmapMemory(*cpu_addr, map_size);
     }
 
+    BigPageTableOp<EntryType::Free>(gpu_addr, 0, size);
     PageTableOp<EntryType::Free>(gpu_addr, 0, size);
 }
 
-std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
-    for (u64 offset{}; offset < size; offset += page_size) {
-        if (GetEntry(gpu_addr + offset) != EntryType::Free) {
+std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
+    if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] {
+        if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
             return std::nullopt;
         }
+
+        const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
+                                    << cpu_page_bits;
+        return cpu_addr_base + (gpu_addr & page_mask);
     }
 
-    return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
-}
-
-GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
-    return *AllocateFixed(*FindFreeRange(size, align), size);
-}
-
-std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align,
-                                                     bool start_32bit_address) const {
-    if (!align) {
-        align = page_size;
-    } else {
-        align = Common::AlignUp(align, page_size);
-    }
-
-    u64 available_size{};
-    GPUVAddr gpu_addr{start_32bit_address ? 0 : allocate_start};
-    while (gpu_addr + available_size < address_space_size) {
-        if (GetEntry(gpu_addr + available_size) == EntryType::Free) {
-            available_size += page_size;
-
-            if (available_size >= size) {
-                return gpu_addr;
-            }
-        } else {
-            gpu_addr += available_size + page_size;
-            available_size = 0;
-
-            const auto remainder{gpu_addr % align};
-            if (remainder) {
-                gpu_addr = (gpu_addr - remainder) + align;
-            }
-        }
-    }
-
-    return std::nullopt;
-}
-
-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
-    if (GetEntry(gpu_addr) != EntryType::Mapped) {
-        return std::nullopt;
-    }
-
-    const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex(gpu_addr)]) << 12ULL;
-    return cpu_addr_base + (gpu_addr & page_mask);
+    const VAddr cpu_addr_base =
+        static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
+    return cpu_addr_base + (gpu_addr & big_page_mask);
 }
 
 std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
@@ -225,7 +236,7 @@ u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
         return {};
     }
 
-    return system.Memory().GetPointer(*address);
+    return memory.GetPointer(*address);
 }
 
 const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
@@ -234,98 +245,161 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
         return {};
     }
 
-    return system.Memory().GetPointer(*address);
+    return memory.GetPointer(*address);
 }
 
-void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
-                                  bool is_safe) const {
+#pragma inline_recursion(on)
+
+template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
+inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size,
+                                           FuncMapped&& func_mapped, FuncReserved&& func_reserved,
+                                           FuncUnmapped&& func_unmapped) const {
+    u64 used_page_size;
+    u64 used_page_mask;
+    u64 used_page_bits;
+    if constexpr (is_big_pages) {
+        used_page_size = big_page_size;
+        used_page_mask = big_page_mask;
+        used_page_bits = big_page_bits;
+    } else {
+        used_page_size = page_size;
+        used_page_mask = page_mask;
+        used_page_bits = page_bits;
+    }
     std::size_t remaining_size{size};
-    std::size_t page_index{gpu_src_addr >> page_bits};
-    std::size_t page_offset{gpu_src_addr & page_mask};
+    std::size_t page_index{gpu_src_addr >> used_page_bits};
+    std::size_t page_offset{gpu_src_addr & used_page_mask};
+    GPUVAddr current_address = gpu_src_addr;
 
     while (remaining_size > 0) {
         const std::size_t copy_amount{
-            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (page_addr) {
-            const auto src_addr{*page_addr + page_offset};
-            if (is_safe) {
-                // Flush must happen on the rasterizer interface, such that memory is always
-                // synchronous when it is read (even when in asynchronous GPU mode).
-                // Fixes Dead Cells title menu.
-                rasterizer->FlushRegion(src_addr, copy_amount);
-            }
-            system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
-        } else {
-            std::memset(dest_buffer, 0, copy_amount);
+            std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
+        auto entry = GetEntry<is_big_pages>(current_address);
+        if (entry == EntryType::Mapped) [[likely]] {
+            func_mapped(page_index, page_offset, copy_amount);
+        } else if (entry == EntryType::Reserved) {
+            func_reserved(page_index, page_offset, copy_amount);
+        } else [[unlikely]] {
+            func_unmapped(page_index, page_offset, copy_amount);
         }
-
         page_index++;
         page_offset = 0;
-        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
         remaining_size -= copy_amount;
+        current_address += copy_amount;
     }
 }
 
+template <bool is_safe>
+void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
+                                  std::size_t size) const {
+    auto set_to_zero = [&]([[maybe_unused]] std::size_t page_index,
+                           [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
+        std::memset(dest_buffer, 0, copy_amount);
+        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+    };
+    auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if constexpr (is_safe) {
+            rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+        }
+        memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+    };
+    auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        if constexpr (is_safe) {
+            rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+        }
+        memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+        // u8* physical = device_memory.GetPointer(big_page_table_physical[page_index] + offset);
+        // std::memcpy(dest_buffer, physical, copy_amount);
+        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+    };
+    auto read_short_pages = [&](std::size_t page_index, std::size_t offset,
+                                std::size_t copy_amount) {
+        GPUVAddr base = (page_index << big_page_bits) + offset;
+        MemoryOperation<false>(base, copy_amount, mapped_normal, set_to_zero, set_to_zero);
+    };
+    MemoryOperation<true>(gpu_src_addr, size, mapped_big, set_to_zero, read_short_pages);
+}
+
 void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
-    ReadBlockImpl(gpu_src_addr, dest_buffer, size, true);
+    ReadBlockImpl<true>(gpu_src_addr, dest_buffer, size);
 }
 
 void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
                                     const std::size_t size) const {
-    ReadBlockImpl(gpu_src_addr, dest_buffer, size, false);
+    ReadBlockImpl<false>(gpu_src_addr, dest_buffer, size);
 }
 
-void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
-                                   bool is_safe) {
-    std::size_t remaining_size{size};
-    std::size_t page_index{gpu_dest_addr >> page_bits};
-    std::size_t page_offset{gpu_dest_addr & page_mask};
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount{
-            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (page_addr) {
-            const auto dest_addr{*page_addr + page_offset};
-
-            if (is_safe) {
-                // Invalidate must happen on the rasterizer interface, such that memory is always
-                // synchronous when it is written (even when in asynchronous GPU mode).
-                rasterizer->InvalidateRegion(dest_addr, copy_amount);
-            }
-            system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
-        }
-
-        page_index++;
-        page_offset = 0;
+template <bool is_safe>
+void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer,
+                                   std::size_t size) {
+    auto just_advance = [&]([[maybe_unused]] std::size_t page_index,
+                            [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
+    };
+    auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if constexpr (is_safe) {
+            rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+        }
+        memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+    };
+    auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        if constexpr (is_safe) {
+            rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+        }
+        memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+        /*u8* physical =
+            device_memory.GetPointer(big_page_table_physical[page_index] << cpu_page_bits) + offset;
+        std::memcpy(physical, src_buffer, copy_amount);*/
+        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+    };
+    auto write_short_pages = [&](std::size_t page_index, std::size_t offset,
+                                 std::size_t copy_amount) {
+        GPUVAddr base = (page_index << big_page_bits) + offset;
+        MemoryOperation<false>(base, copy_amount, mapped_normal, just_advance, just_advance);
+    };
+    MemoryOperation<true>(gpu_dest_addr, size, mapped_big, just_advance, write_short_pages);
 }
 
 void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
-    WriteBlockImpl(gpu_dest_addr, src_buffer, size, true);
+    WriteBlockImpl<true>(gpu_dest_addr, src_buffer, size);
 }
 
 void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
                                      std::size_t size) {
-    WriteBlockImpl(gpu_dest_addr, src_buffer, size, false);
+    WriteBlockImpl<false>(gpu_dest_addr, src_buffer, size);
 }
 
 void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const {
-    size_t remaining_size{size};
-    size_t page_index{gpu_addr >> page_bits};
-    size_t page_offset{gpu_addr & page_mask};
-    while (remaining_size > 0) {
-        const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
-            rasterizer->FlushRegion(*page_addr + page_offset, num_bytes);
-        }
-        ++page_index;
-        page_offset = 0;
-        remaining_size -= num_bytes;
-    }
+    auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
+                          [[maybe_unused]] std::size_t offset,
+                          [[maybe_unused]] std::size_t copy_amount) {};
+
+    auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+    };
+    auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+    };
+    auto flush_short_pages = [&](std::size_t page_index, std::size_t offset,
+                                 std::size_t copy_amount) {
copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation(base, copy_amount, mapped_normal, do_nothing, do_nothing); + }; + MemoryOperation(gpu_addr, size, mapped_big, do_nothing, flush_short_pages); } void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { @@ -348,7 +422,7 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { } bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { - size_t page_index{gpu_addr >> page_bits}; + size_t page_index{gpu_addr >> big_page_bits}; const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; std::optional old_page_addr{}; while (page_index != page_last) { @@ -371,7 +445,7 @@ bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) cons size_t page_index{gpu_addr >> page_bits}; const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; while (page_index < page_last) { - if (GetEntry(page_index << page_bits) == EntryType::Free) { + if (GetEntry(page_index << page_bits) == EntryType::Free) { return false; } ++page_index; @@ -379,47 +453,63 @@ bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) cons return true; } +#pragma inline_recursion(on) + std::vector> MemoryManager::GetSubmappedRange( GPUVAddr gpu_addr, std::size_t size) const { std::vector> result{}; - size_t page_index{gpu_addr >> page_bits}; - size_t remaining_size{size}; - size_t page_offset{gpu_addr & page_mask}; std::optional> last_segment{}; std::optional old_page_addr{}; - const auto extend_size = [this, &last_segment, &page_index, &page_offset](std::size_t bytes) { - if (!last_segment) { - const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset; - last_segment = {new_base_addr, bytes}; - } else { - last_segment->second += bytes; - } - }; - const auto split = [&last_segment, &result] { + const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { if (last_segment) { result.push_back(*last_segment); last_segment = std::nullopt; } }; - while (remaining_size > 0) { - const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; - const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (!page_addr || *page_addr == 0) { - split(); - } else if (old_page_addr) { - if (*old_page_addr + page_size != *page_addr) { - split(); + const auto extend_size_big = [this, &split, &old_page_addr, + &last_segment](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if (old_page_addr) { + if (*old_page_addr != cpu_addr_base) { + split(0, 0, 0); } - extend_size(num_bytes); - } else { - extend_size(num_bytes); } - ++page_index; - page_offset = 0; - remaining_size -= num_bytes; - old_page_addr = page_addr; - } - split(); + old_page_addr = {cpu_addr_base + copy_amount}; + if (!last_segment) { + const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset; + last_segment = {new_base_addr, copy_amount}; + } else { + last_segment->second += copy_amount; + } + }; + const auto extend_size_short = [this, &split, &old_page_addr, + &last_segment](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr) { + if 
(*old_page_addr != cpu_addr_base) { + split(0, 0, 0); + } + } + old_page_addr = {cpu_addr_base + copy_amount}; + if (!last_segment) { + const GPUVAddr new_base_addr = (page_index << page_bits) + offset; + last_segment = {new_base_addr, copy_amount}; + } else { + last_segment->second += copy_amount; + } + }; + auto do_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation(base, copy_amount, extend_size_short, split, split); + }; + MemoryOperation(gpu_addr, size, extend_size_big, split, do_short_pages); + split(0, 0, 0); return result; } diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 56604ef3e8..9c388a06e8 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -10,21 +10,26 @@ #include "common/common_types.h" #include "common/multi_level_page_table.h" +#include "common/virtual_buffer.h" namespace VideoCore { class RasterizerInterface; } namespace Core { +class DeviceMemory; +namespace Memory { +class Memory; +} // namespace Memory class System; -} +} // namespace Core namespace Tegra { class MemoryManager final { public: explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40, - u64 page_bits_ = 16); + u64 big_page_bits_ = 16, u64 page_bits_ = 12); ~MemoryManager(); size_t GetID() const { @@ -93,12 +98,8 @@ public: std::vector> GetSubmappedRange(GPUVAddr gpu_addr, std::size_t size) const; - GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size); - GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size); - [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align); - [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size); - [[nodiscard]] std::optional AllocateFixed(GPUVAddr gpu_addr, std::size_t size); - [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align); + GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true); + GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); void Unmap(GPUVAddr gpu_addr, std::size_t size); void FlushRegion(GPUVAddr gpu_addr, size_t size) const; @@ -107,26 +108,42 @@ private: [[nodiscard]] std::optional FindFreeRange(std::size_t size, std::size_t align, bool start_32bit_address = false) const; - void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, - bool is_safe) const; - void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, - bool is_safe); + template + inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, + FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const; + template + void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const; + + template + void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size); + + template [[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const { - return (gpu_addr >> page_bits) & page_table_mask; + if constexpr (is_big_page) { + return (gpu_addr >> big_page_bits) & big_page_table_mask; + } else { + return (gpu_addr >> page_bits) & page_table_mask; + } } Core::System& system; + Core::Memory::Memory& memory; + Core::DeviceMemory& device_memory; const u64 address_space_bits; const u64 page_bits; u64 address_space_size; - u64 allocate_start; u64 page_size; u64 page_mask; u64 page_table_mask; static constexpr u64 cpu_page_bits{12}; 
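// Hedged sketch of the dual-granularity indexing this header introduces: a
// 64 KiB big-page table alongside the existing 4 KiB page table, selected by the
// templated PageEntryIndex<is_big_page>. Constants mirror the new constructor
// defaults (big_page_bits = 16, page_bits = 12); the helper names are illustrative.
#include <cstdint>

constexpr std::uint64_t kPageBits = 12;    // 4 KiB small pages
constexpr std::uint64_t kBigPageBits = 16; // 64 KiB big pages

constexpr std::uint64_t SmallPageIndex(std::uint64_t gpu_addr) {
    return gpu_addr >> kPageBits;
}
constexpr std::uint64_t BigPageIndex(std::uint64_t gpu_addr) {
    return gpu_addr >> kBigPageBits;
}

// One big page spans exactly 2^(16 - 12) = 16 small pages:
static_assert(BigPageIndex(0x123456789ULL) ==
              SmallPageIndex(0x123456789ULL) >> (kBigPageBits - kPageBits));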
+ const u64 big_page_bits; + u64 big_page_size; + u64 big_page_mask; + u64 big_page_table_mask; + VideoCore::RasterizerInterface* rasterizer = nullptr; enum class EntryType : u64 { @@ -136,15 +153,23 @@ private: }; std::vector entries; + std::vector big_entries; template GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); - EntryType GetEntry(size_t position) const; + template + GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); - void SetEntry(size_t position, EntryType entry); + template + inline EntryType GetEntry(size_t position) const; + + template + inline void SetEntry(size_t position, EntryType entry); Common::MultiLevelPageTable page_table; + Common::VirtualBuffer big_page_table_cpu; + Common::VirtualBuffer big_page_table_physical; const size_t unique_identifier; From bc8b3d225eda388f0603830cbff8357893abb0f9 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 6 Feb 2022 01:16:11 +0100 Subject: [PATCH 33/68] VideoCore: Refactor fencing system. --- .../service/nvdrv/devices/nvdisp_disp0.cpp | 5 +- .../hle/service/nvdrv/devices/nvdisp_disp0.h | 3 +- src/core/hle/service/nvflinger/nvflinger.cpp | 15 +-- src/video_core/buffer_cache/buffer_cache.h | 13 +++ src/video_core/dma_pusher.cpp | 3 - src/video_core/engines/maxwell_3d.cpp | 24 ++++- src/video_core/engines/puller.cpp | 39 +++++--- src/video_core/fence_manager.h | 96 ++++++++----------- src/video_core/gpu.cpp | 17 ++-- src/video_core/gpu.h | 4 +- src/video_core/gpu_thread.cpp | 2 +- src/video_core/rasterizer_interface.h | 7 +- .../renderer_opengl/gl_fence_manager.cpp | 13 +-- .../renderer_opengl/gl_fence_manager.h | 6 +- .../renderer_opengl/gl_rasterizer.cpp | 22 ++--- .../renderer_opengl/gl_rasterizer.h | 5 +- .../renderer_vulkan/vk_fence_manager.cpp | 15 +-- .../renderer_vulkan/vk_fence_manager.h | 6 +- .../renderer_vulkan/vk_rasterizer.cpp | 21 ++-- .../renderer_vulkan/vk_rasterizer.h | 5 +- 20 files changed, 154 insertions(+), 167 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index e6a976714f..18c5324a9a 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -40,7 +40,8 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {} void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, u32 stride, android::BufferTransformFlags transform, - const Common::Rectangle& crop_rect) { + const Common::Rectangle& crop_rect, + std::array& fences, u32 num_fences) { const VAddr addr = nvmap.GetHandleAddress(buffer_handle); LOG_TRACE(Service, "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", @@ -50,7 +51,7 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form stride, format, transform, crop_rect}; system.GetPerfStats().EndSystemFrame(); - system.GPU().RequestSwapBuffers(&framebuffer, nullptr, 0); + system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences); system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs()); system.GetPerfStats().BeginSystemFrame(); } diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index 1ca9b2e74a..04217ab129 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h @@ -38,7 +38,8 @@ public: /// Performs a screen flip, drawing the buffer 
pointed to by the handle. void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, u32 stride, android::BufferTransformFlags transform, - const Common::Rectangle& crop_rect); + const Common::Rectangle& crop_rect, + std::array& fences, u32 num_fences); Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index aa112021d2..4658f1e8b8 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -269,17 +269,6 @@ void NVFlinger::Compose() { return; // We are likely shutting down } - auto& syncpoint_manager = system.Host1x().GetSyncpointManager(); - const auto& multi_fence = buffer.fence; - guard->unlock(); - for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) { - const auto& fence = multi_fence.fences[fence_id]; - syncpoint_manager.WaitGuest(fence.id, fence.value); - } - guard->lock(); - - MicroProfileFlip(); - // Now send the buffer to the GPU for drawing. // TODO(Subv): Support more than just disp0. The display device selection is probably based // on which display we're drawing (Default, Internal, External, etc) @@ -293,8 +282,10 @@ void NVFlinger::Compose() { nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(), igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(), - static_cast(buffer.transform), crop_rect); + static_cast(buffer.transform), crop_rect, + buffer.fence.fences, buffer.fence.num_fences); + MicroProfileFlip(); guard->lock(); swap_interval = buffer.swap_interval; diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 6b6764d721..e55cac0d65 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -826,6 +826,19 @@ void BufferCache
<P>
::CommitAsyncFlushesHigh() { const bool is_accuracy_normal = Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal; + auto it = committed_ranges.begin(); + while (it != committed_ranges.end()) { + auto& current_intervals = *it; + auto next_it = std::next(it); + while (next_it != committed_ranges.end()) { + for (auto& interval : *next_it) { + current_intervals.subtract(interval); + } + next_it++; + } + it++; + } + boost::container::small_vector, 1> downloads; u64 total_size_bytes = 0; u64 largest_copy = 0; diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index b01f04d0c9..9835e3ac1e 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -24,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128, void DmaPusher::DispatchCalls() { MICROPROFILE_SCOPE(DispatchCalls); - gpu.SyncGuestHost(); - dma_pushbuffer_subindex = 0; dma_state.is_last_call = true; @@ -36,7 +34,6 @@ void DmaPusher::DispatchCalls() { } } gpu.FlushCommands(); - gpu.SyncGuestHost(); gpu.OnCommandListEnd(); } diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 3a4646289c..950c70dcd9 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -242,6 +242,9 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume return; case MAXWELL3D_REG_INDEX(fragment_barrier): return rasterizer->FragmentBarrier(); + case MAXWELL3D_REG_INDEX(invalidate_texture_data_cache): + rasterizer->InvalidateGPUCache(); + return rasterizer->WaitForIdle(); case MAXWELL3D_REG_INDEX(tiled_cache_barrier): return rasterizer->TiledCacheBarrier(); } @@ -472,10 +475,25 @@ void Maxwell3D::ProcessQueryGet() { switch (regs.query.query_get.operation) { case Regs::QueryOperation::Release: - if (regs.query.query_get.fence == 1) { - rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence); + if (regs.query.query_get.fence == 1 || regs.query.query_get.short_query != 0) { + const GPUVAddr sequence_address{regs.query.QueryAddress()}; + const u32 payload = regs.query.query_sequence; + std::function operation([this, sequence_address, payload] { + memory_manager.Write(sequence_address, payload); + }); + rasterizer->SignalFence(std::move(operation)); } else { - StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0); + struct LongQueryResult { + u64_le value; + u64_le timestamp; + }; + const GPUVAddr sequence_address{regs.query.QueryAddress()}; + const u32 payload = regs.query.query_sequence; + std::function operation([this, sequence_address, payload] { + LongQueryResult query_result{payload, system.GPU().GetTicks()}; + memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result)); + }); + rasterizer->SignalFence(std::move(operation)); } break; case Regs::QueryOperation::Acquire: diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp index 8c17639e43..dd9494efab 100644 --- a/src/video_core/engines/puller.cpp +++ b/src/video_core/engines/puller.cpp @@ -79,12 +79,15 @@ void Puller::ProcessSemaphoreTriggerMethod() { u64 timestamp; }; - Block block{}; - block.sequence = regs.semaphore_sequence; - // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of - // CoreTiming - block.timestamp = gpu.GetTicks(); - memory_manager.WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, sizeof(block)); + const GPUVAddr 
sequence_address{regs.semaphore_address.SemaphoreAddress()}; + const u32 payload = regs.semaphore_sequence; + std::function operation([this, sequence_address, payload] { + Block block{}; + block.sequence = payload; + block.timestamp = gpu.GetTicks(); + memory_manager.WriteBlock(sequence_address, &block, sizeof(block)); + }); + rasterizer->SignalFence(std::move(operation)); } else { do { const u32 word{memory_manager.Read(regs.semaphore_address.SemaphoreAddress())}; @@ -94,6 +97,7 @@ void Puller::ProcessSemaphoreTriggerMethod() { regs.acquire_active = true; regs.acquire_mode = false; if (word != regs.acquire_value) { + rasterizer->ReleaseFences(); std::this_thread::sleep_for(std::chrono::milliseconds(1)); continue; } @@ -101,11 +105,13 @@ void Puller::ProcessSemaphoreTriggerMethod() { regs.acquire_active = true; regs.acquire_mode = true; if (word < regs.acquire_value) { + rasterizer->ReleaseFences(); std::this_thread::sleep_for(std::chrono::milliseconds(1)); continue; } } else if (op == GpuSemaphoreOperation::AcquireMask) { - if (word & regs.semaphore_sequence == 0) { + if (word && regs.semaphore_sequence == 0) { + rasterizer->ReleaseFences(); std::this_thread::sleep_for(std::chrono::milliseconds(1)); continue; } @@ -117,16 +123,23 @@ void Puller::ProcessSemaphoreTriggerMethod() { } void Puller::ProcessSemaphoreRelease() { - rasterizer->SignalSemaphore(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release); + const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; + const u32 payload = regs.semaphore_release; + std::function operation([this, sequence_address, payload] { + memory_manager.Write(sequence_address, payload); + }); + rasterizer->SignalFence(std::move(operation)); } void Puller::ProcessSemaphoreAcquire() { - const u32 word = memory_manager.Read(regs.semaphore_address.SemaphoreAddress()); + u32 word = memory_manager.Read(regs.semaphore_address.SemaphoreAddress()); const auto value = regs.semaphore_acquire; - std::this_thread::sleep_for(std::chrono::milliseconds(5)); - if (word != value) { + while (word != value) { regs.acquire_active = true; regs.acquire_value = value; + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + rasterizer->ReleaseFences(); + word = memory_manager.Read(regs.semaphore_address.SemaphoreAddress()); // TODO(kemathe73) figure out how to do the acquire_timeout regs.acquire_mode = false; regs.acquire_source = false; @@ -147,9 +160,9 @@ void Puller::CallPullerMethod(const MethodCall& method_call) { case BufferMethods::SemaphoreAddressHigh: case BufferMethods::SemaphoreAddressLow: case BufferMethods::SemaphoreSequencePayload: - case BufferMethods::WrcacheFlush: case BufferMethods::SyncpointPayload: break; + case BufferMethods::WrcacheFlush: case BufferMethods::RefCnt: rasterizer->SignalReference(); break; @@ -173,7 +186,7 @@ void Puller::CallPullerMethod(const MethodCall& method_call) { } case BufferMethods::MemOpB: { // Implement this better. 
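// Illustrative model (simplified types, not the exact yuzu interfaces) of the
// deferral pattern the hunks above adopt: guest-visible semaphore writes are
// captured in a std::function and handed to the rasterizer's fence manager,
// which runs them only once the host GPU has caught up. The acquire loops call
// ReleaseFences() so a spinning guest can still observe pending releases.
#include <functional>
#include <queue>

struct DeferredSignaler {
    std::queue<std::function<void()>> pending;

    // Producer side, e.g. ProcessSemaphoreRelease: capture the write, don't do it yet.
    void SignalFence(std::function<void()>&& op) {
        pending.push(std::move(op));
    }

    // Consumer side: the host fence signaled, perform the writes in order.
    void ReleaseFences() {
        while (!pending.empty()) {
            pending.front()();
            pending.pop();
        }
    }
};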
- rasterizer->SyncGuestHost(); + rasterizer->InvalidateGPUCache(); break; } case BufferMethods::MemOpC: diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 03a70e5e01..c390ac91bc 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h @@ -5,6 +5,8 @@ #include #include +#include +#include #include #include @@ -19,28 +21,7 @@ namespace VideoCommon { class FenceBase { public: - explicit FenceBase(u32 payload_, bool is_stubbed_) - : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {} - - explicit FenceBase(u8* address_, u32 payload_, bool is_stubbed_) - : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {} - - u8* GetAddress() const { - return address; - } - - u32 GetPayload() const { - return payload; - } - - bool IsSemaphore() const { - return is_semaphore; - } - -private: - u8* address; - u32 payload; - bool is_semaphore; + explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {} protected: bool is_stubbed; @@ -60,31 +41,28 @@ public: buffer_cache.AccumulateFlushes(); } - void SignalSemaphore(u8* addr, u32 value) { + void SyncOperation(std::function&& func) { + uncommitted_operations.emplace_back(std::move(func)); + } + + void SignalFence(std::function&& func) { TryReleasePendingFences(); const bool should_flush = ShouldFlush(); CommitAsyncFlushes(); - TFence new_fence = CreateFence(addr, value, !should_flush); + uncommitted_operations.emplace_back(std::move(func)); + CommitOperations(); + TFence new_fence = CreateFence(!should_flush); fences.push(new_fence); QueueFence(new_fence); if (should_flush) { rasterizer.FlushCommands(); } - rasterizer.SyncGuestHost(); } void SignalSyncPoint(u32 value) { syncpoint_manager.IncrementGuest(value); - TryReleasePendingFences(); - const bool should_flush = ShouldFlush(); - CommitAsyncFlushes(); - TFence new_fence = CreateFence(value, !should_flush); - fences.push(new_fence); - QueueFence(new_fence); - if (should_flush) { - rasterizer.FlushCommands(); - } - rasterizer.SyncGuestHost(); + std::function func([this, value] { syncpoint_manager.IncrementHost(value); }); + SignalFence(std::move(func)); } void WaitPendingFences() { @@ -94,12 +72,10 @@ public: WaitFence(current_fence); } PopAsyncFlushes(); - if (current_fence->IsSemaphore()) { - char* address = reinterpret_cast(current_fence->GetAddress()); - auto payload = current_fence->GetPayload(); - std::memcpy(address, &payload, sizeof(payload)); - } else { - syncpoint_manager.IncrementHost(current_fence->GetPayload()); + auto operations = std::move(pending_operations.front()); + pending_operations.pop_front(); + for (auto& operation : operations) { + operation(); } PopFence(); } @@ -114,11 +90,9 @@ protected: virtual ~FenceManager() = default; - /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is + /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is /// true - virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; - /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true - virtual TFence CreateFence(u8* addr, u32 value, bool is_stubbed) = 0; + virtual TFence CreateFence(bool is_stubbed) = 0; /// Queues a fence into the backend if the fence isn't stubbed. virtual void QueueFence(TFence& fence) = 0; /// Notifies that the backend fence has been signaled/reached in host GPU. 
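// A compact model of the two queues the refactored FenceManager keeps in
// lock-step (types simplified; behavior inferred from the diff above): every
// fence pushed owns exactly one batch of deferred operations, committed when the
// fence is created (CommitOperations) and executed when that fence is popped.
#include <deque>
#include <functional>
#include <queue>

struct FenceModel {
    std::queue<int> fences; // stand-in for TFence handles
    std::deque<std::function<void()>> uncommitted_operations;
    std::deque<std::deque<std::function<void()>>> pending_operations;

    void SignalFence(std::function<void()>&& func, int backend_fence) {
        uncommitted_operations.emplace_back(std::move(func));
        pending_operations.emplace_back(std::move(uncommitted_operations)); // CommitOperations()
        fences.push(backend_fence);
    }

    void PopFence() { // the backend fence was reached: run its batch, in order
        auto operations = std::move(pending_operations.front());
        pending_operations.pop_front();
        for (auto& operation : operations) {
            operation();
        }
        fences.pop();
    }
};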
@@ -141,12 +115,10 @@ private: return; } PopAsyncFlushes(); - if (current_fence->IsSemaphore()) { - char* address = reinterpret_cast(current_fence->GetAddress()); - const auto payload = current_fence->GetPayload(); - std::memcpy(address, &payload, sizeof(payload)); - } else { - syncpoint_manager.IncrementHost(current_fence->GetPayload()); + auto operations = std::move(pending_operations.front()); + pending_operations.pop_front(); + for (auto& operation : operations) { + operation(); } PopFence(); } @@ -165,16 +137,20 @@ private: } void PopAsyncFlushes() { - std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; - texture_cache.PopAsyncFlushes(); - buffer_cache.PopAsyncFlushes(); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.PopAsyncFlushes(); + buffer_cache.PopAsyncFlushes(); + } query_cache.PopAsyncFlushes(); } void CommitAsyncFlushes() { - std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; - texture_cache.CommitAsyncFlushes(); - buffer_cache.CommitAsyncFlushes(); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.CommitAsyncFlushes(); + buffer_cache.CommitAsyncFlushes(); + } query_cache.CommitAsyncFlushes(); } @@ -183,7 +159,13 @@ private: fences.pop(); } + void CommitOperations() { + pending_operations.emplace_back(std::move(uncommitted_operations)); + } + std::queue fences; + std::deque> uncommitted_operations; + std::deque>> pending_operations; DelayedDestructionRing delayed_destruction_ring; }; diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index a1d19b1c8a..d7a3dd96ba 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -93,16 +93,13 @@ struct GPU::Impl { } /// Synchronizes CPU writes with Host GPU memory. - void SyncGuestHost() { - rasterizer->SyncGuestHost(); + void InvalidateGPUCache() { + rasterizer->InvalidateGPUCache(); } /// Signal the ending of command list. void OnCommandListEnd() { - if (is_async) { - // This command only applies to asynchronous GPU mode - gpu_thread.OnCommandListEnd(); - } + gpu_thread.OnCommandListEnd(); } /// Request a host GPU memory flush from the CPU. @@ -296,7 +293,7 @@ struct GPU::Impl { } void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, - Service::Nvidia::NvFence* fences, size_t num_fences) { + std::array& fences, size_t num_fences) { size_t current_request_counter{}; { std::unique_lock lk(request_swap_mutex); @@ -412,8 +409,8 @@ void GPU::FlushCommands() { impl->FlushCommands(); } -void GPU::SyncGuestHost() { - impl->SyncGuestHost(); +void GPU::InvalidateGPUCache() { + impl->InvalidateGPUCache(); } void GPU::OnCommandListEnd() { @@ -488,7 +485,7 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const { } void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, - Service::Nvidia::NvFence* fences, size_t num_fences) { + std::array& fences, size_t num_fences) { impl->RequestSwapBuffers(framebuffer, fences, num_fences); } diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 655373b33c..0a4a8b14fa 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -110,7 +110,7 @@ public: /// Flush all current written commands into the host GPU for execution. void FlushCommands(); /// Synchronizes CPU writes with Host GPU memory. - void SyncGuestHost(); + void InvalidateGPUCache(); /// Signal the ending of command list. 
void OnCommandListEnd(); @@ -180,7 +180,7 @@ public: void RendererFrameEndNotify(); void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, - Service::Nvidia::NvFence* fences, size_t num_fences); + std::array& fences, size_t num_fences); /// Performs any additional setup necessary in order to begin GPU emulation. /// This can be used to launch any necessary threads and register any necessary diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 2c03545bf1..1bd477011e 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -98,7 +98,7 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) { } void ThreadManager::TickGPU() { - PushCommand(GPUTickCommand(), true); + PushCommand(GPUTickCommand()); } void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 5362aafb67..cb07f3d38b 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -62,7 +62,10 @@ public: virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0; /// Signal a GPU based semaphore as a fence - virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0; + virtual void SignalFence(std::function&& func) = 0; + + /// Send an operation to be done after a certain amount of flushes. + virtual void SyncOperation(std::function&& func) = 0; /// Signal a GPU based syncpoint as a fence virtual void SignalSyncPoint(u32 value) = 0; @@ -89,7 +92,7 @@ public: virtual void OnCPUWrite(VAddr addr, u64 size) = 0; /// Sync memory between guest and host. - virtual void SyncGuestHost() = 0; + virtual void InvalidateGPUCache() = 0; /// Unmap memory range virtual void UnmapMemory(VAddr addr, u64 size) = 0; diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp index c76446b605..91463f854f 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.cpp +++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp @@ -10,10 +10,7 @@ namespace OpenGL { -GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {} - -GLInnerFence::GLInnerFence(u8* address_, u32 payload_, bool is_stubbed_) - : FenceBase{address_, payload_, is_stubbed_} {} +GLInnerFence::GLInnerFence(bool is_stubbed_) : FenceBase{is_stubbed_} {} GLInnerFence::~GLInnerFence() = default; @@ -48,12 +45,8 @@ FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterize BufferCache& buffer_cache_, QueryCache& query_cache_) : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {} -Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { - return std::make_shared(value, is_stubbed); -} - -Fence FenceManagerOpenGL::CreateFence(u8* addr, u32 value, bool is_stubbed) { - return std::make_shared(addr, value, is_stubbed); +Fence FenceManagerOpenGL::CreateFence(bool is_stubbed) { + return std::make_shared(is_stubbed); } void FenceManagerOpenGL::QueueFence(Fence& fence) { diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h index fced8d0029..f1446e732a 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.h +++ b/src/video_core/renderer_opengl/gl_fence_manager.h @@ -16,8 +16,7 @@ namespace OpenGL { class GLInnerFence : public VideoCommon::FenceBase { public: - explicit GLInnerFence(u32 payload_, bool is_stubbed_); - explicit GLInnerFence(u8* address_, u32 
payload_, bool is_stubbed_); + explicit GLInnerFence(bool is_stubbed_); ~GLInnerFence(); void Queue(); @@ -40,8 +39,7 @@ public: QueryCache& query_cache); protected: - Fence CreateFence(u32 value, bool is_stubbed) override; - Fence CreateFence(u8* addr, u32 value, bool is_stubbed) override; + Fence CreateFence(bool is_stubbed) override; void QueueFence(Fence& fence) override; bool IsFenceSignaled(Fence& fence) const override; void WaitFence(Fence& fence) override; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index b572950a67..6ebd6cff94 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -358,7 +358,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { } } -void RasterizerOpenGL::SyncGuestHost() { +void RasterizerOpenGL::InvalidateGPUCache() { MICROPROFILE_SCOPE(OpenGL_CacheManagement); shader_cache.SyncGuestHost(); { @@ -386,13 +386,12 @@ void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { } } -void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { - if (!gpu.IsAsync()) { - gpu_memory->Write(addr, value); - return; - } - auto paddr = gpu_memory->GetPointer(addr); - fence_manager.SignalSemaphore(paddr, value); +void RasterizerOpenGL::SignalFence(std::function&& func) { + fence_manager.SignalFence(std::move(func)); +} + +void RasterizerOpenGL::SyncOperation(std::function&& func) { + fence_manager.SyncOperation(std::move(func)); } void RasterizerOpenGL::SignalSyncPoint(u32 value) { @@ -400,16 +399,10 @@ void RasterizerOpenGL::SignalSyncPoint(u32 value) { } void RasterizerOpenGL::SignalReference() { - if (!gpu.IsAsync()) { - return; - } fence_manager.SignalOrdering(); } void RasterizerOpenGL::ReleaseFences() { - if (!gpu.IsAsync()) { - return; - } fence_manager.WaitPendingFences(); } @@ -426,6 +419,7 @@ void RasterizerOpenGL::WaitForIdle() { } void RasterizerOpenGL::FragmentBarrier() { + glTextureBarrier(); glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT); } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index d469075a13..fe0ba979ae 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -80,10 +80,11 @@ public: bool MustFlushRegion(VAddr addr, u64 size) override; void InvalidateRegion(VAddr addr, u64 size) override; void OnCPUWrite(VAddr addr, u64 size) override; - void SyncGuestHost() override; + void InvalidateGPUCache() override; void UnmapMemory(VAddr addr, u64 size) override; void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; - void SignalSemaphore(GPUVAddr addr, u32 value) override; + void SignalFence(std::function&& func) override; + void SyncOperation(std::function&& func) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; void ReleaseFences() override; diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index 301cbbabe8..0214b103a7 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp @@ -11,11 +11,8 @@ namespace Vulkan { -InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_) - : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {} - -InnerFence::InnerFence(Scheduler& scheduler_, u8* address_, u32 payload_, bool is_stubbed_) - : 
FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {} +InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_) + : FenceBase{is_stubbed_}, scheduler{scheduler_} {} InnerFence::~InnerFence() = default; @@ -48,12 +45,8 @@ FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::G : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_}, scheduler{scheduler_} {} -Fence FenceManager::CreateFence(u32 value, bool is_stubbed) { - return std::make_shared(scheduler, value, is_stubbed); -} - -Fence FenceManager::CreateFence(u8* addr, u32 value, bool is_stubbed) { - return std::make_shared(scheduler, addr, value, is_stubbed); +Fence FenceManager::CreateFence(bool is_stubbed) { + return std::make_shared(scheduler, is_stubbed); } void FenceManager::QueueFence(Fence& fence) { diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index ea9e88052d..7fe2afcd91 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h @@ -25,8 +25,7 @@ class Scheduler; class InnerFence : public VideoCommon::FenceBase { public: - explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_); - explicit InnerFence(Scheduler& scheduler_, u8* address_, u32 payload_, bool is_stubbed_); + explicit InnerFence(Scheduler& scheduler_, bool is_stubbed_); ~InnerFence(); void Queue(); @@ -50,8 +49,7 @@ public: QueryCache& query_cache, const Device& device, Scheduler& scheduler); protected: - Fence CreateFence(u32 value, bool is_stubbed) override; - Fence CreateFence(u8* addr, u32 value, bool is_stubbed) override; + Fence CreateFence(bool is_stubbed) override; void QueueFence(Fence& fence) override; bool IsFenceSignaled(Fence& fence) const override; void WaitFence(Fence& fence) override; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index d7b57e0f3d..a35e41199c 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -428,7 +428,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) { } } -void RasterizerVulkan::SyncGuestHost() { +void RasterizerVulkan::InvalidateGPUCache() { pipeline_cache.SyncGuestHost(); { std::scoped_lock lock{buffer_cache.mutex}; @@ -455,13 +455,12 @@ void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { } } -void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { - if (!gpu.IsAsync()) { - gpu_memory->Write(addr, value); - return; - } - auto paddr = gpu_memory->GetPointer(addr); - fence_manager.SignalSemaphore(paddr, value); +void RasterizerVulkan::SignalFence(std::function&& func) { + fence_manager.SignalFence(std::move(func)); +} + +void RasterizerVulkan::SyncOperation(std::function&& func) { + fence_manager.SyncOperation(std::move(func)); } void RasterizerVulkan::SignalSyncPoint(u32 value) { @@ -469,16 +468,10 @@ void RasterizerVulkan::SignalSyncPoint(u32 value) { } void RasterizerVulkan::SignalReference() { - if (!gpu.IsAsync()) { - return; - } fence_manager.SignalOrdering(); } void RasterizerVulkan::ReleaseFences() { - if (!gpu.IsAsync()) { - return; - } fence_manager.WaitPendingFences(); } diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index c836158b87..fb9e83e8f3 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h 
@@ -76,10 +76,11 @@ public: bool MustFlushRegion(VAddr addr, u64 size) override; void InvalidateRegion(VAddr addr, u64 size) override; void OnCPUWrite(VAddr addr, u64 size) override; - void SyncGuestHost() override; + void InvalidateGPUCache() override; void UnmapMemory(VAddr addr, u64 size) override; void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; - void SignalSemaphore(GPUVAddr addr, u32 value) override; + void SignalFence(std::function&& func) override; + void SyncOperation(std::function&& func) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; void ReleaseFences() override; From 5caa150e9a08d006a484c0b06a8f3fb603487d64 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 6 Feb 2022 01:56:38 +0100 Subject: [PATCH 34/68] OpenGL: Fix TickWork --- src/video_core/renderer_opengl/gl_rasterizer.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 6ebd6cff94..02bb177155 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -215,6 +215,9 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { if (!pipeline) { return; } + + gpu.TickWork(); + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; pipeline->SetEngine(maxwell3d, gpu_memory); pipeline->Configure(is_indexed); @@ -272,6 +275,7 @@ void RasterizerOpenGL::DispatchCompute() { if (!pipeline) { return; } + pipeline->SetEngine(kepler_compute, gpu_memory); pipeline->Configure(); const auto& qmd{kepler_compute->launch_description}; glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z); From 359f22b808f54de9f19e40a8f93604ee7e4bac80 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 6 Feb 2022 18:51:07 +0100 Subject: [PATCH 35/68] MemoryManager: Finish up the initial implementation. 
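Beyond wiring the big-page tables into every memory operation, this tracks a
per-big-page "continuous" flag: when all 4 KiB host pages backing a 64 KiB big
page are contiguous, reads and writes can take a plain pointer/memcpy fast path
instead of going through the Core::Memory block operations. A sketch of the
one-bit-per-page bookkeeping used below (packing matches continous_bits = 64
from the diff; the struct name is illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

struct ContinuityBits {
    // One bit per big page, packed 64 to a word.
    explicit ContinuityBits(std::size_t num_big_pages)
        : words((num_big_pages + 63) / 64, 0) {}

    bool Get(std::size_t page) const {
        return ((words[page / 64] >> (page % 64)) & 1ULL) != 0;
    }
    void Set(std::size_t page, bool value) {
        const std::uint64_t mask = 1ULL << (page % 64);
        words[page / 64] = (words[page / 64] & ~mask) | (value ? mask : 0);
    }

    std::vector<std::uint64_t> words;
};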
--- src/video_core/memory_manager.cpp | 177 ++++++++++++++++++++++-------- src/video_core/memory_manager.h | 11 +- 2 files changed, 138 insertions(+), 50 deletions(-) diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 836ece136d..e1a8b5391d 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -15,8 +15,6 @@ #include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" -#pragma optimize("", off) - namespace Tegra { std::atomic MemoryManager::unique_identifier_generator{}; @@ -42,7 +40,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_entries.resize(big_page_table_size / 32, 0); big_page_table_cpu.resize(big_page_table_size); - big_page_table_physical.resize(big_page_table_size); + big_page_continous.resize(big_page_table_size / continous_bits, 0); entries.resize(page_table_size / 32, 0); } @@ -80,6 +78,19 @@ void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) { } } +inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const { + const u64 entry_mask = big_page_continous[big_page_index / continous_bits]; + const size_t sub_index = big_page_index % continous_bits; + return ((entry_mask >> sub_index) & 0x1ULL) != 0; +} + +inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) { + const u64 continous_mask = big_page_continous[big_page_index / continous_bits]; + const size_t sub_index = big_page_index % continous_bits; + big_page_continous[big_page_index / continous_bits] = + (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0); +} + template GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size) { @@ -121,9 +132,19 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr const auto index = PageEntryIndex(current_gpu_addr); const u32 sub_value = static_cast(current_cpu_addr >> cpu_page_bits); big_page_table_cpu[index] = sub_value; - const PAddr phys_address = - device_memory.GetPhysicalAddr(memory.GetPointer(current_cpu_addr)); - big_page_table_physical[index] = static_cast(phys_address); + const bool is_continous = ([&] { + uintptr_t base_ptr{ + reinterpret_cast(memory.GetPointer(current_cpu_addr))}; + for (VAddr start_cpu = current_cpu_addr + page_size; + start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) { + base_ptr += page_size; + if (base_ptr != reinterpret_cast(memory.GetPointer(start_cpu))) { + return false; + } + } + return true; + })(); + SetBigPageContinous(index, is_continous); } remaining_size -= big_page_size; } @@ -248,12 +269,17 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { return memory.GetPointer(*address); } +#ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining. 
#pragma inline_recursion(on) +#endif template inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const { + static constexpr bool BOOL_BREAK_MAPPED = std::is_same_v; + static constexpr bool BOOL_BREAK_RESERVED = std::is_same_v; + static constexpr bool BOOL_BREAK_UNMAPPED = std::is_same_v; u64 used_page_size; u64 used_page_mask; u64 used_page_bits; @@ -276,11 +302,31 @@ inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t si std::min(static_cast(used_page_size) - page_offset, remaining_size)}; auto entry = GetEntry(current_address); if (entry == EntryType::Mapped) [[likely]] { - func_mapped(page_index, page_offset, copy_amount); + if constexpr (BOOL_BREAK_MAPPED) { + if (func_mapped(page_index, page_offset, copy_amount)) { + return; + } + } else { + func_mapped(page_index, page_offset, copy_amount); + } + } else if (entry == EntryType::Reserved) { - func_reserved(page_index, page_offset, copy_amount); + if constexpr (BOOL_BREAK_RESERVED) { + if (func_reserved(page_index, page_offset, copy_amount)) { + return; + } + } else { + func_reserved(page_index, page_offset, copy_amount); + } + } else [[unlikely]] { - func_unmapped(page_index, page_offset, copy_amount); + if constexpr (BOOL_BREAK_UNMAPPED) { + if (func_unmapped(page_index, page_offset, copy_amount)) { + return; + } + } else { + func_unmapped(page_index, page_offset, copy_amount); + } } page_index++; page_offset = 0; @@ -303,7 +349,8 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, if constexpr (is_safe) { rasterizer->FlushRegion(cpu_addr_base, copy_amount); } - memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount); + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(dest_buffer, physical, copy_amount); dest_buffer = static_cast(dest_buffer) + copy_amount; }; auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { @@ -312,9 +359,12 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, if constexpr (is_safe) { rasterizer->FlushRegion(cpu_addr_base, copy_amount); } - memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount); - // u8* physical = device_memory.GetPointer(big_page_table_physical[page_index] + offset); - // std::memcpy(dest_buffer, physical, copy_amount); + if (!IsBigPageContinous(page_index)) { + memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount); + } else { + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(dest_buffer, physical, copy_amount); + } dest_buffer = static_cast(dest_buffer) + copy_amount; }; auto read_short_pages = [&](std::size_t page_index, std::size_t offset, @@ -347,7 +397,8 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe if constexpr (is_safe) { rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); } - memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount); + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(physical, src_buffer, copy_amount); src_buffer = static_cast(src_buffer) + copy_amount; }; auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { @@ -356,10 +407,12 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe if constexpr (is_safe) { rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); } - memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount); - /*u8* physical = - 
device_memory.GetPointer(big_page_table_physical[page_index] << cpu_page_bits) + offset; - std::memcpy(physical, src_buffer, copy_amount);*/ + if (!IsBigPageContinous(page_index)) { + memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount); + } else { + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(physical, src_buffer, copy_amount); + } src_buffer = static_cast(src_buffer) + copy_amount; }; auto write_short_pages = [&](std::size_t page_index, std::size_t offset, @@ -413,48 +466,80 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std } bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { - const auto cpu_addr{GpuToCpuAddress(gpu_addr)}; - if (!cpu_addr) { + if (GetEntry(gpu_addr) == EntryType::Mapped) [[likely]] { + size_t page_index = gpu_addr >> big_page_bits; + if (IsBigPageContinous(page_index)) [[likely]] { + const std::size_t page{(page_index & big_page_mask) + size}; + return page <= big_page_size; + } + const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; + return page <= Core::Memory::YUZU_PAGESIZE; + } + if (GetEntry(gpu_addr) != EntryType::Mapped) { return false; } - const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; + const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; return page <= Core::Memory::YUZU_PAGESIZE; } bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { - size_t page_index{gpu_addr >> big_page_bits}; - const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; std::optional old_page_addr{}; - while (page_index != page_last) { - const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (!page_addr || *page_addr == 0) { - return false; + bool result{true}; + auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + std::size_t copy_amount) { + result = false; + return true; + }; + auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + result = false; + return true; } - if (old_page_addr) { - if (*old_page_addr + page_size != *page_addr) { - return false; - } + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + result = false; + return true; } - old_page_addr = page_addr; - ++page_index; - } - return true; + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation(base, copy_amount, short_check, fail, fail); + return !result; + }; + MemoryOperation(gpu_addr, size, big_check, fail, check_short_pages); + return result; } bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const { - size_t page_index{gpu_addr >> page_bits}; - const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; - while (page_index < page_last) { - if (GetEntry(page_index << page_bits) == EntryType::Free) { - return false; - } - ++page_index; - } - return true; + std::optional 
old_page_addr{}; + bool result{true}; + auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { + result = false; + return true; + }; + auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { return false; }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation(base, copy_amount, pass, pass, fail); + return !result; + }; + MemoryOperation(gpu_addr, size, pass, fail, check_short_pages); + return result; } -#pragma inline_recursion(on) - std::vector> MemoryManager::GetSubmappedRange( GPUVAddr gpu_addr, std::size_t size) const { std::vector> result{}; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 9c388a06e8..8f8877a923 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -105,9 +105,6 @@ public: void FlushRegion(GPUVAddr gpu_addr, size_t size) const; private: - [[nodiscard]] std::optional FindFreeRange(std::size_t size, std::size_t align, - bool start_32bit_address = false) const; - template inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const; @@ -127,6 +124,9 @@ private: } } + inline bool IsBigPageContinous(size_t big_page_index) const; + inline void SetBigPageContinous(size_t big_page_index, bool value); + Core::System& system; Core::Memory::Memory& memory; Core::DeviceMemory& device_memory; @@ -169,7 +169,10 @@ private: Common::MultiLevelPageTable page_table; Common::VirtualBuffer big_page_table_cpu; - Common::VirtualBuffer big_page_table_physical; + + std::vector big_page_continous; + + constexpr static size_t continous_bits = 64; const size_t unique_identifier; From a283eda320ea1a4df5e6370528eb1b2ec64e466c Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 6 Mar 2022 11:03:48 +0100 Subject: [PATCH 36/68] Shader Decompiler: Fix dangerous behavior of invalid iterator insertion. --- .../frontend/maxwell/structured_control_flow.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp index 578bc8c1b7..ce42475d46 100644 --- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp +++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp @@ -964,9 +964,9 @@ private: demote_endif_node.type = Type::EndIf; demote_endif_node.data.end_if.merge = return_block_it->data.block; - asl.insert(return_block_it, demote_endif_node); - asl.insert(return_block_it, demote_node); - asl.insert(return_block_it, demote_if_node); + const auto next_it_1 = asl.insert(return_block_it, demote_endif_node); + const auto next_it_2 = asl.insert(next_it_1, demote_node); + asl.insert(next_it_2, demote_if_node); } ObjectPool& stmt_pool; From ba34cf0a691bc73ae6b2d8db6019b1f10d22dde5 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 6 Mar 2022 19:54:40 +0100 Subject: [PATCH 37/68] Shader Decompiler: Check for shift when deriving composite samplers. 
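Games compose a bindless texture handle from two constant-buffer reads, shifting
one or both components before OR-ing them together; the tracker previously only
recognized the bare OR. This records the shift applied to each side (shift_left /
secondary_shift_left) so ReadTextureType and the pipeline caches can rebuild the
full handle. Roughly, with illustrative values:

#include <cstdint>

// Handle layout assumed for the example: texture index in the low bits,
// sampler index shifted into the high bits.
constexpr std::uint32_t ComposeHandle(std::uint32_t tex_id, std::uint32_t sampler_id,
                                      unsigned sampler_shift) {
    return tex_id | (sampler_id << sampler_shift);
}
// The pass now evaluates (cbuf[i][o] << shift_left) | (cbuf[i2][o2] << secondary_shift_left)
// instead of assuming both shifts are zero.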
--- src/shader_recompiler/ir_opt/texture_pass.cpp | 34 +++++++++++++++++-- src/shader_recompiler/shader_info.h | 4 +++ .../renderer_opengl/gl_compute_pipeline.cpp | 5 +-- .../renderer_opengl/gl_graphics_pipeline.cpp | 5 +-- .../renderer_vulkan/vk_compute_pipeline.cpp | 4 +-- .../renderer_vulkan/vk_graphics_pipeline.cpp | 5 +-- 6 files changed, 46 insertions(+), 11 deletions(-) diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp index 597112ba47..4bad811c22 100644 --- a/src/shader_recompiler/ir_opt/texture_pass.cpp +++ b/src/shader_recompiler/ir_opt/texture_pass.cpp @@ -19,8 +19,10 @@ namespace { struct ConstBufferAddr { u32 index; u32 offset; + u32 shift_left; u32 secondary_index; u32 secondary_offset; + u32 secondary_shift_left; IR::U32 dynamic_offset; u32 count; bool has_secondary; @@ -182,6 +184,7 @@ std::optional TryGetConstBuffer(const IR::Inst* inst) { switch (inst->GetOpcode()) { default: return std::nullopt; + case IR::Opcode::BitwiseXor32: case IR::Opcode::BitwiseOr32: { std::optional lhs{Track(inst->Arg(0))}; std::optional rhs{Track(inst->Arg(1))}; @@ -194,19 +197,33 @@ std::optional TryGetConstBuffer(const IR::Inst* inst) { if (lhs->count > 1 || rhs->count > 1) { return std::nullopt; } - if (lhs->index > rhs->index || lhs->offset > rhs->offset) { + if (lhs->shift_left > 0 || lhs->index > rhs->index || lhs->offset > rhs->offset) { std::swap(lhs, rhs); } return ConstBufferAddr{ .index = lhs->index, .offset = lhs->offset, + .shift_left = lhs->shift_left, .secondary_index = rhs->index, .secondary_offset = rhs->offset, + .secondary_shift_left = rhs->shift_left, .dynamic_offset = {}, .count = 1, .has_secondary = true, }; } + case IR::Opcode::ShiftLeftLogical32: { + const IR::Value shift{inst->Arg(1)}; + if (!shift.IsImmediate()) { + return std::nullopt; + } + std::optional lhs{Track(inst->Arg(0))}; + if (lhs) { + lhs->shift_left = shift.U32(); + } + return lhs; + break; + } case IR::Opcode::GetCbufU32x2: case IR::Opcode::GetCbufU32: break; @@ -222,8 +239,10 @@ std::optional TryGetConstBuffer(const IR::Inst* inst) { return ConstBufferAddr{ .index = index.U32(), .offset = offset.U32(), + .shift_left = 0, .secondary_index = 0, .secondary_offset = 0, + .secondary_shift_left = 0, .dynamic_offset = {}, .count = 1, .has_secondary = false, @@ -247,8 +266,10 @@ std::optional TryGetConstBuffer(const IR::Inst* inst) { return ConstBufferAddr{ .index = index.U32(), .offset = base_offset, + .shift_left = 0, .secondary_index = 0, .secondary_offset = 0, + .secondary_shift_left = 0, .dynamic_offset = dynamic_offset, .count = 8, .has_secondary = false, @@ -267,8 +288,10 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { addr = ConstBufferAddr{ .index = env.TextureBoundBuffer(), .offset = inst.Arg(0).U32(), + .shift_left = 0, .secondary_index = 0, .secondary_offset = 0, + .secondary_shift_left = 0, .dynamic_offset = {}, .count = 1, .has_secondary = false, @@ -284,8 +307,9 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) { const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index}; const u32 secondary_offset{cbuf.has_secondary ? 
cbuf.secondary_offset : cbuf.offset}; - const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)}; - const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)}; + const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left}; + const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset) + << cbuf.secondary_shift_left}; return env.ReadTextureType(lhs_raw | rhs_raw); } @@ -487,8 +511,10 @@ void TexturePass(Environment& env, IR::Program& program) { .has_secondary = cbuf.has_secondary, .cbuf_index = cbuf.index, .cbuf_offset = cbuf.offset, + .shift_left = cbuf.shift_left, .secondary_cbuf_index = cbuf.secondary_index, .secondary_cbuf_offset = cbuf.secondary_offset, + .secondary_shift_left = cbuf.secondary_shift_left, .count = cbuf.count, .size_shift = DESCRIPTOR_SIZE_SHIFT, }); @@ -499,8 +525,10 @@ void TexturePass(Environment& env, IR::Program& program) { .has_secondary = cbuf.has_secondary, .cbuf_index = cbuf.index, .cbuf_offset = cbuf.offset, + .shift_left = cbuf.shift_left, .secondary_cbuf_index = cbuf.secondary_index, .secondary_cbuf_offset = cbuf.secondary_offset, + .secondary_shift_left = cbuf.secondary_shift_left, .count = cbuf.count, .size_shift = DESCRIPTOR_SIZE_SHIFT, }); diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h index f5690805c4..cc596da4f9 100644 --- a/src/shader_recompiler/shader_info.h +++ b/src/shader_recompiler/shader_info.h @@ -61,8 +61,10 @@ struct TextureBufferDescriptor { bool has_secondary; u32 cbuf_index; u32 cbuf_offset; + u32 shift_left; u32 secondary_cbuf_index; u32 secondary_cbuf_offset; + u32 secondary_shift_left; u32 count; u32 size_shift; }; @@ -85,8 +87,10 @@ struct TextureDescriptor { bool has_secondary; u32 cbuf_index; u32 cbuf_offset; + u32 shift_left; u32 secondary_cbuf_index; u32 secondary_cbuf_offset; + u32 secondary_shift_left; u32 count; u32 size_shift; }; diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp index 26b51f442b..26d0660048 100644 --- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp @@ -100,8 +100,9 @@ void ComputePipeline::Configure() { const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + secondary_offset}; - const u32 lhs_raw{gpu_memory->Read(addr)}; - const u32 rhs_raw{gpu_memory->Read(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory->Read(separate_addr) + << desc.secondary_shift_left}; return TexturePair(lhs_raw | rhs_raw, via_header_index); } } diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp index c877d77926..41493a7da2 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp @@ -312,8 +312,9 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + second_offset}; - const u32 lhs_raw{gpu_memory->Read(addr)}; - const u32 rhs_raw{gpu_memory->Read(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory->Read(separate_addr) + << desc.secondary_shift_left}; const u32 raw{lhs_raw | rhs_raw}; return TexturePair(raw, 
via_header_index); } diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp index 6447210e27..7906e11a88 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp @@ -126,8 +126,8 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute, const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + secondary_offset}; - const u32 lhs_raw{gpu_memory.Read(addr)}; - const u32 rhs_raw{gpu_memory.Read(separate_addr)}; + const u32 lhs_raw{gpu_memory.Read(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory.Read(separate_addr) << desc.secondary_shift_left}; return TexturePair(lhs_raw | rhs_raw, via_header_index); } } diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 1e993185f9..f47786f48f 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -314,8 +314,9 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + second_offset}; - const u32 lhs_raw{gpu_memory->Read(addr)}; - const u32 rhs_raw{gpu_memory->Read(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory->Read(separate_addr) + << desc.secondary_shift_left}; const u32 raw{lhs_raw | rhs_raw}; return TexturePair(raw, via_header_index); } From 3d02143476cbc92450587c4e56eafe1cb76cd9ad Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 26 Mar 2022 13:40:42 +0100 Subject: [PATCH 38/68] Shader Decompiler: implement better tracking for Vulkan samplers. 
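Sampler handles are not always produced by a plain cbuf read: some games build them from two constant buffer words combined with shifts and masks. Teach the tracking pass to see through BitwiseAnd32 by resolving one operand to a compile-time constant (a GetCbufU32 with immediate index/offset on const buffer 1) and recording the mask's trailing-zero count as the shift applied to the other operand. Track and TryGetConstBuffer now take the Environment so those constants can be read during the pass, and BitwiseXor32 tracking is dropped.

A minimal standalone sketch of the mask-to-shift deduction (illustration only: the mask value and the handle packing below are assumptions, not yuzu's actual handle format):

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint32_t mask = 0xFFF00000u;               // constant AND operand
        const std::uint32_t shift = std::countr_zero(mask);   // 20
        const std::uint32_t handle = (42u << shift) | 0x123u; // hypothetical packed handle
        // The pass records 'shift' on the tracked operand, so the texture index
        // can be recovered from the folded constant at pipeline build time.
        std::printf("shift=%u index=%u\n", shift, (handle & mask) >> shift);
        return 0;
    }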
--- src/shader_recompiler/ir_opt/texture_pass.cpp | 68 ++++++++++++++++--- 1 file changed, 59 insertions(+), 9 deletions(-) diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp index 4bad811c22..0726d4d218 100644 --- a/src/shader_recompiler/ir_opt/texture_pass.cpp +++ b/src/shader_recompiler/ir_opt/texture_pass.cpp @@ -174,20 +174,41 @@ bool IsTextureInstruction(const IR::Inst& inst) { return IndexedInstruction(inst) != IR::Opcode::Void; } -std::optional TryGetConstBuffer(const IR::Inst* inst); +std::optional TryGetConstBuffer(const IR::Inst* inst, Environment& env); -std::optional Track(const IR::Value& value) { - return IR::BreadthFirstSearch(value, TryGetConstBuffer); +std::optional Track(const IR::Value& value, Environment& env) { + return IR::BreadthFirstSearch( + value, [&env](const IR::Inst* inst) { return TryGetConstBuffer(inst, env); }); } -std::optional TryGetConstBuffer(const IR::Inst* inst) { +std::optional TryGetConstant(IR::Value& value, Environment& env) { + const IR::Inst* inst = value.InstRecursive(); + if (inst->GetOpcode() != IR::Opcode::GetCbufU32) { + return std::nullopt; + } + const IR::Value index{inst->Arg(0)}; + const IR::Value offset{inst->Arg(1)}; + if (!index.IsImmediate()) { + return std::nullopt; + } + if (!offset.IsImmediate()) { + return std::nullopt; + } + const auto index_number = index.U32(); + if (index_number != 1) { + return std::nullopt; + } + const auto offset_number = offset.U32(); + return env.ReadCbufValue(index_number, offset_number); +} + +std::optional TryGetConstBuffer(const IR::Inst* inst, Environment& env) { switch (inst->GetOpcode()) { default: return std::nullopt; - case IR::Opcode::BitwiseXor32: case IR::Opcode::BitwiseOr32: { - std::optional lhs{Track(inst->Arg(0))}; - std::optional rhs{Track(inst->Arg(1))}; + std::optional lhs{Track(inst->Arg(0), env)}; + std::optional rhs{Track(inst->Arg(1), env)}; if (!lhs || !rhs) { return std::nullopt; } @@ -217,13 +238,42 @@ std::optional TryGetConstBuffer(const IR::Inst* inst) { if (!shift.IsImmediate()) { return std::nullopt; } - std::optional lhs{Track(inst->Arg(0))}; + std::optional lhs{Track(inst->Arg(0), env)}; if (lhs) { lhs->shift_left = shift.U32(); } return lhs; break; } + case IR::Opcode::BitwiseAnd32: { + IR::Value op1{inst->Arg(0)}; + IR::Value op2{inst->Arg(1)}; + if (op1.IsImmediate()) { + std::swap(op1, op2); + } + if (!op2.IsImmediate() && !op1.IsImmediate()) { + do { + auto try_index = TryGetConstant(op1, env); + if (try_index) { + op1 = op2; + op2 = IR::Value{*try_index}; + break; + } + auto try_index_2 = TryGetConstant(op2, env); + if (try_index_2) { + op2 = IR::Value{*try_index_2}; + break; + } + return std::nullopt; + } while (false); + } + std::optional lhs{Track(op1, env)}; + if (lhs) { + lhs->shift_left = std::countr_zero(op2.U32()); + } + return lhs; + break; + } case IR::Opcode::GetCbufU32x2: case IR::Opcode::GetCbufU32: break; @@ -279,7 +329,7 @@ std::optional TryGetConstBuffer(const IR::Inst* inst) { TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { ConstBufferAddr addr; if (IsBindless(inst)) { - const std::optional track_addr{Track(inst.Arg(0))}; + const std::optional track_addr{Track(inst.Arg(0), env)}; if (!track_addr) { throw NotImplementedException("Failed to track bindless texture constant buffer"); } From 5a568b1655f0d721891083da19e2da2614796389 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 19 Feb 2022 14:18:02 +0100 Subject: [PATCH 39/68] MemoryManager: Fix errors popping 
out. --- src/core/memory.cpp | 9 +++++++++ src/core/memory.h | 1 + src/video_core/memory_manager.cpp | 12 ++++++++---- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 34ad7cadd3..2ac792566e 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -551,6 +551,11 @@ struct Memory::Impl { []() {}); } + [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const { + return GetPointerImpl( + vaddr, []() {}, []() {}); + } + /** * Reads a particular data type out of memory at the given virtual address. * @@ -686,6 +691,10 @@ u8* Memory::GetPointer(VAddr vaddr) { return impl->GetPointer(vaddr); } +u8* Memory::GetPointerSilent(VAddr vaddr) { + return impl->GetPointerSilent(vaddr); +} + const u8* Memory::GetPointer(VAddr vaddr) const { return impl->GetPointer(vaddr); } diff --git a/src/core/memory.h b/src/core/memory.h index a11ff8766b..81eac448b4 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -114,6 +114,7 @@ public: * If the address is not valid, nullptr will be returned. */ u8* GetPointer(VAddr vaddr); + u8* GetPointerSilent(VAddr vaddr); template T* GetPointer(VAddr vaddr) { diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index e1a8b5391d..0f97db2720 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -134,11 +134,15 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr big_page_table_cpu[index] = sub_value; const bool is_continous = ([&] { uintptr_t base_ptr{ - reinterpret_cast(memory.GetPointer(current_cpu_addr))}; + reinterpret_cast(memory.GetPointerSilent(current_cpu_addr))}; + if (base_ptr == 0) { + return false; + } for (VAddr start_cpu = current_cpu_addr + page_size; start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) { base_ptr += page_size; - if (base_ptr != reinterpret_cast(memory.GetPointer(start_cpu))) { + auto next_ptr = reinterpret_cast(memory.GetPointerSilent(start_cpu)); + if (next_ptr == 0 || base_ptr != next_ptr) { return false; } } @@ -359,7 +363,7 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, if constexpr (is_safe) { rasterizer->FlushRegion(cpu_addr_base, copy_amount); } - if (!IsBigPageContinous(page_index)) { + if (!IsBigPageContinous(page_index)) [[unlikely]] { memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount); } else { u8* physical = memory.GetPointer(cpu_addr_base); @@ -407,7 +411,7 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe if constexpr (is_safe) { rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); } - if (!IsBigPageContinous(page_index)) { + if (!IsBigPageContinous(page_index)) [[unlikely]] { memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount); } else { u8* physical = memory.GetPointer(cpu_addr_base); From 7cfa28a6664fa79b5b12341a93e995d74cb0deef Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 2 Apr 2022 22:38:58 +0200 Subject: [PATCH 40/68] Memory Manager: ensure safety of GPU to CPU address. 
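Games can hand the GPU a virtual address that lies beyond the address space entirely; translating it must fail cleanly instead of indexing past the end of the page tables.

A reduced sketch of the added guard (standalone illustration; the 40-bit space size and the trivial translation are placeholders for the real MemoryManager state):

    #include <cstdint>
    #include <cstdio>
    #include <optional>

    using GPUVAddr = std::uint64_t;
    using VAddr = std::uint64_t;

    constexpr GPUVAddr address_space_size = 1ULL << 40; // assumed width for the sketch

    std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr) {
        if (gpu_addr >= address_space_size) { // reject before touching any page table
            return std::nullopt;
        }
        return VAddr{gpu_addr}; // stand-in for the real page-table walk
    }

    int main() {
        std::printf("in range: %d\n", GpuToCpuAddress(0x1000).has_value() ? 1 : 0);
        std::printf("out of range: %d\n", GpuToCpuAddress(~0ULL).has_value() ? 1 : 0);
        return 0;
    }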
--- src/video_core/memory_manager.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 0f97db2720..4e52ce0fd2 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -193,6 +193,9 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { } std::optional MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { + if (gpu_addr >= address_space_size) [[unlikely]] { + return std::nullopt; + } if (GetEntry(gpu_addr) != EntryType::Mapped) [[unlikely]] { if (GetEntry(gpu_addr) != EntryType::Mapped) { return std::nullopt; From b2099fbdcc3cb213aec8f836033fb02c4b6bbd09 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 26 Mar 2022 13:43:00 +0100 Subject: [PATCH 41/68] Maxwell3D: Add small_index_2 --- src/video_core/engines/maxwell_3d.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 950c70dcd9..add1ccebe3 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -219,6 +219,8 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume regs.index_array.count = regs.small_index_2.count; regs.index_array.first = regs.small_index_2.first; dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; + // a macro calls this one over and over, should it increase instancing? + // Used by Hades and likely other Vulkan games. return DrawArrays(); case MAXWELL3D_REG_INDEX(topology_override): use_topology_override = true; From f5fd6b5c8674fcf64a3e70809ee0a34d3a95beb6 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 14 Aug 2022 02:36:36 -0700 Subject: [PATCH 42/68] DMA & InlineToMemory Engines Rework. --- src/common/algorithm.h | 8 + src/video_core/buffer_cache/buffer_cache.h | 4 +- src/video_core/engines/engine_upload.cpp | 46 +++- src/video_core/engines/engine_upload.h | 6 +- src/video_core/engines/kepler_compute.cpp | 13 +- src/video_core/engines/kepler_memory.cpp | 13 +- src/video_core/engines/maxwell_3d.cpp | 5 +- src/video_core/engines/maxwell_dma.cpp | 91 ++++--- src/video_core/engines/maxwell_dma.h | 6 + src/video_core/host1x/vic.cpp | 5 +- src/video_core/memory_manager.cpp | 91 +++++++ src/video_core/memory_manager.h | 6 + src/video_core/rasterizer_interface.h | 2 +- .../renderer_opengl/gl_rasterizer.cpp | 2 +- .../renderer_opengl/gl_rasterizer.h | 2 +- .../renderer_vulkan/vk_compute_pass.cpp | 2 - .../renderer_vulkan/vk_rasterizer.cpp | 2 +- .../renderer_vulkan/vk_rasterizer.h | 2 +- src/video_core/texture_cache/util.cpp | 1 - src/video_core/textures/decoders.cpp | 225 ++++++------------ src/video_core/textures/decoders.h | 33 +-- 21 files changed, 323 insertions(+), 242 deletions(-) diff --git a/src/common/algorithm.h b/src/common/algorithm.h index 9ddfd637b6..055dca142d 100644 --- a/src/common/algorithm.h +++ b/src/common/algorithm.h @@ -24,4 +24,12 @@ template > return first != last && !comp(value, *first) ? first : last; } +template +T FoldRight(T initial_value, Func&& func, Args&&... 
args) { + T value{initial_value}; + const auto high_func = [&value, &func](T x) { value = func(value, x); }; + (std::invoke(high_func, std::forward(args)), ...); + return value; +} + } // namespace Common diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index e55cac0d65..359c11d6f2 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -126,7 +126,7 @@ public: void DownloadMemory(VAddr cpu_addr, u64 size); - bool InlineMemory(VAddr dest_address, size_t copy_size, std::span inlined_buffer); + bool InlineMemory(VAddr dest_address, size_t copy_size, std::span inlined_buffer); void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); @@ -1685,7 +1685,7 @@ void BufferCache
<P>
::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes, template bool BufferCache
<P>
::InlineMemory(VAddr dest_address, size_t copy_size, - std::span inlined_buffer) { + std::span inlined_buffer) { const bool is_dirty = IsRegionRegistered(dest_address, copy_size); if (!is_dirty) { return false; diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp index 6ff5b1eca7..a348192346 100644 --- a/src/video_core/engines/engine_upload.cpp +++ b/src/video_core/engines/engine_upload.cpp @@ -3,6 +3,7 @@ #include +#include "common/algorithm.h" #include "common/assert.h" #include "video_core/engines/engine_upload.h" #include "video_core/memory_manager.h" @@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) { if (!is_last_call) { return; } + ProcessData(inner_buffer); +} + +void State::ProcessData(const u32* data, size_t num_data) { + std::span read_buffer(reinterpret_cast(data), num_data * sizeof(u32)); + ProcessData(read_buffer); +} + +void State::ProcessData(std::span read_buffer) { const GPUVAddr address{regs.dest.Address()}; if (is_linear) { - rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer); + if (regs.line_count == 1) { + rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer); + } else { + for (u32 line = 0; line < regs.line_count; ++line) { + const GPUVAddr dest_line = address + static_cast(line) * regs.dest.pitch; + memory_manager.WriteBlockUnsafe( + dest_line, read_buffer.data() + static_cast(line) * regs.line_length_in, + regs.line_length_in); + } + memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count); + } } else { - UNIMPLEMENTED_IF(regs.dest.z != 0); - UNIMPLEMENTED_IF(regs.dest.depth != 1); - UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0); - UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0); + u32 width = regs.dest.width; + u32 x_elements = regs.line_length_in; + u32 x_offset = regs.dest.x; + const u32 bpp_shift = Common::FoldRight( + 4U, [](u32 x, u32 y) { return std::min(x, static_cast(std::countr_zero(y))); }, + width, x_elements, x_offset, static_cast(address)); + width >>= bpp_shift; + x_elements >>= bpp_shift; + x_offset >>= bpp_shift; + const u32 bytes_per_pixel = 1U << bpp_shift; const std::size_t dst_size = Tegra::Texture::CalculateSize( - true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0); + true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth, + regs.dest.BlockHeight(), regs.dest.BlockDepth()); tmp_buffer.resize(dst_size); memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size); - Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y, - regs.dest.BlockHeight(), copy_size, inner_buffer.data(), - tmp_buffer.data()); + Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width, + regs.dest.height, regs.dest.depth, x_offset, regs.dest.y, + x_elements, regs.line_count, regs.dest.BlockHeight(), + regs.dest.BlockDepth(), regs.line_length_in); memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size); } } diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h index 94ff3314a7..f08f6e36aa 100644 --- a/src/video_core/engines/engine_upload.h +++ b/src/video_core/engines/engine_upload.h @@ -3,6 +3,7 @@ #pragma once +#include #include #include "common/bit_field.h" #include "common/common_types.h" @@ -33,7 +34,7 @@ struct Registers { u32 width; u32 height; u32 depth; - u32 z; + u32 layer; u32 x; u32 y; @@ -62,11 +63,14 @@ public: void ProcessExec(bool is_linear_); void ProcessData(u32 data, bool is_last_call); 
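+ /// Burst variant: consumes 'num_data' upload words in one call instead of
+ /// one ProcessData(u32, bool) call per word.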
+ void ProcessData(const u32* data, size_t num_data); /// Binds a rasterizer to this engine. void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); private: + void ProcessData(std::span read_buffer); + u32 write_offset = 0; u32 copy_size = 0; std::vector inner_buffer; diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 5db254d948..7c50bdbe09 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp @@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal } case KEPLER_COMPUTE_REG_INDEX(data_upload): { upload_state.ProcessData(method_argument, is_last_call); - if (is_last_call) { - } break; } case KEPLER_COMPUTE_REG_INDEX(launch): @@ -50,8 +48,15 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) { - for (std::size_t i = 0; i < amount; i++) { - CallMethod(method, base_start[i], methods_pending - static_cast(i) <= 1); + switch (method) { + case KEPLER_COMPUTE_REG_INDEX(data_upload): + upload_state.ProcessData(base_start, static_cast(amount)); + return; + default: + for (std::size_t i = 0; i < amount; i++) { + CallMethod(method, base_start[i], methods_pending - static_cast(i) <= 1); + } + break; } } diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index e2b0295427..a3fbab1e52 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp @@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call } case KEPLERMEMORY_REG_INDEX(data): { upload_state.ProcessData(method_argument, is_last_call); - if (is_last_call) { - } break; } } @@ -42,8 +40,15 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) { - for (std::size_t i = 0; i < amount; i++) { - CallMethod(method, base_start[i], methods_pending - static_cast(i) <= 1); + switch (method) { + case KEPLERMEMORY_REG_INDEX(data): + upload_state.ProcessData(base_start, static_cast(amount)); + return; + default: + for (std::size_t i = 0; i < amount; i++) { + CallMethod(method, base_start[i], methods_pending - static_cast(i) <= 1); + } + break; } } diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index add1ccebe3..632052c53c 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -239,8 +239,6 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume return upload_state.ProcessExec(regs.exec_upload.linear != 0); case MAXWELL3D_REG_INDEX(data_upload): upload_state.ProcessData(argument, is_last_call); - if (is_last_call) { - } return; case MAXWELL3D_REG_INDEX(fragment_barrier): return rasterizer->FragmentBarrier(); @@ -316,6 +314,9 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15: ProcessCBMultiData(base_start, amount); break; + case MAXWELL3D_REG_INDEX(data_upload): + upload_state.ProcessData(base_start, static_cast(amount)); + return; default: for (std::size_t i = 0; i < amount; i++) { CallMethod(method, base_start[i], methods_pending - static_cast(i) <= 1); diff --git a/src/video_core/engines/maxwell_dma.cpp 
b/src/video_core/engines/maxwell_dma.cpp index 0efe582824..a12a95ce27 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include "common/algorithm.h" #include "common/assert.h" #include "common/logging/log.h" #include "common/microprofile.h" @@ -54,8 +55,6 @@ void MaxwellDMA::Launch() { const LaunchDMA& launch = regs.launch_dma; ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); - ASSERT(regs.dst_params.origin.x == 0); - ASSERT(regs.dst_params.origin.y == 0); const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; @@ -121,12 +120,13 @@ void MaxwellDMA::CopyPitchToPitch() { void MaxwellDMA::CopyBlockLinearToPitch() { UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0); - UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0); UNIMPLEMENTED_IF(regs.src_params.layer != 0); + const bool is_remapping = regs.launch_dma.remap_enable != 0; + // Optimized path for micro copies. const size_t dst_size = static_cast(regs.pitch_out) * regs.line_count; - if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X && + if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X && regs.src_params.height > GOB_SIZE_Y) { FastCopyBlockLinearToPitch(); return; @@ -134,10 +134,27 @@ void MaxwellDMA::CopyBlockLinearToPitch() { // Deswizzle the input and copy it over. UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0); - const u32 bytes_per_pixel = - regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1; const Parameters& src_params = regs.src_params; - const u32 width = src_params.width; + + const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1; + const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1; + + const u32 base_bpp = !is_remapping ? 
1U : num_remap_components * remap_components_size; + + u32 width = src_params.width; + u32 x_elements = regs.line_length_in; + u32 x_offset = src_params.origin.x; + u32 bpp_shift = 0U; + if (!is_remapping) { + bpp_shift = Common::FoldRight( + 4U, [](u32 x, u32 y) { return std::min(x, static_cast(std::countr_zero(y))); }, + width, x_elements, x_offset, static_cast(regs.offset_in)); + width >>= bpp_shift; + x_elements >>= bpp_shift; + x_offset >>= bpp_shift; + } + + const u32 bytes_per_pixel = base_bpp << bpp_shift; const u32 height = src_params.height; const u32 depth = src_params.depth; const u32 block_height = src_params.block_size.height; @@ -155,30 +172,46 @@ void MaxwellDMA::CopyBlockLinearToPitch() { memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); - UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel, - block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(), - read_buffer.data()); + UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset, + src_params.origin.y, x_elements, regs.line_count, block_height, block_depth, + regs.pitch_out); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } void MaxwellDMA::CopyPitchToBlockLinear() { UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one"); + UNIMPLEMENTED_IF(regs.dst_params.layer != 0); UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0); + const bool is_remapping = regs.launch_dma.remap_enable != 0; + const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1; + const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1; + const auto& dst_params = regs.dst_params; - const u32 bytes_per_pixel = - regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1; - const u32 width = dst_params.width; + + const u32 base_bpp = !is_remapping ? 
1U : num_remap_components * remap_components_size; + + u32 width = dst_params.width; + u32 x_elements = regs.line_length_in; + u32 x_offset = dst_params.origin.x; + u32 bpp_shift = 0U; + if (!is_remapping) { + bpp_shift = Common::FoldRight( + 4U, [](u32 x, u32 y) { return std::min(x, static_cast(std::countr_zero(y))); }, + width, x_elements, x_offset, static_cast(regs.offset_out)); + width >>= bpp_shift; + x_elements >>= bpp_shift; + x_offset >>= bpp_shift; + } + + const u32 bytes_per_pixel = base_bpp << bpp_shift; const u32 height = dst_params.height; const u32 depth = dst_params.depth; const u32 block_height = dst_params.block_size.height; const u32 block_depth = dst_params.block_size.depth; const size_t dst_size = CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth); - const size_t dst_layer_size = - CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth); - const size_t src_size = static_cast(regs.pitch_in) * regs.line_count; if (read_buffer.size() < src_size) { @@ -188,32 +221,23 @@ void MaxwellDMA::CopyPitchToBlockLinear() { write_buffer.resize(dst_size); } + memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); if (Settings::IsGPULevelExtreme()) { - memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); } else { - memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size); memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); } // If the input is linear and the output is tiled, swizzle the input and copy it over. - if (regs.dst_params.block_size.depth > 0) { - ASSERT(dst_params.layer == 0); - SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height, - bytes_per_pixel, block_height, block_depth, dst_params.origin.x, - dst_params.origin.y, write_buffer.data(), read_buffer.data()); - } else { - SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel, - write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(), - block_height, dst_params.origin.x, dst_params.origin.y); - } + SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset, + dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth, + regs.pitch_in); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } void MaxwellDMA::FastCopyBlockLinearToPitch() { - const u32 bytes_per_pixel = - regs.launch_dma.remap_enable ? 
regs.pitch_out / regs.line_length_in : 1; + const u32 bytes_per_pixel = 1U; const size_t src_size = GOB_SIZE; const size_t dst_size = static_cast(regs.pitch_out) * regs.line_count; u32 pos_x = regs.src_params.origin.x; @@ -239,9 +263,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); } - UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width, - bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y, - write_buffer.data(), read_buffer.data()); + UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width, + regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count, + regs.src_params.block_size.height, regs.src_params.block_size.depth, + regs.pitch_out); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 074bac92c4..9c5d567a67 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -189,10 +189,16 @@ public: BitField<4, 3, Swizzle> dst_y; BitField<8, 3, Swizzle> dst_z; BitField<12, 3, Swizzle> dst_w; + BitField<0, 12, u32> dst_components_raw; BitField<16, 2, u32> component_size_minus_one; BitField<20, 2, u32> num_src_components_minus_one; BitField<24, 2, u32> num_dst_components_minus_one; }; + + Swizzle GetComponent(size_t i) { + const u32 raw = dst_components_raw; + return static_cast((raw >> (i * 3)) & 0x7); + } }; static_assert(sizeof(RemapConst) == 12); diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp index 5d80398412..b9ac415295 100644 --- a/src/video_core/host1x/vic.cpp +++ b/src/video_core/host1x/vic.cpp @@ -156,8 +156,9 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) { const u32 block_height = static_cast(config.block_linear_height_log2); const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0); luma_buffer.resize(size); - Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(), - converted_frame_buf_addr, block_height, 0, 0); + std::span frame_buff(converted_frame_buf_addr, 4 * width * height); + Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, + 0, 0, width, height, block_height, 0, width * 4); host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); } else { diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 4e52ce0fd2..4a692448ea 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -462,6 +462,97 @@ void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const { MemoryOperation(gpu_addr, size, mapped_big, do_nothing, flush_short_pages); } +bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const { + bool result = false; + auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { return false; }; + + auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount); + return result; + }; + auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + 
(static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount); + return result; + }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation(base, copy_amount, mapped_normal, do_nothing, do_nothing); + return result; + }; + MemoryOperation(gpu_addr, size, mapped_big, do_nothing, check_short_pages); + return result; +} + +size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const { + std::optional old_page_addr{}; + size_t range_so_far = 0; + bool result{false}; + auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + std::size_t copy_amount) { + result = true; + return true; + }; + auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + result = true; + return true; + } + range_so_far += copy_amount; + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + return true; + } + range_so_far += copy_amount; + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation(base, copy_amount, short_check, fail, fail); + return result; + }; + MemoryOperation(gpu_addr, size, big_check, fail, check_short_pages); + return range_so_far; +} + +void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size) const { + auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) {}; + + auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); + }; + auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); + }; + auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation(base, copy_amount, mapped_normal, do_nothing, do_nothing); + }; + MemoryOperation(gpu_addr, size, mapped_big, do_nothing, invalidate_short_pages); +} + void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { std::vector tmp_buffer(size); ReadBlock(gpu_src_addr, tmp_buffer.data(), size); diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 8f8877a923..9c08edc20d 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -104,6 +104,12 @@ public: void FlushRegion(GPUVAddr gpu_addr, size_t size) const; + void InvalidateRegion(GPUVAddr gpu_addr, 
size_t size) const; + + bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const; + + size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const; + private: template inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index cb07f3d38b..d2d40884c2 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -129,7 +129,7 @@ public: [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0; virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span memory) = 0; + std::span memory) = 0; /// Attempt to use a faster method to display the framebuffer to screen [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 02bb177155..c2d80605d2 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -476,7 +476,7 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA() } void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span memory) { + std::span memory) { auto cpu_addr = gpu_memory->GpuToCpuAddress(address); if (!cpu_addr) [[unlikely]] { gpu_memory->WriteBlock(address, memory.data(), copy_size); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index fe0ba979ae..45131b7852 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -99,7 +99,7 @@ public: const Tegra::Engines::Fermi2D::Config& copy_config) override; Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span memory) override; + std::span memory) override; bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) override; void LoadDiskResources(u64 title_id, std::stop_token stop_loading, diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index f17a5ccd6e..241d7573ed 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp @@ -26,8 +26,6 @@ namespace Vulkan { -using Tegra::Texture::SWIZZLE_TABLE; - namespace { constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index a35e41199c..acfd5da7d6 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -548,7 +548,7 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA() } void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span memory) { + std::span memory) { auto cpu_addr = gpu_memory->GpuToCpuAddress(address); if (!cpu_addr) [[unlikely]] { gpu_memory->WriteBlock(address, memory.data(), copy_size); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index fb9e83e8f3..4cde3c983a 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -95,7 +95,7 @@ public: const 
Tegra::Engines::Fermi2D::Config& copy_config) override; Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span memory) override; + std::span memory) override; bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) override; void LoadDiskResources(u64 title_id, std::stop_token stop_loading, diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index bea1c27d06..1223df5a0a 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp @@ -517,7 +517,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block; UNIMPLEMENTED_IF(info.tile_width_spacing > 0); - UNIMPLEMENTED_IF(copy.image_offset.x != 0); UNIMPLEMENTED_IF(copy.image_offset.y != 0); UNIMPLEMENTED_IF(copy.image_offset.z != 0); diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index 913f8ebcb3..fcc636e0b9 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp @@ -89,6 +89,69 @@ void SwizzleImpl(std::span output, std::span input, u32 width, u32 } } +template +void SwizzleSubrectImpl(std::span output, std::span input, u32 width, u32 height, + u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 num_lines, + u32 block_height, u32 block_depth, u32 pitch_linear) { + // The origin of the transformation can be configured here, leave it as zero as the current API + // doesn't expose it. + static constexpr u32 origin_z = 0; + + // We can configure here a custom pitch + // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch. + const u32 pitch = pitch_linear; + const u32 stride = Common::AlignUpLog2(width * BYTES_PER_PIXEL, GOB_SIZE_X_SHIFT); + + const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); + const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); + const u32 slice_size = + Common::DivCeilLog2(height, block_height + GOB_SIZE_Y_SHIFT) * block_size; + + const u32 block_height_mask = (1U << block_height) - 1; + const u32 block_depth_mask = (1U << block_depth) - 1; + const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth; + + u32 unprocessed_lines = num_lines; + u32 extent_y = std::min(num_lines, height - origin_y); + + for (u32 slice = 0; slice < depth; ++slice) { + const u32 z = slice + origin_z; + const u32 offset_z = (z >> block_depth) * slice_size + + ((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height)); + const u32 lines_in_y = std::min(unprocessed_lines, extent_y); + for (u32 line = 0; line < lines_in_y; ++line) { + const u32 y = line + origin_y; + const u32 swizzled_y = pdep(y); + + const u32 block_y = y >> GOB_SIZE_Y_SHIFT; + const u32 offset_y = (block_y >> block_height) * block_size + + ((block_y & block_height_mask) << GOB_SIZE_SHIFT); + + u32 swizzled_x = pdep(origin_x * BYTES_PER_PIXEL); + for (u32 column = 0; column < extent_x; + ++column, incrpdep(swizzled_x)) { + const u32 x = (column + origin_x) * BYTES_PER_PIXEL; + const u32 offset_x = (x >> GOB_SIZE_X_SHIFT) << x_shift; + + const u32 base_swizzled_offset = offset_z + offset_y + offset_x; + const u32 swizzled_offset = base_swizzled_offset + (swizzled_x | swizzled_y); + + const u32 unswizzled_offset = + slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL; + + u8* const dst = &output[TO_LINEAR ? 
swizzled_offset : unswizzled_offset]; + const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset]; + + std::memcpy(dst, src, BYTES_PER_PIXEL); + } + } + unprocessed_lines -= lines_in_y; + if (unprocessed_lines == 0) { + return; + } + } +} + template void Swizzle(std::span output, std::span input, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) { @@ -111,97 +174,6 @@ void Swizzle(std::span output, std::span input, u32 bytes_per_pixe } } -template -void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, - u8* swizzled_data, const u8* unswizzled_data, u32 block_height_bit, - u32 offset_x, u32 offset_y) { - const u32 block_height = 1U << block_height_bit; - const u32 image_width_in_gobs = - (swizzled_width * BYTES_PER_PIXEL + (GOB_SIZE_X - 1)) / GOB_SIZE_X; - for (u32 line = 0; line < subrect_height; ++line) { - const u32 dst_y = line + offset_y; - const u32 gob_address_y = - (dst_y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs + - ((dst_y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE; - - const u32 swizzled_y = pdep(dst_y); - u32 swizzled_x = pdep(offset_x * BYTES_PER_PIXEL); - for (u32 x = 0; x < subrect_width; - ++x, incrpdep(swizzled_x)) { - const u32 dst_x = x + offset_x; - const u32 gob_address = - gob_address_y + (dst_x * BYTES_PER_PIXEL / GOB_SIZE_X) * GOB_SIZE * block_height; - const u32 swizzled_offset = gob_address + (swizzled_x | swizzled_y); - const u32 unswizzled_offset = line * source_pitch + x * BYTES_PER_PIXEL; - - const u8* const source_line = unswizzled_data + unswizzled_offset; - u8* const dest_addr = swizzled_data + swizzled_offset; - std::memcpy(dest_addr, source_line, BYTES_PER_PIXEL); - } - } -} - -template -void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 block_height, - u32 origin_x, u32 origin_y, u8* output, const u8* input) { - const u32 stride = width * BYTES_PER_PIXEL; - const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X; - const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height); - - const u32 block_height_mask = (1U << block_height) - 1; - const u32 x_shift = GOB_SIZE_SHIFT + block_height; - - for (u32 line = 0; line < line_count; ++line) { - const u32 src_y = line + origin_y; - const u32 swizzled_y = pdep(src_y); - - const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT; - const u32 src_offset_y = (block_y >> block_height) * block_size + - ((block_y & block_height_mask) << GOB_SIZE_SHIFT); - - u32 swizzled_x = pdep(origin_x * BYTES_PER_PIXEL); - for (u32 column = 0; column < line_length_in; - ++column, incrpdep(swizzled_x)) { - const u32 src_x = (column + origin_x) * BYTES_PER_PIXEL; - const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift; - - const u32 swizzled_offset = src_offset_y + src_offset_x + (swizzled_x | swizzled_y); - const u32 unswizzled_offset = line * pitch + column * BYTES_PER_PIXEL; - - std::memcpy(output + unswizzled_offset, input + swizzled_offset, BYTES_PER_PIXEL); - } - } -} - -template -void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height, - u32 block_height, u32 block_depth, u32 origin_x, u32 origin_y, u8* output, - const u8* input) { - UNIMPLEMENTED_IF(origin_x > 0); - UNIMPLEMENTED_IF(origin_y > 0); - - const u32 stride = width * BYTES_PER_PIXEL; - const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X; - const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + 
block_height + block_depth); - - const u32 block_height_mask = (1U << block_height) - 1; - const u32 x_shift = static_cast(GOB_SIZE_SHIFT) + block_height + block_depth; - - for (u32 line = 0; line < line_count; ++line) { - const u32 swizzled_y = pdep(line); - const u32 block_y = line / GOB_SIZE_Y; - const u32 dst_offset_y = - (block_y >> block_height) * block_size + (block_y & block_height_mask) * GOB_SIZE; - - u32 swizzled_x = 0; - for (u32 x = 0; x < line_length_in; ++x, incrpdep(swizzled_x)) { - const u32 dst_offset = - ((x / GOB_SIZE_X) << x_shift) + dst_offset_y + (swizzled_x | swizzled_y); - const u32 src_offset = x * BYTES_PER_PIXEL + line * pitch; - std::memcpy(output + dst_offset, input + src_offset, BYTES_PER_PIXEL); - } - } -} } // Anonymous namespace void UnswizzleTexture(std::span output, std::span input, u32 bytes_per_pixel, @@ -218,15 +190,15 @@ void SwizzleTexture(std::span output, std::span input, u32 bytes_p stride_alignment); } -void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, - u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, - u32 block_height_bit, u32 offset_x, u32 offset_y) { +void SwizzleSubrect(std::span output, std::span input, u32 bytes_per_pixel, u32 width, + u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y, + u32 block_height, u32 block_depth, u32 pitch_linear) { switch (bytes_per_pixel) { #define BPP_CASE(x) \ case x: \ - return SwizzleSubrect(subrect_width, subrect_height, source_pitch, swizzled_width, \ - swizzled_data, unswizzled_data, block_height_bit, offset_x, \ - offset_y); + return SwizzleSubrectImpl(output, input, width, height, depth, origin_x, \ + origin_y, extent_x, extent_y, block_height, \ + block_depth, pitch_linear); BPP_CASE(1) BPP_CASE(2) BPP_CASE(3) @@ -241,13 +213,15 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 } } -void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, - u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) { +void UnswizzleSubrect(std::span output, std::span input, u32 bytes_per_pixel, + u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, + u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear) { switch (bytes_per_pixel) { #define BPP_CASE(x) \ case x: \ - return UnswizzleSubrect(line_length_in, line_count, pitch, width, block_height, \ - origin_x, origin_y, output, input); + return SwizzleSubrectImpl(output, input, width, height, depth, origin_x, \ + origin_y, extent_x, extent_y, block_height, \ + block_depth, pitch_linear); BPP_CASE(1) BPP_CASE(2) BPP_CASE(3) @@ -262,55 +236,6 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, } } -void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height, - u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x, - u32 origin_y, u8* output, const u8* input) { - switch (bytes_per_pixel) { -#define BPP_CASE(x) \ - case x: \ - return SwizzleSliceToVoxel(line_length_in, line_count, pitch, width, height, \ - block_height, block_depth, origin_x, origin_y, output, \ - input); - BPP_CASE(1) - BPP_CASE(2) - BPP_CASE(3) - BPP_CASE(4) - BPP_CASE(6) - BPP_CASE(8) - BPP_CASE(12) - BPP_CASE(16) -#undef BPP_CASE - default: - ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel); - } -} - -void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y, - 
const u32 block_height_bit, const std::size_t copy_size, const u8* source_data, - u8* swizzle_data) { - const u32 block_height = 1U << block_height_bit; - const u32 image_width_in_gobs{(width + GOB_SIZE_X - 1) / GOB_SIZE_X}; - std::size_t count = 0; - for (std::size_t y = dst_y; y < height && count < copy_size; ++y) { - const std::size_t gob_address_y = - (y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs + - ((y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE; - const u32 swizzled_y = pdep(static_cast(y)); - u32 swizzled_x = pdep(dst_x); - for (std::size_t x = dst_x; x < width && count < copy_size; - ++x, incrpdep(swizzled_x)) { - const std::size_t gob_address = - gob_address_y + (x / GOB_SIZE_X) * GOB_SIZE * block_height; - const std::size_t swizzled_offset = gob_address + (swizzled_x | swizzled_y); - const u8* source_line = source_data + count; - u8* dest_addr = swizzle_data + swizzled_offset; - count++; - - *dest_addr = *source_line; - } - } -} - std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth) { if (tiled) { diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index 31a11708f0..e704076923 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h @@ -40,7 +40,6 @@ constexpr SwizzleTable MakeSwizzleTable() { } return table; } -constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable(); /// Unswizzles a block linear texture into linear memory. void UnswizzleTexture(std::span output, std::span input, u32 bytes_per_pixel, @@ -57,34 +56,14 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height u32 block_height, u32 block_depth); /// Copies an untiled subrectangle into a tiled surface. -void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, - u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, - u32 block_height_bit, u32 offset_x, u32 offset_y); +void SwizzleSubrect(std::span output, std::span input, u32 bytes_per_pixel, u32 width, + u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y, + u32 block_height, u32 block_depth, u32 pitch_linear); /// Copies a tiled subrectangle into a linear surface. 
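+/// The copy is 3D-aware: origin, extent, depth and block depth are explicit
+/// parameters, and both buffers are passed as spans.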
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, - u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input); - -/// @brief Swizzles a 2D array of pixels into a 3D texture -/// @param line_length_in Number of pixels per line -/// @param line_count Number of lines -/// @param pitch Number of bytes per line -/// @param width Width of the swizzled texture -/// @param height Height of the swizzled texture -/// @param bytes_per_pixel Number of bytes used per pixel -/// @param block_height Block height shift -/// @param block_depth Block depth shift -/// @param origin_x Column offset in pixels of the swizzled texture -/// @param origin_y Row offset in pixels of the swizzled texture -/// @param output Pointer to the pixels of the swizzled texture -/// @param input Pointer to the 2D array of pixels used as input -/// @pre input and output points to an array large enough to hold the number of bytes used -void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height, - u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x, - u32 origin_y, u8* output, const u8* input); - -void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height, - std::size_t copy_size, const u8* source_data, u8* swizzle_data); +void UnswizzleSubrect(std::span output, std::span input, u32 bytes_per_pixel, + u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, + u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear); /// Obtains the offset of the gob for positions 'dst_x' & 'dst_y' u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height, From 98317f2b7799360ee4e5e55cbbb123bce72fbf2c Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 3 Apr 2022 02:18:03 +0200 Subject: [PATCH 43/68] Decoders: Improve overall speed. --- src/video_core/textures/decoders.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index fcc636e0b9..52d067a2dc 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp @@ -35,7 +35,7 @@ void incrpdep(u32& value) { template void SwizzleImpl(std::span output, std::span input, u32 width, u32 height, u32 depth, - u32 block_height, u32 block_depth, u32 stride_alignment) { + u32 block_height, u32 block_depth, u32 stride) { // The origin of the transformation can be configured here, leave it as zero as the current API // doesn't expose it. static constexpr u32 origin_x = 0; @@ -45,7 +45,6 @@ void SwizzleImpl(std::span output, std::span input, u32 width, u32 // We can configure here a custom pitch // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch. 
const u32 pitch = width * BYTES_PER_PIXEL; - const u32 stride = Common::AlignUpLog2(width, stride_alignment) * BYTES_PER_PIXEL; const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); @@ -179,15 +178,23 @@ void Swizzle(std::span output, std::span input, u32 bytes_per_pixe void UnswizzleTexture(std::span output, std::span input, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) { + const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel; + const u32 new_bpp = std::min(4U, static_cast(std::countr_zero(width * bytes_per_pixel))); + width = (width * bytes_per_pixel) >> new_bpp; + bytes_per_pixel = 1U << new_bpp; Swizzle(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, - stride_alignment); + stride); } void SwizzleTexture(std::span output, std::span input, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) { + const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel; + const u32 new_bpp = std::min(4U, static_cast(std::countr_zero(width * bytes_per_pixel))); + width = (width * bytes_per_pixel) >> new_bpp; + bytes_per_pixel = 1U << new_bpp; Swizzle(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, - stride_alignment); + stride); } void SwizzleSubrect(std::span output, std::span input, u32 bytes_per_pixel, u32 width, From b59ca4df0cf45f1c17555ae3029bab234d4241ac Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 6 Mar 2022 14:52:35 +0100 Subject: [PATCH 44/68] Buffer Cache: Basic fixes. --- src/video_core/buffer_cache/buffer_cache.h | 37 +++++++++++++--------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 359c11d6f2..6eb6724759 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -350,7 +350,7 @@ private: void NotifyBufferDeletion(); - [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const; + [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const; [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, PixelFormat format); @@ -725,7 +725,7 @@ void BufferCache
<P>
::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index, const auto& cbufs = maxwell3d->state.shader_stages[stage]; const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset; - storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr); + storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written); } template @@ -765,7 +765,7 @@ void BufferCache
<P>
::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index, const auto& cbufs = launch_desc.const_buffer_config; const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset; - compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr); + compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written); } template @@ -1242,16 +1242,19 @@ void BufferCache
<P>
::BindHostComputeTextureBuffers() { template void BufferCache
<P>
::DoUpdateGraphicsBuffers(bool is_indexed) { - if (is_indexed) { - UpdateIndexBuffer(); - } - UpdateVertexBuffers(); - UpdateTransformFeedbackBuffers(); - for (size_t stage = 0; stage < NUM_STAGES; ++stage) { - UpdateUniformBuffers(stage); - UpdateStorageBuffers(stage); - UpdateTextureBuffers(stage); - } + do { + has_deleted_buffers = false; + if (is_indexed) { + UpdateIndexBuffer(); + } + UpdateVertexBuffers(); + UpdateTransformFeedbackBuffers(); + for (size_t stage = 0; stage < NUM_STAGES; ++stage) { + UpdateUniformBuffers(stage); + UpdateStorageBuffers(stage); + UpdateTextureBuffers(stage); + } + } while (has_deleted_buffers); } template @@ -1557,6 +1560,8 @@ BufferId BufferCache
<P>
::CreateBuffer(VAddr cpu_addr, u32 wanted_size) { const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); const u32 size = static_cast(overlap.end - overlap.begin); const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); + auto& new_buffer = slot_buffers[new_buffer_id]; + runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0); for (const BufferId overlap_id : overlap.ids) { JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap); } @@ -1831,16 +1836,18 @@ void BufferCache
<P>
::NotifyBufferDeletion() { } template -typename BufferCache
<P>
::Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr ssbo_addr) const { +typename BufferCache
<P>
::Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr ssbo_addr, + bool is_written) const { const GPUVAddr gpu_addr = gpu_memory->Read(ssbo_addr); const u32 size = gpu_memory->Read(ssbo_addr + 8); const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr || size == 0) { return NULL_BINDING; } + const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE); const Binding binding{ .cpu_addr = *cpu_addr, - .size = size, + .size = is_written ? size : static_cast(cpu_end - *cpu_addr), .buffer_id = BufferId{}, }; return binding; From a9ca39f8591532ba6d37f7a3e068d5eefe416464 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 7 Feb 2022 07:52:04 +0100 Subject: [PATCH 45/68] NVDRV: Further improvements. --- src/core/hle/service/nvdrv/core/container.cpp | 8 +- src/core/hle/service/nvdrv/core/container.h | 8 +- src/core/hle/service/nvdrv/core/nvmap.cpp | 7 +- src/core/hle/service/nvdrv/core/nvmap.h | 7 +- .../service/nvdrv/core/syncpoint_manager.cpp | 112 +++++++++++++--- .../service/nvdrv/core/syncpoint_manager.h | 124 ++++++++++++------ .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 18 ++- .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 59 ++++----- .../hle/service/nvdrv/devices/nvhost_gpu.h | 19 ++- .../service/nvdrv/devices/nvhost_nvdec.cpp | 2 +- .../nvdrv/devices/nvhost_nvdec_common.cpp | 15 +-- .../nvdrv/devices/nvhost_nvdec_common.h | 6 +- .../hle/service/nvdrv/devices/nvhost_vic.cpp | 2 +- src/video_core/engines/maxwell_3d.cpp | 18 +-- src/video_core/engines/maxwell_dma.cpp | 18 ++- src/video_core/engines/puller.cpp | 18 +-- 16 files changed, 280 insertions(+), 161 deletions(-) diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index fbd66f0016..4175d3d9c3 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -1,7 +1,7 @@ -// Copyright 2021 yuzu emulator team -// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/core/nvmap.h" diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index da75d74ff7..e069ade4e5 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -1,7 +1,7 @@ -// Copyright 2021 yuzu emulator team -// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. 
#pragma once diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 9acec7ba66..86d825af96 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -1,6 +1,7 @@ -// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #include "common/alignment.h" #include "common/assert.h" diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 5acdc961e6..4f37dcf437 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -1,6 +1,7 @@ -// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #pragma once diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index 61e00448c9..b34481b486 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -1,5 +1,7 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. 
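// (Editor's sketch, not part of the patch; helper names are made up.) The
// reworked SyncpointManager below compares 32-bit counters with deliberate
// unsigned wraparound, the same idiom as the Linux host1x driver it cites.
// A free-standing restatement of the two expiry checks in HasSyncpointExpired,
// assuming min, max and threshold never drift more than 2^31 apart:
//
//     bool ClientManagedExpired(u32 min, u32 threshold) {
//         return static_cast<s32>(min - threshold) >= 0; // signed distance from threshold
//     }
//     bool HostManagedExpired(u32 min, u32 max, u32 threshold) {
//         return (max - threshold) >= (min - threshold); // distances taken mod 2^32
//     }
//
// In the host-managed case the threshold counts as expired exactly when it no
// longer lies in the half-open wraparound interval (min, max].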
#include "common/assert.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" @@ -7,32 +9,108 @@ namespace Service::Nvidia::NvCore { -SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {} +SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} { + constexpr u32 VBlank0SyncpointId{26}; + constexpr u32 VBlank1SyncpointId{27}; + + // Reserve both vblank syncpoints as client managed as they use Continuous Mode + // Refer to section 14.3.5.3 of the TRM for more information on Continuous Mode + // https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/drm/dc.c#L660 + ReserveSyncpoint(VBlank0SyncpointId, true); + ReserveSyncpoint(VBlank1SyncpointId, true); + + for (u32 syncpointId : channel_syncpoints) { + if (syncpointId) { + ReserveSyncpoint(syncpointId, false); + } + } +} SyncpointManager::~SyncpointManager() = default; -u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) { - syncpoints[syncpoint_id].min = host1x.GetSyncpointManager().GetHostSyncpointValue(syncpoint_id); - return GetSyncpointMin(syncpoint_id); +u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) { + if (syncpoints.at(id).reserved) { + UNREACHABLE_MSG("Requested syncpoint is in use"); + return 0; + } + + syncpoints.at(id).reserved = true; + syncpoints.at(id).interfaceManaged = clientManaged; + + return id; } -u32 SyncpointManager::AllocateSyncpoint() { - for (u32 syncpoint_id = 1; syncpoint_id < MaxSyncPoints; syncpoint_id++) { - if (!syncpoints[syncpoint_id].is_allocated) { - syncpoints[syncpoint_id].is_allocated = true; - return syncpoint_id; +u32 SyncpointManager::FindFreeSyncpoint() { + for (u32 i{1}; i < syncpoints.size(); i++) { + if (!syncpoints[i].reserved) { + return i; } } - ASSERT_MSG(false, "No more available syncpoints!"); - return {}; + UNREACHABLE_MSG("Failed to find a free syncpoint!"); + return 0; } -u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) { - for (u32 index = 0; index < value; ++index) { - syncpoints[syncpoint_id].max.fetch_add(1, std::memory_order_relaxed); +u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) { + std::lock_guard lock(reservation_lock); + return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged); +} + +bool SyncpointManager::IsSyncpointAllocated(u32 id) { + return (id <= SyncpointCount) && syncpoints[id].reserved; +} + +bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) { + const SyncpointInfo& syncpoint{syncpoints.at(id)}; + + if (!syncpoint.reserved) { + UNREACHABLE(); + return 0; } - return GetSyncpointMax(syncpoint_id); + // If the interface manages counters then we don't keep track of the maximum value as it handles + // sanity checking the values then + if (syncpoint.interfaceManaged) { + return static_cast(syncpoint.counterMin - threshold) >= 0; + } else { + return (syncpoint.counterMax - threshold) >= (syncpoint.counterMin - threshold); + } +} + +u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) { + if (!syncpoints.at(id).reserved) { + UNREACHABLE(); + return 0; + } + + return syncpoints.at(id).counterMax += amount; +} + +u32 SyncpointManager::ReadSyncpointMinValue(u32 id) { + if (!syncpoints.at(id).reserved) { + UNREACHABLE(); + return 0; + } + + return syncpoints.at(id).counterMin; +} + +u32 SyncpointManager::UpdateMin(u32 id) { + if (!syncpoints.at(id).reserved) { + UNREACHABLE(); + return 0; + } + + syncpoints.at(id).counterMin = 
host1x.GetSyncpointManager().GetHostSyncpointValue(id); + return syncpoints.at(id).counterMin; +} + +NvFence SyncpointManager::GetSyncpointFence(u32 id) { + if (!syncpoints.at(id).reserved) { + UNREACHABLE(); + return NvFence{}; + } + + return {.id = static_cast(id), .value = syncpoints.at(id).counterMax}; } } // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h index f332edc6e4..bfc8ba84b0 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h @@ -1,10 +1,13 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors +// (https://github.com/skyline-emu/) +// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 +// or any later version Refer to the license.txt file included. #pragma once #include #include +#include #include "common/common_types.h" #include "core/hle/service/nvdrv/nvdata.h" @@ -19,68 +22,111 @@ class Host1x; namespace Service::Nvidia::NvCore { +enum class ChannelType : u32 { + MsEnc = 0, + VIC = 1, + GPU = 2, + NvDec = 3, + Display = 4, + NvJpg = 5, + TSec = 6, + Max = 7 +}; + +/** + * @brief SyncpointManager handles allocating and accessing host1x syncpoints, these are cached + * versions of the HW syncpoints which are intermittently synced + * @note Refer to Chapter 14 of the Tegra X1 TRM for an exhaustive overview of them + * @url https://http.download.nvidia.com/tegra-public-appnotes/host1x.html + * @url + * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/jetson-tx1/drivers/video/tegra/host/nvhost_syncpt.c + */ class SyncpointManager final { public: explicit SyncpointManager(Tegra::Host1x::Host1x& host1x); ~SyncpointManager(); /** - * Returns true if the specified syncpoint is expired for the given value. - * @param syncpoint_id Syncpoint ID to check. - * @param value Value to check against the specified syncpoint. - * @returns True if the specified syncpoint is expired for the given value, otherwise False. + * @brief Checks if the given syncpoint is both allocated and below the number of HW syncpoints */ - bool IsSyncpointExpired(u32 syncpoint_id, u32 value) const { - return (GetSyncpointMax(syncpoint_id) - value) >= (GetSyncpointMin(syncpoint_id) - value); + bool IsSyncpointAllocated(u32 id); + + /** + * @brief Finds a free syncpoint and reserves it + * @return The ID of the reserved syncpoint + */ + u32 AllocateSyncpoint(bool clientManaged); + + /** + * @url + * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259 + */ + bool HasSyncpointExpired(u32 id, u32 threshold); + + bool IsFenceSignalled(NvFence fence) { + return HasSyncpointExpired(fence.id, fence.value); } /** - * Gets the lower bound for the specified syncpoint. - * @param syncpoint_id Syncpoint ID to get the lower bound for. - * @returns The lower bound for the specified syncpoint. + * @brief Atomically increments the maximum value of a syncpoint by the given amount + * @return The new max value of the syncpoint */ - u32 GetSyncpointMin(u32 syncpoint_id) const { - return syncpoints.at(syncpoint_id).min.load(std::memory_order_relaxed); - } + u32 IncrementSyncpointMaxExt(u32 id, u32 amount); /** - * Gets the uper bound for the specified syncpoint. 
- * @param syncpoint_id Syncpoint ID to get the upper bound for. - * @returns The upper bound for the specified syncpoint. + * @return The minimum value of the syncpoint */ - u32 GetSyncpointMax(u32 syncpoint_id) const { - return syncpoints.at(syncpoint_id).max.load(std::memory_order_relaxed); - } + u32 ReadSyncpointMinValue(u32 id); /** - * Refreshes the minimum value for the specified syncpoint. - * @param syncpoint_id Syncpoint ID to be refreshed. - * @returns The new syncpoint minimum value. + * @brief Synchronises the minimum value of the syncpoint to with the GPU + * @return The new minimum value of the syncpoint */ - u32 RefreshSyncpoint(u32 syncpoint_id); + u32 UpdateMin(u32 id); /** - * Allocates a new syncoint. - * @returns The syncpoint ID for the newly allocated syncpoint. + * @return A fence that will be signalled once this syncpoint hits its maximum value */ - u32 AllocateSyncpoint(); + NvFence GetSyncpointFence(u32 id); - /** - * Increases the maximum value for the specified syncpoint. - * @param syncpoint_id Syncpoint ID to be increased. - * @param value Value to increase the specified syncpoint by. - * @returns The new syncpoint maximum value. - */ - u32 IncreaseSyncpoint(u32 syncpoint_id, u32 value); + static constexpr std::array(ChannelType::Max)> channel_syncpoints{ + 0x0, // `MsEnc` is unimplemented + 0xC, // `VIC` + 0x0, // `GPU` syncpoints are allocated per-channel instead + 0x36, // `NvDec` + 0x0, // `Display` is unimplemented + 0x37, // `NvJpg` + 0x0, // `TSec` is unimplemented + }; //!< Maps each channel ID to a constant syncpoint private: - struct Syncpoint { - std::atomic min; - std::atomic max; - std::atomic is_allocated; + /** + * @note reservation_lock should be locked when calling this + */ + u32 ReserveSyncpoint(u32 id, bool clientManaged); + + /** + * @return The ID of the first free syncpoint + */ + u32 FindFreeSyncpoint(); + + struct SyncpointInfo { + std::atomic counterMin; //!< The least value the syncpoint can be (The value it was + //!< when it was last synchronized with host1x) + std::atomic counterMax; //!< The maximum value the syncpoint can reach according to the + //!< current usage + bool interfaceManaged; //!< If the syncpoint is managed by a host1x client interface, a + //!< client interface is a HW block that can handle host1x + //!< transactions on behalf of a host1x client (Which would otherwise + //!< need to be manually synced using PIO which is synchronous and + //!< requires direct cooperation of the CPU) + bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved + //!< value }; - std::array syncpoints{}; + constexpr static std::size_t SyncpointCount{192}; + std::array syncpoints{}; + std::mutex reservation_lock; Tegra::Host1x::Host1x& host1x; }; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 076edb02f7..a84e4d4251 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -112,17 +112,23 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector } if (params.fence.value == 0) { - params.value.raw = syncpoint_manager.GetSyncpointMin(fence_id); + if (!syncpoint_manager.IsSyncpointAllocated(params.fence.id)) { + LOG_WARNING(Service_NVDRV, + "Unallocated syncpt_id={}, threshold={}, timeout={}, is_allocation={}", + params.fence.id, params.fence.value, params.timeout, is_allocation); + } else { + params.value.raw = 
syncpoint_manager.ReadSyncpointMinValue(fence_id); + } return NvResult::Success; } - if (syncpoint_manager.IsSyncpointExpired(fence_id, params.fence.value)) { - params.value.raw = syncpoint_manager.GetSyncpointMin(fence_id); + if (syncpoint_manager.IsFenceSignalled(params.fence)) { + params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id); return NvResult::Success; } - if (const auto new_value = syncpoint_manager.RefreshSyncpoint(fence_id); - syncpoint_manager.IsSyncpointExpired(fence_id, params.fence.value)) { + if (const auto new_value = syncpoint_manager.UpdateMin(fence_id); + syncpoint_manager.IsFenceSignalled(params.fence)) { params.value.raw = new_value; return NvResult::Success; } @@ -296,7 +302,7 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector& input, std::v EventState::Waiting) { auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager(); host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle); - syncpoint_manager.RefreshSyncpoint(event.assigned_syncpt); + syncpoint_manager.UpdateMin(event.assigned_syncpt); event.wait_handle = {}; } event.fails++; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 3f981af5a1..c2cc09993f 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -31,9 +31,7 @@ nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_, : nvdevice{system_}, events_interface{events_interface_}, core{core_}, syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()}, channel_state{system.GPU().AllocateChannel()} { - channel_fence.id = syncpoint_manager.AllocateSyncpoint(); - channel_fence.value = - system_.Host1x().GetSyncpointManager().GetGuestSyncpointValue(channel_fence.id); + channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false); sm_exception_breakpoint_int_report_event = events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt"); sm_exception_breakpoint_pause_report_event = @@ -191,10 +189,8 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector& input, std::vector BuildWaitCommandList(NvFence fence) { }; } -static std::vector BuildIncrementCommandList(NvFence fence, - u32 add_increment) { +static std::vector BuildIncrementCommandList(NvFence fence) { std::vector result{ Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1, Tegra::SubmissionMode::Increasing), {}}; - for (u32 count = 0; count < add_increment; ++count) { + for (u32 count = 0; count < 2; ++count) { result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1, Tegra::SubmissionMode::Increasing)); result.emplace_back( @@ -239,14 +234,12 @@ static std::vector BuildIncrementCommandList(NvFence fence return result; } -static std::vector BuildIncrementWithWfiCommandList(NvFence fence, - u32 add_increment) { +static std::vector BuildIncrementWithWfiCommandList(NvFence fence) { std::vector result{ Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1, Tegra::SubmissionMode::Increasing), {}}; - const std::vector increment{ - BuildIncrementCommandList(fence, add_increment)}; + const std::vector increment{BuildIncrementCommandList(fence)}; result.insert(result.end(), increment.begin(), increment.end()); @@ -260,35 +253,41 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector auto& gpu = system.GPU(); + std::scoped_lock lock(channel_mutex); + const auto bind_id = 
channel_state->bind_id; - params.fence_out.id = channel_fence.id; + auto& flags = params.flags; - if (params.flags.add_wait.Value() && - !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) { - gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence_out)}); - } + if (flags.fence_wait.Value()) { + if (flags.increment_value.Value()) { + return NvResult::BadParameter; + } - if (params.flags.add_increment.Value() || params.flags.increment.Value()) { - const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0; - params.fence_out.value = syncpoint_manager.IncreaseSyncpoint( - params.fence_out.id, params.AddIncrementValue() + increment_value); - } else { - params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id); + if (!syncpoint_manager.IsFenceSignalled(params.fence)) { + gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence)}); + } } gpu.PushGPUEntries(bind_id, std::move(entries)); + params.fence.id = channel_syncpoint; - if (params.flags.add_increment.Value()) { - if (params.flags.suppress_wfi) { - gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildIncrementCommandList( - params.fence_out, params.AddIncrementValue())}); + u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) + + (flags.increment_value.Value() != 0 ? params.fence.value : 0)}; + params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment); + + if (flags.fence_increment.Value()) { + if (flags.suppress_wfi.Value()) { + gpu.PushGPUEntries(bind_id, + Tegra::CommandList{BuildIncrementCommandList(params.fence)}); } else { - gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildIncrementWithWfiCommandList( - params.fence_out, params.AddIncrementValue())}); + gpu.PushGPUEntries(bind_id, + Tegra::CommandList{BuildIncrementWithWfiCommandList(params.fence)}); } } + flags.raw = 0; + std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmitGpfifo)); return NvResult::Success; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 3a65ed06db..1e4ecd55b3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -163,17 +163,13 @@ private: u32_le num_entries{}; // number of fence objects being submitted union { u32_le raw; - BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list - BitField<1, 1, u32_le> add_increment; // append an increment to the list - BitField<2, 1, u32_le> new_hw_format; // mostly ignored - BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt - BitField<8, 1, u32_le> increment; // increment the returned fence + BitField<0, 1, u32_le> fence_wait; // append a wait sync_point to the list + BitField<1, 1, u32_le> fence_increment; // append an increment to the list + BitField<2, 1, u32_le> new_hw_format; // mostly ignored + BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt + BitField<8, 1, u32_le> increment_value; // increment the returned fence } flags; - NvFence fence_out{}; // returned new fence object for others to wait on - - u32 AddIncrementValue() const { - return flags.add_increment.Value() << 1; - } + NvFence fence{}; // returned new fence object for others to wait on }; static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence), "IoctlSubmitGpfifo is incorrect size"); @@ -213,7 +209,8 @@ private: NvCore::SyncpointManager& syncpoint_manager; NvCore::NvMap& nvmap; std::shared_ptr 
channel_state; - NvFence channel_fence; + u32 channel_syncpoint; + std::mutex channel_mutex; // Events Kernel::KEvent* sm_exception_breakpoint_int_report_event; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index 00947ea199..5e3820085a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -13,7 +13,7 @@ namespace Service::Nvidia::Devices { u32 nvhost_nvdec::next_id{}; nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core) - : nvhost_nvdec_common{system_, core} {} + : nvhost_nvdec_common{system_, core, NvCore::ChannelType::NvDec} {} nvhost_nvdec::~nvhost_nvdec() = default; NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index b17589aa3e..008092dbb3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -48,9 +48,10 @@ std::size_t WriteVectors(std::vector& dst, const std::vector& src, std::s std::unordered_map nvhost_nvdec_common::fd_to_id{}; -nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_) - : nvdevice{system_}, core{core_}, - syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} {} +nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_, + NvCore::ChannelType channel_type_) + : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()}, + nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {} nvhost_nvdec_common::~nvhost_nvdec_common() = default; NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector& input) { @@ -88,7 +89,7 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector& input, for (std::size_t i = 0; i < syncpt_increments.size(); i++) { const SyncptIncr& syncpt_incr = syncpt_increments[i]; fence_thresholds[i] = - syncpoint_manager.IncreaseSyncpoint(syncpt_incr.id, syncpt_incr.increments); + syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments); } } for (const auto& cmd_buffer : command_buffers) { @@ -116,10 +117,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector& input, std::ve std::memcpy(¶ms, input.data(), sizeof(IoctlGetSyncpoint)); LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param); - if (device_syncpoints[params.param] == 0 && system.GPU().UseNvdec()) { - device_syncpoints[params.param] = syncpoint_manager.AllocateSyncpoint(); - } - params.value = device_syncpoints[params.param]; + const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast(channel_type)]}; + params.value = id; std::memcpy(output.data(), ¶ms, sizeof(IoctlGetSyncpoint)); return NvResult::Success; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 53029af6ad..51bb7c2cb6 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -6,6 +6,7 @@ #include #include "common/common_types.h" #include "common/swap.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" namespace Service::Nvidia { @@ -13,14 +14,14 @@ namespace Service::Nvidia { namespace NvCore { class Container; class 
NvMap; -class SyncpointManager; } // namespace NvCore namespace Devices { class nvhost_nvdec_common : public nvdevice { public: - explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core); + explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core, + NvCore::ChannelType channel_type); ~nvhost_nvdec_common() override; protected: @@ -121,6 +122,7 @@ protected: NvCore::Container& core; NvCore::SyncpointManager& syncpoint_manager; NvCore::NvMap& nvmap; + NvCore::ChannelType channel_type; std::array device_syncpoints{}; }; }; // namespace Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index c89ff6b27d..490e399f4c 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -12,7 +12,7 @@ namespace Service::Nvidia::Devices { u32 nvhost_vic::next_id{}; nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core) - : nvhost_nvdec_common{system_, core} {} + : nvhost_nvdec_common{system_, core, NvCore::ChannelType::VIC} {} nvhost_vic::~nvhost_vic() = default; diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 632052c53c..3c6e44a252 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -453,18 +453,10 @@ void Maxwell3D::ProcessFirmwareCall4() { } void Maxwell3D::StampQueryResult(u64 payload, bool long_query) { - struct LongQueryResult { - u64_le value; - u64_le timestamp; - }; - static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size"); const GPUVAddr sequence_address{regs.query.QueryAddress()}; if (long_query) { - // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast - // GPU, this command may actually take a while to complete in real hardware due to GPU - // wait queues. 
- LongQueryResult query_result{payload, system.GPU().GetTicks()}; - memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result)); + memory_manager.Write(sequence_address + sizeof(u64), system.GPU().GetTicks()); + memory_manager.Write(sequence_address, payload); } else { memory_manager.Write(sequence_address, static_cast(payload)); } @@ -493,10 +485,10 @@ void Maxwell3D::ProcessQueryGet() { const GPUVAddr sequence_address{regs.query.QueryAddress()}; const u32 payload = regs.query.query_sequence; std::function operation([this, sequence_address, payload] { - LongQueryResult query_result{payload, system.GPU().GetTicks()}; - memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result)); + memory_manager.Write(sequence_address + sizeof(u64), system.GPU().GetTicks()); + memory_manager.Write(sequence_address, payload); }); - rasterizer->SignalFence(std::move(operation)); + rasterizer->SyncOperation(std::move(operation)); } break; case Regs::QueryOperation::Acquire: diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index a12a95ce27..bcffd1862d 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -274,16 +274,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { void MaxwellDMA::ReleaseSemaphore() { const auto type = regs.launch_dma.semaphore_type; const GPUVAddr address = regs.semaphore.address; + const u32 payload = regs.semaphore.payload; switch (type) { case LaunchDMA::SemaphoreType::NONE: break; - case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: - memory_manager.Write(address, regs.semaphore.payload); + case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: { + std::function operation( + [this, address, payload] { memory_manager.Write(address, payload); }); + rasterizer->SignalFence(std::move(operation)); break; - case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: - memory_manager.Write(address, static_cast(regs.semaphore.payload)); - memory_manager.Write(address + 8, system.GPU().GetTicks()); + } + case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: { + std::function operation([this, address, payload] { + memory_manager.Write(address + sizeof(u64), system.GPU().GetTicks()); + memory_manager.Write(address, payload); + }); + rasterizer->SignalFence(std::move(operation)); break; + } default: ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast(type.Value())); } diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp index dd9494efab..c3ed11c13e 100644 --- a/src/video_core/engines/puller.cpp +++ b/src/video_core/engines/puller.cpp @@ -59,6 +59,7 @@ void Puller::ProcessFenceActionMethod() { case Puller::FenceOperation::Acquire: // UNIMPLEMENTED_MSG("Channel Scheduling pending."); // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value); + rasterizer->ReleaseFences(); break; case Puller::FenceOperation::Increment: rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id); @@ -73,19 +74,11 @@ void Puller::ProcessSemaphoreTriggerMethod() { const auto op = static_cast(regs.semaphore_trigger & semaphoreOperationMask); if (op == GpuSemaphoreOperation::WriteLong) { - struct Block { - u32 sequence; - u32 zeros = 0; - u64 timestamp; - }; - const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; const u32 payload = regs.semaphore_sequence; std::function operation([this, sequence_address, payload] { - Block block{}; - block.sequence = payload; - block.timestamp = gpu.GetTicks(); - 
memory_manager.WriteBlock(sequence_address, &block, sizeof(block)); + memory_manager.Write(sequence_address + sizeof(u64), gpu.GetTicks()); + memory_manager.Write(sequence_address, payload); }); rasterizer->SignalFence(std::move(operation)); } else { @@ -98,7 +91,6 @@ void Puller::ProcessSemaphoreTriggerMethod() { regs.acquire_mode = false; if (word != regs.acquire_value) { rasterizer->ReleaseFences(); - std::this_thread::sleep_for(std::chrono::milliseconds(1)); continue; } } else if (op == GpuSemaphoreOperation::AcquireGequal) { @@ -106,13 +98,11 @@ void Puller::ProcessSemaphoreTriggerMethod() { regs.acquire_mode = true; if (word < regs.acquire_value) { rasterizer->ReleaseFences(); - std::this_thread::sleep_for(std::chrono::milliseconds(1)); continue; } } else if (op == GpuSemaphoreOperation::AcquireMask) { if (word && regs.semaphore_sequence == 0) { rasterizer->ReleaseFences(); - std::this_thread::sleep_for(std::chrono::milliseconds(1)); continue; } } else { @@ -128,7 +118,7 @@ void Puller::ProcessSemaphoreRelease() { std::function operation([this, sequence_address, payload] { memory_manager.Write(sequence_address, payload); }); - rasterizer->SignalFence(std::move(operation)); + rasterizer->SyncOperation(std::move(operation)); } void Puller::ProcessSemaphoreAcquire() { From 8bb604b3bec4792ee642d865f49690e1ea181a44 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 26 Mar 2022 14:03:02 +0100 Subject: [PATCH 46/68] VideoCore: Add option to dump the macros. --- src/video_core/macro/macro.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp index 43f8b59049..f61d5998ec 100644 --- a/src/video_core/macro/macro.cpp +++ b/src/video_core/macro/macro.cpp @@ -8,6 +8,7 @@ #include +#include #include "common/assert.h" #include "common/fs/fs.h" #include "common/fs/path_util.h" From 770e19f51a0767c8fc2b204d9d50fa504a3c4e44 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Wed, 13 Apr 2022 16:20:34 +0200 Subject: [PATCH 47/68] Buffer Cache: Deduce vertex array limit from memory layout when limit is the highest possible. --- src/video_core/buffer_cache/buffer_cache.h | 10 +++++++--- src/video_core/memory_manager.cpp | 2 +- src/video_core/memory_manager.h | 4 ++++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 6eb6724759..2e616cee4d 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -1316,12 +1316,16 @@ void BufferCache
<P>
::UpdateVertexBuffer(u32 index) { const GPUVAddr gpu_addr_begin = array.StartAddress(); const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1; const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); - const u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); - const u32 size = address_size; // TODO: Analyze stride and number of vertices - if (array.enable == 0 || size == 0 || !cpu_addr) { + u32 address_size = static_cast( + std::min(gpu_addr_end - gpu_addr_begin, static_cast(std::numeric_limits::max()))); + if (array.enable == 0 || address_size == 0 || !cpu_addr) { vertex_buffers[index] = NULL_BINDING; return; } + if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) { + address_size = gpu_memory->MaxContinousRange(gpu_addr_begin, address_size); + } + const u32 size = address_size; // TODO: Analyze stride and number of vertices vertex_buffers[index] = Binding{ .cpu_addr = *cpu_addr, .size = size, diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 4a692448ea..3cb2d9224b 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -193,7 +193,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { } std::optional MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { - if (gpu_addr >= address_space_size) [[unlikely]] { + if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] { return std::nullopt; } if (GetEntry(gpu_addr) != EntryType::Mapped) [[unlikely]] { diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 9c08edc20d..ae4fd98dfa 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -110,6 +110,10 @@ public: size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const; + bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const { + return gpu_addr < address_space_size; + } + private: template inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, From fd7afda1e802fffe843ac28811470432949fe7ee Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Wed, 13 Apr 2022 20:19:06 +0200 Subject: [PATCH 48/68] VideoCore: Implement formats needed for N64 emulation. 
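[Editor's note, illustrative only, not part of the original commit message.]
The rename below (R4G4_UNORM -> G4R4_UNORM) changes naming only, but the
component order that the name encodes is easy to invert. Maxwell texture
format names are assumed here to list components from the most significant
bits downwards, so a G4R4 texel carries green in the high nibble. A minimal
sketch with hypothetical helper names:

    // u8 as in common/common_types.h; the names below are not from the patch.
    struct UnpackedG4R4 {
        u8 r;
        u8 g;
    };
    inline UnpackedG4R4 UnpackG4R4(u8 texel) {
        return {
            .r = static_cast<u8>(texel & 0x0F), // low nibble
            .g = static_cast<u8>(texel >> 4),   // high nibble
        };
    }

Vulkan's VK_FORMAT_R4G4_UNORM_PACK8 stores red in the high nibble instead,
which is presumably why the mapping below keeps the same VkFormat and the
format is routed through TryTransformSwizzleIfNeeded in vk_texture_cache.cpp.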
--- src/video_core/renderer_opengl/maxwell_to_gl.h | 2 +- src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 2 +- src/video_core/renderer_vulkan/vk_texture_cache.cpp | 2 +- src/video_core/surface.h | 8 ++++---- src/video_core/texture_cache/format_lookup_table.cpp | 2 +- src/video_core/texture_cache/formatter.h | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index dfe7f26ca2..0044212365 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -87,7 +87,7 @@ constexpr std::array FORMAT_TAB {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM - {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R4G4_UNORM + {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // G4R4_UNORM {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index 6703b8e688..e7104d377c 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -184,7 +184,7 @@ struct FormatTuple { {VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM - {VK_FORMAT_R4G4_UNORM_PACK8}, // R4G4_UNORM + {VK_FORMAT_R4G4_UNORM_PACK8}, // G4R4_UNORM {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index caca79d79f..8d2728cd78 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp @@ -592,7 +592,7 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array BLOCK_WIDTH_TABLE = {{ 4, // BC3_SRGB 4, // BC7_SRGB 1, // A4B4G4R4_UNORM - 1, // R4G4_UNORM + 1, // G4R4_UNORM 4, // ASTC_2D_4X4_SRGB 8, // ASTC_2D_8X8_SRGB 8, // ASTC_2D_8X5_SRGB @@ -323,7 +323,7 @@ constexpr std::array BLOCK_HEIGHT_TABLE = {{ 4, // BC3_SRGB 4, // BC7_SRGB 1, // A4B4G4R4_UNORM - 1, // R4G4_UNORM + 1, // G4R4_UNORM 4, // ASTC_2D_4X4_SRGB 8, // ASTC_2D_8X8_SRGB 5, // ASTC_2D_8X5_SRGB @@ -428,7 +428,7 @@ constexpr std::array BITS_PER_BLOCK_TABLE = {{ 128, // BC3_SRGB 128, // BC7_UNORM 16, // A4B4G4R4_UNORM - 8, // R4G4_UNORM + 8, // G4R4_UNORM 128, // ASTC_2D_4X4_SRGB 128, // ASTC_2D_8X8_SRGB 128, // ASTC_2D_8X5_SRGB diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp index c71694d2ab..ad935d3860 100644 --- a/src/video_core/texture_cache/format_lookup_table.cpp +++ b/src/video_core/texture_cache/format_lookup_table.cpp @@ -63,7 +63,7 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red, case Hash(TextureFormat::A4B4G4R4, UNORM): return PixelFormat::A4B4G4R4_UNORM; case Hash(TextureFormat::G4R4, UNORM): - return PixelFormat::R4G4_UNORM; + return PixelFormat::G4R4_UNORM; case Hash(TextureFormat::A5B5G5R1, UNORM): return PixelFormat::A5B5G5R1_UNORM; case Hash(TextureFormat::R8, UNORM): diff --git 
a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h index 6881e4c901..acc8547159 100644 --- a/src/video_core/texture_cache/formatter.h +++ b/src/video_core/texture_cache/formatter.h @@ -153,8 +153,8 @@ struct fmt::formatter : fmt::formatter Date: Wed, 13 Apr 2022 21:02:55 +0200 Subject: [PATCH 49/68] General: Fix compilation for GCC --- src/common/address_space.h | 10 +++-- src/common/algorithm.h | 2 +- src/common/bit_field.h | 13 +++---- src/common/multi_level_page_table.cpp | 4 +- src/common/multi_level_page_table.inc | 2 +- src/core/hle/service/nvdrv/core/nvmap.cpp | 38 ++++++++++++------- src/core/hle/service/nvdrv/core/nvmap.h | 1 + .../service/nvdrv/devices/nvhost_as_gpu.cpp | 3 ++ .../hle/service/nvdrv/devices/nvhost_ctrl.cpp | 8 ++-- .../service/nvdrv/devices/nvhost_nvdec.cpp | 4 +- .../hle/service/nvdrv/devices/nvhost_vic.cpp | 4 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 2 +- src/core/hle/service/nvdrv/nvdrv.h | 1 + src/core/hle/service/vi/vi.cpp | 1 + src/shader_recompiler/ir_opt/texture_pass.cpp | 2 +- src/video_core/buffer_cache/buffer_cache.h | 3 +- 16 files changed, 56 insertions(+), 42 deletions(-) diff --git a/src/common/address_space.h b/src/common/address_space.h index fd2f32b7d3..8e13935aff 100644 --- a/src/common/address_space.h +++ b/src/common/address_space.h @@ -22,7 +22,8 @@ struct EmptyStruct {}; */ template -requires AddressSpaceValid class FlatAddressSpaceMap { +requires AddressSpaceValid +class FlatAddressSpaceMap { private: std::function unmapCallback{}; //!< Callback called when the mappings in an region have changed @@ -40,8 +41,8 @@ protected: Block() = default; - Block(VaType virt, PaType phys, ExtraBlockInfo extraInfo) - : virt(virt), phys(phys), extraInfo(extraInfo) {} + Block(VaType virt_, PaType phys_, ExtraBlockInfo extraInfo_) + : virt(virt_), phys(phys_), extraInfo(extraInfo_) {} constexpr bool Valid() { return virt != UnmappedVa; @@ -102,7 +103,8 @@ public: * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block */ template -requires AddressSpaceValid class FlatAllocator +requires AddressSpaceValid +class FlatAllocator : public FlatAddressSpaceMap { private: using Base = FlatAddressSpaceMap; diff --git a/src/common/algorithm.h b/src/common/algorithm.h index 055dca142d..c27c9241d9 100644 --- a/src/common/algorithm.h +++ b/src/common/algorithm.h @@ -27,7 +27,7 @@ template > template T FoldRight(T initial_value, Func&& func, Args&&... args) { T value{initial_value}; - const auto high_func = [&value, &func](T x) { value = func(value, x); }; + const auto high_func = [&value, &func](U x) { value = func(value, x); }; (std::invoke(high_func, std::forward(args)), ...); return value; } diff --git a/src/common/bit_field.h b/src/common/bit_field.h index 368b7b98c9..7e1df62b1c 100644 --- a/src/common/bit_field.h +++ b/src/common/bit_field.h @@ -127,14 +127,11 @@ public: } } - BitField(T val) { - Assign(val); - } - - BitField& operator=(T val) { - Assign(val); - return *this; - } + // This constructor and assignment operator might be considered ambiguous: + // Would they initialize the storage or just the bitfield? + // Hence, delete them. Use the Assign method to set bitfield values! 
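// (Editor's sketch of the ambiguity, not part of the patch; Flags is a
// made-up example type.) With the implicit constructor and operator= it was
// unclear at a call site whether the whole backing storage or only the field
// was being written:
//
//     union Flags {
//         u32 raw;
//         BitField<0, 1, u32> enabled;
//     };
//     Flags f{};
//     f.enabled = 1;        // compiled before this patch; wrote only bit 0
//     f.enabled.Assign(1);  // the explicit spelling the patch now requires
//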
+ BitField(T val) = delete; + BitField& operator=(T val) = delete; constexpr BitField() noexcept = default; diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp index aed04d0b59..3a7a75aa71 100644 --- a/src/common/multi_level_page_table.cpp +++ b/src/common/multi_level_page_table.cpp @@ -1,8 +1,6 @@ #include "common/multi_level_page_table.inc" namespace Common { -template class Common::MultiLevelPageTable; -template class Common::MultiLevelPageTable; -template class Common::MultiLevelPageTable; +template class Common::MultiLevelPageTable; template class Common::MultiLevelPageTable; } // namespace Common diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc index 9a68cad936..4def6dba85 100644 --- a/src/common/multi_level_page_table.inc +++ b/src/common/multi_level_page_table.inc @@ -30,7 +30,7 @@ MultiLevelPageTable::MultiLevelPageTable(std::size_t address_space_bit #ifdef _WIN32 void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)}; #else - void* base{mmap(nullptr, alloc_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; + void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; if (base == MAP_FAILED) { base = nullptr; diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 86d825af96..b02dbb9c98 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -13,7 +13,8 @@ using Core::Memory::YUZU_PAGESIZE; namespace Service::Nvidia::NvCore { -NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) { +NvMap::Handle::Handle(u64 size_, Id id_) + : size(size_), aligned_size(size), orig_size(size), id(id_) { flags.raw = 0; } @@ -21,19 +22,21 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) std::scoped_lock lock(mutex); // Handles cannot be allocated twice - if (allocated) + if (allocated) { return NvResult::AccessDenied; + } flags = pFlags; kind = pKind; align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign; // This flag is only applicable for handles with an address passed - if (pAddress) - flags.keep_uncached_after_free = 0; - else + if (pAddress) { + flags.keep_uncached_after_free.Assign(0); + } else { LOG_CRITICAL(Service_NVDRV, "Mapping nvmap handles without a CPU side address is unimplemented!"); + } size = Common::AlignUp(size, YUZU_PAGESIZE); aligned_size = Common::AlignUp(size, align); @@ -48,17 +51,19 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) NvResult NvMap::Handle::Duplicate(bool internal_session) { // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS) - if (!allocated) [[unlikely]] + if (!allocated) [[unlikely]] { return NvResult::BadValue; + } std::scoped_lock lock(mutex); // If we internally use FromId the duplication tracking of handles won't work accurately due to // us not implementing per-process handle refs. 
- if (internal_session) + if (internal_session) { internal_dupes++; - else + } else { dupes++; + } return NvResult::Success; } @@ -92,8 +97,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) { std::scoped_lock lock(handles_lock); auto it{handles.find(handle_description.id)}; - if (it != handles.end()) + if (it != handles.end()) { handles.erase(it); + } return true; } else { @@ -102,8 +108,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) { } NvResult NvMap::CreateHandle(u64 size, std::shared_ptr& result_out) { - if (!size) [[unlikely]] + if (!size) [[unlikely]] { return NvResult::BadValue; + } u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)}; auto handle_description{std::make_shared(size, id)}; @@ -133,8 +140,9 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) { u32 NvMap::PinHandle(NvMap::Handle::Id handle) { auto handle_description{GetHandle(handle)}; - if (!handle_description) [[unlikely]] + if (!handle_description) [[unlikely]] { return 0; + } std::scoped_lock lock(handle_description->mutex); if (!handle_description->pins) { @@ -183,8 +191,9 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) { void NvMap::UnpinHandle(Handle::Id handle) { auto handle_description{GetHandle(handle)}; - if (!handle_description) + if (!handle_description) { return; + } std::scoped_lock lock(handle_description->mutex); if (--handle_description->pins < 0) { @@ -226,12 +235,13 @@ std::optional NvMap::FreeHandle(Handle::Id handle, bool interna // Try to remove the shared ptr to the handle from the map, if nothing else is using the // handle then it will now be freed when `handle_description` goes out of scope - if (TryRemoveHandle(*handle_description)) + if (TryRemoveHandle(*handle_description)) { LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle); - else + } else { LOG_DEBUG(Service_NVDRV, "Tried to free nvmap handle: {} but didn't as it still has duplicates", handle); + } freeInfo = { .address = handle_description->address, diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 4f37dcf437..1082bb58df 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -5,6 +5,7 @@ #pragma once +#include #include #include #include diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index d95a883931..d1beefba6a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -188,6 +188,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector& input, std::vector< allocation_map[params.offset] = { .size = size, + .mappings{}, .page_size = params.page_size, .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None, .big_pages = params.page_size != VM::YUZU_PAGESIZE, @@ -474,11 +475,13 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) { VaRegion{ .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS, .page_size = VM::YUZU_PAGESIZE, + ._pad0_{}, .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart, }, VaRegion{ .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits, .page_size = vm.big_page_size, + ._pad0_{}, .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart, }, }; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index a84e4d4251..7fffb8e48c 100644 --- 
a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -204,12 +204,12 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector& input, std::vector event.wait_handle = host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() { - auto& event = events[slot]; - if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) == + auto& event_ = events[slot]; + if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) == EventState::Waiting) { - event.kevent->GetWritableEvent().Signal(); + event_.kevent->GetWritableEvent().Signal(); } - event.status.store(EventState::Signalled, std::memory_order_release); + event_.status.store(EventState::Signalled, std::memory_order_release); }); return NvResult::Timeout; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index 5e3820085a..fed5370394 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -12,8 +12,8 @@ namespace Service::Nvidia::Devices { u32 nvhost_nvdec::next_id{}; -nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core) - : nvhost_nvdec_common{system_, core, NvCore::ChannelType::NvDec} {} +nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_) + : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {} nvhost_nvdec::~nvhost_nvdec() = default; NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index 490e399f4c..2e4ff988cd 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -11,8 +11,8 @@ namespace Service::Nvidia::Devices { u32 nvhost_vic::next_id{}; -nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core) - : nvhost_nvdec_common{system_, core, NvCore::ChannelType::VIC} {} +nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_) + : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {} nvhost_vic::~nvhost_vic() = default; diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 992c117f1a..f84fc8c379 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -269,7 +269,7 @@ NvResult nvmap::IocFree(const std::vector& input, std::vector& output) { params.address = freeInfo->address; params.size = static_cast(freeInfo->size); params.flags.raw = 0; - params.flags.map_uncached = freeInfo->was_uncached; + params.flags.map_uncached.Assign(freeInfo->was_uncached); } else { // This is possible when there's internel dups or other duplicates. 
} diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index 31c45236e6..b262547539 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -6,6 +6,7 @@ #pragma once #include +#include #include #include #include diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index f083811ec9..9c917cacfe 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp @@ -58,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size"); class NativeWindow final { public: constexpr explicit NativeWindow(u32 id_) : id{id_} {} + constexpr explicit NativeWindow(const NativeWindow& other) = default; private: const u32 magic = 2; diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp index 0726d4d218..e8be58357b 100644 --- a/src/shader_recompiler/ir_opt/texture_pass.cpp +++ b/src/shader_recompiler/ir_opt/texture_pass.cpp @@ -269,7 +269,7 @@ std::optional TryGetConstBuffer(const IR::Inst* inst, Environme } std::optional lhs{Track(op1, env)}; if (lhs) { - lhs->shift_left = std::countr_zero(op2.U32()); + lhs->shift_left = static_cast(std::countr_zero(op2.U32())); } return lhs; break; diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 2e616cee4d..8e26b3f953 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -1323,7 +1323,8 @@ void BufferCache
<P>
::UpdateVertexBuffer(u32 index) { return; } if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) { - address_size = gpu_memory->MaxContinousRange(gpu_addr_begin, address_size); + address_size = + static_cast(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size)); } const u32 size = address_size; // TODO: Analyze stride and number of vertices vertex_buffers[index] = Binding{ From 8fd1d769fe1c079d3a3f1ddc531233eabf0dd7ba Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 15 Apr 2022 13:43:27 +0200 Subject: [PATCH 50/68] ImageBase: Basic fixes. --- src/video_core/texture_cache/image_base.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp index f61e09ac72..91512022fe 100644 --- a/src/video_core/texture_cache/image_base.cpp +++ b/src/video_core/texture_cache/image_base.cpp @@ -7,6 +7,7 @@ #include #include "common/common_types.h" +#include "common/div_ceil.h" #include "video_core/surface.h" #include "video_core/texture_cache/formatter.h" #include "video_core/texture_cache/image_base.h" @@ -182,10 +183,6 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i }; const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1; const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1; - if (is_lhs_compressed && is_rhs_compressed) { - LOG_ERROR(HW_GPU, "Compressed to compressed image aliasing is not implemented"); - return; - } const s32 lhs_mips = lhs.info.resources.levels; const s32 rhs_mips = rhs.info.resources.levels; const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips); @@ -199,12 +196,12 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level); Extent3D rhs_size = MipSize(rhs.info.size, mip_level); if (is_lhs_compressed) { - lhs_size.width /= lhs_block.width; - lhs_size.height /= lhs_block.height; + lhs_size.width = Common::DivCeil(lhs_size.width, lhs_block.width); + lhs_size.height = Common::DivCeil(lhs_size.height, lhs_block.height); } if (is_rhs_compressed) { - rhs_size.width /= rhs_block.width; - rhs_size.height /= rhs_block.height; + rhs_size.width = Common::DivCeil(rhs_size.width, rhs_block.width); + rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height); } const Extent3D copy_size{ .width = std::min(lhs_size.width, rhs_size.width), From ada09778d97d39d83353ca54d0d6c9abd5eefc60 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 15 Apr 2022 12:29:49 +0200 Subject: [PATCH 51/68] Vulkan Texture Cache: Limit render area to the max width/height of the targets. 
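[Editor's note, a condensed sketch of the clamp added below; the helper is
hypothetical and only mirrors the shape of the diff.] The framebuffer now
derives the largest extent actually backed by its (possibly rescaled)
attachments and shrinks the requested render area so it never exceeds them:

    #include <algorithm>
    #include <cstdint>
    #include <span>
    #include <vulkan/vulkan_core.h>
    using u32 = std::uint32_t; // matches common/common_types.h

    // Clamp the render area to the largest extent covered by the attachments.
    VkExtent2D ClampRenderArea(VkExtent2D requested,
                               std::span<const VkExtent2D> attachment_sizes) {
        u32 width = 0;
        u32 height = 0;
        for (const VkExtent2D& size : attachment_sizes) {
            width = std::max(width, size.width);
            height = std::max(height, size.height);
        }
        return {
            .width = std::min(requested.width, width),
            .height = std::min(requested.height, height),
        };
    }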
--- .../renderer_vulkan/vk_texture_cache.cpp | 30 ++++++++++++++----- .../renderer_vulkan/vk_texture_cache.h | 5 ++-- src/video_core/texture_cache/render_targets.h | 1 + src/video_core/texture_cache/texture_cache.h | 2 ++ 4 files changed, 29 insertions(+), 9 deletions(-) diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index 8d2728cd78..305ad8aeee 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp @@ -1474,13 +1474,14 @@ bool Image::BlitScaleHelper(bool scale_up) { }; const VkExtent2D extent{ .width = std::max(scaled_width, info.size.width), - .height = std::max(scaled_height, info.size.width), + .height = std::max(scaled_height, info.size.height), }; auto* view_ptr = blit_view.get(); if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { if (!blit_framebuffer) { - blit_framebuffer = std::make_unique(*runtime, view_ptr, nullptr, extent); + blit_framebuffer = + std::make_unique(*runtime, view_ptr, nullptr, extent, scale_up); } const auto color_view = blit_view->Handle(Shader::TextureType::Color2D); @@ -1488,7 +1489,8 @@ bool Image::BlitScaleHelper(bool scale_up) { src_region, operation, BLIT_OPERATION); } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { if (!blit_framebuffer) { - blit_framebuffer = std::make_unique(*runtime, nullptr, view_ptr, extent); + blit_framebuffer = + std::make_unique(*runtime, nullptr, view_ptr, extent, scale_up); } runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(), blit_view->StencilView(), dst_region, @@ -1756,34 +1758,42 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span color_buffers{color_buffer}; - CreateFramebuffer(runtime, color_buffers, depth_buffer); + CreateFramebuffer(runtime, color_buffers, depth_buffer, is_rescaled); } Framebuffer::~Framebuffer() = default; void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, std::span color_buffers, - ImageView* depth_buffer) { + ImageView* depth_buffer, bool is_rescaled) { std::vector attachments; RenderPassKey renderpass_key{}; s32 num_layers = 1; + const auto& resolution = runtime.resolution; + + u32 width = 0; + u32 height = 0; for (size_t index = 0; index < NUM_RT; ++index) { const ImageView* const color_buffer = color_buffers[index]; if (!color_buffer) { renderpass_key.color_formats[index] = PixelFormat::Invalid; continue; } + width = std::max(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width) + : color_buffer->size.width); + height = std::max(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height) + : color_buffer->size.height); attachments.push_back(color_buffer->RenderTarget()); renderpass_key.color_formats[index] = color_buffer->format; num_layers = std::max(num_layers, color_buffer->range.extent.layers); @@ -1794,6 +1804,10 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, } const size_t num_colors = attachments.size(); if (depth_buffer) { + width = std::max(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width) + : depth_buffer->size.width); + height = std::max(height, is_rescaled ? 
resolution.ScaleUp(depth_buffer->size.height) + : depth_buffer->size.height); attachments.push_back(depth_buffer->RenderTarget()); renderpass_key.depth_format = depth_buffer->format; num_layers = std::max(num_layers, depth_buffer->range.extent.layers); @@ -1810,6 +1824,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, renderpass_key.samples = samples; renderpass = runtime.render_pass_cache.Get(renderpass_key); + render_area.width = std::min(render_area.width, width); + render_area.height = std::min(render_area.height, height); num_color_buffers = static_cast(num_colors); framebuffer = runtime.device.GetLogical().CreateFramebuffer({ diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 69f06ee7b7..0b7ac0df16 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h @@ -268,7 +268,7 @@ public: ImageView* depth_buffer, const VideoCommon::RenderTargets& key); explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, - ImageView* depth_buffer, VkExtent2D extent); + ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled); ~Framebuffer(); @@ -279,7 +279,8 @@ public: Framebuffer& operator=(Framebuffer&&) = default; void CreateFramebuffer(TextureCacheRuntime& runtime, - std::span color_buffers, ImageView* depth_buffer); + std::span color_buffers, ImageView* depth_buffer, + bool is_rescaled = false); [[nodiscard]] VkFramebuffer Handle() const noexcept { return *framebuffer; diff --git a/src/video_core/texture_cache/render_targets.h b/src/video_core/texture_cache/render_targets.h index da8ffe9ece..1efbd65079 100644 --- a/src/video_core/texture_cache/render_targets.h +++ b/src/video_core/texture_cache/render_targets.h @@ -26,6 +26,7 @@ struct RenderTargets { ImageViewId depth_buffer_id{}; std::array draw_buffers{}; Extent2D size{}; + bool is_rescaled{}; }; } // namespace VideoCommon diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 66de41f047..9a835cefce 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -357,6 +357,7 @@ void TextureCache
<P>
::UpdateRenderTargets(bool is_clear) { (maxwell3d->regs.render_area.width * up_scale) >> down_shift, (maxwell3d->regs.render_area.height * up_scale) >> down_shift, }; + render_targets.is_rescaled = is_rescaling; flags[Dirty::DepthBiasGlobal] = true; } @@ -1962,6 +1963,7 @@ std::pair TextureCache
<P>
::RenderTargetFromImage( .color_buffer_ids = {color_view_id}, .depth_buffer_id = depth_view_id, .size = {extent.width >> samples_x, extent.height >> samples_y}, + .is_rescaled = is_rescaled, }); return {framebuffer_id, view_id}; } From 8d774e7415fac1153d8944baa2cc250cc4831107 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 18 Apr 2022 21:07:21 +0200 Subject: [PATCH 52/68] NvDec: Fix regressions. --- .../hle/service/nvdrv/core/syncpoint_manager.cpp | 6 ++++++ .../hle/service/nvdrv/core/syncpoint_manager.h | 5 +++++ .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 1 + .../nvdrv/devices/nvhost_nvdec_common.cpp | 16 ++++++++++++---- .../service/nvdrv/devices/nvhost_nvdec_common.h | 3 +++ src/core/hle/service/nvdrv/nvdrv.cpp | 5 ++++- 6 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index b34481b486..fc4ff3c2fa 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -55,6 +55,12 @@ u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) { return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged); } +void SyncpointManager::FreeSyncpoint(u32 id) { + std::lock_guard lock(reservation_lock); + ASSERT(syncpoints.at(id).reserved); + syncpoints.at(id).reserved = false; +} + bool SyncpointManager::IsSyncpointAllocated(u32 id) { return (id <= SyncpointCount) && syncpoints[id].reserved; } diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h index bfc8ba84b0..da456f2062 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h @@ -84,6 +84,11 @@ public: */ u32 UpdateMin(u32 id); + /** + * @brief Frees the usage of a syncpoint. 
+ */ + void FreeSyncpoint(u32 id); + /** * @return A fence that will be signalled once this syncpoint hits its maximum value */ diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index c2cc09993f..908e60191d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -43,6 +43,7 @@ nvhost_gpu::~nvhost_gpu() { events_interface.FreeEvent(sm_exception_breakpoint_int_report_event); events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event); events_interface.FreeEvent(error_notifier_event); + syncpoint_manager.FreeSyncpoint(channel_syncpoint); } NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 008092dbb3..fe83423d53 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -51,8 +51,12 @@ std::unordered_map nvhost_nvdec_common::fd_to_id{}; nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_, NvCore::ChannelType channel_type_) : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()}, - nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {} -nvhost_nvdec_common::~nvhost_nvdec_common() = default; + nvmap{core.GetNvMapFile()}, channel_type{channel_type_} { + channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false); +} +nvhost_nvdec_common::~nvhost_nvdec_common() { + syncpoint_manager.FreeSyncpoint(channel_syncpoint); +} NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector& input) { IoctlSetNvmapFD params{}; @@ -117,8 +121,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector& input, std::ve std::memcpy(¶ms, input.data(), sizeof(IoctlGetSyncpoint)); LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param); - const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast(channel_type)]}; - params.value = id; + // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast(channel_type)]}; + params.value = channel_syncpoint; std::memcpy(output.data(), ¶ms, sizeof(IoctlGetSyncpoint)); return NvResult::Success; @@ -176,4 +180,8 @@ Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) { return nullptr; } +void nvhost_nvdec_common::Reset() { + fd_to_id.clear(); +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 51bb7c2cb6..4046b0e13f 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -24,6 +24,8 @@ public: NvCore::ChannelType channel_type); ~nvhost_nvdec_common() override; + static void Reset(); + protected: struct IoctlSetNvmapFD { s32_le nvmap_fd{}; @@ -117,6 +119,7 @@ protected: Kernel::KEvent* QueryEvent(u32 event_id) override; static std::unordered_map fd_to_id; + u32 channel_syncpoint; s32_le nvmap_fd{}; u32_le submit_timeout{}; NvCore::Container& core; diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 1be51e4018..20bf24ec8c 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -18,6 +18,7 @@ #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" #include 
"core/hle/service/nvdrv/devices/nvhost_gpu.h" #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" +#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h" #include "core/hle/service/nvdrv/devices/nvhost_vic.h" #include "core/hle/service/nvdrv/devices/nvmap.h" @@ -101,7 +102,9 @@ Module::Module(Core::System& system) }; } -Module::~Module() = default; +Module::~Module() { + Devices::nvhost_nvdec_common::Reset(); +} NvResult Module::VerifyFD(DeviceFD fd) const { if (fd < 0) { From cdce7f781bafabc364e61fa5cabf938349c9b82e Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 18 Apr 2022 23:21:02 +0200 Subject: [PATCH 53/68] Vulkan Swapchain: Overall improvements. --- .../renderer_vulkan/renderer_vulkan.cpp | 2 +- src/video_core/renderer_vulkan/vk_swapchain.cpp | 15 ++++++++++++--- src/yuzu/main.cpp | 6 ++++-- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index d12669c9d4..51d9e8f68c 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -147,7 +147,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { const auto recreate_swapchain = [&] { if (!has_been_recreated) { has_been_recreated = true; - scheduler.WaitWorker(); + scheduler.Finish(); } const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout(); swapchain.Create(layout.width, layout.height, is_srgb); diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp index a69ae7725e..706d9ba746 100644 --- a/src/video_core/renderer_vulkan/vk_swapchain.cpp +++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp @@ -36,7 +36,8 @@ VkPresentModeKHR ChooseSwapPresentMode(vk::Span modes) { // Mailbox (triple buffering) doesn't lock the application like fifo (vsync), // prefer it if vsync option is not selected const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR); - if (found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { + if (Settings::values.fullscreen_mode.GetValue() == Settings::FullscreenMode::Borderless && + found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { return VK_PRESENT_MODE_MAILBOX_KHR; } if (!Settings::values.use_speed_limit.GetValue()) { @@ -156,8 +157,16 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u3 present_mode = ChooseSwapPresentMode(present_modes); u32 requested_image_count{capabilities.minImageCount + 1}; - if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { - requested_image_count = capabilities.maxImageCount; + // Ensure Tripple buffering if possible. 
+ if (capabilities.maxImageCount > 0) { + if (requested_image_count > capabilities.maxImageCount) { + requested_image_count = capabilities.maxImageCount; + } else { + requested_image_count = + std::max(requested_image_count, std::min(3U, capabilities.maxImageCount)); + } + } else { + requested_image_count = std::max(requested_image_count, 3U); } VkSwapchainCreateInfoKHR swapchain_ci{ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index c63ce3a308..4146ebc2c1 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #ifdef __APPLE__ @@ -3451,9 +3452,10 @@ void GMainWindow::UpdateStatusBar() { } if (!Settings::values.use_speed_limit) { game_fps_label->setText( - tr("Game: %1 FPS (Unlocked)").arg(results.average_game_fps, 0, 'f', 0)); + tr("Game: %1 FPS (Unlocked)").arg(std::round(results.average_game_fps), 0, 'f', 0)); } else { - game_fps_label->setText(tr("Game: %1 FPS").arg(results.average_game_fps, 0, 'f', 0)); + game_fps_label->setText( + tr("Game: %1 FPS").arg(std::round(results.average_game_fps), 0, 'f', 0)); } emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2)); From 1a9b71b1c6a5b6fb2a41fc485a986e9c505b2856 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 16 Jun 2022 02:00:29 +0200 Subject: [PATCH 54/68] Common: Fix variable shadowing. --- src/common/address_space.inc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/common/address_space.inc b/src/common/address_space.inc index 907c55d88e..e1241d0992 100644 --- a/src/common/address_space.inc +++ b/src/common/address_space.inc @@ -30,9 +30,9 @@ FlatAllocator namespace Common { -MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit, - std::function unmapCallback) - : unmapCallback(std::move(unmapCallback)), vaLimit(vaLimit) { +MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit_, + std::function unmapCallback_) + : unmapCallback(std::move(unmapCallback_)), vaLimit(vaLimit_) { if (vaLimit > VaMaximum) UNREACHABLE_MSG("Invalid VA limit!"); } @@ -261,8 +261,8 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { unmapCallback(virt, size); } -ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart, VaType vaLimit) - : Base(vaLimit), currentLinearAllocEnd(vaStart), vaStart(vaStart) {} +ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart_, VaType vaLimit) + : Base(vaLimit), currentLinearAllocEnd(vaStart_), vaStart(vaStart_) {} ALLOC_MEMBER(VaType)::Allocate(VaType size) { std::scoped_lock lock(this->blockMutex); From fe24c65153349d3a759a2eef02ec703851a96847 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 16 Jun 2022 02:12:21 +0200 Subject: [PATCH 55/68] General: Fix clang format. 
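The reflowed constructors in src/common/address_space.inc also illustrate the convention the previous patch enforces; a reduced sketch, assuming plain integer members:

    #include <cstdint>

    class FlatAllocator {
    public:
        // Constructor parameters carry a trailing underscore (vaStart_,
        // vaLimit_) so they cannot shadow the members they initialize.
        FlatAllocator(uint64_t vaStart_, uint64_t vaLimit_)
            : vaStart{vaStart_}, vaLimit{vaLimit_} {}

    private:
        uint64_t vaStart;
        uint64_t vaLimit;
    };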
--- src/common/address_space.inc | 4 ++-- src/video_core/control/channel_state_cache.h | 1 + src/video_core/host1x/codecs/vp9.cpp | 5 +++-- src/video_core/host1x/syncpoint_manager.h | 4 ++-- src/video_core/host1x/vic.cpp | 4 ++-- src/video_core/memory_manager.cpp | 1 - src/video_core/renderer_vulkan/renderer_vulkan.cpp | 13 ++++--------- 7 files changed, 14 insertions(+), 18 deletions(-) diff --git a/src/common/address_space.inc b/src/common/address_space.inc index e1241d0992..7cfbb150b0 100644 --- a/src/common/address_space.inc +++ b/src/common/address_space.inc @@ -261,8 +261,8 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { unmapCallback(virt, size); } -ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart_, VaType vaLimit) - : Base(vaLimit), currentLinearAllocEnd(vaStart_), vaStart(vaStart_) {} +ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart_, VaType vaLimit_) + : Base(vaLimit_), currentLinearAllocEnd(vaStart_), vaStart(vaStart_) {} ALLOC_MEMBER(VaType)::Allocate(VaType size) { std::scoped_lock lock(this->blockMutex); diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index dbf833de74..102947adb2 100644 --- a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -9,6 +9,7 @@ #include #include #include +#include #include "common/common_types.h" diff --git a/src/video_core/host1x/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp index 667aadc6a9..cf40c90121 100644 --- a/src/video_core/host1x/codecs/vp9.cpp +++ b/src/video_core/host1x/codecs/vp9.cpp @@ -382,8 +382,9 @@ Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters // gpu.SyncGuestHost(); epic, why? current_frame.info = GetVp9PictureInfo(state); current_frame.bit_stream.resize(current_frame.info.bitstream_size); - host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), - current_frame.info.bitstream_size); + host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, + current_frame.bit_stream.data(), + current_frame.info.bitstream_size); } if (!next_frame.bit_stream.empty()) { Vp9FrameContainer temp{ diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h index 0ecc040ab8..440b1508a3 100644 --- a/src/video_core/host1x/syncpoint_manager.h +++ b/src/video_core/host1x/syncpoint_manager.h @@ -49,9 +49,9 @@ public: expected_value, func); } - void DeregisterGuestAction(u32 syncpoint_id,ActionHandle& handle); + void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle); - void DeregisterHostAction(u32 syncpoint_id,ActionHandle& handle); + void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle); void IncrementGuest(u32 syncpoint_id); diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp index b9ac415295..ac0b7d20e0 100644 --- a/src/video_core/host1x/vic.cpp +++ b/src/video_core/host1x/vic.cpp @@ -157,8 +157,8 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) { const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0); luma_buffer.resize(size); std::span frame_buff(converted_frame_buf_addr, 4 * width * height); - Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, - 0, 0, width, height, block_height, 0, width * 4); + Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height, + block_height, 0, width * 4); host1x.MemoryManager().WriteBlock(output_surface_luma_address, 
luma_buffer.data(), size); } else { diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 3cb2d9224b..cca401c74b 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -619,7 +619,6 @@ bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const } bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const { - std::optional old_page_addr{}; bool result{true}; auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, [[maybe_unused]] std::size_t copy_amount) { diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 51d9e8f68c..d8131232ac 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -95,19 +95,14 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, std::unique_ptr context_) try - : RendererBase(emu_window, std::move(context_)), - telemetry_session(telemetry_session_), - cpu_memory(cpu_memory_), - gpu(gpu_), - library(OpenLibrary()), + : RendererBase(emu_window, std::move(context_)), telemetry_session(telemetry_session_), + cpu_memory(cpu_memory_), gpu(gpu_), library(OpenLibrary()), instance(CreateInstance(library, dld, VK_API_VERSION_1_1, render_window.GetWindowInfo().type, true, Settings::values.renderer_debug.GetValue())), debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr), surface(CreateSurface(instance, render_window)), - device(CreateDevice(instance, dld, *surface)), - memory_allocator(device, false), - state_tracker(), - scheduler(device, state_tracker), + device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), + state_tracker(), scheduler(device, state_tracker), swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, render_window.GetFramebufferLayout().height, false), blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, From 9982cff98b4db38565715cc515ea496b6195725b Mon Sep 17 00:00:00 2001 From: VonChenPlus Date: Mon, 27 Jun 2022 12:39:57 +0800 Subject: [PATCH 56/68] Core: Fix get nvmap object random crash --- src/core/hle/service/nvdrv/core/nvmap.cpp | 15 ++++++++++++++- src/core/hle/service/nvdrv/core/nvmap.h | 5 +++++ src/core/hle/service/nvdrv/devices/nvmap.cpp | 12 ------------ src/core/hle/service/nvdrv/devices/nvmap.h | 5 ----- src/core/hle/service/nvdrv/nvdrv.h | 3 +-- .../service/nvflinger/buffer_queue_consumer.cpp | 9 +++++++-- .../service/nvflinger/buffer_queue_consumer.h | 8 +++++++- .../service/nvflinger/buffer_queue_producer.cpp | 10 +++++++--- .../service/nvflinger/buffer_queue_producer.h | 9 ++++++++- src/core/hle/service/nvflinger/nvflinger.cpp | 2 +- src/core/hle/service/vi/display/vi_display.cpp | 16 ++++++++++------ src/core/hle/service/vi/display/vi_display.h | 7 ++++++- 12 files changed, 66 insertions(+), 35 deletions(-) diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index b02dbb9c98..dd30e156eb 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -207,6 +207,19 @@ void NvMap::UnpinHandle(Handle::Id handle) { } } +void NvMap::DuplicateHandle(Handle::Id handle) { + auto handle_description{GetHandle(handle)}; + if (!handle_description) { 
+ LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); + return; + } + + auto result = handle_description->Duplicate(false); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!"); + } +} + std::optional NvMap::FreeHandle(Handle::Id handle, bool internal_session) { std::weak_ptr hWeak{GetHandle(handle)}; FreeInfo freeInfo; @@ -254,7 +267,7 @@ std::optional NvMap::FreeHandle(Handle::Id handle, bool interna // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed if (!hWeak.expired()) { - LOG_ERROR(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle); + LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle); freeInfo.address = 0; } diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 1082bb58df..b6613a521e 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -162,6 +162,11 @@ public: */ void UnpinHandle(Handle::Id handle); + /** + * @brief Tries to duplicate a handle + */ + void DuplicateHandle(Handle::Id handle); + /** * @brief Tries to free a handle and remove a single dupe * @note If a handle has no dupes left and has no other users a FreeInfo struct will be returned diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index f84fc8c379..ddf273b5ec 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -69,18 +69,6 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, const std::vector& input, void nvmap::OnOpen(DeviceFD fd) {} void nvmap::OnClose(DeviceFD fd) {} -VAddr nvmap::GetObjectAddress(u32 handle) const { - auto obj = file.GetHandle(handle); - if (obj) { - return obj->address; - } - return 0; -} - -std::shared_ptr nvmap::GetObject(u32 handle) const { - return file.GetHandle(handle); -} - NvResult nvmap::IocCreate(const std::vector& input, std::vector& output) { IocCreateParams params; std::memcpy(¶ms, input.data(), sizeof(params)); diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index c22eb57a40..52e1d7cff9 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h @@ -36,11 +36,6 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; - /// Returns the allocated address of an nvmap object given its handle. - VAddr GetObjectAddress(u32 handle) const; - - std::shared_ptr GetObject(u32 handle) const; - enum class HandleParameterType : u32_le { Size = 1, Alignment = 2, diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index b262547539..22836529d0 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -96,6 +96,7 @@ public: private: friend class EventInterface; + friend class Service::NVFlinger::NVFlinger; /// Id to use for the next open file descriptor. 
DeviceFD next_fd = 1; @@ -111,8 +112,6 @@ private: /// Manages syncpoints on the host NvCore::Container container; - void CreateEvent(u32 event_id); - void FreeEvent(u32 event_id); std::unordered_map> builders; }; diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp index 4b3d5efd6a..a0330ab4aa 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp @@ -5,15 +5,18 @@ // https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp #include "common/logging/log.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvflinger/buffer_item.h" #include "core/hle/service/nvflinger/buffer_queue_consumer.h" #include "core/hle/service/nvflinger/buffer_queue_core.h" #include "core/hle/service/nvflinger/producer_listener.h" +#include "core/hle/service/nvflinger/ui/graphic_buffer.h" namespace Service::android { -BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr core_) - : core{std::move(core_)}, slots{core->slots} {} +BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr core_, + Service::Nvidia::NvCore::NvMap& nvmap_) + : core{std::move(core_)}, slots{core->slots}, nvmap(nvmap_) {} BufferQueueConsumer::~BufferQueueConsumer() = default; @@ -133,6 +136,8 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc slots[slot].buffer_state = BufferState::Free; + nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), false); + listener = core->connected_producer_listener; LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot); diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.h b/src/core/hle/service/nvflinger/buffer_queue_consumer.h index b598c314ff..4ec06ca131 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_consumer.h +++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.h @@ -13,6 +13,10 @@ #include "core/hle/service/nvflinger/buffer_queue_defs.h" #include "core/hle/service/nvflinger/status.h" +namespace Service::Nvidia::NvCore { +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::android { class BufferItem; @@ -21,7 +25,8 @@ class IConsumerListener; class BufferQueueConsumer final { public: - explicit BufferQueueConsumer(std::shared_ptr core_); + explicit BufferQueueConsumer(std::shared_ptr core_, + Service::Nvidia::NvCore::NvMap& nvmap_); ~BufferQueueConsumer(); Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present); @@ -32,6 +37,7 @@ public: private: std::shared_ptr core; BufferQueueDefs::SlotsType& slots; + Service::Nvidia::NvCore::NvMap& nvmap; }; } // namespace Service::android diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp index 337431488e..a4e46964ce 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp @@ -14,7 +14,7 @@ #include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/kernel.h" #include "core/hle/service/kernel_helpers.h" -#include "core/hle/service/nvdrv/nvdrv.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvflinger/buffer_queue_core.h" #include "core/hle/service/nvflinger/buffer_queue_producer.h" #include "core/hle/service/nvflinger/consumer_listener.h" @@ -26,8 +26,10 @@ namespace Service::android { 
BufferQueueProducer::BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, - std::shared_ptr buffer_queue_core_) - : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots) { + std::shared_ptr buffer_queue_core_, + Service::Nvidia::NvCore::NvMap& nvmap_) + : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots), + nvmap(nvmap_) { buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent"); } @@ -530,6 +532,8 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, item.is_droppable = core->dequeue_buffer_cannot_block || async; item.swap_interval = swap_interval; + nvmap.DuplicateHandle(item.graphic_buffer->BufferId()); + sticky_transform = sticky_transform_; if (core->queue.empty()) { diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h index 42d4722dc3..0ba03a568e 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.h +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h @@ -31,6 +31,10 @@ namespace Service::KernelHelpers { class ServiceContext; } // namespace Service::KernelHelpers +namespace Service::Nvidia::NvCore { +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::android { class BufferQueueCore; @@ -39,7 +43,8 @@ class IProducerListener; class BufferQueueProducer final : public IBinder { public: explicit BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, - std::shared_ptr buffer_queue_core_); + std::shared_ptr buffer_queue_core_, + Service::Nvidia::NvCore::NvMap& nvmap_); ~BufferQueueProducer(); void Transact(Kernel::HLERequestContext& ctx, android::TransactionId code, u32 flags) override; @@ -78,6 +83,8 @@ private: s32 next_callback_ticket{}; s32 current_callback_ticket{}; std::condition_variable_any callback_condition; + + Service::Nvidia::NvCore::NvMap& nvmap; }; } // namespace Service::android diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 4658f1e8b8..aa14d2cbcf 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -149,7 +149,7 @@ std::optional NVFlinger::CreateLayer(u64 display_id) { void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { const auto buffer_id = next_buffer_queue_id++; - display.CreateLayer(layer_id, buffer_id); + display.CreateLayer(layer_id, buffer_id, nvdrv->container); } void NVFlinger::CloseLayer(u64 layer_id) { diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp index aa49aa7752..288aafaaf6 100644 --- a/src/core/hle/service/vi/display/vi_display.cpp +++ b/src/core/hle/service/vi/display/vi_display.cpp @@ -12,6 +12,7 @@ #include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_writable_event.h" #include "core/hle/service/kernel_helpers.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvflinger/buffer_item_consumer.h" #include "core/hle/service/nvflinger/buffer_queue_consumer.h" #include "core/hle/service/nvflinger/buffer_queue_core.h" @@ -29,11 +30,13 @@ struct BufferQueue { std::unique_ptr consumer; }; -static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context) { +static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context, + Service::Nvidia::NvCore::NvMap& nvmap) { auto buffer_queue_core = 
std::make_shared(); - return {buffer_queue_core, - std::make_unique(service_context, buffer_queue_core), - std::make_unique(buffer_queue_core)}; + return { + buffer_queue_core, + std::make_unique(service_context, buffer_queue_core, nvmap), + std::make_unique(buffer_queue_core, nvmap)}; } Display::Display(u64 id, std::string name_, @@ -74,10 +77,11 @@ void Display::SignalVSyncEvent() { vsync_event->GetWritableEvent().Signal(); } -void Display::CreateLayer(u64 layer_id, u32 binder_id) { +void Display::CreateLayer(u64 layer_id, u32 binder_id, + Service::Nvidia::NvCore::Container& nv_core) { ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment"); - auto [core, producer, consumer] = CreateBufferQueue(service_context); + auto [core, producer, consumer] = CreateBufferQueue(service_context, nv_core.GetNvMapFile()); auto buffer_item_consumer = std::make_shared(std::move(consumer)); buffer_item_consumer->Connect(false); diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index 8dbb0ef80d..33d5f398cd 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ b/src/core/hle/service/vi/display/vi_display.h @@ -27,6 +27,11 @@ namespace Service::NVFlinger { class HosBinderDriverServer; } +namespace Service::Nvidia::NvCore { +class Container; +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::VI { class Layer; @@ -93,7 +98,7 @@ public: /// @param layer_id The ID to assign to the created layer. /// @param binder_id The ID assigned to the buffer queue. /// - void CreateLayer(u64 layer_id, u32 binder_id); + void CreateLayer(u64 layer_id, u32 binder_id, Service::Nvidia::NvCore::Container& core); /// Closes and removes a layer from this display with the given ID. /// From 8a372035db35465b8241d1e13eeb979e8682bb3f Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 27 Jun 2022 21:45:33 +0200 Subject: [PATCH 57/68] Nvflinger: correct duplication. 
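The duplicate/free pairing this corrects, sketched with stand-in types (BufferId, DuplicateHandle and FreeHandle match the diffs; everything else is illustrative): the producer takes an internal nvmap reference when a buffer is queued, and the consumer drops the matching reference when the buffer is released, so the handle stays alive while the frame is in flight:

    #include <cstdint>

    struct NvMap {
        void DuplicateHandle(uint32_t id, bool internal_session);
        void FreeHandle(uint32_t id, bool internal_session);
    };

    struct GraphicBuffer {
        uint32_t BufferId() const;
    };

    // Producer side: reference the backing nvmap handle for the queued frame.
    void OnQueueBuffer(NvMap& nvmap, const GraphicBuffer& buffer) {
        nvmap.DuplicateHandle(buffer.BufferId(), /*internal_session=*/true);
    }

    // Consumer side: release that reference once the frame is consumed.
    void OnReleaseBuffer(NvMap& nvmap, const GraphicBuffer& buffer) {
        nvmap.FreeHandle(buffer.BufferId(), /*internal_session=*/true);
    }

Both calls pass internal_session = true because the reference belongs to the emulator's own bookkeeping rather than to a guest session, which is exactly what the diffs below change.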
--- src/core/hle/service/nvdrv/core/nvmap.cpp | 4 ++-- src/core/hle/service/nvdrv/core/nvmap.h | 2 +- src/core/hle/service/nvflinger/buffer_queue_consumer.cpp | 2 +- src/core/hle/service/nvflinger/buffer_queue_producer.cpp | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index dd30e156eb..f811b66a0d 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -207,14 +207,14 @@ void NvMap::UnpinHandle(Handle::Id handle) { } } -void NvMap::DuplicateHandle(Handle::Id handle) { +void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) { auto handle_description{GetHandle(handle)}; if (!handle_description) { LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); return; } - auto result = handle_description->Duplicate(false); + auto result = handle_description->Duplicate(internal_session); if (result != NvResult::Success) { LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!"); } diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index b6613a521e..ef2df3ad75 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -165,7 +165,7 @@ public: /** * @brief Tries to duplicate a handle */ - void DuplicateHandle(Handle::Id handle); + void DuplicateHandle(Handle::Id handle, bool internal_session = false); /** * @brief Tries to free a handle and remove a single dupe diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp index a0330ab4aa..1ce67c771c 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp @@ -136,7 +136,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc slots[slot].buffer_state = BufferState::Free; - nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), false); + nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), true); listener = core->connected_producer_listener; diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp index a4e46964ce..d4ab23a108 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp @@ -532,7 +532,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, item.is_droppable = core->dequeue_buffer_cannot_block || async; item.swap_interval = swap_interval; - nvmap.DuplicateHandle(item.graphic_buffer->BufferId()); + nvmap.DuplicateHandle(item.graphic_buffer->BufferId(), true); sticky_transform = sticky_transform_; From c2b7de66b325b52cebb7e26948db0d5b0eefee25 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 27 Jun 2022 21:23:06 +0200 Subject: [PATCH 58/68] Address Feedback from bylaws. 
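One fix below is easy to miss: NvMap::Handle::Duplicate previously tested the allocated flag before acquiring the handle's mutex. A reduced sketch of the corrected ordering, with the per-session dupe accounting elided:

    #include <mutex>

    struct Handle {
        std::mutex mutex;
        bool allocated = false;

        bool Duplicate() {
            // Lock first: 'allocated' is shared state, so it must not be
            // read until the mutex is held.
            std::scoped_lock lock(mutex);
            if (!allocated) {
                return false;  // NvResult::BadValue in the real code
            }
            // ... dupe/reference accounting happens here ...
            return true;  // NvResult::Success
        }
    };

The SubmitGPFIFOImpl change follows the same idea at a larger scale: the fence is fully computed, including the syncpoint max increment, before the entries are pushed to the GPU, so nothing can observe a half-updated fence.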
--- src/core/hle/service/nvdrv/core/nvmap.cpp | 6 +----- src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 2 +- src/video_core/renderer_vulkan/vk_query_cache.cpp | 2 +- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index f811b66a0d..9b21da6b1e 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -41,22 +41,18 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) size = Common::AlignUp(size, YUZU_PAGESIZE); aligned_size = Common::AlignUp(size, align); address = pAddress; - - // TODO: pin init - allocated = true; return NvResult::Success; } NvResult NvMap::Handle::Duplicate(bool internal_session) { + std::scoped_lock lock(mutex); // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS) if (!allocated) [[unlikely]] { return NvResult::BadValue; } - std::scoped_lock lock(mutex); - // If we internally use FromId the duplication tracking of handles won't work accurately due to // us not implementing per-process handle refs. if (internal_session) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 908e60191d..32e45540d6 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -270,12 +270,12 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector } } - gpu.PushGPUEntries(bind_id, std::move(entries)); params.fence.id = channel_syncpoint; u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) + (flags.increment_value.Value() != 0 ? params.fence.value : 0)}; params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment); + gpu.PushGPUEntries(bind_id, std::move(entries)); if (flags.fence_increment.Value()) { if (flags.suppress_wfi.Value()) { diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 393bbdf378..7cb02631c4 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -66,7 +66,7 @@ void QueryPool::Reserve(std::pair query) { } QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, - Scheduler& scheduler_) + Scheduler& scheduler_) : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_}, query_pools{ QueryPool{device_, scheduler_, QueryType::SamplesPassed}, From d97d409647686aefe701aec1363e328be11d1443 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 1 Jul 2022 02:18:33 +0200 Subject: [PATCH 59/68] NvHostChannels: improve hack for supporting multiple channels. 
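A reduced sketch of the recycling scheme in the diff below; the syncpts_accumulated name matches the patch, while the manager type is a stand-in:

    #include <cstdint>
    #include <deque>

    struct SyncpointManager {
        uint32_t AllocateSyncpoint(bool client_managed);
    };

    static std::deque<uint32_t> syncpts_accumulated;

    // A new channel reuses a parked syncpoint when one is available and only
    // allocates a fresh one otherwise.
    uint32_t AcquireChannelSyncpoint(SyncpointManager& manager) {
        if (syncpts_accumulated.empty()) {
            return manager.AllocateSyncpoint(false);
        }
        const uint32_t id = syncpts_accumulated.front();
        syncpts_accumulated.pop_front();
        return id;
    }

    // On channel teardown the syncpoint is parked rather than returned to
    // the manager, so it can be handed to the next channel that opens.
    void ReleaseChannelSyncpoint(uint32_t id) {
        syncpts_accumulated.push_back(id);
    }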
--- .../hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | 10 ++++++++-- .../hle/service/nvdrv/devices/nvhost_nvdec_common.h | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index fe83423d53..2ec1ad3e9e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -47,15 +47,21 @@ std::size_t WriteVectors(std::vector& dst, const std::vector& src, std::s } // Anonymous namespace std::unordered_map nvhost_nvdec_common::fd_to_id{}; +std::deque nvhost_nvdec_common::syncpts_accumulated{}; nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_, NvCore::ChannelType channel_type_) : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()}, channel_type{channel_type_} { - channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false); + if (syncpts_accumulated.empty()) { + channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false); + } else { + channel_syncpoint = syncpts_accumulated.front(); + syncpts_accumulated.pop_front(); + } } nvhost_nvdec_common::~nvhost_nvdec_common() { - syncpoint_manager.FreeSyncpoint(channel_syncpoint); + syncpts_accumulated.push_back(channel_syncpoint); } NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector& input) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 4046b0e13f..93990bb9b8 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -3,6 +3,7 @@ #pragma once +#include #include #include "common/common_types.h" #include "common/swap.h" @@ -127,6 +128,8 @@ protected: NvCore::NvMap& nvmap; NvCore::ChannelType channel_type; std::array device_syncpoints{}; + + static std::deque syncpts_accumulated; }; }; // namespace Devices } // namespace Service::Nvidia From fedd983f96bcbcc0c39f651db1cca0503d582fd9 Mon Sep 17 00:00:00 2001 From: Morph <39850852+Morph1984@users.noreply.github.com> Date: Wed, 29 Jun 2022 19:27:49 -0400 Subject: [PATCH 60/68] general: Format licenses as per SPDX guidelines --- src/common/address_space.cpp | 5 ++--- src/common/address_space.h | 5 ++--- src/common/address_space.inc | 4 ++-- src/common/multi_level_page_table.cpp | 3 +++ src/common/multi_level_page_table.h | 5 ++--- src/common/multi_level_page_table.inc | 5 ++--- src/core/hle/service/nvdrv/core/container.cpp | 7 +++---- src/core/hle/service/nvdrv/core/container.h | 7 +++---- src/core/hle/service/nvdrv/core/nvmap.cpp | 7 +++---- src/core/hle/service/nvdrv/core/nvmap.h | 7 +++---- src/core/hle/service/nvdrv/core/syncpoint_manager.cpp | 7 +++---- src/core/hle/service/nvdrv/core/syncpoint_manager.h | 7 +++---- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 7 +++---- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 7 +++---- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 7 +++---- src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 7 +++---- src/core/hle/service/nvdrv/nvdata.h | 7 +++---- src/core/hle/service/nvdrv/nvdrv.cpp | 7 +++---- src/core/hle/service/nvdrv/nvdrv.h | 7 +++---- src/core/hle/service/nvdrv/nvdrv_interface.cpp | 7 +++---- src/video_core/control/channel_state.cpp | 5 ++--- src/video_core/control/channel_state.h | 5 ++--- src/video_core/control/channel_state_cache.cpp | 3 +++ 
src/video_core/control/channel_state_cache.h | 5 ++--- src/video_core/control/channel_state_cache.inc | 2 ++ src/video_core/control/scheduler.cpp | 5 ++--- src/video_core/control/scheduler.h | 5 ++--- src/video_core/engines/puller.cpp | 5 ++--- src/video_core/engines/puller.h | 5 ++--- src/video_core/host1x/control.cpp | 5 ++--- src/video_core/host1x/control.h | 7 +++---- src/video_core/host1x/host1x.cpp | 5 ++--- src/video_core/host1x/host1x.h | 5 ++--- src/video_core/host1x/syncpoint_manager.cpp | 5 ++--- src/video_core/host1x/syncpoint_manager.h | 5 ++--- src/video_core/texture_cache/texture_cache.cpp | 5 ++--- src/video_core/texture_cache/texture_cache.h | 6 ++---- src/video_core/texture_cache/texture_cache_base.h | 6 ++---- 38 files changed, 93 insertions(+), 121 deletions(-) diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp index 6db85be87b..866e78dbe0 100644 --- a/src/common/address_space.cpp +++ b/src/common/address_space.cpp @@ -1,6 +1,5 @@ -// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/address_space.inc" diff --git a/src/common/address_space.h b/src/common/address_space.h index 8e13935aff..5b3832f079 100644 --- a/src/common/address_space.h +++ b/src/common/address_space.h @@ -1,6 +1,5 @@ -// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/common/address_space.inc b/src/common/address_space.inc index 7cfbb150b0..a063782b3f 100644 --- a/src/common/address_space.inc +++ b/src/common/address_space.inc @@ -1,5 +1,5 @@ -// SPDX-License-Identifier: GPLv3 or later -// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/address_space.h" #include "common/assert.h" diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp index 3a7a75aa71..46e362f3ba 100644 --- a/src/common/multi_level_page_table.cpp +++ b/src/common/multi_level_page_table.cpp @@ -1,3 +1,6 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + #include "common/multi_level_page_table.inc" namespace Common { diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h index dde1cc962e..08092c89a6 100644 --- a/src/common/multi_level_page_table.h +++ b/src/common/multi_level_page_table.h @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later #pragma once diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc index 4def6dba85..8ac506fa0a 100644 --- a/src/common/multi_level_page_table.inc +++ b/src/common/multi_level_page_table.inc @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later #ifdef _WIN32 #include diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index 4175d3d9c3..d2a6326466 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/core/nvmap.h" diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index e069ade4e5..5c8b958034 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 9b21da6b1e..e63ec77174 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/alignment.h" #include "common/assert.h" diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index ef2df3ad75..6d6dac023a 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index fc4ff3c2fa..0bb2aec974 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. 
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/assert.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h index da456f2062..6b71cd33df 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index d1beefba6a..b48f7fcafd 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include #include diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 12e881f0db..86fe71c750 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 7fffb8e48c..5bee4a3d3f 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. 
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include #include diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index f511c02961..4aa738b41a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/core/hle/service/nvdrv/nvdata.h b/src/core/hle/service/nvdrv/nvdata.h index 2ee91f9c43..0e2f47075f 100644 --- a/src/core/hle/service/nvdrv/nvdata.h +++ b/src/core/hle/service/nvdrv/nvdata.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 20bf24ec8c..7929443d27 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index 22836529d0..a2aeb80b49 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index 5e50a04e8a..edbdfee43c 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. 
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include #include "common/logging/log.h" diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp index 3613c49927..b04922ac0e 100644 --- a/src/video_core/control/channel_state.cpp +++ b/src/video_core/control/channel_state.cpp @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/assert.h" #include "video_core/control/channel_state.h" diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h index 08a7591e17..305b21cbaa 100644 --- a/src/video_core/control/channel_state.h +++ b/src/video_core/control/channel_state.h @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp index ec7ba907ca..4ebeb6356b 100644 --- a/src/video_core/control/channel_state_cache.cpp +++ b/src/video_core/control/channel_state_cache.cpp @@ -1,3 +1,6 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + #include "video_core/control/channel_state_cache.inc" namespace VideoCommon { diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index 102947adb2..5246192a80 100644 --- a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc index d3ae758b2c..460313893f 100644 --- a/src/video_core/control/channel_state_cache.inc +++ b/src/video_core/control/channel_state_cache.inc @@ -1,3 +1,5 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp index a9bb00aa77..7330426909 100644 --- a/src/video_core/control/scheduler.cpp +++ b/src/video_core/control/scheduler.cpp @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h index c1a7739467..305a01e0a0 100644 --- a/src/video_core/control/scheduler.h +++ b/src/video_core/control/scheduler.h @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. 
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp index c3ed11c13e..cca890792a 100644 --- a/src/video_core/engines/puller.cpp +++ b/src/video_core/engines/puller.cpp @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/assert.h" #include "common/logging/log.h" diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h index b4619e9a88..d4175ee945 100644 --- a/src/video_core/engines/puller.h +++ b/src/video_core/engines/puller.h @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp index a81c635aec..dceefdb7f4 100644 --- a/src/video_core/host1x/control.cpp +++ b/src/video_core/host1x/control.cpp @@ -1,6 +1,5 @@ -// Copyright 2022 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/assert.h" #include "video_core/host1x/control.h" diff --git a/src/video_core/host1x/control.h b/src/video_core/host1x/control.h index 18a9b56c0e..e117888a3b 100644 --- a/src/video_core/host1x/control.h +++ b/src/video_core/host1x/control.h @@ -1,7 +1,6 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp index eb00f48557..7c317a85d4 100644 --- a/src/video_core/host1x/host1x.cpp +++ b/src/video_core/host1x/host1x.cpp @@ -1,6 +1,5 @@ -// Copyright 2022 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include "core/core.h" #include "video_core/host1x/host1x.h" diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h index e4b69d75aa..7ecf853d95 100644 --- a/src/video_core/host1x/host1x.h +++ b/src/video_core/host1x/host1x.h @@ -1,6 +1,5 @@ -// Copyright 2022 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. 
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp index 825bd551e7..4471bacae2 100644 --- a/src/video_core/host1x/syncpoint_manager.cpp +++ b/src/video_core/host1x/syncpoint_manager.cpp @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include "common/microprofile.h" #include "video_core/host1x/syncpoint_manager.h" diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h index 440b1508a3..72220a09a5 100644 --- a/src/video_core/host1x/syncpoint_manager.h +++ b/src/video_core/host1x/syncpoint_manager.h @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp index bc905a1a4f..8a9a32f44a 100644 --- a/src/video_core/texture_cache/texture_cache.cpp +++ b/src/video_core/texture_cache/texture_cache.cpp @@ -1,6 +1,5 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv3 or any later version -// Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #include "video_core/control/channel_state_cache.inc" #include "video_core/texture_cache/texture_cache_base.h" diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 9a835cefce..eaf4a1c959 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -1,7 +1,5 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h index 2f4db50478..2fa8445eb4 100644 --- a/src/video_core/texture_cache/texture_cache_base.h +++ b/src/video_core/texture_cache/texture_cache_base.h @@ -1,7 +1,5 @@ -// SPDX-FileCopyrightText: 2021 yuzu emulator team -// (https://github.com/skyline-emu/) -// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3 -// or any later version Refer to the license.txt file included. 
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once From fa342cae227666c861806b9bf63e4286aff1e4d7 Mon Sep 17 00:00:00 2001 From: Morph <39850852+Morph1984@users.noreply.github.com> Date: Wed, 29 Jun 2022 20:33:04 -0400 Subject: [PATCH 61/68] address_space: Address feedback --- src/common/address_space.h | 105 +++--- src/common/address_space.inc | 337 ++++++++++-------- .../service/nvdrv/devices/nvhost_as_gpu.cpp | 8 +- 3 files changed, 246 insertions(+), 204 deletions(-) diff --git a/src/common/address_space.h b/src/common/address_space.h index 5b3832f079..bf649018c5 100644 --- a/src/common/address_space.h +++ b/src/common/address_space.h @@ -23,9 +23,29 @@ template requires AddressSpaceValid class FlatAddressSpaceMap { -private: - std::function - unmapCallback{}; //!< Callback called when the mappings in an region have changed +public: + /// The maximum VA that this AS can technically reach + static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + + ((1ULL << (AddressSpaceBits - 1)) - 1)}; + + explicit FlatAddressSpaceMap(VaType va_limit, + std::function unmap_callback = {}); + + FlatAddressSpaceMap() = default; + + void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) { + std::scoped_lock lock(block_mutex); + MapLocked(virt, phys, size, extra_info); + } + + void Unmap(VaType virt, VaType size) { + std::scoped_lock lock(block_mutex); + UnmapLocked(virt, size); + } + + VaType GetVALimit() const { + return va_limit; + } protected: /** @@ -33,68 +53,55 @@ protected: * another block with a different phys address is hit */ struct Block { - VaType virt{UnmappedVa}; //!< VA of the block - PaType phys{UnmappedPa}; //!< PA of the block, will increase 1-1 with VA until a new block - //!< is encountered - [[no_unique_address]] ExtraBlockInfo extraInfo; + /// VA of the block + VaType virt{UnmappedVa}; + /// PA of the block, will increase 1-1 with VA until a new block is encountered + PaType phys{UnmappedPa}; + [[no_unique_address]] ExtraBlockInfo extra_info; Block() = default; - Block(VaType virt_, PaType phys_, ExtraBlockInfo extraInfo_) - : virt(virt_), phys(phys_), extraInfo(extraInfo_) {} + Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_) + : virt(virt_), phys(phys_), extra_info(extra_info_) {} - constexpr bool Valid() { + bool Valid() const { return virt != UnmappedVa; } - constexpr bool Mapped() { + bool Mapped() const { return phys != UnmappedPa; } - constexpr bool Unmapped() { + bool Unmapped() const { return phys == UnmappedPa; } - bool operator<(const VaType& pVirt) const { - return virt < pVirt; + bool operator<(const VaType& p_virt) const { + return virt < p_virt; } }; - std::mutex blockMutex; - std::vector blocks{Block{}}; - /** * @brief Maps a PA range into the given AS region - * @note blockMutex MUST be locked when calling this + * @note block_mutex MUST be locked when calling this */ - void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo); + void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info); /** * @brief Unmaps the given range and merges it with other unmapped regions - * @note blockMutex MUST be locked when calling this + * @note block_mutex MUST be locked when calling this */ void UnmapLocked(VaType virt, VaType size); -public: - static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + - ((1ULL << (AddressSpaceBits - 1)) - - 1)}; //!< The maximum VA that this AS can technically reach + std::mutex 
block_mutex; + std::vector blocks{Block{}}; - VaType vaLimit{VaMaximum}; //!< A soft limit on the maximum VA of the AS + /// a soft limit on the maximum VA of the AS + VaType va_limit{VaMaximum}; - FlatAddressSpaceMap(VaType vaLimit, std::function unmapCallback = {}); - - FlatAddressSpaceMap() = default; - - void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo = {}) { - std::scoped_lock lock(blockMutex); - MapLocked(virt, phys, size, extraInfo); - } - - void Unmap(VaType virt, VaType size) { - std::scoped_lock lock(blockMutex); - UnmapLocked(virt, size); - } +private: + /// Callback called when the mappings in an region have changed + std::function unmap_callback{}; }; /** @@ -108,14 +115,8 @@ class FlatAllocator private: using Base = FlatAddressSpaceMap; - VaType currentLinearAllocEnd; //!< The end address for the initial linear allocation pass, once - //!< this reaches the AS limit the slower allocation path will be - //!< used - public: - VaType vaStart; //!< The base VA of the allocator, no allocations will be below this - - FlatAllocator(VaType vaStart, VaType vaLimit = Base::VaMaximum); + explicit FlatAllocator(VaType va_start, VaType va_limit = Base::VaMaximum); /** * @brief Allocates a region in the AS of the given size and returns its address @@ -131,5 +132,19 @@ public: * @brief Frees an AS region so it can be used again */ void Free(VaType virt, VaType size); + + VaType GetVAStart() const { + return va_start; + } + +private: + /// The base VA of the allocator, no allocations will be below this + VaType va_start; + + /** + * The end address for the initial linear allocation pass + * Once this reaches the AS limit the slower allocation path will be used + */ + VaType current_linear_alloc_end; }; } // namespace Common diff --git a/src/common/address_space.inc b/src/common/address_space.inc index a063782b3f..3661b298ef 100644 --- a/src/common/address_space.inc +++ b/src/common/address_space.inc @@ -30,137 +30,151 @@ FlatAllocator namespace Common { -MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit_, - std::function unmapCallback_) - : unmapCallback(std::move(unmapCallback_)), vaLimit(vaLimit_) { - if (vaLimit > VaMaximum) +MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_, + std::function unmap_callback_) + : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} { + if (va_limit > VaMaximum) { UNREACHABLE_MSG("Invalid VA limit!"); + } } -MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo) { - VaType virtEnd{virt + size}; +MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) { + VaType virt_end{virt + size}; - if (virtEnd > vaLimit) - UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", - virtEnd, vaLimit); + if (virt_end > va_limit) { + UNREACHABLE_MSG( + "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end, + va_limit); + } - auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)}; - if (blockEndSuccessor == blocks.begin()) - UNREACHABLE_MSG("Trying to map a block before the VA start: virtEnd: 0x{:X}", virtEnd); + auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; + if (block_end_successor == blocks.begin()) { + UNREACHABLE_MSG("Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end); + } - auto blockEndPredecessor{std::prev(blockEndSuccessor)}; + auto block_end_predecessor{std::prev(block_end_successor)}; - if 
(blockEndSuccessor != blocks.end()) { + if (block_end_successor != blocks.end()) { // We have blocks in front of us, if one is directly in front then we don't have to add a // tail - if (blockEndSuccessor->virt != virtEnd) { + if (block_end_successor->virt != virt_end) { PaType tailPhys{[&]() -> PaType { if constexpr (!PaContigSplit) { - return blockEndPredecessor - ->phys; // Always propagate unmapped regions rather than calculating offset + // Always propagate unmapped regions rather than calculating offset + return block_end_predecessor->phys; } else { - if (blockEndPredecessor->Unmapped()) - return blockEndPredecessor->phys; // Always propagate unmapped regions - // rather than calculating offset - else - return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt; + if (block_end_predecessor->Unmapped()) { + // Always propagate unmapped regions rather than calculating offset + return block_end_predecessor->phys; + } else { + return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; + } } }()}; - if (blockEndPredecessor->virt >= virt) { + if (block_end_predecessor->virt >= virt) { // If this block's start would be overlapped by the map then reuse it as a tail // block - blockEndPredecessor->virt = virtEnd; - blockEndPredecessor->phys = tailPhys; - blockEndPredecessor->extraInfo = blockEndPredecessor->extraInfo; + block_end_predecessor->virt = virt_end; + block_end_predecessor->phys = tailPhys; + block_end_predecessor->extra_info = block_end_predecessor->extra_info; // No longer predecessor anymore - blockEndSuccessor = blockEndPredecessor--; + block_end_successor = block_end_predecessor--; } else { // Else insert a new one and we're done - blocks.insert(blockEndSuccessor, - {Block(virt, phys, extraInfo), - Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)}); - if (unmapCallback) - unmapCallback(virt, size); + blocks.insert(block_end_successor, + {Block(virt, phys, extra_info), + Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); + if (unmap_callback) { + unmap_callback(virt, size); + } return; } } } else { - // blockEndPredecessor will always be unmapped as blocks has to be terminated by an unmapped - // chunk - if (blockEndPredecessor != blocks.begin() && blockEndPredecessor->virt >= virt) { + // block_end_predecessor will always be unmapped as blocks has to be terminated by an + // unmapped chunk + if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) { // Move the unmapped block start backwards - blockEndPredecessor->virt = virtEnd; + block_end_predecessor->virt = virt_end; // No longer predecessor anymore - blockEndSuccessor = blockEndPredecessor--; + block_end_successor = block_end_predecessor--; } else { // Else insert a new one and we're done - blocks.insert(blockEndSuccessor, - {Block(virt, phys, extraInfo), Block(virtEnd, UnmappedPa, {})}); - if (unmapCallback) - unmapCallback(virt, size); + blocks.insert(block_end_successor, + {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})}); + if (unmap_callback) { + unmap_callback(virt, size); + } return; } } - auto blockStartSuccessor{blockEndSuccessor}; + auto block_start_successor{block_end_successor}; // Walk the block vector to find the start successor as this is more efficient than another // binary search in most scenarios - while (std::prev(blockStartSuccessor)->virt >= virt) - blockStartSuccessor--; - - // Check that the start successor is either the end block or something in between - if (blockStartSuccessor->virt > virtEnd) { - 
UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt); - } else if (blockStartSuccessor->virt == virtEnd) { - // We need to create a new block as there are none spare that we would overwrite - blocks.insert(blockStartSuccessor, Block(virt, phys, extraInfo)); - } else { - // Erase overwritten blocks - if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor) - blocks.erase(eraseStart, blockEndSuccessor); - - // Reuse a block that would otherwise be overwritten as a start block - blockStartSuccessor->virt = virt; - blockStartSuccessor->phys = phys; - blockStartSuccessor->extraInfo = extraInfo; + while (std::prev(block_start_successor)->virt >= virt) { + block_start_successor--; } - if (unmapCallback) - unmapCallback(virt, size); + // Check that the start successor is either the end block or something in between + if (block_start_successor->virt > virt_end) { + UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); + } else if (block_start_successor->virt == virt_end) { + // We need to create a new block as there are none spare that we would overwrite + blocks.insert(block_start_successor, Block(virt, phys, extra_info)); + } else { + // Erase overwritten blocks + if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { + blocks.erase(eraseStart, block_end_successor); + } + + // Reuse a block that would otherwise be overwritten as a start block + block_start_successor->virt = virt; + block_start_successor->phys = phys; + block_start_successor->extra_info = extra_info; + } + + if (unmap_callback) { + unmap_callback(virt, size); + } } MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { - VaType virtEnd{virt + size}; + VaType virt_end{virt + size}; - if (virtEnd > vaLimit) - UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", - virtEnd, vaLimit); + if (virt_end > va_limit) { + UNREACHABLE_MSG( + "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end, + va_limit); + } - auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)}; - if (blockEndSuccessor == blocks.begin()) - UNREACHABLE_MSG("Trying to unmap a block before the VA start: virtEnd: 0x{:X}", virtEnd); + auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; + if (block_end_successor == blocks.begin()) { + UNREACHABLE_MSG("Trying to unmap a block before the VA start: virt_end: 0x{:X}", virt_end); + } - auto blockEndPredecessor{std::prev(blockEndSuccessor)}; + auto block_end_predecessor{std::prev(block_end_successor)}; - auto walkBackToPredecessor{[&](auto iter) { - while (iter->virt >= virt) + auto walk_back_to_predecessor{[&](auto iter) { + while (iter->virt >= virt) { iter--; + } return iter; }}; - auto eraseBlocksWithEndUnmapped{[&](auto unmappedEnd) { - auto blockStartPredecessor{walkBackToPredecessor(unmappedEnd)}; - auto blockStartSuccessor{std::next(blockStartPredecessor)}; + auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) { + auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)}; + auto block_start_successor{std::next(block_start_predecessor)}; auto eraseEnd{[&]() { - if (blockStartPredecessor->Unmapped()) { + if (block_start_predecessor->Unmapped()) { // If the start predecessor is unmapped then we can erase everything in our region // and be done return std::next(unmappedEnd); @@ -174,158 +188,171 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType 
size) { // We can't have two unmapped regions after each other if (eraseEnd != blocks.end() && - (eraseEnd == blockStartSuccessor || - (blockStartPredecessor->Unmapped() && eraseEnd->Unmapped()))) + (eraseEnd == block_start_successor || + (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) { UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!"); + } - blocks.erase(blockStartSuccessor, eraseEnd); + blocks.erase(block_start_successor, eraseEnd); }}; // We can avoid any splitting logic if these are the case - if (blockEndPredecessor->Unmapped()) { - if (blockEndPredecessor->virt > virt) - eraseBlocksWithEndUnmapped(blockEndPredecessor); + if (block_end_predecessor->Unmapped()) { + if (block_end_predecessor->virt > virt) { + erase_blocks_with_end_unmapped(block_end_predecessor); + } - if (unmapCallback) - unmapCallback(virt, size); + if (unmap_callback) { + unmap_callback(virt, size); + } return; // The region is unmapped, bail out early - } else if (blockEndSuccessor->virt == virtEnd && blockEndSuccessor->Unmapped()) { - eraseBlocksWithEndUnmapped(blockEndSuccessor); + } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) { + erase_blocks_with_end_unmapped(block_end_successor); - if (unmapCallback) - unmapCallback(virt, size); + if (unmap_callback) { + unmap_callback(virt, size); + } return; // The region is unmapped here and doesn't need splitting, bail out early - } else if (blockEndSuccessor == blocks.end()) { + } else if (block_end_successor == blocks.end()) { // This should never happen as the end should always follow an unmapped block UNREACHABLE_MSG("Unexpected Memory Manager state!"); - } else if (blockEndSuccessor->virt != virtEnd) { + } else if (block_end_successor->virt != virt_end) { // If one block is directly in front then we don't have to add a tail // The previous block is mapped so we will need to add a tail with an offset PaType tailPhys{[&]() { - if constexpr (PaContigSplit) - return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt; - else - return blockEndPredecessor->phys; + if constexpr (PaContigSplit) { + return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; + } else { + return block_end_predecessor->phys; + } }()}; - if (blockEndPredecessor->virt >= virt) { + if (block_end_predecessor->virt >= virt) { // If this block's start would be overlapped by the unmap then reuse it as a tail block - blockEndPredecessor->virt = virtEnd; - blockEndPredecessor->phys = tailPhys; + block_end_predecessor->virt = virt_end; + block_end_predecessor->phys = tailPhys; // No longer predecessor anymore - blockEndSuccessor = blockEndPredecessor--; + block_end_successor = block_end_predecessor--; } else { - blocks.insert(blockEndSuccessor, + blocks.insert(block_end_successor, {Block(virt, UnmappedPa, {}), - Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)}); - if (unmapCallback) - unmapCallback(virt, size); + Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); + if (unmap_callback) { + unmap_callback(virt, size); + } - return; // The previous block is mapped and ends before + // The previous block is mapped and ends before + return; } } // Walk the block vector to find the start predecessor as this is more efficient than another // binary search in most scenarios - auto blockStartPredecessor{walkBackToPredecessor(blockEndSuccessor)}; - auto blockStartSuccessor{std::next(blockStartPredecessor)}; + auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)}; + 
auto block_start_successor{std::next(block_start_predecessor)}; - if (blockStartSuccessor->virt > virtEnd) { - UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt); - } else if (blockStartSuccessor->virt == virtEnd) { + if (block_start_successor->virt > virt_end) { + UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); + } else if (block_start_successor->virt == virt_end) { // There are no blocks between the start and the end that would let us skip inserting a new // one for head // The previous block is may be unmapped, if so we don't need to insert any unmaps after it - if (blockStartPredecessor->Mapped()) - blocks.insert(blockStartSuccessor, Block(virt, UnmappedPa, {})); - } else if (blockStartPredecessor->Unmapped()) { + if (block_start_predecessor->Mapped()) { + blocks.insert(block_start_successor, Block(virt, UnmappedPa, {})); + } + } else if (block_start_predecessor->Unmapped()) { // If the previous block is unmapped - blocks.erase(blockStartSuccessor, blockEndPredecessor); + blocks.erase(block_start_successor, block_end_predecessor); } else { // Erase overwritten blocks, skipping the first one as we have written the unmapped start // block there - if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor) - blocks.erase(eraseStart, blockEndSuccessor); + if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { + blocks.erase(eraseStart, block_end_successor); + } // Add in the unmapped block header - blockStartSuccessor->virt = virt; - blockStartSuccessor->phys = UnmappedPa; + block_start_successor->virt = virt; + block_start_successor->phys = UnmappedPa; } - if (unmapCallback) - unmapCallback(virt, size); + if (unmap_callback) + unmap_callback(virt, size); } -ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart_, VaType vaLimit_) - : Base(vaLimit_), currentLinearAllocEnd(vaStart_), vaStart(vaStart_) {} +ALLOC_MEMBER_CONST()::FlatAllocator(VaType va_start_, VaType va_limit_) + : Base{va_limit_}, va_start{va_start_}, current_linear_alloc_end{va_start_} {} ALLOC_MEMBER(VaType)::Allocate(VaType size) { - std::scoped_lock lock(this->blockMutex); + std::scoped_lock lock(this->block_mutex); - VaType allocStart{UnmappedVa}; - VaType allocEnd{currentLinearAllocEnd + size}; + VaType alloc_start{UnmappedVa}; + VaType alloc_end{current_linear_alloc_end + size}; // Avoid searching backwards in the address space if possible - if (allocEnd >= currentLinearAllocEnd && allocEnd <= this->vaLimit) { - auto allocEndSuccessor{ - std::lower_bound(this->blocks.begin(), this->blocks.end(), allocEnd)}; - if (allocEndSuccessor == this->blocks.begin()) + if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) { + auto alloc_end_successor{ + std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)}; + if (alloc_end_successor == this->blocks.begin()) { UNREACHABLE_MSG("First block in AS map is invalid!"); + } - auto allocEndPredecessor{std::prev(allocEndSuccessor)}; - if (allocEndPredecessor->virt <= currentLinearAllocEnd) { - allocStart = currentLinearAllocEnd; + auto alloc_end_predecessor{std::prev(alloc_end_successor)}; + if (alloc_end_predecessor->virt <= current_linear_alloc_end) { + alloc_start = current_linear_alloc_end; } else { // Skip over fixed any mappings in front of us - while (allocEndSuccessor != this->blocks.end()) { - if (allocEndSuccessor->virt - allocEndPredecessor->virt < size || - allocEndPredecessor->Mapped()) { - allocStart = 
allocEndPredecessor->virt; + while (alloc_end_successor != this->blocks.end()) { + if (alloc_end_successor->virt - alloc_end_predecessor->virt < size || + alloc_end_predecessor->Mapped()) { + alloc_start = alloc_end_predecessor->virt; break; } - allocEndPredecessor = allocEndSuccessor++; + alloc_end_predecessor = alloc_end_successor++; // Use the VA limit to calculate if we can fit in the final block since it has no // successor - if (allocEndSuccessor == this->blocks.end()) { - allocEnd = allocEndPredecessor->virt + size; + if (alloc_end_successor == this->blocks.end()) { + alloc_end = alloc_end_predecessor->virt + size; - if (allocEnd >= allocEndPredecessor->virt && allocEnd <= this->vaLimit) - allocStart = allocEndPredecessor->virt; + if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) { + alloc_start = alloc_end_predecessor->virt; + } } } } } - if (allocStart != UnmappedVa) { - currentLinearAllocEnd = allocStart + size; + if (alloc_start != UnmappedVa) { + current_linear_alloc_end = alloc_start + size; } else { // If linear allocation overflows the AS then find a gap - if (this->blocks.size() <= 2) + if (this->blocks.size() <= 2) { UNREACHABLE_MSG("Unexpected allocator state!"); - - auto searchPredecessor{this->blocks.begin()}; - auto searchSuccessor{std::next(searchPredecessor)}; - - while (searchSuccessor != this->blocks.end() && - (searchSuccessor->virt - searchPredecessor->virt < size || - searchPredecessor->Mapped())) { - searchPredecessor = searchSuccessor++; } - if (searchSuccessor != this->blocks.end()) - allocStart = searchPredecessor->virt; - else + auto search_predecessor{this->blocks.begin()}; + auto search_successor{std::next(search_predecessor)}; + + while (search_successor != this->blocks.end() && + (search_successor->virt - search_predecessor->virt < size || + search_predecessor->Mapped())) { + search_predecessor = search_successor++; + } + + if (search_successor != this->blocks.end()) { + alloc_start = search_predecessor->virt; + } else { return {}; // AS is full + } } - this->MapLocked(allocStart, true, size, {}); - return allocStart; + this->MapLocked(alloc_start, true, size, {}); + return alloc_start; } ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index b48f7fcafd..7a95f53055 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -472,16 +472,16 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) { params.regions = std::array{ VaRegion{ - .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS, + .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS, .page_size = VM::YUZU_PAGESIZE, ._pad0_{}, - .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart, + .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(), }, VaRegion{ - .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits, + .offset = vm.big_page_allocator->GetVAStart() << vm.big_page_size_bits, .page_size = vm.big_page_size, ._pad0_{}, - .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart, + .pages = vm.big_page_allocator->GetVALimit() - vm.big_page_allocator->GetVAStart(), }, }; } From 11e1cbbdbde8269e7cdb0e150f25639223bdd3e6 Mon Sep 17 00:00:00 2001 From: Morph <39850852+Morph1984@users.noreply.github.com> Date: Wed, 29 Jun 2022 20:36:39 -0400 Subject: 
[PATCH 62/68] address_space: Rename va_start to virt_start Avoids conflicting with the va_start macro --- src/common/address_space.h | 6 +++--- src/common/address_space.inc | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/common/address_space.h b/src/common/address_space.h index bf649018c5..9222b2fdc6 100644 --- a/src/common/address_space.h +++ b/src/common/address_space.h @@ -116,7 +116,7 @@ private: using Base = FlatAddressSpaceMap; public: - explicit FlatAllocator(VaType va_start, VaType va_limit = Base::VaMaximum); + explicit FlatAllocator(VaType virt_start, VaType va_limit = Base::VaMaximum); /** * @brief Allocates a region in the AS of the given size and returns its address @@ -134,12 +134,12 @@ public: void Free(VaType virt, VaType size); VaType GetVAStart() const { - return va_start; + return virt_start; } private: /// The base VA of the allocator, no allocations will be below this - VaType va_start; + VaType virt_start; /** * The end address for the initial linear allocation pass diff --git a/src/common/address_space.inc b/src/common/address_space.inc index 3661b298ef..9f957c81d9 100644 --- a/src/common/address_space.inc +++ b/src/common/address_space.inc @@ -284,8 +284,8 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { unmap_callback(virt, size); } -ALLOC_MEMBER_CONST()::FlatAllocator(VaType va_start_, VaType va_limit_) - : Base{va_limit_}, va_start{va_start_}, current_linear_alloc_end{va_start_} {} +ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_) + : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {} ALLOC_MEMBER(VaType)::Allocate(VaType size) { std::scoped_lock lock(this->block_mutex); From 903705043dd768fe2bbeeddc621994ac61aae561 Mon Sep 17 00:00:00 2001 From: Morph <39850852+Morph1984@users.noreply.github.com> Date: Tue, 2 Aug 2022 03:38:19 -0400 Subject: [PATCH 63/68] nvdisp: End system frame after requesting to swap buffers Fixes frametime reporting --- src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 18c5324a9a..4122fc98de 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -50,8 +50,8 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form const Tegra::FramebufferConfig framebuffer{addr, offset, width, height, stride, format, transform, crop_rect}; - system.GetPerfStats().EndSystemFrame(); system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences); + system.GetPerfStats().EndSystemFrame(); system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs()); system.GetPerfStats().BeginSystemFrame(); } From c80ed6d81fef5858508ac4b841defe8ee3a8663d Mon Sep 17 00:00:00 2001 From: Liam Date: Fri, 19 Aug 2022 21:58:25 -0400 Subject: [PATCH 64/68] general: rework usages of UNREACHABLE macro --- src/common/address_space.inc | 31 ++++++++++--------- .../service/nvdrv/core/syncpoint_manager.cpp | 14 ++++----- .../service/nvdrv/devices/nvhost_as_gpu.cpp | 10 +++--- 3 files changed, 28 insertions(+), 27 deletions(-) diff --git a/src/common/address_space.inc b/src/common/address_space.inc index 9f957c81d9..2195dabd54 100644 --- a/src/common/address_space.inc +++ b/src/common/address_space.inc @@ -34,7 +34,7 @@ MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_, std::function unmap_callback_) 
: va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} { if (va_limit > VaMaximum) { - UNREACHABLE_MSG("Invalid VA limit!"); + ASSERT_MSG(false, "Invalid VA limit!"); } } @@ -42,14 +42,14 @@ MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInf VaType virt_end{virt + size}; if (virt_end > va_limit) { - UNREACHABLE_MSG( - "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end, - va_limit); + ASSERT_MSG(false, + "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", + virt_end, va_limit); } auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; if (block_end_successor == blocks.begin()) { - UNREACHABLE_MSG("Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end); + ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end); } auto block_end_predecessor{std::prev(block_end_successor)}; @@ -124,7 +124,7 @@ MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInf // Check that the start successor is either the end block or something in between if (block_start_successor->virt > virt_end) { - UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); + ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); } else if (block_start_successor->virt == virt_end) { // We need to create a new block as there are none spare that we would overwrite blocks.insert(block_start_successor, Block(virt, phys, extra_info)); @@ -149,14 +149,15 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { VaType virt_end{virt + size}; if (virt_end > va_limit) { - UNREACHABLE_MSG( - "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end, - va_limit); + ASSERT_MSG(false, + "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", + virt_end, va_limit); } auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; if (block_end_successor == blocks.begin()) { - UNREACHABLE_MSG("Trying to unmap a block before the VA start: virt_end: 0x{:X}", virt_end); + ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}", + virt_end); } auto block_end_predecessor{std::prev(block_end_successor)}; @@ -190,7 +191,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { if (eraseEnd != blocks.end() && (eraseEnd == block_start_successor || (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) { - UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!"); + ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!"); } blocks.erase(block_start_successor, eraseEnd); @@ -217,7 +218,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { return; // The region is unmapped here and doesn't need splitting, bail out early } else if (block_end_successor == blocks.end()) { // This should never happen as the end should always follow an unmapped block - UNREACHABLE_MSG("Unexpected Memory Manager state!"); + ASSERT_MSG(false, "Unexpected Memory Manager state!"); } else if (block_end_successor->virt != virt_end) { // If one block is directly in front then we don't have to add a tail @@ -256,7 +257,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { auto block_start_successor{std::next(block_start_predecessor)}; if (block_start_successor->virt > virt_end) { - UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", 
block_start_successor->virt); + ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); } else if (block_start_successor->virt == virt_end) { // There are no blocks between the start and the end that would let us skip inserting a new // one for head @@ -298,7 +299,7 @@ ALLOC_MEMBER(VaType)::Allocate(VaType size) { auto alloc_end_successor{ std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)}; if (alloc_end_successor == this->blocks.begin()) { - UNREACHABLE_MSG("First block in AS map is invalid!"); + ASSERT_MSG(false, "First block in AS map is invalid!"); } auto alloc_end_predecessor{std::prev(alloc_end_successor)}; @@ -332,7 +333,7 @@ ALLOC_MEMBER(VaType)::Allocate(VaType size) { current_linear_alloc_end = alloc_start + size; } else { // If linear allocation overflows the AS then find a gap if (this->blocks.size() <= 2) { - UNREACHABLE_MSG("Unexpected allocator state!"); + ASSERT_MSG(false, "Unexpected allocator state!"); } auto search_predecessor{this->blocks.begin()}; diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index 0bb2aec974..072b3a22f1 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -29,7 +29,7 @@ SyncpointManager::~SyncpointManager() = default; u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) { if (syncpoints.at(id).reserved) { - UNREACHABLE_MSG("Requested syncpoint is in use"); + ASSERT_MSG(false, "Requested syncpoint is in use"); return 0; } @@ -45,7 +45,7 @@ u32 SyncpointManager::FindFreeSyncpoint() { return i; } } - UNREACHABLE_MSG("Failed to find a free syncpoint!"); + ASSERT_MSG(false, "Failed to find a free syncpoint!"); return 0; } @@ -68,7 +68,7 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) { const SyncpointInfo& syncpoint{syncpoints.at(id)}; if (!syncpoint.reserved) { - UNREACHABLE(); + ASSERT(false); return 0; } @@ -83,7 +83,7 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) { u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) { if (!syncpoints.at(id).reserved) { - UNREACHABLE(); + ASSERT(false); return 0; } @@ -92,7 +92,7 @@ u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) { u32 SyncpointManager::ReadSyncpointMinValue(u32 id) { if (!syncpoints.at(id).reserved) { - UNREACHABLE(); + ASSERT(false); return 0; } @@ -101,7 +101,7 @@ u32 SyncpointManager::ReadSyncpointMinValue(u32 id) { u32 SyncpointManager::UpdateMin(u32 id) { if (!syncpoints.at(id).reserved) { - UNREACHABLE(); + ASSERT(false); return 0; } @@ -111,7 +111,7 @@ u32 SyncpointManager::UpdateMin(u32 id) { NvFence SyncpointManager::GetSyncpointFence(u32 id) { if (!syncpoints.at(id).reserved) { - UNREACHABLE(); + ASSERT(false); return NvFence{}; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 7a95f53055..192503ffc3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -96,7 +96,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector& input, std::vector& std::scoped_lock lock(mutex); if (vm.initialised) { - UNREACHABLE_MSG("Cannot initialise an address space twice!"); + ASSERT_MSG(false, "Cannot initialise an address space twice!"); return NvResult::InvalidState; } @@ -174,7 +174,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector& input, std::vector< } 
else { params.offset = static_cast(allocator.Allocate(params.pages)) << page_size_bits; if (!params.offset) { - UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!"); + ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!"); return NvResult::InsufficientMemory; } } @@ -372,7 +372,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectoralign, VM::YUZU_PAGESIZE)) return false; else { - UNREACHABLE(); + ASSERT(false); return false; } }()}; @@ -382,7 +382,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectorfirst) + size > alloc->second.size) { - UNREACHABLE_MSG("Cannot perform a fixed mapping into an unallocated region!"); + ASSERT_MSG(false, "Cannot perform a fixed mapping into an unallocated region!"); return NvResult::BadValue; } @@ -403,7 +403,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vector(Common::AlignUp(size, page_size) >> page_size_bits))) << page_size_bits; if (!params.offset) { - UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!"); + ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!"); return NvResult::InsufficientMemory; } From 0d99b7962d8e06958668168cdae155fd1e3d1757 Mon Sep 17 00:00:00 2001 From: Liam Date: Fri, 19 Aug 2022 22:15:23 -0400 Subject: [PATCH 65/68] state_tracker: workaround channel setup for homebrew --- src/video_core/query_cache.h | 6 ++++-- src/video_core/renderer_opengl/gl_state_tracker.cpp | 2 +- src/video_core/renderer_opengl/gl_state_tracker.h | 1 + src/video_core/renderer_vulkan/vk_state_tracker.cpp | 3 ++- src/video_core/renderer_vulkan/vk_state_tracker.h | 1 + 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index eb68ea6388..b0ebe71b74 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -135,8 +135,10 @@ public: /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. void UpdateCounters() { std::unique_lock lock{mutex}; - const auto& regs = maxwell3d->regs; - Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); + if (maxwell3d) { + const auto& regs = maxwell3d->regs; + Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); + } } /// Resets a counter to zero. It doesn't disable the query after resetting. 
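The pattern throughout this patch is to make channel-dependent state tolerate the absence of a bound channel: UpdateCounters above now checks the maxwell3d pointer before dereferencing it, and the state-tracker hunks that follow point the dirty-flags pointer at a default-constructed flags object instead of leaving it null. A minimal sketch of that second technique, using illustrative names rather than the actual yuzu types:

    #include <bitset>

    // Sketch: the constructor aims `flags` at an owned placeholder object, so
    // the hot path can dereference it unconditionally. Homebrew that issues
    // work before creating a GPU channel then writes into `default_flags`
    // harmlessly instead of crashing on a null pointer.
    class TrackerSketch {
    public:
        TrackerSketch() : flags{&default_flags} {}

        // Called once a real guest channel is bound.
        void BindChannelFlags(std::bitset<64>* channel_flags) {
            flags = channel_flags;
        }

        void InvalidateState() {
            flags->set(); // Safe even before BindChannelFlags is ever called.
        }

    private:
        std::bitset<64>* flags;
        std::bitset<64> default_flags{};
    };

This keeps the per-draw path branch-free (no null check) while still being safe for guests that never set up a channel, which is exactly the trade-off the diffs below make.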
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp index 3657f867d3..a8f3a0f57b 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.cpp +++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp @@ -237,6 +237,6 @@ void StateTracker::InvalidateState() { flags->set(); } -StateTracker::StateTracker() : flags{} {} +StateTracker::StateTracker() : flags{&default_flags} {} } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h index 97d32768b5..19bcf3f355 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.h +++ b/src/video_core/renderer_opengl/gl_state_tracker.h @@ -223,6 +223,7 @@ public: private: Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags{}; GLuint framebuffer = 0; GLuint index_buffer = 0; diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp index 5a11d3267b..f234e1a310 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp +++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp @@ -206,6 +206,7 @@ void StateTracker::InvalidateState() { flags->set(); } -StateTracker::StateTracker() : flags{}, invalidation_flags{MakeInvalidationFlags()} {} +StateTracker::StateTracker() + : flags{&default_flags}, default_flags{}, invalidation_flags{MakeInvalidationFlags()} {} } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h index c107d9c246..2296dea602 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.h +++ b/src/video_core/renderer_vulkan/vk_state_tracker.h @@ -161,6 +161,7 @@ private: } Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags; Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; }; From ca3db0d7c94a20668781830ff852dbf512598efb Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 1 Sep 2022 05:45:22 +0200 Subject: [PATCH 66/68] General: address feedback --- src/common/multi_level_page_table.h | 8 +-- src/core/hle/service/nvdrv/core/container.cpp | 12 +++- src/core/hle/service/nvdrv/core/container.h | 23 ++++++-- src/core/hle/service/nvdrv/core/nvmap.cpp | 4 +- src/core/hle/service/nvdrv/core/nvmap.h | 59 +++++++++---------- .../service/nvdrv/core/syncpoint_manager.cpp | 32 +++++----- .../service/nvdrv/core/syncpoint_manager.h | 34 +++++------ .../service/nvdrv/devices/nvhost_as_gpu.cpp | 28 +++++---- .../hle/service/nvdrv/devices/nvhost_ctrl.h | 2 +- .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 2 +- .../service/nvdrv/devices/nvhost_nvdec.cpp | 13 ++-- .../hle/service/nvdrv/devices/nvhost_nvdec.h | 3 - .../nvdrv/devices/nvhost_nvdec_common.cpp | 13 ++-- .../nvdrv/devices/nvhost_nvdec_common.h | 5 -- .../hle/service/nvdrv/devices/nvhost_vic.cpp | 16 ++--- .../hle/service/nvdrv/devices/nvhost_vic.h | 3 - src/core/hle/service/nvdrv/devices/nvmap.h | 4 +- src/core/hle/service/nvdrv/nvdrv.cpp | 4 +- src/core/hle/service/nvdrv/nvdrv.h | 2 +- src/video_core/control/channel_state.cpp | 7 +-- src/video_core/control/channel_state.h | 4 +- src/video_core/control/channel_state_cache.h | 2 +- src/video_core/control/scheduler.cpp | 6 +- src/video_core/control/scheduler.h | 2 +- src/video_core/engines/maxwell_dma.h | 2 +- src/video_core/gpu.cpp | 2 +- 
src/video_core/host1x/host1x.h | 2 +- src/video_core/host1x/syncpoint_manager.cpp | 12 ++-- src/video_core/host1x/syncpoint_manager.h | 24 ++++---- src/video_core/memory_manager.h | 2 +- 30 files changed, 167 insertions(+), 165 deletions(-) diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h index 08092c89a6..31f6676a06 100644 --- a/src/common/multi_level_page_table.h +++ b/src/common/multi_level_page_table.h @@ -46,19 +46,19 @@ public: void ReserveRange(u64 start, std::size_t size); - [[nodiscard]] constexpr const BaseAddr& operator[](std::size_t index) const { + [[nodiscard]] const BaseAddr& operator[](std::size_t index) const { return base_ptr[index]; } - [[nodiscard]] constexpr BaseAddr& operator[](std::size_t index) { + [[nodiscard]] BaseAddr& operator[](std::size_t index) { return base_ptr[index]; } - [[nodiscard]] constexpr BaseAddr* data() { + [[nodiscard]] BaseAddr* data() { return base_ptr; } - [[nodiscard]] constexpr const BaseAddr* data() const { + [[nodiscard]] const BaseAddr* data() const { return base_ptr; } diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index d2a6326466..37ca24f5db 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -10,9 +10,11 @@ namespace Service::Nvidia::NvCore { struct ContainerImpl { - ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {} + explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) + : file{host1x_}, manager{host1x_}, device_file_data{} {} NvMap file; SyncpointManager manager; + Container::Host1xDeviceFileData device_file_data; }; Container::Container(Tegra::Host1x::Host1x& host1x_) { @@ -29,6 +31,14 @@ const NvMap& Container::GetNvMapFile() const { return impl->file; } +Container::Host1xDeviceFileData& Container::Host1xDeviceFile() { + return impl->device_file_data; +} + +const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const { + return impl->device_file_data; +} + SyncpointManager& Container::GetSyncpointManager() { return impl->manager; } diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index 5c8b958034..b4b63ac90c 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -4,15 +4,15 @@ #pragma once +#include #include +#include -namespace Tegra { +#include "core/hle/service/nvdrv/nvdata.h" -namespace Host1x { +namespace Tegra::Host1x { class Host1x; -} // namespace Host1x - -} // namespace Tegra +} // namespace Tegra::Host1x namespace Service::Nvidia::NvCore { @@ -23,7 +23,7 @@ struct ContainerImpl; class Container { public: - Container(Tegra::Host1x::Host1x& host1x); + explicit Container(Tegra::Host1x::Host1x& host1x); ~Container(); NvMap& GetNvMapFile(); @@ -34,6 +34,17 @@ public: const SyncpointManager& GetSyncpointManager() const; + struct Host1xDeviceFileData { + std::unordered_map fd_to_id{}; + std::deque syncpts_accumulated{}; + u32 nvdec_next_id{}; + u32 vic_next_id{}; + }; + + Host1xDeviceFileData& Host1xDeviceFile(); + + const Host1xDeviceFileData& Host1xDeviceFile() const; + private: std::unique_ptr impl; }; diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index e63ec77174..fbd8a74a55 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -119,7 +119,7 @@ std::shared_ptr NvMap::GetHandle(Handle::Id handle) { 
std::scoped_lock lock(handles_lock); try { return handles.at(handle); - } catch ([[maybe_unused]] std::out_of_range& e) { + } catch (std::out_of_range&) { return nullptr; } } @@ -128,7 +128,7 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) { std::scoped_lock lock(handles_lock); try { return handles.at(handle)->address; - } catch ([[maybe_unused]] std::out_of_range& e) { + } catch (std::out_of_range&) { return 0; } } diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 6d6dac023a..b9dd3801f4 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -98,35 +98,6 @@ public: } }; -private: - std::list> unmap_queue{}; - std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue` - - std::unordered_map> - handles{}; //!< Main owning map of handles - std::mutex handles_lock; //!< Protects access to `handles` - - static constexpr u32 HandleIdIncrement{ - 4}; //!< Each new handle ID is an increment of 4 from the previous - std::atomic next_handle_id{HandleIdIncrement}; - Tegra::Host1x::Host1x& host1x; - - void AddHandle(std::shared_ptr handle); - - /** - * @brief Unmaps and frees the SMMU memory region a handle is mapped to - * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this - */ - void UnmapHandle(Handle& handle_description); - - /** - * @brief Removes a handle from the map taking its dupes into account - * @note handle_description.mutex MUST be locked when calling this - * @return If the handle was removed from the map - */ - bool TryRemoveHandle(const Handle& handle_description); - -public: /** * @brief Encapsulates the result of a FreeHandle operation */ @@ -136,7 +107,7 @@ public: bool was_uncached; //!< If the handle was allocated as uncached }; - NvMap(Tegra::Host1x::Host1x& host1x); + explicit NvMap(Tegra::Host1x::Host1x& host1x); /** * @brief Creates an unallocated handle of the given size @@ -172,5 +143,33 @@ public: * describing the prior state of the handle */ std::optional FreeHandle(Handle::Id handle, bool internal_session); + +private: + std::list> unmap_queue{}; + std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue` + + std::unordered_map> + handles{}; //!< Main owning map of handles + std::mutex handles_lock; //!< Protects access to `handles` + + static constexpr u32 HandleIdIncrement{ + 4}; //!< Each new handle ID is an increment of 4 from the previous + std::atomic next_handle_id{HandleIdIncrement}; + Tegra::Host1x::Host1x& host1x; + + void AddHandle(std::shared_ptr handle); + + /** + * @brief Unmaps and frees the SMMU memory region a handle is mapped to + * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this + */ + void UnmapHandle(Handle& handle_description); + + /** + * @brief Removes a handle from the map taking its dupes into account + * @note handle_description.mutex MUST be locked when calling this + * @return If the handle was removed from the map + */ + bool TryRemoveHandle(const Handle& handle_description); }; } // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index 072b3a22f1..eda2041a0d 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -18,23 +18,23 @@ SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host ReserveSyncpoint(VBlank0SyncpointId, true); 
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
index 072b3a22f1..eda2041a0d 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -18,23 +18,23 @@ SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host
     ReserveSyncpoint(VBlank0SyncpointId, true);
     ReserveSyncpoint(VBlank1SyncpointId, true);
 
-    for (u32 syncpointId : channel_syncpoints) {
-        if (syncpointId) {
-            ReserveSyncpoint(syncpointId, false);
+    for (u32 syncpoint_id : channel_syncpoints) {
+        if (syncpoint_id) {
+            ReserveSyncpoint(syncpoint_id, false);
         }
     }
 }
 
 SyncpointManager::~SyncpointManager() = default;
 
-u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) {
+u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) {
     if (syncpoints.at(id).reserved) {
         ASSERT_MSG(false, "Requested syncpoint is in use");
         return 0;
     }
 
     syncpoints.at(id).reserved = true;
-    syncpoints.at(id).interfaceManaged = clientManaged;
+    syncpoints.at(id).interface_managed = client_managed;
 
     return id;
 }
@@ -49,9 +49,9 @@ u32 SyncpointManager::FindFreeSyncpoint() {
     return 0;
 }
 
-u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) {
+u32 SyncpointManager::AllocateSyncpoint(bool client_managed) {
     std::lock_guard lock(reservation_lock);
-    return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged);
+    return ReserveSyncpoint(FindFreeSyncpoint(), client_managed);
 }
 
 void SyncpointManager::FreeSyncpoint(u32 id) {
@@ -64,7 +64,7 @@ bool SyncpointManager::IsSyncpointAllocated(u32 id) {
     return (id <= SyncpointCount) && syncpoints[id].reserved;
 }
 
-bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
+bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const {
     const SyncpointInfo& syncpoint{syncpoints.at(id)};
 
     if (!syncpoint.reserved) {
@@ -74,10 +74,10 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
 
     // If the interface manages counters then we don't keep track of the maximum value as it handles
    // sanity checking the values then
-    if (syncpoint.interfaceManaged) {
-        return static_cast<s32>(syncpoint.counterMin - threshold) >= 0;
+    if (syncpoint.interface_managed) {
+        return static_cast<s32>(syncpoint.counter_min - threshold) >= 0;
     } else {
-        return (syncpoint.counterMax - threshold) >= (syncpoint.counterMin - threshold);
+        return (syncpoint.counter_max - threshold) >= (syncpoint.counter_min - threshold);
     }
 }
 
@@ -87,7 +87,7 @@ u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
         return 0;
     }
 
-    return syncpoints.at(id).counterMax += amount;
+    return syncpoints.at(id).counter_max += amount;
 }
 
 u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
@@ -96,7 +96,7 @@ u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
         return 0;
     }
 
-    return syncpoints.at(id).counterMin;
+    return syncpoints.at(id).counter_min;
 }
 
 u32 SyncpointManager::UpdateMin(u32 id) {
@@ -105,8 +105,8 @@ u32 SyncpointManager::UpdateMin(u32 id) {
         return 0;
     }
 
-    syncpoints.at(id).counterMin = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
-    return syncpoints.at(id).counterMin;
+    syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
+    return syncpoints.at(id).counter_min;
 }
 
 NvFence SyncpointManager::GetSyncpointFence(u32 id) {
@@ -115,7 +115,7 @@ NvFence SyncpointManager::GetSyncpointFence(u32 id) {
         return NvFence{};
     }
 
-    return {.id = static_cast<u32>(id), .value = syncpoints.at(id).counterMax};
+    return {.id = static_cast<u32>(id), .value = syncpoints.at(id).counter_max};
 }
 
 } // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
index 6b71cd33df..b76ef90326 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
@@ -11,13 +11,9 @@
 #include "common/common_types.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 
-namespace Tegra {
-
-namespace Host1x {
+namespace Tegra::Host1x {
 class Host1x;
-} // namespace Host1x
-
-} // namespace Tegra
+} // namespace Tegra::Host1x
 
 namespace Service::Nvidia::NvCore {
 
@@ -54,15 +50,15 @@ public:
      * @brief Finds a free syncpoint and reserves it
     * @return The ID of the reserved syncpoint
     */
-    u32 AllocateSyncpoint(bool clientManaged);
+    u32 AllocateSyncpoint(bool client_managed);
 
    /**
     * @url
    * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259
     */
-    bool HasSyncpointExpired(u32 id, u32 threshold);
+    bool HasSyncpointExpired(u32 id, u32 threshold) const;
 
-    bool IsFenceSignalled(NvFence fence) {
+    bool IsFenceSignalled(NvFence fence) const {
         return HasSyncpointExpired(fence.id, fence.value);
     }
 
@@ -107,7 +103,7 @@ private:
    /**
     * @note reservation_lock should be locked when calling this
     */
-    u32 ReserveSyncpoint(u32 id, bool clientManaged);
+    u32 ReserveSyncpoint(u32 id, bool client_managed);
 
    /**
     * @return The ID of the first free syncpoint
    */
     u32 FindFreeSyncpoint();
 
     struct SyncpointInfo {
-        std::atomic<u32> counterMin; //!< The least value the syncpoint can be (The value it was
-                                     //!< when it was last synchronized with host1x)
-        std::atomic<u32> counterMax; //!< The maximum value the syncpoint can reach according to the
-                                     //!< current usage
-        bool interfaceManaged; //!< If the syncpoint is managed by a host1x client interface, a
-                               //!< client interface is a HW block that can handle host1x
-                               //!< transactions on behalf of a host1x client (Which would otherwise
-                               //!< need to be manually synced using PIO which is synchronous and
-                               //!< requires direct cooperation of the CPU)
+        std::atomic<u32> counter_min; //!< The least value the syncpoint can be (The value it was
+                                      //!< when it was last synchronized with host1x)
+        std::atomic<u32> counter_max; //!< The maximum value the syncpoint can reach according to
+                                      //!< the current usage
+        bool interface_managed; //!< If the syncpoint is managed by a host1x client interface, a
+                                //!< client interface is a HW block that can handle host1x
+                                //!< transactions on behalf of a host1x client (Which would
+                                //!< otherwise need to be manually synced using PIO which is
+                                //!< synchronous and requires direct cooperation of the CPU)
         bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved
                        //!< value
     };
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 192503ffc3..6411dbf433 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -106,7 +106,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>
         return NvResult::BadValue;
     }
 
-    if (!(params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES)) {
+    if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) {
         LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size);
         return NvResult::BadValue;
     }
@@ -124,12 +124,13 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>
         vm.va_range_end = params.va_range_end;
     }
 
-    const u64 start_pages{vm.va_range_start >> VM::PAGE_SIZE_BITS};
-    const u64 end_pages{vm.va_range_split >> VM::PAGE_SIZE_BITS};
+    const auto start_pages{static_cast<u32>(vm.va_range_start >> VM::PAGE_SIZE_BITS)};
+    const auto end_pages{static_cast<u32>(vm.va_range_split >> VM::PAGE_SIZE_BITS)};
     vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages);
 
-    const u64 start_big_pages{vm.va_range_split >> vm.big_page_size_bits};
-    const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits};
+    const auto start_big_pages{static_cast<u32>(vm.va_range_split >> vm.big_page_size_bits)};
+    const auto end_big_pages{
+        static_cast<u32>((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)};
     vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);
 
     gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
@@ -210,10 +211,11 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
 
     // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
     // Only FreeSpace can unmap them fully
-    if (mapping->sparse_alloc)
+    if (mapping->sparse_alloc) {
         gmmu->MapSparse(offset, mapping->size, mapping->big_page);
-    else
+    } else {
         gmmu->Unmap(offset, mapping->size);
+    }
 
     mapping_map.erase(offset);
 }
@@ -256,7 +258,7 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>
             allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
                            static_cast<u32>(allocation.size >> page_size_bits));
             allocation_map.erase(params.offset);
-        } catch ([[maybe_unused]] const std::out_of_range& e) {
+        } catch (const std::out_of_range&) {
             return NvResult::BadValue;
         }
 
@@ -351,7 +353,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u
 
             gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
 
             return NvResult::Success;
-        } catch ([[maybe_unused]] const std::out_of_range& e) {
+        } catch (const std::out_of_range&) {
             LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
                         params.offset);
             return NvResult::BadValue;
@@ -367,11 +369,11 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u
         u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
 
         bool big_page{[&]() {
-            if (Common::IsAligned(handle->align, vm.big_page_size))
+            if (Common::IsAligned(handle->align, vm.big_page_size)) {
                 return true;
-            else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
+            } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
                 return false;
-            else {
+            } else {
                 ASSERT(false);
                 return false;
             }
@@ -450,7 +452,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u
             mapping_map.erase(params.offset);
-        } catch ([[maybe_unused]] const std::out_of_range& e) {
+        } catch (const std::out_of_range&) {
             LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap @ 0x{:X}", params.offset);
         }
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ ... @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u
-    if (channel_state->initiated) {
+    if (channel_state->initialized) {
         LOG_CRITICAL(Service_NVDRV, "Already allocated!");
         return NvResult::AlreadyAllocated;
     }
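Stepping back to HasSyncpointExpired in the core syncpoint manager above: for interface-managed syncpoints only counter_min is tracked, and expiry is decided by reinterpreting an unsigned difference as signed, which stays correct across 32-bit wraparound; the AllocAsEx changes above apply the same care when narrowing page indices to u32. A distilled, self-contained version of the expiry check (hypothetical helper, not part of the service):

#include <cstdint>

// True when counter_min has reached threshold, treating values within 2^31
// of each other as ordered even after the counter wraps past 0xFFFFFFFF.
bool HasReached(std::uint32_t counter_min, std::uint32_t threshold) {
    return static_cast<std::int32_t>(counter_min - threshold) >= 0;
}

// Example across the wrap: counter_min = 0x00000002, threshold = 0xFFFFFFFE.
// 0x2 - 0xFFFFFFFE == 0x4 (mod 2^32), so the threshold counts as reached.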
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index fed5370394..1703f9cc37 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -5,13 +5,12 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
 #include "video_core/renderer_base.h"
 
 namespace Service::Nvidia::Devices {
 
-u32 nvhost_nvdec::next_id{};
-
 nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
     : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
 nvhost_nvdec::~nvhost_nvdec() = default;
@@ -22,8 +21,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>
     case 0x0:
         switch (command.cmd) {
         case 0x1: {
-            if (!fd_to_id.contains(fd)) {
-                fd_to_id[fd] = next_id++;
+            auto& host1x_file = core.Host1xDeviceFile();
+            if (!host1x_file.fd_to_id.contains(fd)) {
+                host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
             }
             return Submit(fd, input, output);
         }
@@ -74,8 +74,9 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) {
 
 void nvhost_nvdec::OnClose(DeviceFD fd) {
     LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
-    const auto iter = fd_to_id.find(fd);
-    if (iter != fd_to_id.end()) {
+    auto& host1x_file = core.Host1xDeviceFile();
+    const auto iter = host1x_file.fd_to_id.find(fd);
+    if (iter != host1x_file.fd_to_id.end()) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
     system.AudioCore().SetNVDECActive(false);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index 3261ce1d40..c1b4e53e83 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -22,9 +22,6 @@ public:
 
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
-
-private:
-    static u32 next_id;
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 2ec1ad3e9e..99eede702f 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -46,13 +46,11 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
 }
 } // Anonymous namespace
 
-std::unordered_map<DeviceFD, u32> nvhost_nvdec_common::fd_to_id{};
-std::deque<u32> nvhost_nvdec_common::syncpts_accumulated{};
-
 nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
                                          NvCore::ChannelType channel_type_)
     : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
       nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
+    auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated;
     if (syncpts_accumulated.empty()) {
         channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
     } else {
@@ -60,8 +58,9 @@ nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Containe
         syncpts_accumulated.pop_front();
     }
 }
+
 nvhost_nvdec_common::~nvhost_nvdec_common() {
-    syncpts_accumulated.push_back(channel_syncpoint);
+    core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
 }
 
 NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
@@ -108,7 +107,7 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
             Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
             system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
                                       cmdlist.size() * sizeof(u32));
-            gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
+            gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
         }
         std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
         // Some games expect command_buffers to be written back
@@ -186,8 +185,4 @@ Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
     return nullptr;
 }
 
-void nvhost_nvdec_common::Reset() {
-    fd_to_id.clear();
-}
-
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 93990bb9b8..fe76100c86 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -25,8 +25,6 @@ public:
                         NvCore::ChannelType channel_type);
     ~nvhost_nvdec_common() override;
 
-    static void Reset();
-
 protected:
     struct IoctlSetNvmapFD {
         s32_le nvmap_fd{};
@@ -119,7 +117,6 @@ protected:
     Kernel::KEvent* QueryEvent(u32 event_id) override;
 
-    static std::unordered_map<DeviceFD, u32> fd_to_id;
     u32 channel_syncpoint;
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
@@ -128,8 +125,6 @@ protected:
     NvCore::NvMap& nvmap;
     NvCore::ChannelType channel_type;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
-
-    static std::deque<u32> syncpts_accumulated;
 };
 }; // namespace Devices
 } // namespace Service::Nvidia
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index 2e4ff988cd..73f97136e3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -4,13 +4,12 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvhost_vic.h"
 #include "video_core/renderer_base.h"
 
 namespace Service::Nvidia::Devices {
 
-u32 nvhost_vic::next_id{};
-
 nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
     : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
 
@@ -21,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
     switch (command.group) {
     case 0x0:
         switch (command.cmd) {
-        case 0x1:
-            if (!fd_to_id.contains(fd)) {
-                fd_to_id[fd] = next_id++;
+        case 0x1: {
+            auto& host1x_file = core.Host1xDeviceFile();
+            if (!host1x_file.fd_to_id.contains(fd)) {
+                host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
             }
             return Submit(fd, input, output);
+        }
         case 0x2:
             return GetSyncpoint(input, output);
         case 0x3:
@@ -69,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
 void nvhost_vic::OnOpen(DeviceFD fd) {}
 
 void nvhost_vic::OnClose(DeviceFD fd) {
-    const auto iter = fd_to_id.find(fd);
-    if (iter != fd_to_id.end()) {
+    auto& host1x_file = core.Host1xDeviceFile();
+    const auto iter = host1x_file.fd_to_id.find(fd);
+    if (iter != host1x_file.fd_to_id.end()) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
 }
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index 59e23b41ef..f164caafb3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -21,8 +21,5 @@ public:
 
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
-
-private:
-    static u32 next_id;
 };
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index 52e1d7cff9..e9bfd03580 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -23,8 +23,8 @@ public:
     explicit nvmap(Core::System& system_, NvCore::Container& container);
     ~nvmap() override;
 
-    nvmap(nvmap const&) = delete;
-    nvmap& operator=(nvmap const&) = delete;
+    nvmap(const nvmap&) = delete;
+    nvmap& operator=(const nvmap&) = delete;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                     std::vector<u8>& output) override;
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 7929443d27..5e7b7468f4 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -101,9 +101,7 @@ Module::Module(Core::System& system)
     };
 }
 
-Module::~Module() {
-    Devices::nvhost_nvdec_common::Reset();
-}
+Module::~Module() {}
 
 NvResult Module::VerifyFD(DeviceFD fd) const {
     if (fd < 0) {
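For context on the fd_to_id and next_id changes running through these device files: the per-class statics are gone, and every channel device now reaches shared state through core.Host1xDeviceFile(). The real definition lives in core/container.h, which this series does not show; judging purely from the usage in the diffs, its relevant members look roughly like this (a sketch, field names taken from the calls above):

#include <cstdint>
#include <deque>
#include <unordered_map>

using DeviceFD = std::int32_t; // assumed width; matches how fds are used here

struct Host1xDeviceFileState {
    std::unordered_map<DeviceFD, std::uint32_t> fd_to_id{}; // fd -> cdma instance
    std::uint32_t nvdec_next_id{};                          // next NVDEC instance id
    std::uint32_t vic_next_id{};                            // next VIC instance id
    std::deque<std::uint32_t> syncpts_accumulated{};        // recycled channel syncpoints
};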
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index a2aeb80b49..146d046a93 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -46,7 +46,7 @@ class Module;
 
 class EventInterface {
 public:
-    EventInterface(Module& module_);
+    explicit EventInterface(Module& module_);
     ~EventInterface();
 
     Kernel::KEvent* CreateEvent(std::string name);
diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp
index b04922ac0e..cdecc3a91f 100644
--- a/src/video_core/control/channel_state.cpp
+++ b/src/video_core/control/channel_state.cpp
@@ -14,10 +14,7 @@
 
 namespace Tegra::Control {
 
-ChannelState::ChannelState(s32 bind_id_) {
-    bind_id = bind_id_;
-    initiated = false;
-}
+ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}
 
 void ChannelState::Init(Core::System& system, GPU& gpu) {
     ASSERT(memory_manager);
@@ -27,7 +24,7 @@ void ChannelState::Init(Core::System& system, GPU& gpu) {
     kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
     maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
     kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
-    initiated = true;
+    initialized = true;
 }
 
 void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h
index 305b21cbaa..3a7b9872c1 100644
--- a/src/video_core/control/channel_state.h
+++ b/src/video_core/control/channel_state.h
@@ -34,7 +34,7 @@ class DmaPusher;
 namespace Control {
 
 struct ChannelState {
-    ChannelState(s32 bind_id);
+    explicit ChannelState(s32 bind_id);
     ChannelState(const ChannelState& state) = delete;
     ChannelState& operator=(const ChannelState&) = delete;
     ChannelState(ChannelState&& other) noexcept = default;
@@ -60,7 +60,7 @@ struct ChannelState {
 
     std::unique_ptr<DmaPusher> dma_pusher;
 
-    bool initiated{};
+    bool initialized{};
 };
 
 } // namespace Control
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h
index 5246192a80..584a0c26cd 100644
--- a/src/video_core/control/channel_state_cache.h
+++ b/src/video_core/control/channel_state_cache.h
@@ -32,7 +32,7 @@ namespace VideoCommon {
 class ChannelInfo {
 public:
     ChannelInfo() = delete;
-    ChannelInfo(Tegra::Control::ChannelState& state);
+    explicit ChannelInfo(Tegra::Control::ChannelState& state);
     ChannelInfo(const ChannelInfo& state) = delete;
     ChannelInfo& operator=(const ChannelInfo&) = delete;
     ChannelInfo(ChannelInfo&& other) = default;
diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp
index 7330426909..f7cbe204ee 100644
--- a/src/video_core/control/scheduler.cpp
+++ b/src/video_core/control/scheduler.cpp
@@ -3,6 +3,7 @@
 
 #include <memory>
 
+#include "common/assert.h"
 #include "video_core/control/channel_state.h"
 #include "video_core/control/scheduler.h"
 #include "video_core/gpu.h"
@@ -13,8 +14,9 @@ Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
 Scheduler::~Scheduler() = default;
 
 void Scheduler::Push(s32 channel, CommandList&& entries) {
-    std::unique_lock<std::mutex> lk(scheduling_guard);
+    std::unique_lock lk(scheduling_guard);
     auto it = channels.find(channel);
+    ASSERT(it != channels.end());
     auto channel_state = it->second;
     gpu.BindChannel(channel_state->bind_id);
     channel_state->dma_pusher->Push(std::move(entries));
@@ -23,7 +25,7 @@ void Scheduler::Push(s32 channel, CommandList&& entries) {
 
 void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
     s32 channel = new_channel->bind_id;
-    std::unique_lock<std::mutex> lk(scheduling_guard);
+    std::unique_lock lk(scheduling_guard);
     channels.emplace(channel, new_channel);
 }
diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h
index 305a01e0a0..44addf61c4 100644
--- a/src/video_core/control/scheduler.h
+++ b/src/video_core/control/scheduler.h
@@ -19,7 +19,7 @@ struct ChannelState;
 
 class Scheduler {
 public:
-    Scheduler(GPU& gpu_);
+    explicit Scheduler(GPU& gpu_);
     ~Scheduler();
 
     void Push(s32 channel, CommandList&& entries);
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 9c5d567a67..bc48320ce2 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -195,7 +195,7 @@ public:
             BitField<24, 2, u32> num_dst_components_minus_one;
         };
 
-        Swizzle GetComponent(size_t i) {
+        Swizzle GetComponent(size_t i) const {
             const u32 raw = dst_components_raw;
             return static_cast<Swizzle>((raw >> (i * 3)) & 0x7);
         }
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index d7a3dd96ba..28b38273e8 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -355,7 +355,7 @@ struct GPU::Impl {
 
     std::condition_variable sync_cv;
 
-    std::list<std::function<void(GPU&)>> sync_requests;
+    std::list<std::function<void()>> sync_requests;
     std::atomic<u64> current_sync_fence{};
     u64 last_sync_fence{};
     std::mutex sync_request_mutex;
diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h
index 7ecf853d95..57082ae54f 100644
--- a/src/video_core/host1x/host1x.h
+++ b/src/video_core/host1x/host1x.h
@@ -19,7 +19,7 @@ namespace Host1x {
 
 class Host1x {
 public:
-    Host1x(Core::System& system);
+    explicit Host1x(Core::System& system);
 
     SyncpointManager& GetSyncpointManager() {
         return syncpoint_manager;
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
index 4471bacae2..326e8355ad 100644
--- a/src/video_core/host1x/syncpoint_manager.cpp
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -12,13 +12,13 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
 
 SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
     std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
-    std::function<void()>& action) {
+    std::function<void()>&& action) {
     if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
         action();
         return {};
     }
 
-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
         action();
         return {};
@@ -30,12 +30,12 @@ SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
         }
         ++it;
     }
-    return action_storage.emplace(it, expected_value, action);
+    return action_storage.emplace(it, expected_value, std::move(action));
 }
 
 void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
                                         ActionHandle& handle) {
-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     action_storage.erase(handle);
 }
 
@@ -68,7 +68,7 @@ void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_var
                                  std::list<RegisteredAction>& action_storage) {
     auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};
 
-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     auto it = action_storage.begin();
     while (it != action_storage.end()) {
         if (it->expected_value > new_value) {
@@ -87,7 +87,7 @@ void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable
         return;
     }
 
-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     wait_cv.wait(lk, pred);
 }
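RegisterAction above keeps a double-checked pattern: test the syncpoint before taking the lock, then re-test under the lock, so an increment racing with registration can never strand an action in the list; taking the std::function by rvalue reference then lets the storage assume ownership without a copy. The control flow, reduced to a standalone sketch:

#include <atomic>
#include <cstdint>
#include <functional>
#include <list>
#include <mutex>
#include <utility>

struct ActionRegistry {
    void Register(std::uint32_t expected, std::function<void()>&& action) {
        if (value.load(std::memory_order_acquire) >= expected) {
            action(); // already satisfied; run without locking
            return;
        }
        std::unique_lock lk(guard);
        if (value.load(std::memory_order_relaxed) >= expected) {
            action(); // an increment slipped in between the check and the lock
            return;
        }
        pending.emplace_back(expected, std::move(action));
    }

    std::atomic<std::uint32_t> value{};
    std::mutex guard;
    std::list<std::pair<std::uint32_t, std::function<void()>>> pending;
};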
diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h
index 72220a09a5..50a264e23f 100644
--- a/src/video_core/host1x/syncpoint_manager.h
+++ b/src/video_core/host1x/syncpoint_manager.h
@@ -18,34 +18,34 @@ namespace Host1x {
 
 class SyncpointManager {
 public:
-    u32 GetGuestSyncpointValue(u32 id) {
+    u32 GetGuestSyncpointValue(u32 id) const {
         return syncpoints_guest[id].load(std::memory_order_acquire);
     }
 
-    u32 GetHostSyncpointValue(u32 id) {
+    u32 GetHostSyncpointValue(u32 id) const {
         return syncpoints_host[id].load(std::memory_order_acquire);
     }
 
     struct RegisteredAction {
-        RegisteredAction(u32 expected_value_, std::function<void()>& action_)
-            : expected_value{expected_value_}, action{action_} {}
+        explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
+            : expected_value{expected_value_}, action{std::move(action_)} {}
         u32 expected_value;
         std::function<void()> action;
     };
     using ActionHandle = std::list<RegisteredAction>::iterator;
 
     template <typename Func>
     ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
         std::function<void()> func(action);
         return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
-                              expected_value, func);
+                              expected_value, std::move(func));
     }
 
     template <typename Func>
     ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
         std::function<void()> func(action);
         return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
-                              expected_value, func);
+                              expected_value, std::move(func));
     }
 
     void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
@@ -60,11 +60,11 @@ public:
 
     void WaitHost(u32 syncpoint_id, u32 expected_value);
 
-    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) {
+    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
         return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
     }
 
-    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) {
+    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
         return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
     }
 
@@ -74,7 +74,7 @@ private:
     ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
                                 std::list<RegisteredAction>& action_storage, u32 expected_value,
-                                std::function<void()>& action);
+                                std::function<void()>&& action);
 
     void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index ae4fd98dfa..f992e29f3d 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -126,7 +126,7 @@ private:
     void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
 
     template <bool is_big_page>
-    [[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
+    [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
         if constexpr (is_big_page) {
             return (gpu_addr >> big_page_bits) & big_page_table_mask;
         } else {

From aedd739631efcebf42a1dae601096c10b99d9cd5 Mon Sep 17 00:00:00 2001
From: Liam
Date: Sat, 3 Sep 2022 21:02:02 -0400
Subject: [PATCH 67/68] maxwell_dma: remove warnings from implemented
 functionality

---
 src/video_core/engines/maxwell_dma.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index bcffd1862d..3909d36c19 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -133,7 +133,6 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     }
 
     // Deswizzle the input and copy it over.
-    UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
     const Parameters& src_params = regs.src_params;
 
     const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
@@ -182,7 +181,6 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
 void MaxwellDMA::CopyPitchToBlockLinear() {
     UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one");
     UNIMPLEMENTED_IF(regs.dst_params.layer != 0);
-    UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
 
     const bool is_remapping = regs.launch_dma.remap_enable != 0;
     const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;

From df6dffa30baefd9f1e73399c632ab4e5f6475bab Mon Sep 17 00:00:00 2001
From: Byte
Date: Thu, 6 Oct 2022 20:59:40 +0200
Subject: [PATCH 68/68] vulkan_blitter: Fix pool allocation double free.

---
 .../renderer_vulkan/vk_blit_screen.cpp        | 13 ++++++++----
 .../renderer_vulkan/vk_blit_screen.h          |  2 +-
 src/video_core/vulkan_common/vulkan_wrapper.h | 20 -------------------
 3 files changed, 10 insertions(+), 25 deletions(-)

diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 444c29f686..cb7fa20784 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -145,6 +145,11 @@ VkSemaphore BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
     // Finish any pending renderpass
     scheduler.RequestOutsideRenderPassOperationContext();
 
+    if (const auto swapchain_images = swapchain.GetImageCount(); swapchain_images != image_count) {
+        image_count = swapchain_images;
+        Recreate();
+    }
+
     const std::size_t image_index = swapchain.GetImageIndex();
 
     scheduler.Wait(resource_ticks[image_index]);
@@ -448,15 +453,15 @@ vk::Framebuffer BlitScreen::CreateFramebuffer(const VkImageView& image_view, VkE
 
 void BlitScreen::CreateStaticResources() {
     CreateShaders();
+    CreateSampler();
+}
+
+void BlitScreen::CreateDynamicResources() {
     CreateSemaphores();
     CreateDescriptorPool();
     CreateDescriptorSetLayout();
     CreateDescriptorSets();
     CreatePipelineLayout();
-    CreateSampler();
-}
-
-void BlitScreen::CreateDynamicResources() {
     CreateRenderPass();
     CreateFramebuffers();
     CreateGraphicsPipeline();
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index b8c67bef08..29e2ea925e 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -109,7 +109,7 @@ private:
     MemoryAllocator& memory_allocator;
     Swapchain& swapchain;
     Scheduler& scheduler;
-    const std::size_t image_count;
+    std::size_t image_count;
     const ScreenInfo& screen_info;
 
     vk::ShaderModule vertex_shader;
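The vk_blit_screen halves of this patch work together: image_count loses its const so that Draw() can rebuild swapchain-sized resources when the swapchain comes back with a different image count, and the static/dynamic split moves everything pool-backed into the recreated set. The guard itself, reduced to a sketch:

#include <cstddef>
#include <cstdint>
#include <vector>

struct PerImageState {
    std::vector<std::uint64_t> resource_ticks; // one tick per swapchain image

    // Call at the top of a frame: if the swapchain was recreated with a
    // different image count, rebuild the per-image bookkeeping before
    // indexing it with the new image index.
    void EnsureImageCount(std::size_t swapchain_images) {
        if (swapchain_images != resource_ticks.size()) {
            resource_ticks.assign(swapchain_images, 0);
        }
    }
};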
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 795f16bfb7..1b3f493bde 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -519,9 +519,7 @@ public:
           dld{rhs.dld} {}
 
     /// Assign an allocation transfering ownership from another allocation.
-    /// Releases any previously held allocation.
     PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
-        Release();
         allocations = std::move(rhs.allocations);
         num = rhs.num;
         device = rhs.device;
@@ -530,11 +528,6 @@ public:
         return *this;
     }
 
-    /// Destroys any held allocation.
-    ~PoolAllocations() {
-        Release();
-    }
-
     /// Returns the number of allocations.
     std::size_t size() const noexcept {
         return num;
@@ -557,19 +550,6 @@ public:
     }
 
 private:
-    /// Destroys the held allocations if they exist.
-    void Release() noexcept {
-        if (!allocations) {
-            return;
-        }
-        const Span<AllocationType> span(allocations.get(), num);
-        const VkResult result = Free(device, pool, span, *dld);
-        // There's no way to report errors from a destructor.
-        if (result != VK_SUCCESS) {
-            std::terminate();
-        }
-    }
-
    std::unique_ptr<AllocationType[]> allocations;
     std::size_t num = 0;
     VkDevice device = nullptr;
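And the vulkan_wrapper.h half: PoolAllocations stops freeing its descriptor sets on destruction and move-assignment. Allocations that come out of a pool are reclaimed in bulk when the pool is reset or destroyed, so a wrapper that also frees them individually releases the same storage twice once BlitScreen starts recreating its pool-backed resources. The ownership rule, illustrated with plain new/delete standing in for the pool (not Vulkan code):

struct Pool {
    int* storage = new int[16];
    ~Pool() {
        delete[] storage; // pool teardown reclaims every allocation at once
    }
};

struct Allocation {
    int* ptr = nullptr; // borrowed from a Pool; the pool retains ownership
    // Intentionally no destructor: freeing here as well would release the
    // same storage a second time when ~Pool() runs, which is exactly the
    // double free this patch removes.
};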