diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 2871682f62..7373cb62d7 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -164,11 +164,16 @@ public:
     /// Pop asynchronous downloads
     void PopAsyncFlushes();
 
-    [[nodiscard]] bool DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount);
+    bool DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount);
+
+    bool DMAClear(GPUVAddr src_address, u64 amount, u32 value);
 
     /// Return true when a CPU region is modified from the GPU
     [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
 
+    /// Return true when a region is registered on the cache
+    [[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size);
+
     /// Return true when a CPU region is modified from the CPU
     [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);
 
@@ -324,6 +329,8 @@ private:
 
     [[nodiscard]] bool HasFastUniformBufferBound(size_t stage, u32 binding_index) const noexcept;
 
+    void ClearDownload(IntervalType subtract_interval);
+
     VideoCore::RasterizerInterface& rasterizer;
     Tegra::Engines::Maxwell3D& maxwell3d;
     Tegra::Engines::KeplerCompute& kepler_compute;
@@ -462,6 +469,14 @@ void BufferCache<P>::DownloadMemory(VAddr cpu_addr, u64 size) {
     });
 }
 
+template <class P>
+void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
+    uncommitted_ranges.subtract(subtract_interval);
+    for (auto& interval_set : committed_ranges) {
+        interval_set.subtract(subtract_interval);
+    }
+}
+
 template <class P>
 bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
     const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address);
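ClearDownload() factors out the bookkeeping both DMA paths need: before a range is overwritten on the GPU, it is removed from the pending asynchronous-download sets so stale data is not written back to guest memory later. A minimal standalone sketch of the subtract semantics this relies on, assuming IntervalSet/IntervalType are the boost::icl interval containers buffer_cache.h already uses (the addresses are made up for illustration):

    #include <cstdint>
    #include <iostream>

    #include <boost/icl/interval_set.hpp>

    using VAddr = std::uint64_t;
    using IntervalSet = boost::icl::interval_set<VAddr>;
    using IntervalType = IntervalSet::interval_type;

    int main() {
        // Pretend this CPU range was queued for download (right-open interval).
        IntervalSet uncommitted_ranges;
        uncommitted_ranges.add(IntervalType::right_open(0x1000, 0x3000));

        // A DMA clear/copy overwrites [0x1800, 0x2800); that sub-range must not
        // be written back to the CPU anymore, so it is subtracted from the set.
        uncommitted_ranges.subtract(IntervalType::right_open(0x1800, 0x2800));

        // Only the untouched halves remain: [0x1000, 0x1800) and [0x2800, 0x3000).
        for (const auto& interval : uncommitted_ranges) {
            std::cout << std::hex << interval.lower() << ".." << interval.upper() << '\n';
        }
    }

Subtracting a sub-range splits the enclosing interval, so only the parts that were not overwritten stay queued.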

@@ -469,17 +484,14 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     if (!cpu_src_address || !cpu_dest_address) {
         return false;
     }
-    const bool source_dirty = IsRegionGpuModified(*cpu_src_address, amount);
-    const bool dest_dirty = IsRegionGpuModified(*cpu_dest_address, amount);
+    const bool source_dirty = IsRegionRegistered(*cpu_src_address, amount);
+    const bool dest_dirty = IsRegionRegistered(*cpu_dest_address, amount);
     if (!source_dirty && !dest_dirty) {
         return false;
     }
 
     const IntervalType subtract_interval{*cpu_dest_address, *cpu_dest_address + amount};
-    uncommitted_ranges.subtract(subtract_interval);
-    for (auto& interval_set : committed_ranges) {
-        interval_set.subtract(subtract_interval);
-    }
+    ClearDownload(subtract_interval);
 
     BufferId buffer_a;
     BufferId buffer_b;
@@ -510,12 +522,13 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     ForEachWrittenRange(*cpu_src_address, amount, mirror);
     // This subtraction in this order is important for overlapping copies.
     common_ranges.subtract(subtract_interval);
+    bool atleast_1_download = tmp_intervals.size() != 0;
     for (const IntervalType add_interval : tmp_intervals) {
         common_ranges.add(add_interval);
     }
 
     runtime.CopyBuffer(dest_buffer, src_buffer, copies);
-    if (source_dirty) {
+    if (atleast_1_download) {
         dest_buffer.MarkRegionAsGpuModified(*cpu_dest_address, amount);
     }
     std::vector<u8> tmp_buffer(amount);
@@ -524,6 +537,33 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     return true;
 }
 
+template <class P>
+bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
+    const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address);
+    if (!cpu_dst_address) {
+        return false;
+    }
+    const bool dest_dirty = IsRegionRegistered(*cpu_dst_address, amount);
+    if (!dest_dirty) {
+        return false;
+    }
+
+    const size_t size = amount * sizeof(u32);
+    const IntervalType subtract_interval{*cpu_dst_address, *cpu_dst_address + size};
+    ClearDownload(subtract_interval);
+    common_ranges.subtract(subtract_interval);
+
+    BufferId buffer;
+    do {
+        has_deleted_buffers = false;
+        buffer = FindBuffer(*cpu_dst_address, static_cast<u32>(size));
+    } while (has_deleted_buffers);
+    auto& dest_buffer = slot_buffers[buffer];
+    const u32 offset = static_cast<u32>(*cpu_dst_address - dest_buffer.CpuAddr());
+    runtime.ClearBuffer(dest_buffer, offset, size, value);
+    return true;
+}
+
 template <class P>
 void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
                                                u32 size) {
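The do/while around FindBuffer() above guards against handle invalidation: the lookup may merge or delete overlapping buffers and reports that through has_deleted_buffers, so it is retried until it completes without deletions before a reference into slot_buffers is taken. A standalone sketch of the same retry shape, using a hypothetical Pool/Find/deleted_any in place of the cache internals:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical pool whose Find() may erase stale entries as a side effect,
    // mirroring how FindBuffer() can delete overlapping buffers and report it
    // through has_deleted_buffers; indices handed out before such a deletion
    // may no longer be valid.
    struct Pool {
        std::vector<std::uint64_t> entries{10, 0 /* stale */, 20, 30};
        bool deleted_any = false;

        std::size_t Find(std::uint64_t key) {
            // Drop stale entries first; doing so shifts every later index.
            for (std::size_t i = 0; i < entries.size();) {
                if (entries[i] == 0) {
                    entries.erase(entries.begin() + static_cast<std::ptrdiff_t>(i));
                    deleted_any = true;
                } else {
                    ++i;
                }
            }
            for (std::size_t i = 0; i < entries.size(); ++i) {
                if (entries[i] == key) {
                    return i;
                }
            }
            return entries.size(); // not found
        }
    };

    int main() {
        Pool pool;
        std::size_t index = 0;
        do {
            // Same shape as the DMAClear() loop: reset the flag, look up, and
            // retry until a lookup completes without deleting anything.
            pool.deleted_any = false;
            index = pool.Find(20);
        } while (pool.deleted_any);
        std::cout << "stable index: " << index << '\n'; // prints 1
    }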

@@ -781,6 +821,27 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
     return false;
 }
 
+template <class P>
+bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
+    const VAddr end_addr = addr + size;
+    const u64 page_end = Common::DivCeil(end_addr, PAGE_SIZE);
+    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+        const BufferId buffer_id = page_table[page];
+        if (!buffer_id) {
+            ++page;
+            continue;
+        }
+        Buffer& buffer = slot_buffers[buffer_id];
+        const VAddr buf_start_addr = buffer.CpuAddr();
+        const VAddr buf_end_addr = buf_start_addr + buffer.SizeBytes();
+        if (buf_start_addr < end_addr && addr < buf_end_addr) {
+            return true;
+        }
+        page = Common::DivCeil(end_addr, PAGE_SIZE);
+    }
+    return false;
+}
+
 template <class P>
 bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
     const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
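IsRegionRegistered() walks the cache's page table over [addr, addr + size): the first page is addr >> PAGE_BITS, the exclusive end page is DivCeil(end_addr, PAGE_SIZE), and a hit is a plain half-open overlap test against the buffer's CPU range. A small standalone sketch of that page arithmetic, assuming an illustrative 64 KiB page size rather than the cache's actual PAGE_BITS:

    #include <cstdint>
    #include <iostream>

    // Illustrative values only; the cache defines its own PAGE_BITS / PAGE_SIZE.
    constexpr std::uint64_t PAGE_BITS = 16; // assume 64 KiB pages for the example
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

    // Same rounding as Common::DivCeil.
    constexpr std::uint64_t DivCeil(std::uint64_t num, std::uint64_t den) {
        return (num + den - 1) / den;
    }

    int main() {
        const std::uint64_t addr = 0x12345;
        const std::uint64_t size = 0x20000;
        const std::uint64_t end_addr = addr + size;

        const std::uint64_t first_page = addr >> PAGE_BITS;          // floor of the start
        const std::uint64_t page_end = DivCeil(end_addr, PAGE_SIZE); // exclusive ceiling

        // Every page that intersects [addr, end_addr) is visited: here pages 1 through 3.
        std::cout << "pages " << first_page << " .. " << page_end - 1 << '\n';
    }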

@@ -1425,6 +1486,7 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si
         const VAddr end_address = start_address + range_size;
         ForEachWrittenRange(start_address, range_size, add_download);
         const IntervalType subtract_interval{start_address, end_address};
+        ClearDownload(subtract_interval);
         common_ranges.subtract(subtract_interval);
     });
     if (total_size_bytes == 0) {
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 24481952be..c517764661 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -4,6 +4,7 @@
 
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "common/microprofile.h"
 #include "common/settings.h"
 #include "core/core.h"
 #include "video_core/engines/maxwell_3d.h"
@@ -12,6 +13,9 @@
 #include "video_core/renderer_base.h"
 #include "video_core/textures/decoders.h"
 
+MICROPROFILE_DECLARE(GPU_DMAEngine);
+MICROPROFILE_DEFINE(GPU_DMAEngine, "GPU", "DMA Engine", MP_RGB(224, 224, 128));
+
 namespace Tegra::Engines {
 
 using namespace Texture;
@@ -43,6 +47,7 @@ void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
 }
 
 void MaxwellDMA::Launch() {
+    MICROPROFILE_SCOPE(GPU_DMAEngine);
     LOG_TRACE(Render_OpenGL, "DMA copy 0x{:x} -> 0x{:x}", static_cast<GPUVAddr>(regs.offset_in),
               static_cast<GPUVAddr>(regs.offset_out));
 
@@ -87,9 +92,11 @@ void MaxwellDMA::CopyPitchToPitch() {
     // TODO: allow multisized components.
     if (is_buffer_clear) {
         ASSERT(regs.remap_const.component_size_minus_one == 3);
+        accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
        std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
-        memory_manager.WriteBlock(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()),
-                                  regs.line_length_in * sizeof(u32));
+        memory_manager.WriteBlockUnsafe(regs.offset_out,
+                                        reinterpret_cast<u8*>(tmp_buffer.data()),
+                                        regs.line_length_in * sizeof(u32));
         return;
     }
     UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
@@ -179,8 +186,13 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
         write_buffer.resize(dst_size);
     }
 
-    memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
-    memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
+    if (Settings::IsGPULevelExtreme()) {
+        memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
+        memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
+    } else {
+        memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
+        memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
+    }
 
     // If the input is linear and the output is tiled, swizzle the input and copy it over.
     if (regs.dst_params.block_size.depth > 0) {
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 4ed0d09962..d3329b0f84 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -31,6 +31,8 @@ class AccelerateDMAInterface {
 public:
     /// Write the value to the register identified by method.
     virtual bool BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) = 0;
+
+    virtual bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) = 0;
 };
 
 /**
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index c225d1fc98..c4189fb60e 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -98,6 +98,12 @@ void BufferCacheRuntime::CopyBuffer(Buffer& dst_buffer, Buffer& src_buffer,
     }
 }
 
+void BufferCacheRuntime::ClearBuffer(Buffer& dest_buffer, u32 offset, size_t size, u32 value) {
+    glClearNamedBufferSubData(dest_buffer.Handle(), GL_R32UI, static_cast<GLintptr>(offset),
+                              static_cast<GLsizeiptr>(size / sizeof(u32)), GL_RGBA, GL_UNSIGNED_INT,
+                              &value);
+}
+
 void BufferCacheRuntime::BindIndexBuffer(Buffer& buffer, u32 offset, u32 size) {
     if (has_unified_vertex_buffers) {
         buffer.MakeResident(GL_READ_ONLY);
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index d8b20a9af8..fe91aa4528 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -57,6 +57,8 @@ public:
     void CopyBuffer(Buffer& dst_buffer, Buffer& src_buffer,
                     std::span<const VideoCommon::BufferCopy> copies);
 
+    void ClearBuffer(Buffer& dest_buffer, u32 offset, size_t size, u32 value);
+
     void BindIndexBuffer(Buffer& buffer, u32 offset, u32 size);
 
     void BindVertexBuffer(u32 index, Buffer& buffer, u32 offset, u32 size, u32 stride);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 82c84127ad..ceb3abcb2c 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -1407,4 +1407,9 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
     return buffer_cache.DMACopy(src_address, dest_address, amount);
 }
 
+bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
+    std::scoped_lock lock{buffer_cache.mutex};
+    return buffer_cache.DMAClear(src_address, amount, value);
+}
+
 } // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index ccee9ba337..d30ad698f0 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -65,6 +65,8 @@ public:
 
     bool BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) override;
 
+    bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
+
 private:
     BufferCache& buffer_cache;
 };
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 7d4e6ea7bd..0def1e7697 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -136,6 +136,30 @@ void BufferCacheRuntime::CopyBuffer(VkBuffer dst_buffer, VkBuffer src_buffer,
     });
 }
 
+void BufferCacheRuntime::ClearBuffer(VkBuffer dest_buffer, u32 offset, size_t size, u32 value) {
+    static constexpr VkMemoryBarrier READ_BARRIER{
+        .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+        .pNext = nullptr,
+        .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+        .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
+    };
+    static constexpr VkMemoryBarrier WRITE_BARRIER{
+        .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+        .pNext = nullptr,
+        .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+        .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
+    };
+
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([dest_buffer, offset, size, value](vk::CommandBuffer cmdbuf) {
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               0, READ_BARRIER);
+        cmdbuf.FillBuffer(dest_buffer, offset, size, value);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               0, WRITE_BARRIER);
+    });
+}
+
 void BufferCacheRuntime::BindIndexBuffer(PrimitiveTopology topology, IndexFormat index_format,
                                          u32 base_vertex, u32 num_indices, VkBuffer buffer,
                                          u32 offset, [[maybe_unused]] u32 size) {
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 6ea8448d73..3bb81d5b3d 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -60,6 +60,8 @@ public:
     void CopyBuffer(VkBuffer src_buffer, VkBuffer dst_buffer,
                     std::span<const VideoCommon::BufferCopy> copies);
 
+    void ClearBuffer(VkBuffer dest_buffer, u32 offset, size_t size, u32 value);
+
     void BindIndexBuffer(PrimitiveTopology topology, IndexFormat index_format, u32 num_indices,
                          u32 base_vertex, VkBuffer buffer, u32 offset, u32 size);
 
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index a8ffbe6ba7..f57c15b371 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -706,6 +706,11 @@ void RasterizerVulkan::FlushWork() {
 
 AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
 
+bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
+    std::scoped_lock lock{buffer_cache.mutex};
+    return buffer_cache.DMAClear(src_address, amount, value);
+}
+
 bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
     std::scoped_lock lock{buffer_cache.mutex};
     return buffer_cache.DMACopy(src_address, dest_address, amount);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 3a78de258d..2065209bef 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -56,6 +56,8 @@ public:
 
     bool BufferCopy(GPUVAddr start_address, GPUVAddr end_address, u64 amount) override;
 
+    bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
+
 private:
     BufferCache& buffer_cache;
 };
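Both runtimes back ClearBuffer() with a native fill: OpenGL via glClearNamedBufferSubData and Vulkan via vkCmdFillBuffer bracketed by coarse memory barriers. vkCmdFillBuffer repeats a 32-bit word across the range, and the spec requires the byte offset and size to be multiples of four; the size side lines up with the cache converting the DMA engine's word count to bytes with amount * sizeof(u32). As an illustration only, roughly what the recorded Vulkan lambda expands to when written against the raw C API (cmdbuf, buffer, offset, size and value are assumed to be supplied by the caller):

    #include <vulkan/vulkan.h>

    void RecordBufferClear(VkCommandBuffer cmdbuf, VkBuffer buffer, VkDeviceSize offset,
                           VkDeviceSize size, uint32_t value) {
        const VkMemoryBarrier read_barrier{
            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
            .pNext = nullptr,
            .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
        };
        const VkMemoryBarrier write_barrier{
            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
            .pNext = nullptr,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
        };
        // Make prior writes visible to the transfer, fill the range with the
        // 32-bit value, then make the transfer's writes visible to later work.
        vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &read_barrier, 0, nullptr, 0,
                             nullptr);
        vkCmdFillBuffer(cmdbuf, buffer, offset, size, value);
        vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_TRANSFER_BIT,
                             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &write_barrier, 0, nullptr,
                             0, nullptr);
    }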