Buffer Cache: Deduce vertex array limit from memory layout when limit is the highest possible.

This commit is contained in:
parent 8bb604b3be
commit 770e19f51a

3 changed files with 12 additions and 4 deletions

@@ -1316,12 +1316,16 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
     const GPUVAddr gpu_addr_begin = array.StartAddress();
     const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1;
     const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
-    const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
-    const u32 size = address_size; // TODO: Analyze stride and number of vertices
-    if (array.enable == 0 || size == 0 || !cpu_addr) {
+    u32 address_size = static_cast<u32>(
+        std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max())));
+    if (array.enable == 0 || address_size == 0 || !cpu_addr) {
         vertex_buffers[index] = NULL_BINDING;
         return;
     }
+    if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
+        address_size = gpu_memory->MaxContinousRange(gpu_addr_begin, address_size);
+    }
+    const u32 size = address_size; // TODO: Analyze stride and number of vertices
     vertex_buffers[index] = Binding{
         .cpu_addr = *cpu_addr,
         .size = size,
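
The new path only matters when the vertex array's limit register is programmed to the highest possible address: the raw gpu_addr_end - gpu_addr_begin span is first clamped to what fits in a u32, and if the end address falls outside the GPU address space, the size is shrunk to the largest continuously mapped range starting at the base. Below is a minimal standalone sketch of that deduction; GpuMemorySketch, DeduceVertexBufferSize, and the MaxContinousRange stub (which merely caps the range at the end of the address space instead of walking the real page table) are hypothetical stand-ins for yuzu's MemoryManager.

// Minimal sketch of the size deduction above; types are hypothetical stand-ins.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

using u32 = std::uint32_t;
using u64 = std::uint64_t;
using GPUVAddr = u64;

struct GpuMemorySketch {
    GPUVAddr address_space_size;

    bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
        return gpu_addr < address_space_size;
    }

    // Placeholder: the real MaxContinousRange walks the page table; here we
    // simply cap the size so the range stays inside the address space.
    u32 MaxContinousRange(GPUVAddr gpu_addr, u32 size) const {
        return static_cast<u32>(std::min<u64>(size, address_space_size - gpu_addr));
    }
};

// Clamp the raw span to u32, then shrink it if the end address is out of range.
u32 DeduceVertexBufferSize(const GpuMemorySketch& gpu_memory, GPUVAddr gpu_addr_begin,
                           GPUVAddr gpu_addr_end) {
    u32 address_size = static_cast<u32>(
        std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max())));
    if (!gpu_memory.IsWithinGPUAddressRange(gpu_addr_end)) {
        address_size = gpu_memory.MaxContinousRange(gpu_addr_begin, address_size);
    }
    return address_size;
}

int main() {
    const GpuMemorySketch gpu_memory{.address_space_size = 1ULL << 20};
    // Limit register at its highest possible value -> end address is out of range.
    const u32 size = DeduceVertexBufferSize(gpu_memory, 0x1000, 1ULL << 32);
    std::cout << "deduced size: " << size << '\n'; // 0xFF000 rather than ~4 GiB
}
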
@@ -193,7 +193,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
 }
 
 std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
-    if (gpu_addr >= address_space_size) [[unlikely]] {
+    if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
         return std::nullopt;
     }
     if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] {

@@ -110,6 +110,10 @@ public:
 
     size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const;
 
+    bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
+        return gpu_addr < address_space_size;
+    }
+
 private:
     template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
     inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
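
IsWithinGPUAddressRange is a plain bounds check against the configured address-space size, made public so the buffer cache can reuse the same test that GpuToCpuAddress already performs. A hedged sketch of that sharing follows; MemoryManagerSketch is a hypothetical cut-down class and its translation body is a placeholder, not yuzu's real page-table walk.

// Hypothetical cut-down MemoryManager showing the shared bounds check.
#include <cstdint>
#include <optional>

using u64 = std::uint64_t;
using GPUVAddr = u64;
using VAddr = u64;

class MemoryManagerSketch {
public:
    explicit MemoryManagerSketch(GPUVAddr space_size) : address_space_size{space_size} {}

    bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
        return gpu_addr < address_space_size;
    }

    std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr) const {
        if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
            return std::nullopt; // out-of-range addresses never translate
        }
        return gpu_addr; // placeholder: the real code consults the page table
    }

private:
    GPUVAddr address_space_size;
};

int main() {
    const MemoryManagerSketch memory{1ULL << 40};
    // A caller such as the buffer cache can now apply the same check up front.
    if (!memory.IsWithinGPUAddressRange(1ULL << 41)) {
        return memory.GpuToCpuAddress(1ULL << 41).has_value() ? 1 : 0; // expect 0
    }
    return 0;
}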