core: Make use of fastmem
parent 740edacc8d
commit 621f3f5f47

externals/dynarmic (vendored submodule)
@@ -1 +1 @@
-Subproject commit 828959caedfac2d456a0c877fda4612e35fffc03
+Subproject commit 0c12614d1a7a72d778609920dde96a4c63074ece
@@ -111,6 +111,8 @@ struct PageTable {
     VirtualBuffer<u64> backing_addr;
 
     size_t current_address_space_width_in_bits;
+
+    u8* fastmem_arena;
 };
 
 } // namespace Common
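Note: fastmem_arena is the new host-side pointer the JIT uses as the base of its fastmem region, so a guest virtual address resolves with a single pointer add instead of a software page-table walk. A minimal sketch of the addressing idea; the helper name is hypothetical and not part of this commit:

    #include <cstdint>

    // Hypothetical illustration of fastmem addressing: the host arena mirrors the
    // guest address space, so a guest access is one host load at arena + vaddr.
    // Unmapped guest pages stay unmapped in the arena, so such an access faults
    // and the JIT falls back to the regular memory callbacks.
    inline std::uint8_t* FastmemHostPointer(std::uint8_t* fastmem_arena, std::uint64_t guest_vaddr) {
        return fastmem_arena + guest_vaddr;
    }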
@@ -128,6 +128,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     if (page_table) {
         config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
             page_table->pointers.data());
+        config.fastmem_pointer = page_table->fastmem_arena;
     }
     config.absolute_offset_page_table = true;
     config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
@@ -160,6 +160,10 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         config.absolute_offset_page_table = true;
         config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
         config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+
+        config.fastmem_pointer = page_table->fastmem_arena;
+        config.fastmem_address_space_bits = address_space_bits;
+        config.silently_mirror_fastmem = false;
     }
 
     // Multi-process state
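Note: besides the arena base, the 64-bit JIT is told how wide the guest address space is. With silently_mirror_fastmem disabled, an access whose address does not fit in fastmem_address_space_bits is presumably not wrapped back into the arena but takes the fallback path instead. A rough sketch of that bounds check under this assumption; the names below are illustrative, not Dynarmic internals:

    #include <cstdint>

    // Illustrative-only check: with silently_mirror_fastmem == false, an address
    // outside the configured address-space width is not folded into the arena;
    // it is handled through the slow-path memory callbacks instead.
    inline bool AddressFitsFastmem(std::uint64_t guest_vaddr, std::uint32_t address_space_bits) {
        return (guest_vaddr >> address_space_bits) == 0;
    }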
@@ -6,7 +6,7 @@
 
 namespace Core {
 
-DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size} {}
+DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size, 1ULL << 39} {}
 DeviceMemory::~DeviceMemory() = default;
 
 } // namespace Core
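Note: the second constructor argument sizes the virtual (fastmem) arena separately from the physical backing. 1ULL << 39 reserves a 512 GiB region, enough to span a 39-bit guest address space, while DramMemoryMap::Size still sizes the committed backing. A standalone sanity check of the arithmetic, not part of the commit:

    #include <cstdint>

    // 2^39 bytes == 512 GiB: the reserved virtual arena covers the largest guest
    // virtual address space, even though far less backing memory is committed.
    static_assert((std::uint64_t{1} << 39) == 512ULL * 1024 * 1024 * 1024);

    int main() {
        return 0;
    }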
@@ -5,7 +5,7 @@
 #pragma once
 
 #include "common/common_types.h"
-#include "common/virtual_buffer.h"
+#include "common/host_memory.h"
 
 namespace Core {
 
@@ -21,27 +21,30 @@ enum : u64 {
 };
 }; // namespace DramMemoryMap
 
-class DeviceMemory : NonCopyable {
+class DeviceMemory {
 public:
     explicit DeviceMemory();
     ~DeviceMemory();
 
+    DeviceMemory& operator=(const DeviceMemory&) = delete;
+    DeviceMemory(const DeviceMemory&) = delete;
+
     template <typename T>
     PAddr GetPhysicalAddr(const T* ptr) const {
-        return (reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(buffer.data())) +
+        return (reinterpret_cast<uintptr_t>(ptr) -
+                reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())) +
               DramMemoryMap::Base;
     }
 
     u8* GetPointer(PAddr addr) {
-        return buffer.data() + (addr - DramMemoryMap::Base);
+        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
     }
 
     const u8* GetPointer(PAddr addr) const {
-        return buffer.data() + (addr - DramMemoryMap::Base);
+        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
     }
 
 private:
-    Common::VirtualBuffer<u8> buffer;
+    Common::HostMemory buffer;
 };
 
 } // namespace Core
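Note: with Common::HostMemory the device memory has two views of the same pages. BackingBasePointer() is indexed by physical address, as GetPointer/GetPhysicalAddr do above, while VirtualBasePointer() is indexed by guest virtual address and is what the JIT receives as its fastmem arena. A sketch of how the two views relate, using a hypothetical helper struct rather than HostMemory's real internals:

    #include <cstdint>

    // Illustration only (names are hypothetical):
    //   backing + (paddr - dram_base) -> storage indexed by physical address
    //   arena   + vaddr               -> same storage, indexed by guest virtual
    //                                    address once Map() has aliased the range
    struct TwoViewMemory {
        std::uint8_t* backing; // what BackingBasePointer() would return
        std::uint8_t* arena;   // what VirtualBasePointer() would return

        std::uint8_t* FromPhysical(std::uint64_t paddr, std::uint64_t dram_base) const {
            return backing + (paddr - dram_base);
        }
        std::uint8_t* FromVirtual(std::uint64_t vaddr) const {
            return arena + vaddr;
        }
    };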
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
+#include "common/settings.h"
 #include "common/swap.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
@@ -32,6 +33,7 @@ struct Memory::Impl {
 
     void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
         current_page_table = &process.PageTable().PageTableImpl();
+        current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
 
         const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
 
@@ -41,13 +43,19 @@ struct Memory::Impl {
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG(target >= DramMemoryMap::Base && target < DramMemoryMap::End,
+                   "Out of bounds target: {:016X}", target);
         MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+
+        system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
     }
 
     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
         MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+
+        system.DeviceMemory().buffer.Unmap(base, size);
     }
 
     bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const {
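Note: every guest mapping is now mirrored into the host arena. The guest virtual base is used as the arena offset and target - DramMemoryMap::Base as the offset into the physical backing, keeping the alias in lockstep with the software page table. A worked example of the offsets; DramMemoryMap::Base is assumed to be 0x80000000 here, which is not shown in this diff, and all addresses are illustrative:

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::uint64_t dram_base  = 0x80000000;         // assumed DramMemoryMap::Base
        constexpr std::uint64_t guest_base = 0x0000000008000000; // VAddr passed to MapMemoryRegion
        constexpr std::uint64_t target     = 0x0000000088000000; // PAddr inside DRAM
        constexpr std::uint64_t size       = 0x10000;

        // Mirrors: system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
        std::printf("arena offset   = 0x%llx\n", static_cast<unsigned long long>(guest_base));
        std::printf("backing offset = 0x%llx\n", static_cast<unsigned long long>(target - dram_base));
        std::printf("length         = 0x%llx\n", static_cast<unsigned long long>(size));
        return 0;
    }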
@@ -466,6 +474,10 @@ struct Memory::Impl {
         if (vaddr == 0) {
             return;
         }
+
+        const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
+        system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
         // address space, marking the region as un/cached. The region is marked un/cached at a
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
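Note: while a region is cached by the rasterizer, write access to the arena mapping is removed, and read access is removed as well unless Settings::IsGPULevelHigh(). Any fastmem access that hits such a page faults and is serviced by the slow path, which already knows how to flush or invalidate the GPU caches. A standalone sketch that mirrors the flag computation above; the struct and function names are illustrative:

    struct ProtectFlags {
        bool read;
        bool write;
    };

    // Mirrors the computation above: while a region is cached by the rasterizer,
    // writes always trap; reads keep trapping unless Settings::IsGPULevelHigh().
    inline ProtectFlags ComputeFastmemProtection(bool cached, bool gpu_level_high) {
        const bool read  = gpu_level_high || !cached; // is_read_enable in the diff
        const bool write = !cached;                   // writes fault while cached
        return ProtectFlags{read, write};
    }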