Merge pull request #1680 from lioncash/mem
kernel/process: Migrate heap-related memory management out of the process class and into the vm manager
commit 70f189d7af
src/core/hle/kernel/process.cpp

@@ -5,11 +5,9 @@
 #include <algorithm>
 #include <memory>
 #include "common/assert.h"
-#include "common/common_funcs.h"
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
@@ -241,83 +239,15 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
 }
 
 ResultVal<VAddr> Process::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
-    if (target < vm_manager.GetHeapRegionBaseAddress() ||
-        target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (heap_memory == nullptr) {
-        // Initialize heap
-        heap_memory = std::make_shared<std::vector<u8>>();
-        heap_start = heap_end = target;
-    } else {
-        vm_manager.UnmapRange(heap_start, heap_end - heap_start);
-    }
-
-    // If necessary, expand backing vector to cover new heap extents.
-    if (target < heap_start) {
-        heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
-        heap_start = target;
-        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
-    }
-    if (target + size > heap_end) {
-        heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
-        heap_end = target + size;
-        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
-    }
-    ASSERT(heap_end - heap_start == heap_memory->size());
-
-    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
-                                                       size, MemoryState::Heap));
-    vm_manager.Reprotect(vma, perms);
-
-    heap_used = size;
-
-    return MakeResult<VAddr>(heap_end - size);
+    return vm_manager.HeapAllocate(target, size, perms);
 }
 
 ResultCode Process::HeapFree(VAddr target, u32 size) {
-    if (target < vm_manager.GetHeapRegionBaseAddress() ||
-        target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0) {
-        return RESULT_SUCCESS;
-    }
-
-    ResultCode result = vm_manager.UnmapRange(target, size);
-    if (result.IsError())
-        return result;
-
-    heap_used -= size;
-
-    return RESULT_SUCCESS;
+    return vm_manager.HeapFree(target, size);
 }
 
 ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
-    auto vma = vm_manager.FindVMA(src_addr);
-
-    ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address");
-    ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
-
-    // The returned VMA might be a bigger one encompassing the desired address.
-    auto vma_offset = src_addr - vma->first;
-    ASSERT_MSG(vma_offset + size <= vma->second.size,
-               "Shared memory exceeds bounds of mapped block");
-
-    const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
-    std::size_t backing_block_offset = vma->second.offset + vma_offset;
-
-    CASCADE_RESULT(auto new_vma,
-                   vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
-                                             MemoryState::Mapped));
-    // Protect mirror with permissions from old region
-    vm_manager.Reprotect(new_vma, vma->second.permissions);
-    // Remove permissions from old region
-    vm_manager.Reprotect(vma, VMAPermission::None);
-
-    return RESULT_SUCCESS;
+    return vm_manager.MirrorMemory(dst_addr, src_addr, size);
 }
 
 ResultCode Process::UnmapMemory(VAddr dst_addr, VAddr /*src_addr*/, u64 size) {
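With the implementations moved into VMManager, the Process heap entry points above shrink to one-line forwarders, so existing callers (such as the SVC handlers) keep the same Process API while the state now lives with the address-space code. A minimal standalone sketch of that forwarding pattern, not yuzu code, with simplified stand-in classes and made-up members:

// Sketch of the delegation pattern used in the diff above (illustrative only).
#include <cstdint>
#include <iostream>

using VAddr = std::uint64_t;

class VmManager {
public:
    // Owns the heap bookkeeping after the refactor.
    VAddr HeapAllocate(VAddr target, std::uint64_t size) {
        heap_used = size;
        return target; // The real code validates the range and maps a block.
    }

private:
    std::uint64_t heap_used = 0;
};

class Process {
public:
    // The process keeps its public interface but forwards to the component
    // that actually owns the state, exactly as the diff does.
    VAddr HeapAllocate(VAddr target, std::uint64_t size) {
        return vm_manager.HeapAllocate(target, size);
    }

private:
    VmManager vm_manager;
};

int main() {
    Process process;
    std::cout << std::hex << process.HeapAllocate(0x10000000, 0x10000) << '\n';
    return 0;
}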
src/core/hle/kernel/process.h

@@ -292,17 +292,6 @@ private:
     u32 allowed_thread_priority_mask = 0xFFFFFFFF;
     u32 is_virtual_address_memory_enabled = 0;
 
-    // Memory used to back the allocations in the regular heap. A single vector is used to cover
-    // the entire virtual address space extents that bound the allocations, including any holes.
-    // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
-    // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
-    std::shared_ptr<std::vector<u8>> heap_memory;
-
-    // The left/right bounds of the address space covered by heap_memory.
-    VAddr heap_start = 0;
-    VAddr heap_end = 0;
-    u64 heap_used = 0;
-
     /// The Thread Local Storage area is allocated as processes create threads,
     /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
     /// holds the TLS for a specific thread. This vector contains which parts are in use for each
src/core/hle/kernel/vm_manager.cpp

@@ -243,6 +243,85 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     return RESULT_SUCCESS;
 }
 
+ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
+    if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
+        target + size < target) {
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (heap_memory == nullptr) {
+        // Initialize heap
+        heap_memory = std::make_shared<std::vector<u8>>();
+        heap_start = heap_end = target;
+    } else {
+        UnmapRange(heap_start, heap_end - heap_start);
+    }
+
+    // If necessary, expand backing vector to cover new heap extents.
+    if (target < heap_start) {
+        heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
+        heap_start = target;
+        RefreshMemoryBlockMappings(heap_memory.get());
+    }
+    if (target + size > heap_end) {
+        heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
+        heap_end = target + size;
+        RefreshMemoryBlockMappings(heap_memory.get());
+    }
+    ASSERT(heap_end - heap_start == heap_memory->size());
+
+    CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size,
+                                            MemoryState::Heap));
+    Reprotect(vma, perms);
+
+    heap_used = size;
+
+    return MakeResult<VAddr>(heap_end - size);
+}
+
+ResultCode VMManager::HeapFree(VAddr target, u64 size) {
+    if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
+        target + size < target) {
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (size == 0) {
+        return RESULT_SUCCESS;
+    }
+
+    const ResultCode result = UnmapRange(target, size);
+    if (result.IsError()) {
+        return result;
+    }
+
+    heap_used -= size;
+    return RESULT_SUCCESS;
+}
+
+ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
+    const auto vma = FindVMA(src_addr);
+
+    ASSERT_MSG(vma != vma_map.end(), "Invalid memory address");
+    ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
+
+    // The returned VMA might be a bigger one encompassing the desired address.
+    const auto vma_offset = src_addr - vma->first;
+    ASSERT_MSG(vma_offset + size <= vma->second.size,
+               "Shared memory exceeds bounds of mapped block");
+
+    const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
+    const std::size_t backing_block_offset = vma->second.offset + vma_offset;
+
+    CASCADE_RESULT(auto new_vma, MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
+                                                MemoryState::Mapped));
+    // Protect mirror with permissions from old region
+    Reprotect(new_vma, vma->second.permissions);
+    // Remove permissions from old region
+    Reprotect(vma, VMAPermission::None);
+
+    return RESULT_SUCCESS;
+}
+
 void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
     // If this ever proves to have a noticeable performance impact, allow users of the function to
     // specify a specific range of addresses to limit the scan to.
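The moved HeapAllocate keeps the whole heap in one contiguous backing vector and grows it to cover the union of every extent requested so far: it prepends zero-filled bytes when the target falls below heap_start and appends when target + size rises above heap_end, while the target + size < target clause rejects unsigned wrap-around. MirrorMemory similarly maps the existing backing block at a second address after locating the VMA that contains src_addr. A standalone model of the growth logic, using a plain std::vector and illustrative names rather than the real VMManager types:

// Model of the backing-vector growth in VMManager::HeapAllocate
// (simplified: no page mappings or permissions).
#include <cassert>
#include <cstdint>
#include <vector>

using VAddr = std::uint64_t;

struct HeapModel {
    std::vector<std::uint8_t> memory; // Single vector covering [start, end).
    VAddr start = 0;
    VAddr end = 0;

    bool Allocate(VAddr target, std::uint64_t size) {
        // Unsigned wrap-around check, mirroring `target + size < target`.
        if (target + size < target) {
            return false;
        }
        if (memory.empty()) {
            start = end = target;
        }
        // Expand the backing vector to cover the new extents, keeping it
        // contiguous so a host pointer to any heap byte stays computable.
        if (target < start) {
            memory.insert(memory.begin(), start - target, 0);
            start = target;
        }
        if (target + size > end) {
            memory.insert(memory.end(), (target + size) - end, 0);
            end = target + size;
        }
        assert(end - start == memory.size());
        return true;
    }
};

int main() {
    HeapModel heap;
    assert(heap.Allocate(0x1000, 0x2000)); // Covers [0x1000, 0x3000).
    assert(heap.Allocate(0x0800, 0x0400)); // Prepends; now [0x0800, 0x3000).
    assert(heap.end - heap.start == 0x2800);
    return 0;
}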
@@ -495,8 +574,7 @@ u64 VMManager::GetTotalMemoryUsage() const {
 }
 
 u64 VMManager::GetTotalHeapUsage() const {
-    LOG_WARNING(Kernel, "(STUBBED) called");
-    return 0x0;
+    return heap_used;
 }
 
 VAddr VMManager::GetAddressSpaceBaseAddress() const {
src/core/hle/kernel/vm_manager.h

@@ -186,6 +186,11 @@ public:
     /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
     ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
 
+    ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
+    ResultCode HeapFree(VAddr target, u64 size);
+
+    ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size);
+
     /**
      * Scans all VMAs and updates the page table range of any that use the given vector as backing
      * memory. This should be called after any operation that causes reallocation of the vector.
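Both HeapAllocate and HeapFree declared above guard the same precondition before touching any state: the range must sit inside the heap region and must not wrap the 64-bit address space. A self-contained sketch of that check, with made-up constants standing in for the real GetHeapRegionBaseAddress/GetHeapRegionEndAddress values:

// Sketch of the shared range check (bounds are illustrative, not yuzu's).
#include <cstdint>

using VAddr = std::uint64_t;

constexpr VAddr kHeapBase = 0x10000000; // Hypothetical heap region base.
constexpr VAddr kHeapEnd = 0x40000000;  // Hypothetical heap region end.

constexpr bool IsValidHeapRange(VAddr target, std::uint64_t size) {
    // The `target + size >= target` clause rejects unsigned wrap-around,
    // which would otherwise slip past the upper-bound comparison.
    return target >= kHeapBase && target + size <= kHeapEnd && target + size >= target;
}

int main() {
    static_assert(IsValidHeapRange(kHeapBase, 0x1000), "in-range allocation");
    static_assert(!IsValidHeapRange(kHeapBase - 1, 0x1000), "below heap base");
    static_assert(!IsValidHeapRange(kHeapEnd - 0x10, ~0ULL), "size wraps around");
    return 0;
}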
@@ -343,5 +348,15 @@ private:
     VAddr tls_io_region_base = 0;
     VAddr tls_io_region_end = 0;
 
+    // Memory used to back the allocations in the regular heap. A single vector is used to cover
+    // the entire virtual address space extents that bound the allocations, including any holes.
+    // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
+    // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
+    std::shared_ptr<std::vector<u8>> heap_memory;
+
+    // The left/right bounds of the address space covered by heap_memory.
+    VAddr heap_start = 0;
+    VAddr heap_end = 0;
+    u64 heap_used = 0;
 };
 } // namespace Kernel
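The comment on heap_memory also explains why RefreshMemoryBlockMappings must run whenever the vector grows: insertion can reallocate the buffer, silently invalidating every host pointer previously installed in the page table. A minimal demonstration of that invalidation with a plain std::vector, independent of yuzu:

// Growing a vector typically reallocates it; any mapping still based on the
// old base address is then stale, which is what RefreshMemoryBlockMappings
// repairs for the VMAs backed by heap_memory.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    std::vector<std::uint8_t> heap(0x1000, 0);
    const auto old_base = reinterpret_cast<std::uintptr_t>(heap.data());

    // Emulates expanding the heap extents downwards, as HeapAllocate does.
    heap.insert(heap.begin(), 0x1000, 0);

    const auto new_base = reinterpret_cast<std::uintptr_t>(heap.data());
    std::cout << "buffer moved: " << std::boolalpha << (new_base != old_base) << '\n';
    return 0;
}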