2019-03-04 04:54:16 +00:00
|
|
|
// Copyright 2018 yuzu emulator team
|
2018-02-08 02:54:35 +00:00
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
2019-03-04 04:54:16 +00:00
|
|
|
#include <map>
|
2018-10-30 04:03:25 +00:00
|
|
|
#include <optional>
|
2018-04-21 16:31:30 +00:00
|
|
|
|
2018-02-08 02:54:35 +00:00
|
|
|
#include "common/common_types.h"
|
2019-03-04 04:54:16 +00:00
|
|
|
#include "common/page_table.h"
|
2018-02-08 02:54:35 +00:00
|
|
|
|
2019-03-29 01:58:28 +00:00
|
|
|
namespace VideoCore {
|
|
|
|
class RasterizerInterface;
|
|
|
|
}
|
|
|
|
|
2018-02-12 04:44:12 +00:00
|
|
|
namespace Tegra {
|
|
|
|
|
2019-03-04 04:54:16 +00:00
|
|
|
/**
 * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
 * with homogeneous attributes across its extents. In this particular implementation each VMA is
 * also backed by a single host memory allocation.
 */
struct VirtualMemoryArea {
    enum class Type : u8 {
        Unmapped,  ///< Address range is not reserved; no backing memory.
        Allocated, ///< Address range is reserved but has no host backing yet.
        Mapped,    ///< Address range is backed by host memory (see backing_memory).
    };

    /// Virtual base address of the region.
    GPUVAddr base{};
    /// Size of the region.
    u64 size{};
    /// Memory area mapping type.
    Type type{Type::Unmapped};
    /// CPU memory mapped address corresponding to this memory area.
    VAddr backing_addr{};
    /// Offset into the backing_memory the mapping starts from.
    std::size_t offset{};
    /// Pointer backing this VMA. Null for Unmapped/Allocated areas.
    u8* backing_memory{};

    /// Tests if this area can be merged to the right with `next`.
    bool CanBeMergedWith(const VirtualMemoryArea& next) const;
};
|
|
|
|
|
2018-02-08 02:54:35 +00:00
|
|
|
/// Manages the GPU virtual address space: tracks VMAs, reserves and maps address
/// ranges backed by CPU memory, and provides read/write access to GPU memory.
class MemoryManager final {
public:
    explicit MemoryManager(VideoCore::RasterizerInterface& rasterizer);
    ~MemoryManager();

    /// Reserves `size` bytes of address space with the given alignment at a
    /// manager-chosen address; returns the base GPU virtual address.
    GPUVAddr AllocateSpace(u64 size, u64 align);
    /// Reserves `size` bytes of address space at the explicitly requested address.
    GPUVAddr AllocateSpace(GPUVAddr addr, u64 size, u64 align);
    /// Maps `size` bytes of CPU memory into the GPU address space at a
    /// manager-chosen address; returns the GPU virtual address of the mapping.
    GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
    /// Maps `size` bytes of CPU memory at the explicitly requested GPU address.
    GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr addr, u64 size);
    /// Unmaps a previously mapped range starting at `addr`.
    GPUVAddr UnmapBuffer(GPUVAddr addr, u64 size);
    /// Translates a GPU virtual address to the CPU address backing it, or
    /// std::nullopt when the address is not mapped.
    std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr) const;

    /// Reads a value of type T from GPU virtual address `addr`.
    template <typename T>
    T Read(GPUVAddr addr) const;

    /// Writes `data` to GPU virtual address `addr`.
    template <typename T>
    void Write(GPUVAddr addr, T data);

    /// Returns a host pointer to the memory backing `addr`.
    u8* GetPointer(GPUVAddr addr);
    const u8* GetPointer(GPUVAddr addr) const;

    /// Returns true if the block is contiguous in host memory, false otherwise.
    bool IsBlockContinuous(GPUVAddr start, std::size_t size) const;

    /**
     * ReadBlock and WriteBlock are full read and write operations over virtual
     * GPU memory. It's important to use these when GPU memory may not be contiguous
     * in the host memory counterpart. Note: these functions cause host GPU memory
     * flushes and invalidations, respectively, for each operation.
     */
    void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const;
    void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
    void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);

    /**
     * ReadBlockUnsafe and WriteBlockUnsafe are special versions of ReadBlock and
     * WriteBlock respectively. In these versions, no flushing or invalidation is
     * actually done and their performance is similar to a memcpy. These functions
     * can be used in either of these two scenarios instead of their safe counterpart:
     * - Memory which is sure to never be represented in the Host GPU.
     * - Memory managed by a cache manager. Example: texture flushing should use
     *   WriteBlockUnsafe instead of WriteBlock since it shouldn't invalidate the
     *   texture being flushed.
     */
    void ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const;
    void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
    void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);

private:
    using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
    using VMAHandle = VMAMap::const_iterator;
    using VMAIter = VMAMap::iterator;

    /// Returns whether `addr` lies inside the manageable address space.
    bool IsAddressValid(GPUVAddr addr) const;
    /// Updates the page table entries covering [base, base + size).
    void MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
                  VAddr backing_addr = 0);
    void MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr);
    void UnmapRegion(GPUVAddr base, u64 size);

    /// Finds the VMA in which the given address is included in, or `vma_map.end()`.
    VMAHandle FindVMA(GPUVAddr target) const;

    /// Marks [target, target + size) as an Allocated VMA with the given offset.
    VMAHandle AllocateMemory(GPUVAddr target, std::size_t offset, u64 size);

    /**
     * Maps an unmanaged host memory pointer at a given address.
     *
     * @param target The guest address to start the mapping at.
     * @param memory The memory to be mapped.
     * @param size Size of the mapping in bytes.
     * @param backing_addr The base address of the range to back this mapping.
     */
    VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr);

    /// Unmaps a range of addresses, splitting VMAs as necessary.
    void UnmapRange(GPUVAddr target, u64 size);

    /// Converts a VMAHandle to a mutable VMAIter.
    VMAIter StripIterConstness(const VMAHandle& iter);

    /// Marks the specified VMA as allocated.
    VMAIter Allocate(VMAIter vma);

    /**
     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
     * the appropriate error checking.
     */
    VMAIter CarveVMA(GPUVAddr base, u64 size);

    /**
     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
     * end of the range.
     */
    VMAIter CarveVMARange(GPUVAddr base, u64 size);

    /**
     * Splits a VMA in two, at the specified offset.
     * @returns the right side of the split, with the original iterator becoming the left side.
     */
    VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);

    /**
     * Checks for and merges the specified VMA with adjacent ones if possible.
     * @returns the merged VMA or the original if no merging was possible.
     */
    VMAIter MergeAdjacent(VMAIter vma);

    /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
    void UpdatePageTableForVMA(const VirtualMemoryArea& vma);

    /// Finds a free (unmapped region) of the specified size starting at the specified address.
    GPUVAddr FindFreeRegion(GPUVAddr region_start, u64 size) const;

private:
    /// Page granularity of the address space: 64 KiB pages.
    static constexpr u64 page_bits{16};
    static constexpr u64 page_size{1 << page_bits};
    static constexpr u64 page_mask{page_size - 1};

    /// Address space in bits, this is fairly arbitrary but sufficiently large.
    static constexpr u32 address_space_width{39};
    /// Start address for mapping, this is fairly arbitrary but must be non-zero.
    static constexpr GPUVAddr address_space_base{0x100000};
    /// End of address space, based on address space in bits.
    static constexpr GPUVAddr address_space_end{1ULL << address_space_width};

    /// Page table used for fast GPU -> host address translation.
    Common::PageTable page_table{page_bits};
    /// Ordered map of all VMAs keyed by base address; adjacent entries are kept merged.
    VMAMap vma_map;
    /// Rasterizer used for cache flush/invalidate notifications on safe block accesses.
    VideoCore::RasterizerInterface& rasterizer;
};
|
|
|
|
|
2018-02-12 04:44:12 +00:00
|
|
|
} // namespace Tegra
|