General: Make use of std::nullopt where applicable
Allows some standard library implementations to avoid completely zeroing out the internal buffer of the optional, and instead only set the validity byte within the structure. It also makes the way we return empty optionals consistent.
parent c07fd2898b
commit ff45c39578
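A short sketch makes the rationale in the commit message concrete. The helper below is hypothetical and not taken from the codebase; it only contrasts the two ways of returning an empty std::optional that this diff unifies. Both forms yield a disengaged optional, but std::nullopt names that state explicitly, which the message argues lets some standard library implementations set only the validity byte instead of zeroing the optional's whole internal buffer.

#include <optional>
#include <string>

// Hypothetical lookup used only for illustration; not part of the diff below.
std::optional<std::string> FindSetting(const std::string& key) {
    if (key.empty()) {
        // Old style seen throughout the diff: value-initializes the returned optional.
        // return {};

        // New style: constructs the disengaged state directly.
        return std::nullopt;
    }
    return "value-for-" + key; // implicit conversion to std::optional<std::string>
}

int main() {
    // Observable behaviour is identical either way; the change is about consistency
    // and, per the commit message, potentially cheaper construction of the empty state.
    return FindSetting("").has_value() ? 1 : 0;
}

Every return {}; in a function returning a std::optional below is rewritten into this second form.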
@@ -34,7 +34,7 @@ std::optional<Callback> DynarmicCP15::CompileInternalOperation(bool two, unsigne
                                                                CoprocReg CRm, unsigned opc2) {
     LOG_CRITICAL(Core_ARM, "CP15: cdp{} p15, {}, {}, {}, {}, {}", two ? "2" : "", opc1, CRd, CRn,
                  CRm, opc2);
-    return {};
+    return std::nullopt;
 }
 
 CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1, CoprocReg CRn,
@@ -115,7 +115,7 @@ std::optional<Callback> DynarmicCP15::CompileLoadWords(bool two, bool long_trans
         LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...]", two ? "2" : "",
                      long_transfer ? "l" : "", CRd);
     }
-    return {};
+    return std::nullopt;
 }
 
 std::optional<Callback> DynarmicCP15::CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd,
@@ -127,7 +127,7 @@ std::optional<Callback> DynarmicCP15::CompileStoreWords(bool two, bool long_tran
         LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...]", two ? "2" : "",
                      long_transfer ? "l" : "", CRd);
     }
-    return {};
+    return std::nullopt;
 }
 
 } // namespace Core
@@ -323,7 +323,7 @@ bool NCA::ReadRomFSSection(const NCASectionHeader& section, const NCASectionTabl
         subsection_buckets.back().entries.push_back({section.bktr.relocation.offset, {0}, ctr_low});
         subsection_buckets.back().entries.push_back({size, {0}, 0});
 
-        std::optional<Core::Crypto::Key128> key = {};
+        std::optional<Core::Crypto::Key128> key;
         if (encrypted) {
             if (has_rights_id) {
                 status = Loader::ResultStatus::Success;
@@ -442,18 +442,18 @@ std::optional<Core::Crypto::Key128> NCA::GetTitlekey() {
     memcpy(rights_id.data(), header.rights_id.data(), 16);
     if (rights_id == u128{}) {
         status = Loader::ResultStatus::ErrorInvalidRightsID;
-        return {};
+        return std::nullopt;
     }
 
     auto titlekey = keys.GetKey(Core::Crypto::S128KeyType::Titlekey, rights_id[1], rights_id[0]);
     if (titlekey == Core::Crypto::Key128{}) {
         status = Loader::ResultStatus::ErrorMissingTitlekey;
-        return {};
+        return std::nullopt;
     }
 
     if (!keys.HasKey(Core::Crypto::S128KeyType::Titlekek, master_key_id)) {
         status = Loader::ResultStatus::ErrorMissingTitlekek;
-        return {};
+        return std::nullopt;
     }
 
     Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
@@ -477,7 +477,7 @@ VirtualFile NCA::Decrypt(const NCASectionHeader& s_header, VirtualFile in, u64 s
     case NCASectionCryptoType::BKTR:
         LOG_TRACE(Crypto, "called with mode=CTR, starting_offset={:016X}", starting_offset);
         {
-            std::optional<Core::Crypto::Key128> key = {};
+            std::optional<Core::Crypto::Key128> key;
             if (has_rights_id) {
                 status = Loader::ResultStatus::Success;
                 key = GetTitlekey();
@@ -169,11 +169,12 @@ VfsDirectory::~VfsDirectory() = default;
 
 std::optional<u8> VfsFile::ReadByte(std::size_t offset) const {
     u8 out{};
-    std::size_t size = Read(&out, 1, offset);
-    if (size == 1)
+    const std::size_t size = Read(&out, sizeof(u8), offset);
+    if (size == 1) {
         return out;
+    }
 
-    return {};
+    return std::nullopt;
 }
 
 std::vector<u8> VfsFile::ReadBytes(std::size_t size, std::size_t offset) const {
@@ -58,10 +58,11 @@ std::size_t OffsetVfsFile::Write(const u8* data, std::size_t length, std::size_t
 }
 
 std::optional<u8> OffsetVfsFile::ReadByte(std::size_t r_offset) const {
-    if (r_offset < size)
-        return file->ReadByte(offset + r_offset);
+    if (r_offset >= size) {
+        return std::nullopt;
+    }
 
-    return {};
+    return file->ReadByte(offset + r_offset);
 }
 
 std::vector<u8> OffsetVfsFile::ReadBytes(std::size_t r_size, std::size_t r_offset) const {
@@ -54,9 +54,11 @@ public:
     }
 
     std::optional<u8> ReadByte(std::size_t offset) const override {
-        if (offset < size)
+        if (offset >= size) {
+            return std::nullopt;
+        }
+
         return value;
-        return {};
     }
 
     std::vector<u8> ReadBytes(std::size_t length, std::size_t offset) const override {
@@ -265,7 +265,7 @@ std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gp
         }
     }
 
-    return {};
+    return std::nullopt;
 }
 
 void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
@@ -286,7 +286,7 @@ std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
         return size;
     }
 
-    return {};
+    return std::nullopt;
 }
 
 } // namespace Service::Nvidia::Devices
@@ -54,7 +54,7 @@ struct EventInterface {
             }
             mask = mask >> 1;
         }
-        return {};
+        return std::nullopt;
     }
     void SetEventStatus(const u32 event_id, EventState new_status) {
         EventState old_status = status[event_id];
@@ -114,7 +114,7 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
                                   [&](const VI::Display& display) { return display.GetName() == name; });
 
     if (itr == displays.end()) {
-        return {};
+        return std::nullopt;
     }
 
     return itr->GetID();
@@ -124,7 +124,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
     auto* const display = FindDisplay(display_id);
 
     if (display == nullptr) {
-        return {};
+        return std::nullopt;
     }
 
     const u64 layer_id = next_layer_id++;
@@ -144,7 +144,7 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) co
     const auto* const layer = FindLayer(display_id, layer_id);
 
     if (layer == nullptr) {
-        return {};
+        return std::nullopt;
     }
 
     return layer->GetBufferQueue().GetId();
@@ -497,7 +497,7 @@ std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::vector<u
             return {0, Errno::SUCCESS};
         }
 
-        std::optional<FileDescriptor>& descriptor = file_descriptors[pollfd.fd];
+        const std::optional<FileDescriptor>& descriptor = file_descriptors[pollfd.fd];
         if (!descriptor) {
             LOG_ERROR(Service, "File descriptor handle={} is not allocated", pollfd.fd);
             pollfd.revents = POLL_NVAL;
@@ -76,16 +76,16 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, Core::S
                                                bool should_pass_arguments, bool load_into_process,
                                                std::optional<FileSys::PatchManager> pm) {
     if (file.GetSize() < sizeof(NSOHeader)) {
-        return {};
+        return std::nullopt;
    }
 
     NSOHeader nso_header{};
     if (sizeof(NSOHeader) != file.ReadObject(&nso_header)) {
-        return {};
+        return std::nullopt;
     }
 
     if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) {
-        return {};
+        return std::nullopt;
     }
 
     // Build program image
@@ -597,7 +597,7 @@ std::optional<u64> Maxwell3D::GetQueryResult() {
         // Deferred.
         rasterizer->Query(regs.query.QueryAddress(), VideoCore::QueryType::SamplesPassed,
                           system.GPU().GetTicks());
-        return {};
+        return std::nullopt;
     default:
         LOG_DEBUG(HW_GPU, "Unimplemented query select type {}",
                   static_cast<u32>(regs.query.query_get.select.Value()));
@@ -36,7 +36,7 @@ void MacroEngine::Execute(Engines::Maxwell3D& maxwell3d, u32 method,
         }
     } else {
         // Macro not compiled, check if it's uploaded and if so, compile it
-        std::optional<u32> mid_method = std::nullopt;
+        std::optional<u32> mid_method;
         const auto macro_code = uploaded_macro_code.find(method);
         if (macro_code == uploaded_macro_code.end()) {
             for (const auto& [method_base, code] : uploaded_macro_code) {
@@ -58,7 +58,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
 std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
     for (u64 offset{}; offset < size; offset += page_size) {
         if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
-            return {};
+            return std::nullopt;
         }
     }
 
@@ -135,13 +135,13 @@ std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size
         }
     }
 
-    return {};
+    return std::nullopt;
 }
 
 std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
     const auto page_entry{GetPageEntry(gpu_addr)};
     if (!page_entry.IsValid()) {
-        return {};
+        return std::nullopt;
     }
 
     return page_entry.ToAddress() + (gpu_addr & page_mask);
@@ -813,7 +813,7 @@ private:
         const u8 location = static_cast<u8>(static_cast<u32>(index) * 4 + element);
         const auto it = transform_feedback.find(location);
         if (it == transform_feedback.end()) {
-            return {};
+            return std::nullopt;
         }
         return it->second.components;
     }
@@ -1295,21 +1295,21 @@ private:
             switch (element) {
             case 0:
                 UNIMPLEMENTED();
-                return {};
+                return std::nullopt;
             case 1:
                 if (stage == ShaderType::Vertex && !device.HasVertexViewportLayer()) {
-                    return {};
+                    return std::nullopt;
                 }
                 return {{"gl_Layer", Type::Int}};
             case 2:
                 if (stage == ShaderType::Vertex && !device.HasVertexViewportLayer()) {
-                    return {};
+                    return std::nullopt;
                 }
                 return {{"gl_ViewportIndex", Type::Int}};
             case 3:
                 return {{"gl_PointSize", Type::Float}};
             }
-            return {};
+            return std::nullopt;
         case Attribute::Index::FrontColor:
             return {{"gl_FrontColor"s + GetSwizzle(element), Type::Float}};
         case Attribute::Index::FrontSecondaryColor:
@@ -1332,7 +1332,7 @@ private:
                          Type::Float}};
             }
             UNIMPLEMENTED_MSG("Unhandled output attribute: {}", static_cast<u32>(attribute));
-            return {};
+            return std::nullopt;
         }
     }
 
@@ -199,55 +199,48 @@ public:
     }
 
     std::optional<u32> GetGotoLabel() const {
-        auto inner = std::get_if<ASTGoto>(&data);
-        if (inner) {
+        if (const auto* inner = std::get_if<ASTGoto>(&data)) {
             return {inner->label};
         }
-        return {};
+        return std::nullopt;
     }
 
     Expr GetGotoCondition() const {
-        auto inner = std::get_if<ASTGoto>(&data);
-        if (inner) {
+        if (const auto* inner = std::get_if<ASTGoto>(&data)) {
             return inner->condition;
         }
         return nullptr;
     }
 
     void MarkLabelUnused() {
-        auto inner = std::get_if<ASTLabel>(&data);
-        if (inner) {
+        if (auto* inner = std::get_if<ASTLabel>(&data)) {
             inner->unused = true;
         }
     }
 
     bool IsLabelUnused() const {
-        auto inner = std::get_if<ASTLabel>(&data);
-        if (inner) {
+        if (const auto* inner = std::get_if<ASTLabel>(&data)) {
             return inner->unused;
         }
         return true;
     }
 
     std::optional<u32> GetLabelIndex() const {
-        auto inner = std::get_if<ASTLabel>(&data);
-        if (inner) {
+        if (const auto* inner = std::get_if<ASTLabel>(&data)) {
             return {inner->index};
         }
-        return {};
+        return std::nullopt;
     }
 
     Expr GetIfCondition() const {
-        auto inner = std::get_if<ASTIfThen>(&data);
-        if (inner) {
+        if (const auto* inner = std::get_if<ASTIfThen>(&data)) {
             return inner->condition;
         }
         return nullptr;
     }
 
     void SetGotoCondition(Expr new_condition) {
-        auto inner = std::get_if<ASTGoto>(&data);
-        if (inner) {
+        if (auto* inner = std::get_if<ASTGoto>(&data)) {
             inner->condition = std::move(new_condition);
         }
     }
@@ -205,12 +205,12 @@ std::optional<u32> ShaderIR::TrackImmediate(Node tracked, const NodeBlock& code,
     const auto result = TrackRegister(&std::get<GprNode>(*tracked), code, cursor - 1);
     const auto& found = result.first;
     if (!found) {
-        return {};
+        return std::nullopt;
     }
     if (const auto immediate = std::get_if<ImmediateNode>(&*found)) {
         return immediate->GetValue();
     }
-    return {};
+    return std::nullopt;
 }
 
 std::pair<Node, s64> ShaderIR::TrackRegister(const GprNode* tracked, const NodeBlock& code,
@@ -115,20 +115,24 @@ std::optional<std::pair<u32, u32>> SurfaceBaseImpl::GetLayerMipmap(
     if (gpu_addr == candidate_gpu_addr) {
         return {{0, 0}};
     }
+
     if (candidate_gpu_addr < gpu_addr) {
-        return {};
+        return std::nullopt;
     }
+
     const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)};
     const auto layer{static_cast<u32>(relative_address / layer_size)};
     if (layer >= params.depth) {
-        return {};
+        return std::nullopt;
     }
+
     const GPUVAddr mipmap_address = relative_address - layer_size * layer;
     const auto mipmap_it =
         Common::BinaryFind(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
     if (mipmap_it == mipmap_offsets.end()) {
-        return {};
+        return std::nullopt;
     }
+
     const auto level{static_cast<u32>(std::distance(mipmap_offsets.begin(), mipmap_it))};
     return std::make_pair(layer, level);
 }