Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp                 3
-rw-r--r--  src/core/hle/kernel/k_page_linked_list.h              4
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp                242
-rw-r--r--  src/core/hle/kernel/k_page_table.h                   33
-rw-r--r--  src/core/hle/kernel/svc.cpp                          13
-rw-r--r--  src/core/hle/service/filesystem/filesystem.cpp        2
-rw-r--r--  src/core/hle/service/hid/hid.cpp                      4
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp    2
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv_interface.cpp       12
9 files changed, 253 insertions(+), 62 deletions(-)
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index b365ce7b7..63bbe02e9 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -28,7 +28,8 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
auto& page_table = m_owner->PageTable();
// Construct the page group.
- m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize));
+ m_page_group =
+ KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
// Lock the memory.
R_TRY(page_table.LockForCodeMemory(addr, size))
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_linked_list.h
index 0e2ae582a..869228322 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_linked_list.h
@@ -89,6 +89,10 @@ public:
return ResultSuccess;
}
+ bool Empty() const {
+ return nodes.empty();
+ }
+
private:
std::list<Node> nodes;
};
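Note: the new Empty() accessor above is used by KPageTable::MakePageGroup (added later in this diff) to refuse populating a page group that already holds blocks. A minimal standalone sketch of that guard pattern, using a simplified stand-in list rather than the real KPageLinkedList (illustration only, not yuzu code):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <list>

// Pared-down stand-in for a page linked list node.
struct Node {
    std::uint64_t addr;
    std::size_t num_pages;
};

class SimplePageList {
public:
    bool Empty() const {
        return nodes.empty();
    }
    void AddBlock(std::uint64_t addr, std::size_t num_pages) {
        nodes.push_back(Node{addr, num_pages});
    }

private:
    std::list<Node> nodes;
};

int main() {
    SimplePageList pg;
    // Mirrors R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory) in MakePageGroup:
    // a fresh group must not already contain blocks before it is filled.
    if (!pg.Empty()) {
        std::cerr << "group already populated\n";
        return 1;
    }
    pg.AddBlock(0x80000000ULL, 4);
    std::cout << "empty after add: " << std::boolalpha << pg.Empty() << '\n';
    return 0;
}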
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 02d93b12e..599013cf6 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -486,6 +486,58 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
return address;
}
+ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // We're making a new group, not adding to an existing one.
+ R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry next_entry;
+ R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ PAddr cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, adding to group as we go.
+ const auto& memory_layout = system.Kernel().MemoryLayout();
+ while (tot_size < size) {
+ R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+ ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we add the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Add the last block.
+ const size_t cur_pages = cur_size / PageSize;
+ R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ return ResultSuccess;
+}
+
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
KScopedLightLock lk(general_lock);
@@ -1223,6 +1275,31 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
return ResultSuccess;
}
+ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ // Ensure that the page group isn't null.
+ ASSERT(out != nullptr);
+
+ // Make sure that the region we're mapping is valid for the table.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check if state allows us to create the group.
+ R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Create a new page group for the region.
+ R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+ return ResultSuccess;
+}
+
ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
@@ -1605,57 +1682,21 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
}
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
-
- KMemoryPermission old_perm{};
-
- if (const ResultCode result{CheckMemoryState(
- nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
- KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
- result.IsError()) {
- return result;
- }
-
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->ShareToDevice(permission);
- },
- new_perm);
-
- return ResultSuccess;
+ return this->LockMemoryAndOpen(
+ nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked);
}
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
-
- KMemoryPermission old_perm{};
-
- if (const ResultCode result{CheckMemoryState(
- nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
- KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::Locked)};
- result.IsError()) {
- return result;
- }
-
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->UnshareToDevice(permission);
- },
- new_perm);
-
- return ResultSuccess;
+ return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, nullptr);
}
ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1991,4 +2032,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}
+ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
+ size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+ // Validate basic preconditions.
+ ASSERT((lock_attr & attr) == KMemoryAttribute::None);
+ ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+ KMemoryAttribute::None);
+
+ // Validate the lock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check that the output page group is empty, if it exists.
+ if (out_pg) {
+ ASSERT(out_pg->GetNumPages() == 0);
+ }
+
+ // Check the state.
+ KMemoryState old_state{};
+ KMemoryPermission old_perm{};
+ KMemoryAttribute old_attr{};
+ size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Get the physical address, if we're supposed to.
+ if (out_paddr != nullptr) {
+ ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+ }
+
+ // Make the page group, if we're supposed to.
+ if (out_pg != nullptr) {
+ R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+ }
+
+ // Apply the memory block updates.
+ block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+ return ResultSuccess;
+}
+
+ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
+ // Validate basic preconditions.
+ ASSERT((attr_mask & lock_attr) == lock_attr);
+ ASSERT((attr & lock_attr) == lock_attr);
+
+ // Validate the unlock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check the state.
+ KMemoryState old_state{};
+ KMemoryPermission old_perm{};
+ KMemoryAttribute old_attr{};
+ size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Check the page group.
+ if (pg != nullptr) {
+ UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+ }
+
+ // Apply the memory block updates.
+ block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+ return ResultSuccess;
+}
+
} // namespace Kernel
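Note: the core of the new KPageTable::MakePageGroup above is a coalescing walk over the page-table traversal — physically contiguous entries are merged into one block, and a new block is started whenever contiguity breaks (with the heap-address check applied per block). A standalone sketch of that merge logic, with plain structs standing in for the kernel's traversal entries and page group (illustration only, not yuzu code):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct Entry {
    std::uint64_t phys_addr;
    std::size_t size;
};

struct Block {
    std::uint64_t addr;
    std::size_t size;
};

std::vector<Block> Coalesce(const std::vector<Entry>& entries) {
    std::vector<Block> blocks;
    if (entries.empty()) {
        return blocks;
    }
    std::uint64_t cur_addr = entries.front().phys_addr;
    std::size_t cur_size = entries.front().size;
    for (std::size_t i = 1; i < entries.size(); ++i) {
        if (entries[i].phys_addr == cur_addr + cur_size) {
            // Physically contiguous with the current run: extend it.
            cur_size += entries[i].size;
        } else {
            // Contiguity broke: emit the finished run and start a new one.
            blocks.push_back(Block{cur_addr, cur_size});
            cur_addr = entries[i].phys_addr;
            cur_size = entries[i].size;
        }
    }
    blocks.push_back(Block{cur_addr, cur_size});
    return blocks;
}

int main() {
    const std::vector<Entry> entries{
        {0x1000, 0x1000}, {0x2000, 0x1000}, {0x5000, 0x2000},
    };
    // Expect two blocks: one at 0x1000 of size 0x2000, one at 0x5000 of size 0x2000.
    for (const auto& b : Coalesce(entries)) {
        std::cout << std::hex << "block at 0x" << b.addr << ", size 0x" << b.size << '\n';
    }
    return 0;
}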
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 54c6adf8d..bfabdf38c 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -12,6 +12,7 @@
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@@ -71,6 +72,10 @@ public:
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
+ ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -159,10 +164,37 @@ private:
attr_mask, attr, ignore_attr);
}
+ ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageLinkedList* pg);
+
+ ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
+
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
}
+ bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+ }
+
+ bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ *out = GetPhysicalAddr(virt_addr);
+
+ return *out != 0;
+ }
+
mutable KLightLock general_lock;
mutable KLightLock map_physical_memory_lock;
@@ -322,6 +354,7 @@ private:
bool is_aslr_enabled{};
u32 heap_fill_value{};
+ const KMemoryRegion* cached_physical_heap_region{};
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 839171e85..976d63234 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1362,8 +1362,11 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
ResultInvalidMemoryRegion);
// Create a new page group.
- KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
- KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+ KPageLinkedList pg;
+ R_TRY(src_pt.MakeAndOpenPageGroup(
+ std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
// Map the group.
R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
@@ -1408,8 +1411,8 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
}
static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
- LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
- static_cast<void*>(out), address, size);
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
+
// Get kernel instance.
auto& kernel = system.Kernel();
@@ -1664,7 +1667,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidAddress;
}
- if (size == 0 || Common::Is4KBAligned(size)) {
+ if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
return ResultInvalidSize;
}
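Note: the one-character svc.cpp fix above adds the missing negation, so sizes that are not 4 KiB aligned are now rejected instead of accepted. A minimal sketch of the corrected predicate; Is4KBAligned is assumed here to be a simple low-bits mask, as in yuzu's common alignment helpers (illustration only):

#include <cstdint>
#include <iostream>

// Assumed shape of the helper: low 12 bits clear means 4 KiB aligned.
constexpr bool Is4KBAligned(std::uint64_t value) {
    return (value & 0xFFF) == 0;
}

// Corrected validation: reject sizes that are zero or not page-aligned.
bool IsValidCodeMemorySize(std::uint64_t size) {
    return size != 0 && Is4KBAligned(size);
}

int main() {
    std::cout << std::boolalpha
              << IsValidCodeMemorySize(0x1000) << ' '  // true
              << IsValidCodeMemorySize(0x1001) << ' '  // false
              << IsValidCodeMemorySize(0) << '\n';     // false
    return 0;
}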
diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp
index 3703ca4c6..4208337db 100644
--- a/src/core/hle/service/filesystem/filesystem.cpp
+++ b/src/core/hle/service/filesystem/filesystem.cpp
@@ -174,7 +174,7 @@ ResultCode VfsDirectoryServiceWrapper::RenameFile(const std::string& src_path_,
ASSERT_MSG(dest != nullptr, "Newly created file with success cannot be found.");
ASSERT_MSG(dest->WriteBytes(src->ReadAllBytes()) == src->GetSize(),
- "Could not write all of the bytes but everything else has succeded.");
+ "Could not write all of the bytes but everything else has succeeded.");
if (!src->GetContainingDirectory()->DeleteFile(Common::FS::GetFilename(src_path))) {
// TODO(DarkLordZach): Find a better error code for this
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index ec9fda248..92e6bf889 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -874,6 +874,10 @@ void Hid::AcquireNpadStyleSetUpdateEventHandle(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}, unknown={}",
parameters.npad_id, parameters.applet_resource_user_id, parameters.unknown);
+ // Games expect this event to be signaled after calling this function
+ applet_resource->GetController<Controller_NPad>(HidController::NPad)
+ .SignalStyleSetChangedEvent(parameters.npad_id);
+
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(ResultSuccess);
rb.PushCopyObjects(applet_resource->GetController<Controller_NPad>(HidController::NPad)
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index f9b82b504..44c54c665 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -134,7 +134,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
}
EventState status = events_interface.status[event_id];
- const bool bad_parameter = status != EventState::Free && status != EventState::Registered;
+ const bool bad_parameter = status == EventState::Busy;
if (bad_parameter) {
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::BadParameter;
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index c16babe14..1ce2a856b 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -26,7 +26,7 @@ void NVDRV::Open(Kernel::HLERequestContext& ctx) {
rb.Push<DeviceFD>(0);
rb.PushEnum(NvResult::NotInitialized);
- LOG_ERROR(Service_NVDRV, "NvServices is not initalized!");
+ LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
return;
}
@@ -61,7 +61,7 @@ void NVDRV::Ioctl1(Kernel::HLERequestContext& ctx) {
if (!is_initialized) {
ServiceError(ctx, NvResult::NotInitialized);
- LOG_ERROR(Service_NVDRV, "NvServices is not initalized!");
+ LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
return;
}
@@ -87,7 +87,7 @@ void NVDRV::Ioctl2(Kernel::HLERequestContext& ctx) {
if (!is_initialized) {
ServiceError(ctx, NvResult::NotInitialized);
- LOG_ERROR(Service_NVDRV, "NvServices is not initalized!");
+ LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
return;
}
@@ -114,7 +114,7 @@ void NVDRV::Ioctl3(Kernel::HLERequestContext& ctx) {
if (!is_initialized) {
ServiceError(ctx, NvResult::NotInitialized);
- LOG_ERROR(Service_NVDRV, "NvServices is not initalized!");
+ LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
return;
}
@@ -139,7 +139,7 @@ void NVDRV::Close(Kernel::HLERequestContext& ctx) {
if (!is_initialized) {
ServiceError(ctx, NvResult::NotInitialized);
- LOG_ERROR(Service_NVDRV, "NvServices is not initalized!");
+ LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
return;
}
@@ -170,7 +170,7 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
if (!is_initialized) {
ServiceError(ctx, NvResult::NotInitialized);
- LOG_ERROR(Service_NVDRV, "NvServices is not initalized!");
+ LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
return;
}