author | bunnei <bunneidev@gmail.com> | 2019-04-16 03:43:56 +0200
committer | GitHub <noreply@github.com> | 2019-04-16 03:43:56 +0200
commit | fc64156533488e73b814c00bb42b5b6079ac8fa8 (patch)
tree | 36bf55884fdcd294f5318c44931e755fb23d6894 /src
parent | Merge pull request #2398 from lioncash/boost (diff)
parent | kernel/svc: Implement svcUnmapProcessCodeMemory (diff)
Diffstat (limited to 'src')
-rw-r--r-- | src/core/hle/kernel/svc.cpp | 140
-rw-r--r-- | src/core/hle/kernel/svc_wrap.h | 7
-rw-r--r-- | src/core/hle/kernel/vm_manager.cpp | 80
-rw-r--r-- | src/core/hle/kernel/vm_manager.h | 49
4 files changed, 274 insertions, 2 deletions
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index e5d4d6b55..d48a2203a 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1189,6 +1189,142 @@ static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
                        query_address);
 }
 
+static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+                                       u64 src_address, u64 size) {
+    LOG_DEBUG(Kernel_SVC,
+              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
+              "src_address=0x{:016X}, size=0x{:016X}",
+              process_handle, dst_address, src_address, size);
+
+    if (!Common::Is4KBAligned(src_address)) {
+        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+                  src_address);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (!Common::Is4KBAligned(dst_address)) {
+        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+                  dst_address);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (size == 0 || !Common::Is4KBAligned(size)) {
+        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
+        return ERR_INVALID_SIZE;
+    }
+
+    if (!IsValidAddressRange(dst_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  dst_address, size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    if (!IsValidAddressRange(src_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Source address range overflows the address space (src_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  src_address, size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+    auto process = handle_table.Get<Process>(process_handle);
+    if (!process) {
+        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+                  process_handle);
+        return ERR_INVALID_HANDLE;
+    }
+
+    auto& vm_manager = process->VMManager();
+    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Source address range is not within the address space (src_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  src_address, size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  dst_address, size);
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    return vm_manager.MapCodeMemory(dst_address, src_address, size);
+}
+
+ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+                                  u64 src_address, u64 size) {
+    LOG_DEBUG(Kernel_SVC,
+              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
+              "size=0x{:016X}",
+              process_handle, dst_address, src_address, size);
+
+    if (!Common::Is4KBAligned(dst_address)) {
+        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+                  dst_address);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (!Common::Is4KBAligned(src_address)) {
+        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+                  src_address);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (size == 0 || !Common::Is4KBAligned(size)) {
+        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
+        return ERR_INVALID_SIZE;
+    }
+
+    if (!IsValidAddressRange(dst_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  dst_address, size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    if (!IsValidAddressRange(src_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Source address range overflows the address space (src_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  src_address, size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+    auto process = handle_table.Get<Process>(process_handle);
+    if (!process) {
+        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+                  process_handle);
+        return ERR_INVALID_HANDLE;
+    }
+
+    auto& vm_manager = process->VMManager();
+    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Source address range is not within the address space (src_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  src_address, size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+                  "size=0x{:016X}).",
+                  dst_address, size);
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    return vm_manager.UnmapCodeMemory(dst_address, src_address, size);
+}
+
 /// Exits the current process
 static void ExitProcess(Core::System& system) {
     auto* current_process = system.Kernel().CurrentProcess();
@@ -2217,8 +2353,8 @@ static const FunctionDef SVC_Table[] = {
     {0x74, nullptr, "MapProcessMemory"},
     {0x75, nullptr, "UnmapProcessMemory"},
     {0x76, SvcWrap<QueryProcessMemory>, "QueryProcessMemory"},
-    {0x77, nullptr, "MapProcessCodeMemory"},
-    {0x78, nullptr, "UnmapProcessCodeMemory"},
+    {0x77, SvcWrap<MapProcessCodeMemory>, "MapProcessCodeMemory"},
+    {0x78, SvcWrap<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
     {0x79, nullptr, "CreateProcess"},
     {0x7A, nullptr, "StartProcess"},
     {0x7B, nullptr, "TerminateProcess"},
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index b3690b5f3..865473c6f 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -44,6 +44,13 @@ void SvcWrap(Core::System& system) {
         func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw);
 }
 
+template <ResultCode func(Core::System&, u32, u64, u64, u64)>
+void SvcWrap(Core::System& system) {
+    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
+                            Param(system, 2), Param(system, 3))
+                           .raw);
+}
+
 template <ResultCode func(Core::System&, u32*)>
 void SvcWrap(Core::System& system) {
     u32 param = 0;
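The svc_wrap.h hunk above is the glue that lets an SVC table entry call a handler taking (u32, u64, u64, u64) directly from guest registers. Below is a minimal, self-contained sketch of that dispatch pattern; `System`, `Param`, `FuncReturn`, and `DummyMapProcessCodeMemory` are simplified stand-ins invented for this example, not yuzu's actual types.

```cpp
// Minimal model of the SvcWrap register-dispatch pattern (assumed, simplified types).
#include <array>
#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

struct ResultCode {
    u32 raw;
};

// Stand-in for the guest CPU context that yuzu reads SVC arguments from.
struct System {
    std::array<u64, 8> regs{};
};

u64 Param(const System& system, int n) {
    return system.regs[n];
}

void FuncReturn(System& system, u32 result) {
    system.regs[0] = result; // the result travels back to the guest in X0
}

// Same shape as the wrapper added in svc_wrap.h: unpack (u32, u64, u64, u64)
// from registers X0-X3 and write the ResultCode back to X0.
template <ResultCode func(System&, u32, u64, u64, u64)>
void SvcWrap(System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
                            Param(system, 2), Param(system, 3))
                           .raw);
}

// Hypothetical handler with the same parameter shape as MapProcessCodeMemory.
ResultCode DummyMapProcessCodeMemory(System&, u32 handle, u64 dst, u64 src, u64 size) {
    std::printf("handle=0x%08X dst=0x%016llX src=0x%016llX size=0x%016llX\n", handle,
                static_cast<unsigned long long>(dst), static_cast<unsigned long long>(src),
                static_cast<unsigned long long>(size));
    return ResultCode{0};
}

int main() {
    System system;
    system.regs = {0xCAFE, 0x10000000, 0x20000000, 0x4000};
    // This is what a table entry such as SvcWrap<MapProcessCodeMemory> does on dispatch.
    SvcWrap<DummyMapProcessCodeMemory>(system);
    return system.regs[0] == 0 ? 0 : 1;
}
```

Because the handler is a template argument, each SVC gets its own concrete wrapper and the dispatch table entry stays a plain function pointer.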
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index ec0a480ce..f0c0c12fc 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -302,6 +302,86 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
     return MakeResult<VAddr>(heap_region_base);
 }
 
+ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
+    constexpr auto ignore_attribute =
+        MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
+    const auto src_check_result = CheckRangeState(
+        src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::All,
+        VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);
+
+    if (src_check_result.Failed()) {
+        return src_check_result.Code();
+    }
+
+    const auto mirror_result =
+        MirrorMemory(dst_address, src_address, size, MemoryState::ModuleCode);
+    if (mirror_result.IsError()) {
+        return mirror_result;
+    }
+
+    // Ensure we lock the source memory region.
+    const auto src_vma_result = CarveVMARange(src_address, size);
+    if (src_vma_result.Failed()) {
+        return src_vma_result.Code();
+    }
+    auto src_vma_iter = *src_vma_result;
+    src_vma_iter->second.attribute = MemoryAttribute::Locked;
+    Reprotect(src_vma_iter, VMAPermission::Read);
+
+    // The destination memory region is fine as is, however we need to make it read-only.
+    return ReprotectRange(dst_address, size, VMAPermission::Read);
+}
+
+ResultCode VMManager::UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
+    constexpr auto ignore_attribute =
+        MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
+    const auto src_check_result = CheckRangeState(
+        src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::None,
+        VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, ignore_attribute);
+
+    if (src_check_result.Failed()) {
+        return src_check_result.Code();
+    }
+
+    // Yes, the kernel only checks the first page of the region.
+    const auto dst_check_result =
+        CheckRangeState(dst_address, Memory::PAGE_SIZE, MemoryState::FlagModule,
+                        MemoryState::FlagModule, VMAPermission::None, VMAPermission::None,
+                        MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);
+
+    if (dst_check_result.Failed()) {
+        return dst_check_result.Code();
+    }
+
+    const auto dst_memory_state = std::get<MemoryState>(*dst_check_result);
+    const auto dst_contiguous_check_result = CheckRangeState(
+        dst_address, size, MemoryState::All, dst_memory_state, VMAPermission::None,
+        VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);
+
+    if (dst_contiguous_check_result.Failed()) {
+        return dst_contiguous_check_result.Code();
+    }
+
+    const auto unmap_result = UnmapRange(dst_address, size);
+    if (unmap_result.IsError()) {
+        return unmap_result;
+    }
+
+    // With the mirrored portion unmapped, restore the original region's traits.
+    const auto src_vma_result = CarveVMARange(src_address, size);
+    if (src_vma_result.Failed()) {
+        return src_vma_result.Code();
+    }
+    auto src_vma_iter = *src_vma_result;
+    src_vma_iter->second.state = MemoryState::Heap;
+    src_vma_iter->second.attribute = MemoryAttribute::None;
+    Reprotect(src_vma_iter, VMAPermission::ReadWrite);
+
+    if (dst_memory_state == MemoryState::ModuleCode) {
+        Core::System::GetInstance().InvalidateCpuInstructionCaches();
+    }
+
+    return unmap_result;
+}
+
 MemoryInfo VMManager::QueryMemory(VAddr address) const {
     const auto vma = FindVMA(address);
     MemoryInfo memory_info{};
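Before the header changes below, it may help to see the trait transitions that VMManager::MapCodeMemory and VMManager::UnmapCodeMemory perform, reduced to their essentials: the source heap range is locked and made read-only while the alias exists, and unmapping restores it. The enum and field names mirror the diff, but this is an illustrative sketch under those assumptions, not yuzu's real VMA machinery.

```cpp
// Illustrative model of the source/destination trait changes made by
// MapCodeMemory and UnmapCodeMemory (simplified, assumed types).
#include <cstdio>

enum class MemoryState { Heap, ModuleCode };
enum class VMAPermission { None, Read, ReadWrite };
enum class MemoryAttribute { None, Locked };

struct Region {
    MemoryState state;
    VMAPermission perms;
    MemoryAttribute attribute;
};

// Mirrors MapCodeMemory's post-conditions: the source heap range is locked and
// made read-only, and the alias is created read-only with ModuleCode state.
void MapCode(Region& src, Region& dst) {
    src.attribute = MemoryAttribute::Locked;
    src.perms = VMAPermission::Read;
    dst = {MemoryState::ModuleCode, VMAPermission::Read, MemoryAttribute::None};
}

// Mirrors UnmapCodeMemory's post-conditions: once the alias is gone, the source
// range gets its heap state, read/write permissions, and cleared attributes back.
void UnmapCode(Region& src) {
    src.state = MemoryState::Heap;
    src.perms = VMAPermission::ReadWrite;
    src.attribute = MemoryAttribute::None;
}

int main() {
    Region src{MemoryState::Heap, VMAPermission::ReadWrite, MemoryAttribute::None};
    Region dst{};
    MapCode(src, dst);
    std::printf("after map: src locked=%d, src read-only=%d, dst is ModuleCode=%d\n",
                src.attribute == MemoryAttribute::Locked, src.perms == VMAPermission::Read,
                dst.state == MemoryState::ModuleCode);
    UnmapCode(src);
    std::printf("after unmap: src is heap r/w again=%d\n",
                src.state == MemoryState::Heap && src.perms == VMAPermission::ReadWrite);
    return 0;
}
```

The real implementation performs these transitions by carving a VMA for the source range and reprotecting it, which is why CheckRangeState and CarveVMARange appear in both functions in the diff above.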
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 6f484b7bf..288eb9450 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -43,6 +43,9 @@ enum class VMAPermission : u8 {
     ReadExecute = Read | Execute,
     WriteExecute = Write | Execute,
     ReadWriteExecute = Read | Write | Execute,
+
+    // Used as a wildcard when checking permissions across memory ranges
+    All = 0xFF,
 };
 
 constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) {
@@ -152,6 +155,9 @@ enum class MemoryState : u32 {
     FlagUncached = 1U << 24,
     FlagCodeMemory = 1U << 25,
 
+    // Wildcard used in range checking to indicate all states.
+    All = 0xFFFFFFFF,
+
     // Convenience flag sets to reduce repetition
     IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1,
 
@@ -415,6 +421,49 @@ public:
     ///
     ResultVal<VAddr> SetHeapSize(u64 size);
 
+    /// Maps a region of memory as code memory.
+    ///
+    /// @param dst_address The base address of the region to create the aliasing memory region.
+    /// @param src_address The base address of the region to be aliased.
+    /// @param size The total amount of memory to map in bytes.
+    ///
+    /// @pre Both memory regions lie within the actual addressable address space.
+    ///
+    /// @post After this function finishes execution, assuming success, then the address range
+    ///       [dst_address, dst_address+size) will alias the memory region
+    ///       [src_address, src_address+size).
+    ///       <p>
+    ///       What this also entails is as follows:
+    ///       1. The aliased region gains the Locked memory attribute.
+    ///       2. The aliased region becomes read-only.
+    ///       3. The aliasing region becomes read-only.
+    ///       4. The aliasing region is created with a memory state of MemoryState::ModuleCode.
+    ///
+    ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);
+
+    /// Unmaps a region of memory designated as code module memory.
+    ///
+    /// @param dst_address The base address of the memory region aliasing the source memory region.
+    /// @param src_address The base address of the memory region being aliased.
+    /// @param size The size of the memory region to unmap in bytes.
+    ///
+    /// @pre Both memory ranges lie within the actual addressable address space.
+    ///
+    /// @pre The memory region being unmapped has previously been mapped
+    ///      by a call to MapCodeMemory.
+    ///
+    /// @post After execution of the function, if successful, the aliasing memory region
+    ///       will be unmapped and the aliased region will have various traits about it
+    ///       restored to what they were prior to the original mapping call preceding
+    ///       this function call.
+    ///       <p>
+    ///       What this also entails is as follows:
+    ///       1. The state of the memory region will now indicate a general heap region.
+    ///       2. All memory attributes for the memory region are cleared.
+    ///       3. Memory permissions for the region are restored to user read/write.
+    ///
+    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);
+
     /// Queries the memory manager for information about the given address.
     ///
     /// @param address The address to query the memory manager about for information.
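All of the new SVC code above gates on 4 KiB alignment and on ranges that do not wrap the address space before the VMManager is consulted. The helpers below sketch plausible forms of those two checks; `Is4KBAligned` and `IsValidAddressRange` here are assumed reimplementations for illustration, not necessarily the exact helpers yuzu uses.

```cpp
// Assumed reimplementations of the validation the new SVCs rely on (illustrative only).
#include <cassert>
#include <cstdint>

using u64 = std::uint64_t;

// A page is 4 KiB, so an aligned value has its low 12 bits clear.
constexpr bool Is4KBAligned(u64 value) {
    return (value & 0xFFF) == 0;
}

// A range is acceptable when adding the size to the base does not wrap
// around the 64-bit address space (a zero size is rejected separately).
constexpr bool IsValidAddressRange(u64 address, u64 size) {
    return address + size > address;
}

int main() {
    assert(Is4KBAligned(0x10000000));
    assert(!Is4KBAligned(0x10000123));
    assert(IsValidAddressRange(0x10000000, 0x4000));
    assert(!IsValidAddressRange(0xFFFFFFFFFFFFF000ULL, 0x2000)); // wraps past the end
    return 0;
}
```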