-rw-r--r--  src/audio_core/audio_out.cpp                 3
-rw-r--r--  src/audio_core/cubeb_sink.cpp                2
-rw-r--r--  src/audio_core/cubeb_sink.h                  2
-rw-r--r--  src/audio_core/null_sink.h                   2
-rw-r--r--  src/audio_core/sink_details.cpp             53
-rw-r--r--  src/audio_core/sink_details.h               25
-rw-r--r--  src/common/thread_queue_list.h              16
-rw-r--r--  src/core/hle/kernel/process.cpp              2
-rw-r--r--  src/core/hle/kernel/process.h                3
-rw-r--r--  src/core/hle/kernel/scheduler.cpp           66
-rw-r--r--  src/core/hle/kernel/scheduler.h             69
-rw-r--r--  src/core/hle/kernel/svc.cpp                 90
-rw-r--r--  src/core/hle/kernel/svc.h                   16
-rw-r--r--  src/core/hle/kernel/svc_wrap.h              24
-rw-r--r--  src/core/hle/kernel/thread.h                 1
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp          25
-rw-r--r--  src/core/hle/kernel/vm_manager.h           152
-rw-r--r--  src/yuzu/configuration/configure_audio.cpp   7
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp              2
-rw-r--r--  src/yuzu/debugger/wait_tree.h                2
20 files changed, 430 insertions, 132 deletions
diff --git a/src/audio_core/audio_out.cpp b/src/audio_core/audio_out.cpp
index 0c8f5b18e..cbba17632 100644
--- a/src/audio_core/audio_out.cpp
+++ b/src/audio_core/audio_out.cpp
@@ -30,8 +30,7 @@ static Stream::Format ChannelsToStreamFormat(u32 num_channels) {
StreamPtr AudioOut::OpenStream(u32 sample_rate, u32 num_channels, std::string&& name,
Stream::ReleaseCallback&& release_callback) {
if (!sink) {
- const SinkDetails& sink_details = GetSinkDetails(Settings::values.sink_id);
- sink = sink_details.factory(Settings::values.audio_device_id);
+ sink = CreateSinkFromID(Settings::values.sink_id, Settings::values.audio_device_id);
}
return std::make_shared<Stream>(
diff --git a/src/audio_core/cubeb_sink.cpp b/src/audio_core/cubeb_sink.cpp
index d31a1c844..097328901 100644
--- a/src/audio_core/cubeb_sink.cpp
+++ b/src/audio_core/cubeb_sink.cpp
@@ -107,7 +107,7 @@ private:
static void StateCallback(cubeb_stream* stream, void* user_data, cubeb_state state);
};
-CubebSink::CubebSink(std::string target_device_name) {
+CubebSink::CubebSink(std::string_view target_device_name) {
if (cubeb_init(&ctx, "yuzu", nullptr) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
return;
diff --git a/src/audio_core/cubeb_sink.h b/src/audio_core/cubeb_sink.h
index 59cbf05e9..efb9d1634 100644
--- a/src/audio_core/cubeb_sink.h
+++ b/src/audio_core/cubeb_sink.h
@@ -15,7 +15,7 @@ namespace AudioCore {
class CubebSink final : public Sink {
public:
- explicit CubebSink(std::string device_id);
+ explicit CubebSink(std::string_view device_id);
~CubebSink() override;
SinkStream& AcquireSinkStream(u32 sample_rate, u32 num_channels,
diff --git a/src/audio_core/null_sink.h b/src/audio_core/null_sink.h
index a78d78893..61a28d542 100644
--- a/src/audio_core/null_sink.h
+++ b/src/audio_core/null_sink.h
@@ -10,7 +10,7 @@ namespace AudioCore {
class NullSink final : public Sink {
public:
- explicit NullSink(std::string){};
+ explicit NullSink(std::string_view) {}
~NullSink() override = default;
SinkStream& AcquireSinkStream(u32 /*sample_rate*/, u32 /*num_channels*/,
diff --git a/src/audio_core/sink_details.cpp b/src/audio_core/sink_details.cpp
index 67cf1f3b2..a848eb1c9 100644
--- a/src/audio_core/sink_details.cpp
+++ b/src/audio_core/sink_details.cpp
@@ -14,31 +14,68 @@
#include "common/logging/log.h"
namespace AudioCore {
+namespace {
+struct SinkDetails {
+ using FactoryFn = std::unique_ptr<Sink> (*)(std::string_view);
+ using ListDevicesFn = std::vector<std::string> (*)();
-// g_sink_details is ordered in terms of desirability, with the best choice at the top.
-const std::vector<SinkDetails> g_sink_details = {
+ /// Name for this sink.
+ const char* id;
+ /// A method to call to construct an instance of this type of sink.
+ FactoryFn factory;
+ /// A method to call to list available devices.
+ ListDevicesFn list_devices;
+};
+
+// sink_details is ordered in terms of desirability, with the best choice at the top.
+constexpr SinkDetails sink_details[] = {
#ifdef HAVE_CUBEB
- SinkDetails{"cubeb", &std::make_unique<CubebSink, std::string>, &ListCubebSinkDevices},
+ SinkDetails{"cubeb",
+ [](std::string_view device_id) -> std::unique_ptr<Sink> {
+ return std::make_unique<CubebSink>(device_id);
+ },
+ &ListCubebSinkDevices},
#endif
- SinkDetails{"null", &std::make_unique<NullSink, std::string>,
+ SinkDetails{"null",
+ [](std::string_view device_id) -> std::unique_ptr<Sink> {
+ return std::make_unique<NullSink>(device_id);
+ },
[] { return std::vector<std::string>{"null"}; }},
};
const SinkDetails& GetSinkDetails(std::string_view sink_id) {
auto iter =
- std::find_if(g_sink_details.begin(), g_sink_details.end(),
+ std::find_if(std::begin(sink_details), std::end(sink_details),
[sink_id](const auto& sink_detail) { return sink_detail.id == sink_id; });
- if (sink_id == "auto" || iter == g_sink_details.end()) {
+ if (sink_id == "auto" || iter == std::end(sink_details)) {
if (sink_id != "auto") {
LOG_ERROR(Audio, "AudioCore::SelectSink given invalid sink_id {}", sink_id);
}
// Auto-select.
- // g_sink_details is ordered in terms of desirability, with the best choice at the front.
- iter = g_sink_details.begin();
+ // sink_details is ordered in terms of desirability, with the best choice at the front.
+ iter = std::begin(sink_details);
}
return *iter;
}
+} // Anonymous namespace
+
+std::vector<const char*> GetSinkIDs() {
+ std::vector<const char*> sink_ids(std::size(sink_details));
+
+ std::transform(std::begin(sink_details), std::end(sink_details), std::begin(sink_ids),
+ [](const auto& sink) { return sink.id; });
+
+ return sink_ids;
+}
+
+std::vector<std::string> GetDeviceListForSink(std::string_view sink_id) {
+ return GetSinkDetails(sink_id).list_devices();
+}
+
+std::unique_ptr<Sink> CreateSinkFromID(std::string_view sink_id, std::string_view device_id) {
+ return GetSinkDetails(sink_id).factory(device_id);
+}
} // namespace AudioCore
diff --git a/src/audio_core/sink_details.h b/src/audio_core/sink_details.h
index 03534b187..bc8786270 100644
--- a/src/audio_core/sink_details.h
+++ b/src/audio_core/sink_details.h
@@ -4,34 +4,21 @@
#pragma once
-#include <functional>
-#include <memory>
#include <string>
#include <string_view>
-#include <utility>
#include <vector>
namespace AudioCore {
class Sink;
-struct SinkDetails {
- using FactoryFn = std::function<std::unique_ptr<Sink>(std::string)>;
- using ListDevicesFn = std::function<std::vector<std::string>()>;
+/// Retrieves the IDs for all available audio sinks.
+std::vector<const char*> GetSinkIDs();
- SinkDetails(const char* id_, FactoryFn factory_, ListDevicesFn list_devices_)
- : id(id_), factory(std::move(factory_)), list_devices(std::move(list_devices_)) {}
+/// Gets the list of devices for a particular sink identified by the given ID.
+std::vector<std::string> GetDeviceListForSink(std::string_view sink_id);
- /// Name for this sink.
- const char* id;
- /// A method to call to construct an instance of this type of sink.
- FactoryFn factory;
- /// A method to call to list available devices.
- ListDevicesFn list_devices;
-};
-
-extern const std::vector<SinkDetails> g_sink_details;
-
-const SinkDetails& GetSinkDetails(std::string_view sink_id);
+/// Creates an audio sink identified by the given device ID.
+std::unique_ptr<Sink> CreateSinkFromID(std::string_view sink_id, std::string_view device_id);
} // namespace AudioCore
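
A minimal sketch of how a caller might drive the new free-function interface declared above. The helper name and structure here are hypothetical; the real callers are AudioOut::OpenStream and the Qt audio configuration dialog touched later in this change.

    #include <memory>
    #include <string>
    #include <vector>
    #include "audio_core/sink.h"
    #include "audio_core/sink_details.h"

    // Hypothetical helper showing the intended call sequence of the new API.
    std::unique_ptr<AudioCore::Sink> MakeConfiguredSink(const std::string& sink_id,
                                                        const std::string& device_id) {
        // GetSinkIDs() lists selectable backends ("cubeb" when built with HAVE_CUBEB, "null"),
        // and GetDeviceListForSink() lists devices for one backend; a frontend shows both.
        const std::vector<const char*> backends = AudioCore::GetSinkIDs();
        const std::vector<std::string> devices = AudioCore::GetDeviceListForSink(sink_id);
        (void)backends;
        (void)devices;

        // "auto" (or an unknown id) falls back to the most desirable available backend.
        return AudioCore::CreateSinkFromID(sink_id, device_id);
    }
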
diff --git a/src/common/thread_queue_list.h b/src/common/thread_queue_list.h
index 133122c5f..e7594db68 100644
--- a/src/common/thread_queue_list.h
+++ b/src/common/thread_queue_list.h
@@ -49,6 +49,22 @@ struct ThreadQueueList {
return T();
}
+ template <typename UnaryPredicate>
+ T get_first_filter(UnaryPredicate filter) const {
+ const Queue* cur = first;
+ while (cur != nullptr) {
+ if (!cur->data.empty()) {
+ for (const auto& item : cur->data) {
+ if (filter(item))
+ return item;
+ }
+ }
+ cur = cur->next_nonempty;
+ }
+
+ return T();
+ }
+
T pop_first() {
Queue* cur = first;
while (cur != nullptr) {
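
A self-contained usage sketch of the new get_first_filter helper; the values and priorities below are made up for illustration. The scheduler change later in this diff uses the same helper with a Thread affinity/priority predicate.

    #include <cstdio>
    #include "common/thread_queue_list.h"

    int main() {
        // Priorities mirror the kernel convention: lower value = higher priority.
        Common::ThreadQueueList<int, 64> queue;
        queue.prepare(10);
        queue.prepare(20);
        queue.push_back(10, 7);
        queue.push_back(20, 42);

        // Returns the first ready item satisfying the predicate, scanning queues in
        // priority order, or a default-constructed T (0 here) when nothing matches.
        const int even = queue.get_first_filter([](int item) { return item % 2 == 0; });
        std::printf("%d\n", even); // prints 42
        return 0;
    }
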
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index c817fb449..5356a4a3f 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -150,7 +150,7 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
vm_manager
.MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size,
std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
- MemoryState::Mapped)
+ MemoryState::Stack)
.Unwrap();
vm_manager.LogLayout();
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index bcb9ac4b8..459eedfa6 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -262,8 +262,7 @@ public:
ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
ResultCode HeapFree(VAddr target, u32 size);
- ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size,
- MemoryState state = MemoryState::Mapped);
+ ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size);
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5a5f4cef1..df4d6cf0a 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -9,6 +9,7 @@
#include "common/logging/log.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
+#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
@@ -179,4 +180,69 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
ready_queue.prepare(priority);
}
+Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
+ std::lock_guard<std::mutex> lock(scheduler_mutex);
+
+ const u32 mask = 1U << core;
+ return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
+ return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
+ });
+}
+
+void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
+ ASSERT(thread != nullptr);
+ // Avoid yielding if the thread isn't even running.
+ ASSERT(thread->GetStatus() == ThreadStatus::Running);
+
+ // Sanity check that the priority is valid
+ ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
+
+ // Yield this thread -- sleep for zero time and force reschedule to different thread
+ WaitCurrentThread_Sleep();
+ GetCurrentThread()->WakeAfterDelay(0);
+}
+
+void Scheduler::YieldWithLoadBalancing(Thread* thread) {
+ ASSERT(thread != nullptr);
+ const auto priority = thread->GetPriority();
+ const auto core = static_cast<u32>(thread->GetProcessorID());
+
+ // Avoid yielding if the thread isn't even running.
+ ASSERT(thread->GetStatus() == ThreadStatus::Running);
+
+ // Sanity check that the priority is valid
+ ASSERT(priority < THREADPRIO_COUNT);
+
+ // Sleep for zero time to be able to force reschedule to different thread
+ WaitCurrentThread_Sleep();
+ GetCurrentThread()->WakeAfterDelay(0);
+
+ Thread* suggested_thread = nullptr;
+
+ // Search through all of the cpu cores (except this one) for a suggested thread.
+ // Take the first non-nullptr one
+ for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
+ const auto res =
+ Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread(
+ core, priority);
+
+ // If scheduler provides a suggested thread
+ if (res != nullptr) {
+ // And it's better than the current suggested thread (or is the first valid one)
+ if (suggested_thread == nullptr ||
+ suggested_thread->GetPriority() > res->GetPriority()) {
+ suggested_thread = res;
+ }
+ }
+ }
+
+ // If a suggested thread was found, queue that for this core
+ if (suggested_thread != nullptr)
+ suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
+}
+
+void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
+ UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
+}
+
} // namespace Kernel
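
A small standalone illustration of the priority convention the comparisons above rely on: numerically smaller priorities are more urgent (0 is highest, 63 is lowest, per thread.h), which is why GetNextSuggestedThread keeps threads with GetPriority() strictly below the limit and YieldWithLoadBalancing replaces its suggestion when a candidate's priority value is smaller. The constants below are examples only.

    #include <cassert>
    #include <cstdint>

    // Candidate wins when its priority value is numerically smaller (i.e. more urgent).
    constexpr bool IsBetter(std::uint32_t candidate, std::uint32_t current_best) {
        return candidate < current_best;
    }

    int main() {
        constexpr std::uint32_t userland_max = 24; // THREADPRIO_USERLAND_MAX
        constexpr std::uint32_t default_prio = 44; // THREADPRIO_DEFAULT
        assert(IsBetter(userland_max, default_prio));  // a prio-24 thread beats a prio-44 one
        assert(!IsBetter(default_prio, userland_max));
        return 0;
    }
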
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index c63032b7d..97ced4dfc 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -51,6 +51,75 @@ public:
/// Sets the priority of a thread in the scheduler
void SetThreadPriority(Thread* thread, u32 priority);
+ /// Gets the next suggested thread for load balancing
+ Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const;
+
+ /**
+ * YieldWithoutLoadBalancing -- analogous to normal yield on a system
+ * Moves the thread to the end of the ready queue for its priority, and then reschedules the
+ * system to the new head of the queue.
+ *
+ * Example (Single Core -- but can be extrapolated to multi):
+ * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->)
+ * Currently Running: ThreadR
+ *
+ * ThreadR calls YieldWithoutLoadBalancing
+ *
+ * ThreadR is moved to the end of ready_queue[prio=0]:
+ * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->)
+ * Currently Running: Nothing
+ *
+ * System is rescheduled (ThreadA is popped off of queue):
+ * ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->)
+ * Currently Running: ThreadA
+ *
+ * If the queue is empty at time of call, no yielding occurs. This does not cross between cores
+ * or priorities at all.
+ */
+ void YieldWithoutLoadBalancing(Thread* thread);
+
+ /**
+ * YieldWithLoadBalancing -- yield but with better selection of the new running thread
+ * Moves the current thread to the end of the ready queue for its priority, then selects a
+ * 'suggested thread' (a thread on a different core that could run on this core) from the
+ * scheduler, changes its core, and reschedules the current core to that thread.
+ *
+ * Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were
+ * single core):
+ * ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant)
+ * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
+ * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
+ *
+ * ThreadQ calls YieldWithLoadBalancing
+ *
+ * ThreadQ is moved to the end of ready_queue[core=0][prio=0]:
+ * ready_queue[core=0][prio=0]: ThreadA, ThreadB
+ * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
+ * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
+ *
+ * A list of suggested threads for each core is compiled
+ * Suggested Threads: {ThreadC on Core 1}
+ * If this were quad core (as the switch is), there could be between 0 and 3 threads in this
+ * list. If there are more than one, the thread is selected by highest prio.
+ *
+ * ThreadC is core changed to Core 0:
+ * ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ
+ * ready_queue[core=1][prio=0]: ThreadD
+ * Currently Running: None on Core 0 || ThreadP on Core 1
+ *
+ * System is rescheduled (ThreadC is popped off of queue):
+ * ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ
+ * ready_queue[core=1][prio=0]: ThreadD
+ * Currently Running: ThreadC on Core 0 || ThreadP on Core 1
+ *
+ * If no suggested threads can be found this will behave just as normal yield. If there are
+ * multiple candidates for the suggested thread on a core, the highest prio is taken.
+ */
+ void YieldWithLoadBalancing(Thread* thread);
+
+ /// Currently unknown -- asserts as unimplemented on call
+ void YieldAndWaitForLoadBalancing(Thread* thread);
+
/// Returns a list of all threads managed by the scheduler
const std::vector<SharedPtr<Thread>>& GetThreadList() const {
return thread_list;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 8b0b3671a..348a22904 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -35,6 +35,7 @@
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/hle/service/service.h"
+#include "core/memory.h"
namespace Kernel {
namespace {
@@ -273,7 +274,7 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
return result;
}
- return current_process->MirrorMemory(dst_addr, src_addr, size);
+ return current_process->MirrorMemory(dst_addr, src_addr, size, MemoryState::Stack);
}
/// Unmaps a region that was previously mapped with svcMapMemory
@@ -1066,10 +1067,9 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64
return shared_memory->Unmap(*current_process, addr);
}
-/// Query process memory
-static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_info*/,
- Handle process_handle, u64 addr) {
- LOG_TRACE(Kernel_SVC, "called process=0x{:08X} addr={:X}", process_handle, addr);
+static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
+ Handle process_handle, VAddr address) {
+ LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
SharedPtr<Process> process = handle_table.Get<Process>(process_handle);
if (!process) {
@@ -1079,28 +1079,32 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i
}
const auto& vm_manager = process->VMManager();
- const auto vma = vm_manager.FindVMA(addr);
-
- memory_info->attributes = 0;
- if (vm_manager.IsValidHandle(vma)) {
- memory_info->base_address = vma->second.base;
- memory_info->permission = static_cast<u32>(vma->second.permissions);
- memory_info->size = vma->second.size;
- memory_info->type = static_cast<u32>(vma->second.meminfo_state);
- } else {
- memory_info->base_address = 0;
- memory_info->permission = static_cast<u32>(VMAPermission::None);
- memory_info->size = 0;
- memory_info->type = static_cast<u32>(MemoryState::Unmapped);
- }
+ const MemoryInfo memory_info = vm_manager.QueryMemory(address);
+
+ Memory::Write64(memory_info_address, memory_info.base_address);
+ Memory::Write64(memory_info_address + 8, memory_info.size);
+ Memory::Write32(memory_info_address + 16, memory_info.state);
+ Memory::Write32(memory_info_address + 20, memory_info.attributes);
+ Memory::Write32(memory_info_address + 24, memory_info.permission);
+ Memory::Write32(memory_info_address + 32, memory_info.ipc_ref_count);
+ Memory::Write32(memory_info_address + 28, memory_info.device_ref_count);
+ Memory::Write32(memory_info_address + 36, 0);
+
+ // Page info appears to be currently unused by the kernel and is always set to zero.
+ Memory::Write32(page_info_address, 0);
return RESULT_SUCCESS;
}
-/// Query memory
-static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAddr addr) {
- LOG_TRACE(Kernel_SVC, "called, addr={:X}", addr);
- return QueryProcessMemory(memory_info, page_info, CurrentProcess, addr);
+static ResultCode QueryMemory(VAddr memory_info_address, VAddr page_info_address,
+ VAddr query_address) {
+ LOG_TRACE(Kernel_SVC,
+ "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
+ "query_address=0x{:016X}",
+ memory_info_address, page_info_address, query_address);
+
+ return QueryProcessMemory(memory_info_address, page_info_address, CurrentProcess,
+ query_address);
}
/// Exits the current process
@@ -1204,18 +1208,38 @@ static void ExitThread() {
static void SleepThread(s64 nanoseconds) {
LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
- // Don't attempt to yield execution if there are no available threads to run,
- // this way we avoid a useless reschedule to the idle thread.
- if (nanoseconds == 0 && !Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
- return;
+ enum class SleepType : s64 {
+ YieldWithoutLoadBalancing = 0,
+ YieldWithLoadBalancing = -1,
+ YieldAndWaitForLoadBalancing = -2,
+ };
- // Sleep current thread and check for next thread to schedule
- WaitCurrentThread_Sleep();
+ if (nanoseconds <= 0) {
+ auto& scheduler{Core::System::GetInstance().CurrentScheduler()};
+ switch (static_cast<SleepType>(nanoseconds)) {
+ case SleepType::YieldWithoutLoadBalancing:
+ scheduler.YieldWithoutLoadBalancing(GetCurrentThread());
+ break;
+ case SleepType::YieldWithLoadBalancing:
+ scheduler.YieldWithLoadBalancing(GetCurrentThread());
+ break;
+ case SleepType::YieldAndWaitForLoadBalancing:
+ scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread());
+ break;
+ default:
+ UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
+ }
+ } else {
+ // Sleep current thread and check for next thread to schedule
+ WaitCurrentThread_Sleep();
- // Create an event to wake the thread up after the specified nanosecond delay has passed
- GetCurrentThread()->WakeAfterDelay(nanoseconds);
+ // Create an event to wake the thread up after the specified nanosecond delay has passed
+ GetCurrentThread()->WakeAfterDelay(nanoseconds);
+ }
- Core::System::GetInstance().PrepareReschedule();
+ // Reschedule all CPU cores
+ for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i)
+ Core::System::GetInstance().CpuCore(i).PrepareReschedule();
}
/// Wait process wide key atomic
@@ -1907,7 +1931,7 @@ static const FunctionDef SVC_Table[] = {
{0x73, nullptr, "SetProcessMemoryPermission"},
{0x74, nullptr, "MapProcessMemory"},
{0x75, nullptr, "UnmapProcessMemory"},
- {0x76, nullptr, "QueryProcessMemory"},
+ {0x76, SvcWrap<QueryProcessMemory>, "QueryProcessMemory"},
{0x77, nullptr, "MapProcessCodeMemory"},
{0x78, nullptr, "UnmapProcessCodeMemory"},
{0x79, nullptr, "CreateProcess"},
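
For context, a sketch of what the new SleepThread handling means from the guest side. The special values come from the SleepType enum above; the wrapper below is a stand-in for a libnx-style svcSleepThread and is not part of this change.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the guest-side syscall wrapper; the real call traps into the
    // SleepThread handler shown above.
    void svcSleepThread(std::int64_t nanoseconds) {
        std::printf("svcSleepThread(%lld)\n", static_cast<long long>(nanoseconds));
    }

    int main() {
        svcSleepThread(0);          // YieldWithoutLoadBalancing: requeue on this core only
        svcSleepThread(-1);         // YieldWithLoadBalancing: may migrate a suggested thread here
        svcSleepThread(-2);         // YieldAndWaitForLoadBalancing: asserts unimplemented above
        svcSleepThread(1'000'000);  // positive values still sleep for the given nanoseconds (~1 ms)
        return 0;
    }
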
diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h
index b06aac4ec..c37ae0f98 100644
--- a/src/core/hle/kernel/svc.h
+++ b/src/core/hle/kernel/svc.h
@@ -8,22 +8,6 @@
namespace Kernel {
-struct MemoryInfo {
- u64 base_address;
- u64 size;
- u32 type;
- u32 attributes;
- u32 permission;
- u32 device_refcount;
- u32 ipc_refcount;
- INSERT_PADDING_WORDS(1);
-};
-static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size.");
-
-struct PageInfo {
- u64 flags;
-};
-
void CallSVC(u32 immediate);
} // namespace Kernel
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 24aef46c9..2f758b959 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -7,9 +7,7 @@
#include "common/common_types.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
-#include "core/hle/kernel/svc.h"
#include "core/hle/result.h"
-#include "core/memory.h"
namespace Kernel {
@@ -129,7 +127,12 @@ void SvcWrap() {
template <ResultCode func(u64, u64, u32, u32)>
void SvcWrap() {
FuncReturn(
- func(Param(0), Param(1), static_cast<u32>(Param(3)), static_cast<u32>(Param(3))).raw);
+ func(Param(0), Param(1), static_cast<u32>(Param(2)), static_cast<u32>(Param(3))).raw);
+}
+
+template <ResultCode func(u64, u64, u32, u64)>
+void SvcWrap() {
+ FuncReturn(func(Param(0), Param(1), static_cast<u32>(Param(2)), Param(3)).raw);
}
template <ResultCode func(u32, u64, u32)>
@@ -191,21 +194,6 @@ void SvcWrap() {
FuncReturn(retval);
}
-template <ResultCode func(MemoryInfo*, PageInfo*, u64)>
-void SvcWrap() {
- MemoryInfo memory_info = {};
- PageInfo page_info = {};
- u32 retval = func(&memory_info, &page_info, Param(2)).raw;
-
- Memory::Write64(Param(0), memory_info.base_address);
- Memory::Write64(Param(0) + 8, memory_info.size);
- Memory::Write32(Param(0) + 16, memory_info.type);
- Memory::Write32(Param(0) + 20, memory_info.attributes);
- Memory::Write32(Param(0) + 24, memory_info.permission);
-
- FuncReturn(retval);
-}
-
template <ResultCode func(u32*, u64, u64, u32)>
void SvcWrap() {
u32 param_1 = 0;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index d384d50db..77aec099a 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -26,6 +26,7 @@ enum ThreadPriority : u32 {
THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
THREADPRIO_LOWEST = 63, ///< Lowest thread priority
+ THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
};
enum ThreadProcessorId : s32 {
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 6187993ce..d3b55a51e 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -25,14 +25,14 @@ static const char* GetMemoryStateName(MemoryState state) {
"CodeMutable", "Heap",
"Shared", "Unknown1",
"ModuleCodeStatic", "ModuleCodeMutable",
- "IpcBuffer0", "Mapped",
+ "IpcBuffer0", "Stack",
"ThreadLocal", "TransferMemoryIsolated",
"TransferMemory", "ProcessMemory",
- "Unknown2", "IpcBuffer1",
+ "Inaccessible", "IpcBuffer1",
"IpcBuffer3", "KernelStack",
};
- return names[static_cast<int>(state)];
+ return names[ToSvcMemoryState(state)];
}
bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
@@ -302,6 +302,25 @@ ResultCode VMManager::HeapFree(VAddr target, u64 size) {
return RESULT_SUCCESS;
}
+MemoryInfo VMManager::QueryMemory(VAddr address) const {
+ const auto vma = FindVMA(address);
+ MemoryInfo memory_info{};
+
+ if (IsValidHandle(vma)) {
+ memory_info.base_address = vma->second.base;
+ memory_info.permission = static_cast<u32>(vma->second.permissions);
+ memory_info.size = vma->second.size;
+ memory_info.state = ToSvcMemoryState(vma->second.meminfo_state);
+ } else {
+ memory_info.base_address = address_space_end;
+ memory_info.permission = static_cast<u32>(VMAPermission::None);
+ memory_info.size = 0 - address_space_end;
+ memory_info.state = static_cast<u32>(MemoryState::Inaccessible);
+ }
+
+ return memory_info;
+}
+
ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) {
const auto vma = FindVMA(src_addr);
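
One non-obvious detail in QueryMemory above: for an invalid address the reported block starts at address_space_end and its size is computed as 0 - address_space_end, relying on unsigned wraparound so that base + size covers the remainder of the 64-bit address space. A minimal standalone check, using an example address-space size rather than the real per-process value:

    #include <cstdint>

    int main() {
        // Example value only; the real address_space_end depends on the process' address space type.
        constexpr std::uint64_t address_space_end = std::uint64_t{1} << 36;
        constexpr std::uint64_t size = 0 - address_space_end; // wraps modulo 2^64

        // The reported "inaccessible" block therefore extends to the top of the 64-bit space.
        static_assert(address_space_end + size == 0, "base + size wraps around to zero");
        return 0;
    }
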
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index a12419d1e..10bacac3e 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -43,26 +43,129 @@ enum class VMAPermission : u8 {
ReadWriteExecute = Read | Write | Execute,
};
-/// Set of values returned in MemoryInfo.state by svcQueryMemory.
+// clang-format off
+/// Represents memory states and any relevant flags, as used by the kernel.
+/// svcQueryMemory interprets these by masking away all but the first eight
+/// bits when storing memory state into a MemoryInfo instance.
enum class MemoryState : u32 {
- Unmapped = 0x0,
- Io = 0x1,
- Normal = 0x2,
- CodeStatic = 0x3,
- CodeMutable = 0x4,
- Heap = 0x5,
- Shared = 0x6,
- ModuleCodeStatic = 0x8,
- ModuleCodeMutable = 0x9,
- IpcBuffer0 = 0xA,
- Mapped = 0xB,
- ThreadLocal = 0xC,
- TransferMemoryIsolated = 0xD,
- TransferMemory = 0xE,
- ProcessMemory = 0xF,
- IpcBuffer1 = 0x11,
- IpcBuffer3 = 0x12,
- KernelStack = 0x13,
+ Mask = 0xFF,
+ FlagProtect = 1U << 8,
+ FlagDebug = 1U << 9,
+ FlagIPC0 = 1U << 10,
+ FlagIPC3 = 1U << 11,
+ FlagIPC1 = 1U << 12,
+ FlagMapped = 1U << 13,
+ FlagCode = 1U << 14,
+ FlagAlias = 1U << 15,
+ FlagModule = 1U << 16,
+ FlagTransfer = 1U << 17,
+ FlagQueryPhysicalAddressAllowed = 1U << 18,
+ FlagSharedDevice = 1U << 19,
+ FlagSharedDeviceAligned = 1U << 20,
+ FlagIPCBuffer = 1U << 21,
+ FlagMemoryPoolAllocated = 1U << 22,
+ FlagMapProcess = 1U << 23,
+ FlagUncached = 1U << 24,
+ FlagCodeMemory = 1U << 25,
+
+ // Convenience flag sets to reduce repetition
+ IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1,
+
+ CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed |
+ FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+ DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer |
+ FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned |
+ FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached,
+
+ Unmapped = 0x00,
+ Io = 0x01 | FlagMapped,
+ Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
+ CodeStatic = 0x03 | CodeFlags | FlagMapProcess,
+ CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory,
+ Heap = 0x05 | DataFlags | FlagCodeMemory,
+ Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
+ ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
+ ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
+
+ IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
+ IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
+
+ Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed |
+ FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+ ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated,
+
+ TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed |
+ FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated |
+ FlagUncached,
+
+ TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
+ FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+ ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated,
+
+ // Used to signify an inaccessible or invalid memory region with memory queries
+ Inaccessible = 0x10,
+
+ IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
+ FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+ IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed |
+ FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+ KernelStack = 0x13 | FlagMapped,
+};
+// clang-format on
+
+constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) {
+ return static_cast<MemoryState>(u32(lhs) | u32(rhs));
+}
+
+constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) {
+ return static_cast<MemoryState>(u32(lhs) & u32(rhs));
+}
+
+constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) {
+ return static_cast<MemoryState>(u32(lhs) ^ u32(rhs));
+}
+
+constexpr MemoryState operator~(MemoryState lhs) {
+ return static_cast<MemoryState>(~u32(lhs));
+}
+
+constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) {
+ lhs = lhs | rhs;
+ return lhs;
+}
+
+constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) {
+ lhs = lhs & rhs;
+ return lhs;
+}
+
+constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) {
+ lhs = lhs ^ rhs;
+ return lhs;
+}
+
+constexpr u32 ToSvcMemoryState(MemoryState state) {
+ return static_cast<u32>(state & MemoryState::Mask);
+}
+
+struct MemoryInfo {
+ u64 base_address;
+ u64 size;
+ u32 state;
+ u32 attributes;
+ u32 permission;
+ u32 ipc_ref_count;
+ u32 device_ref_count;
+};
+static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size.");
+
+struct PageInfo {
+ u32 flags;
};
/**
@@ -186,8 +289,15 @@ public:
ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
ResultCode HeapFree(VAddr target, u64 size);
- ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size,
- MemoryState state = MemoryState::Mapped);
+ ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
+
+ /// Queries the memory manager for information about the given address.
+ ///
+ /// @param address The address to query the memory manager about for information.
+ ///
+ /// @return A MemoryInfo instance containing information about the given address.
+ ///
+ MemoryInfo QueryMemory(VAddr address) const;
/**
* Scans all VMAs and updates the page table range of any that use the given vector as backing
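
A small sketch of how the new MemoryState flag operators and ToSvcMemoryState compose, written as if it lived in a translation unit inside the repository; the numeric expectations follow directly from the definitions above.

    #include "core/hle/kernel/vm_manager.h"

    namespace Kernel {

    // svcQueryMemory reports only the low eight bits, so Heap is still seen as 0x05
    // by the guest even though the internal value carries DataFlags | FlagCodeMemory.
    static_assert(ToSvcMemoryState(MemoryState::Heap) == 0x05);
    static_assert(ToSvcMemoryState(MemoryState::Stack) == 0x0B);

    // The flag operators allow testing individual attributes directly.
    static_assert((MemoryState::Heap & MemoryState::FlagMapped) != MemoryState::Unmapped);
    static_assert((MemoryState::Io & MemoryState::FlagCode) == MemoryState::Unmapped);

    } // namespace Kernel
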
diff --git a/src/yuzu/configuration/configure_audio.cpp b/src/yuzu/configuration/configure_audio.cpp
index eb1da0f9e..5d9ccc6e8 100644
--- a/src/yuzu/configuration/configure_audio.cpp
+++ b/src/yuzu/configuration/configure_audio.cpp
@@ -17,8 +17,8 @@ ConfigureAudio::ConfigureAudio(QWidget* parent)
ui->output_sink_combo_box->clear();
ui->output_sink_combo_box->addItem("auto");
- for (const auto& sink_detail : AudioCore::g_sink_details) {
- ui->output_sink_combo_box->addItem(sink_detail.id);
+ for (const char* id : AudioCore::GetSinkIDs()) {
+ ui->output_sink_combo_box->addItem(id);
}
connect(ui->volume_slider, &QSlider::valueChanged, this,
@@ -97,8 +97,7 @@ void ConfigureAudio::updateAudioDevices(int sink_index) {
ui->audio_device_combo_box->addItem(AudioCore::auto_device_name);
const std::string sink_id = ui->output_sink_combo_box->itemText(sink_index).toStdString();
- const std::vector<std::string> device_list = AudioCore::GetSinkDetails(sink_id).list_devices();
- for (const auto& device : device_list) {
+ for (const auto& device : AudioCore::GetDeviceListForSink(sink_id)) {
ui->audio_device_combo_box->addItem(QString::fromStdString(device));
}
}
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index f9c18ede4..6b3a757e0 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -75,7 +75,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
return item_list;
}
-WaitTreeText::WaitTreeText(const QString& t) : text(t) {}
+WaitTreeText::WaitTreeText(QString t) : text(std::move(t)) {}
WaitTreeText::~WaitTreeText() = default;
QString WaitTreeText::GetText() const {
diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h
index 492fb6ac9..e639ef412 100644
--- a/src/yuzu/debugger/wait_tree.h
+++ b/src/yuzu/debugger/wait_tree.h
@@ -52,7 +52,7 @@ private:
class WaitTreeText : public WaitTreeItem {
Q_OBJECT
public:
- explicit WaitTreeText(const QString& text);
+ explicit WaitTreeText(QString text);
~WaitTreeText() override;
QString GetText() const override;
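
The WaitTreeText change above swaps a const-reference parameter for pass-by-value plus std::move, so callers passing temporaries avoid a copy. A minimal standalone sketch of the idiom using generic types rather than the Qt class:

    #include <string>
    #include <utility>

    class Label {
    public:
        // Pass by value, then move into the member: lvalue arguments pay one copy,
        // rvalue/temporary arguments pay only moves.
        explicit Label(std::string text) : text_(std::move(text)) {}

    private:
        std::string text_;
    };

    int main() {
        std::string name = "ThreadA";
        Label from_lvalue(name);               // one copy into the parameter, then a move
        Label from_rvalue(std::string("tmp")); // moves all the way through
        return 0;
    }
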