Diffstat (limited to 'src'): 465 files changed, 22370 insertions, 10239 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 54de1dc94..0ac3d254e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -58,13 +58,11 @@ if (MSVC) # Warnings /W3 - /we4018 # 'expression': signed/unsigned mismatch + /WX + /we4062 # Enumerator 'identifier' in a switch of enum 'enumeration' is not handled - /we4101 # 'identifier': unreferenced local variable /we4189 # 'identifier': local variable is initialized but not referenced /we4265 # 'class': class has virtual functions, but destructor is not virtual - /we4267 # 'var': conversion from 'size_t' to 'type', possible loss of data - /we4305 # 'context': truncation from 'type1' to 'type2' /we4388 # 'expression': signed/unsigned mismatch /we4389 # 'operator': signed/unsigned mismatch /we4456 # Declaration of 'identifier' hides previous local declaration @@ -75,10 +73,13 @@ if (MSVC) /we4547 # 'operator': operator before comma has no effect; expected operator with side-effect /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'? /we4555 # Expression has no effect; expected expression with side-effect - /we4715 # 'function': not all control paths return a value - /we4834 # Discarding return value of function with 'nodiscard' attribute + /we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior. /we5038 # data member 'member1' will be initialized after data member 'member2' + /we5233 # explicit lambda capture 'identifier' is not used /we5245 # 'function': unreferenced function with internal linkage has been removed + + /wd4100 # 'identifier': unreferenced formal parameter + /wd4324 # 'struct_name': structure was padded due to __declspec(align()) ) if (USE_CCACHE) @@ -99,28 +100,23 @@ if (MSVC) set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE) else() add_compile_options( - -Wall - -Werror=array-bounds - -Werror=implicit-fallthrough + -Werror=all + -Werror=extra -Werror=missing-declarations - -Werror=missing-field-initializers - -Werror=reorder -Werror=shadow - -Werror=sign-compare - -Werror=switch - -Werror=uninitialized - -Werror=unused-function - -Werror=unused-result - -Werror=unused-variable - -Wextra - -Wmissing-declarations + -Werror=unused + -Wno-attributes -Wno-invalid-offsetof -Wno-unused-parameter + + $<$<CXX_COMPILER_ID:Clang>:-Wno-braced-scalar-init> + $<$<CXX_COMPILER_ID:Clang>:-Wno-unused-private-field> ) if (ARCHITECTURE_x86_64) add_compile_options("-mcx16") + add_compile_options("-fwrapv") endif() if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang) diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt index 5fe1d5fa5..0a1f3bf18 100644 --- a/src/audio_core/CMakeLists.txt +++ b/src/audio_core/CMakeLists.txt @@ -194,6 +194,7 @@ add_library(audio_core STATIC sink/sink.h sink/sink_details.cpp sink/sink_details.h + sink/sink_stream.cpp sink/sink_stream.h ) @@ -205,20 +206,11 @@ if (MSVC) /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data - /we4456 # Declaration of 'identifier' hides previous local declaration - /we4457 # Declaration of 'identifier' hides function parameter - /we4458 # Declaration of 'identifier' hides class member - /we4459 # Declaration of 'identifier' hides global declaration + /we4800 # Implicit conversion from 'type' to 
bool. Possible information loss ) else() target_compile_options(audio_core PRIVATE -Werror=conversion - -Werror=ignored-qualifiers - -Werror=shadow - -Werror=unused-variable - - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable> -Wno-sign-conversion ) diff --git a/src/audio_core/audio_core.cpp b/src/audio_core/audio_core.cpp index 78e615a10..07a679c32 100644 --- a/src/audio_core/audio_core.cpp +++ b/src/audio_core/audio_core.cpp @@ -8,7 +8,7 @@ namespace AudioCore { -AudioCore::AudioCore(Core::System& system) : audio_manager{std::make_unique<AudioManager>(system)} { +AudioCore::AudioCore(Core::System& system) : audio_manager{std::make_unique<AudioManager>()} { CreateSinks(); // Must be created after the sinks adsp = std::make_unique<AudioRenderer::ADSP::ADSP>(system, *output_sink); @@ -47,22 +47,12 @@ AudioRenderer::ADSP::ADSP& AudioCore::GetADSP() { return *adsp; } -void AudioCore::PauseSinks(const bool pausing) const { - if (pausing) { - output_sink->PauseStreams(); - input_sink->PauseStreams(); - } else { - output_sink->UnpauseStreams(); - input_sink->UnpauseStreams(); - } +void AudioCore::SetNVDECActive(bool active) { + nvdec_active = active; } -u32 AudioCore::GetStreamQueue() const { - return estimated_queue.load(); -} - -void AudioCore::SetStreamQueue(u32 size) { - estimated_queue.store(size); +bool AudioCore::IsNVDECActive() const { + return nvdec_active; } } // namespace AudioCore diff --git a/src/audio_core/audio_core.h b/src/audio_core/audio_core.h index 0f7d61ee4..e33e00a3e 100644 --- a/src/audio_core/audio_core.h +++ b/src/audio_core/audio_core.h @@ -17,7 +17,7 @@ namespace AudioCore { class AudioManager; /** - * Main audio class, sotred inside the core, and holding the audio manager, all sinks, and the ADSP. + * Main audio class, stored inside the core, and holding the audio manager, all sinks, and the ADSP. */ class AudioCore { public: @@ -58,26 +58,16 @@ public: AudioRenderer::ADSP::ADSP& GetADSP(); /** - * Pause the sink. Called from the core. + * Toggle NVDEC state, used to avoid stall in playback. * - * @param pausing - Is this pause due to an actual pause, or shutdown? - * Unfortunately, shutdown also pauses streams, which can cause issues. + * @param active - Set true if nvdec is active, otherwise false. */ - void PauseSinks(bool pausing) const; + void SetNVDECActive(bool active); /** - * Get the size of the current stream queue. - * - * @return Current stream queue size. - */ - u32 GetStreamQueue() const; - - /** - * Get the size of the current stream queue. - * - * @param size - New stream size. + * Get NVDEC state. */ - void SetStreamQueue(u32 size); + bool IsNVDECActive() const; private: /** @@ -93,8 +83,8 @@ private: std::unique_ptr<Sink::Sink> input_sink; /// The ADSP in the sysmodule std::unique_ptr<AudioRenderer::ADSP::ADSP> adsp; - /// Current size of the stream queue - std::atomic<u32> estimated_queue{0}; + /// Is NVDec currently active? + bool nvdec_active{false}; }; } // namespace AudioCore diff --git a/src/audio_core/audio_event.h b/src/audio_core/audio_event.h index 82dd32dca..012d2ed70 100644 --- a/src/audio_core/audio_event.h +++ b/src/audio_core/audio_event.h @@ -14,7 +14,7 @@ namespace AudioCore { * Responsible for the input/output events, set by the stream backend when buffers are consumed, and * waited on by the audio manager. These callbacks signal the game's events to keep the audio buffer * recycling going. 
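An aside on the audio_core.cpp/audio_core.h hunks above: the stream-queue estimate is dropped in favour of a plain NVDEC-activity flag that other subsystems toggle so audio pacing can avoid stalls during video playback. A minimal usage sketch; only SetNVDECActive/IsNVDECActive come from this diff, the call sites are hypothetical.

// Hypothetical producer in the video-decode path:
system.AudioCore().SetNVDECActive(true);   // entering hardware video decode
// ... decode and present frames ...
system.AudioCore().SetNVDECActive(false);  // decode finished

// Hypothetical consumer on the audio side:
if (system.AudioCore().IsNVDECActive()) {
    // relax pacing so playback does not stall while NVDEC is busy
}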
- * In a real Switch this is not a seprate class, and exists entirely within the audio manager. + * In a real Switch this is not a separate class, and exists entirely within the audio manager. * On the Switch it's implemented more simply through a MultiWaitEventHolder, where it can * wait on multiple events at once, and the events are not needed by the backend. */ @@ -81,7 +81,7 @@ public: void ClearEvents(); private: - /// Lock, used bythe audio manager + /// Lock, used by the audio manager std::mutex event_lock; /// Array of events, one per system type (see Type), last event is used to terminate std::array<std::atomic<bool>, 4> events_signalled; diff --git a/src/audio_core/audio_in_manager.cpp b/src/audio_core/audio_in_manager.cpp index 4aadb7fd6..f39fb4002 100644 --- a/src/audio_core/audio_in_manager.cpp +++ b/src/audio_core/audio_in_manager.cpp @@ -82,7 +82,7 @@ u32 Manager::GetDeviceNames(std::vector<AudioRenderer::AudioDevice::AudioDeviceN auto input_devices{Sink::GetDeviceListForSink(Settings::values.sink_id.GetValue(), true)}; if (input_devices.size() > 1) { - names.push_back(AudioRenderer::AudioDevice::AudioDeviceName("Uac")); + names.emplace_back("Uac"); return 1; } return 0; diff --git a/src/audio_core/audio_in_manager.h b/src/audio_core/audio_in_manager.h index 75b73a0b6..8a519df99 100644 --- a/src/audio_core/audio_in_manager.h +++ b/src/audio_core/audio_in_manager.h @@ -59,9 +59,10 @@ public: /** * Get a list of audio in device names. * - * @oaram names - Output container to write names to. - * @param max_count - Maximum numebr of deivce names to write. Unused + * @param names - Output container to write names to. + * @param max_count - Maximum number of device names to write. Unused * @param filter - Should the list be filtered? Unused. + * * @return Number of names written. 
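A small idiom worth noting before the audio_manager.cpp hunk that follows: BufferEventFunc is a std::function taken by value, and the diff now moves it into the registration slot, so a caller passing an rvalue pays for no extra copy of the callable. A self-contained sketch of the pattern, with hypothetical names:

#include <array>
#include <cstddef>
#include <functional>
#include <utility>

using BufferEventFunc = std::function<void()>;

// Pass by value, then move into storage: one copy for lvalue callers,
// zero copies for rvalue callers.
void StoreCallback(std::array<BufferEventFunc, 3>& slots, std::size_t index,
                   BufferEventFunc buffer_func) {
    slots[index] = std::move(buffer_func);
}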
*/ u32 GetDeviceNames(std::vector<AudioRenderer::AudioDevice::AudioDeviceName>& names, diff --git a/src/audio_core/audio_manager.cpp b/src/audio_core/audio_manager.cpp index 2f1bba9c3..2acde668e 100644 --- a/src/audio_core/audio_manager.cpp +++ b/src/audio_core/audio_manager.cpp @@ -1,14 +1,13 @@ // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later -#include "audio_core/audio_in_manager.h" #include "audio_core/audio_manager.h" -#include "audio_core/audio_out_manager.h" #include "core/core.h" +#include "core/hle/service/audio/errors.h" namespace AudioCore { -AudioManager::AudioManager(Core::System& system_) : system{system_} { +AudioManager::AudioManager() { thread = std::jthread([this]() { ThreadFunc(); }); } @@ -27,7 +26,7 @@ Result AudioManager::SetOutManager(BufferEventFunc buffer_func) { const auto index{events.GetManagerIndex(Event::Type::AudioOutManager)}; if (buffer_events[index] == nullptr) { - buffer_events[index] = buffer_func; + buffer_events[index] = std::move(buffer_func); needs_update = true; events.SetAudioEvent(Event::Type::AudioOutManager, true); } @@ -43,7 +42,7 @@ Result AudioManager::SetInManager(BufferEventFunc buffer_func) { const auto index{events.GetManagerIndex(Event::Type::AudioInManager)}; if (buffer_events[index] == nullptr) { - buffer_events[index] = buffer_func; + buffer_events[index] = std::move(buffer_func); needs_update = true; events.SetAudioEvent(Event::Type::AudioInManager, true); } @@ -60,19 +59,21 @@ void AudioManager::ThreadFunc() { running = true; while (running) { - auto timed_out{events.Wait(l, std::chrono::seconds(2))}; + const auto timed_out{events.Wait(l, std::chrono::seconds(2))}; if (events.CheckAudioEventSet(Event::Type::Max)) { break; } for (size_t i = 0; i < buffer_events.size(); i++) { - if (events.CheckAudioEventSet(Event::Type(i)) || timed_out) { + const auto event_type = static_cast<Event::Type>(i); + + if (events.CheckAudioEventSet(event_type) || timed_out) { if (buffer_events[i]) { buffer_events[i](); } } - events.SetAudioEvent(Event::Type(i), false); + events.SetAudioEvent(event_type, false); } } } diff --git a/src/audio_core/audio_manager.h b/src/audio_core/audio_manager.h index 70316e9cb..abf077de4 100644 --- a/src/audio_core/audio_manager.h +++ b/src/audio_core/audio_manager.h @@ -10,22 +10,11 @@ #include <thread> #include "audio_core/audio_event.h" -#include "core/hle/service/audio/errors.h" -namespace Core { -class System; -} +union Result; namespace AudioCore { -namespace AudioOut { -class Manager; -} - -namespace AudioIn { -class Manager; -} - /** * The AudioManager's main purpose is to wait for buffer events for the audio in and out managers, * and call an associated callback to release buffers. @@ -43,7 +32,7 @@ class AudioManager { using BufferEventFunc = std::function<void()>; public: - explicit AudioManager(Core::System& system); + explicit AudioManager(); /** * Shutdown the audio manager. @@ -76,14 +65,10 @@ public: private: /** - * Main thread, waiting on a manager signal and calling the registered fucntion. + * Main thread, waiting on a manager signal and calling the registered function. */ void ThreadFunc(); - /// Core system - Core::System& system; - /// Have sessions started palying? - bool sessions_started{}; /// Is the main thread running? 
std::atomic<bool> running{}; /// Unused diff --git a/src/audio_core/audio_out_manager.cpp b/src/audio_core/audio_out_manager.cpp index 71d67de64..1766efde1 100644 --- a/src/audio_core/audio_out_manager.cpp +++ b/src/audio_core/audio_out_manager.cpp @@ -74,7 +74,7 @@ void Manager::BufferReleaseAndRegister() { u32 Manager::GetAudioOutDeviceNames( std::vector<AudioRenderer::AudioDevice::AudioDeviceName>& names) const { - names.push_back({"DeviceOut"}); + names.emplace_back("DeviceOut"); return 1; } diff --git a/src/audio_core/audio_render_manager.cpp b/src/audio_core/audio_render_manager.cpp index 7a846835b..7aba2b423 100644 --- a/src/audio_core/audio_render_manager.cpp +++ b/src/audio_core/audio_render_manager.cpp @@ -25,8 +25,8 @@ SystemManager& Manager::GetSystemManager() { return *system_manager; } -auto Manager::GetWorkBufferSize(const AudioRendererParameterInternal& params, u64& out_count) - -> Result { +Result Manager::GetWorkBufferSize(const AudioRendererParameterInternal& params, + u64& out_count) const { if (!CheckValidRevision(params.revision)) { return Service::Audio::ERR_INVALID_REVISION; } @@ -54,7 +54,7 @@ void Manager::ReleaseSessionId(const s32 session_id) { session_ids[--session_count] = session_id; } -u32 Manager::GetSessionCount() { +u32 Manager::GetSessionCount() const { std::scoped_lock l{session_lock}; return session_count; } diff --git a/src/audio_core/audio_render_manager.h b/src/audio_core/audio_render_manager.h index 6a508ec56..bf4837190 100644 --- a/src/audio_core/audio_render_manager.h +++ b/src/audio_core/audio_render_manager.h @@ -46,7 +46,7 @@ public: * @param out_count - Output size of the required workbuffer. * @return Result code. */ - Result GetWorkBufferSize(const AudioRendererParameterInternal& params, u64& out_count); + Result GetWorkBufferSize(const AudioRendererParameterInternal& params, u64& out_count) const; /** * Get a new session id. @@ -60,14 +60,14 @@ public: * * @return The number of active sessions. */ - u32 GetSessionCount(); + u32 GetSessionCount() const; /** * Add a renderer system to the manager. - * The system will be reguarly called to generate commands for the AudioRenderer. + * The system will be regularly called to generate commands for the AudioRenderer. * * @param system - The system to add. - * @return True if the system was sucessfully added, otherwise false. + * @return True if the system was successfully added, otherwise false. */ bool AddSystem(System& system); @@ -75,7 +75,7 @@ public: * Remove a renderer system from the manager. * * @param system - The system to remove. - * @return True if the system was sucessfully removed, otherwise false. + * @return True if the system was successfully removed, otherwise false. */ bool RemoveSystem(System& system); @@ -94,7 +94,7 @@ private: /// Number of active renderers u32 session_count{}; /// Lock for interacting with the sessions - std::mutex session_lock{}; + mutable std::mutex session_lock{}; /// Regularly generates commands from the registered systems for the AudioRenderer std::unique_ptr<SystemManager> system_manager{}; }; diff --git a/src/audio_core/device/audio_buffer.h b/src/audio_core/device/audio_buffer.h index cae7fa970..7128ef72a 100644 --- a/src/audio_core/device/audio_buffer.h +++ b/src/audio_core/device/audio_buffer.h @@ -8,6 +8,10 @@ namespace AudioCore { struct AudioBuffer { + /// Timestamp this buffer started playing. + u64 start_timestamp; + /// Timestamp this buffer should finish playing. + u64 end_timestamp; /// Timestamp this buffer completed playing. 
s64 played_timestamp; /// Game memory address for these samples. diff --git a/src/audio_core/device/audio_buffers.h b/src/audio_core/device/audio_buffers.h index 5d1979ea0..3dae1a3b7 100644 --- a/src/audio_core/device/audio_buffers.h +++ b/src/audio_core/device/audio_buffers.h @@ -36,7 +36,7 @@ public: * * @param buffer - The new buffer. */ - void AppendBuffer(AudioBuffer& buffer) { + void AppendBuffer(const AudioBuffer& buffer) { std::scoped_lock l{lock}; buffers[appended_index] = buffer; appended_count++; @@ -58,6 +58,7 @@ public: if (index < 0) { index += N; } + out_buffers.push_back(buffers[index]); registered_count++; registered_index = (registered_index + 1) % append_limit; @@ -87,10 +88,12 @@ public: /** * Release all registered buffers. * - * @param timestamp - The released timestamp for this buffer. + * @param core_timing - The CoreTiming instance + * @param session - The device session + * * @return Is the buffer was released. */ - bool ReleaseBuffers(Core::Timing::CoreTiming& core_timing, DeviceSession& session) { + bool ReleaseBuffers(const Core::Timing::CoreTiming& core_timing, const DeviceSession& session) { std::scoped_lock l{lock}; bool buffer_released{false}; while (registered_count > 0) { @@ -100,7 +103,7 @@ public: } // Check with the backend if this buffer can be released yet. - if (!session.IsBufferConsumed(buffers[index].tag)) { + if (!session.IsBufferConsumed(buffers[index])) { break; } @@ -280,6 +283,16 @@ public: return true; } + u64 GetNextTimestamp() const { + // Iterate backwards through the buffer queue, and take the most recent buffer's end + std::scoped_lock l{lock}; + auto index{appended_index - 1}; + if (index < 0) { + index += append_limit; + } + return buffers[index].end_timestamp; + } + private: /// Buffer lock mutable std::recursive_mutex lock{}; diff --git a/src/audio_core/device/device_session.cpp b/src/audio_core/device/device_session.cpp index 095fc96ce..995060414 100644 --- a/src/audio_core/device/device_session.cpp +++ b/src/audio_core/device/device_session.cpp @@ -7,11 +7,20 @@ #include "audio_core/device/device_session.h" #include "audio_core/sink/sink_stream.h" #include "core/core.h" +#include "core/core_timing.h" #include "core/memory.h" namespace AudioCore { -DeviceSession::DeviceSession(Core::System& system_) : system{system_} {} +using namespace std::literals; +constexpr auto INCREMENT_TIME{5ms}; + +DeviceSession::DeviceSession(Core::System& system_) + : system{system_}, thread_event{Core::Timing::CreateEvent( + "AudioOutSampleTick", + [this](std::uintptr_t, s64 time, std::chrono::nanoseconds) { + return ThreadFunc(); + })} {} DeviceSession::~DeviceSession() { Finalize(); @@ -50,25 +59,26 @@ void DeviceSession::Finalize() { } void DeviceSession::Start() { - stream->SetPlayedSampleCount(played_sample_count); - stream->Start(); + if (stream) { + stream->Start(); + system.CoreTiming().ScheduleLoopingEvent(std::chrono::nanoseconds::zero(), INCREMENT_TIME, + thread_event); + } } void DeviceSession::Stop() { if (stream) { - played_sample_count = stream->GetPlayedSampleCount(); stream->Stop(); + system.CoreTiming().UnscheduleEvent(thread_event, {}); } } -void DeviceSession::AppendBuffers(std::span<AudioBuffer> buffers) const { - auto& memory{system.Memory()}; - - for (size_t i = 0; i < buffers.size(); i++) { +void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) const { + for (const auto& buffer : buffers) { Sink::SinkBuffer new_buffer{ - .frames = buffers[i].size / (channel_count * sizeof(s16)), + .frames = buffer.size / 
(channel_count * sizeof(s16)), .frames_played = 0, - .tag = buffers[i].tag, + .tag = buffer.tag, .consumed = false, }; @@ -76,26 +86,22 @@ void DeviceSession::AppendBuffers(std::span<AudioBuffer> buffers) const { std::vector<s16> samples{}; stream->AppendBuffer(new_buffer, samples); } else { - std::vector<s16> samples(buffers[i].size / sizeof(s16)); - memory.ReadBlockUnsafe(buffers[i].samples, samples.data(), buffers[i].size); + std::vector<s16> samples(buffer.size / sizeof(s16)); + system.Memory().ReadBlockUnsafe(buffer.samples, samples.data(), buffer.size); stream->AppendBuffer(new_buffer, samples); } } } -void DeviceSession::ReleaseBuffer(AudioBuffer& buffer) const { +void DeviceSession::ReleaseBuffer(const AudioBuffer& buffer) const { if (type == Sink::StreamType::In) { - auto& memory{system.Memory()}; auto samples{stream->ReleaseBuffer(buffer.size / sizeof(s16))}; - memory.WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size); + system.Memory().WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size); } } -bool DeviceSession::IsBufferConsumed(u64 tag) const { - if (stream) { - return stream->IsBufferConsumed(tag); - } - return true; +bool DeviceSession::IsBufferConsumed(const AudioBuffer& buffer) const { + return played_sample_count >= buffer.end_timestamp; } void DeviceSession::SetVolume(f32 volume) const { @@ -105,10 +111,22 @@ void DeviceSession::SetVolume(f32 volume) const { } u64 DeviceSession::GetPlayedSampleCount() const { - if (stream) { - return stream->GetPlayedSampleCount(); + return played_sample_count; +} + +std::optional<std::chrono::nanoseconds> DeviceSession::ThreadFunc() { + // Add 5ms of samples at a 48K sample rate. + played_sample_count += 48'000 * INCREMENT_TIME / 1s; + if (type == Sink::StreamType::Out) { + system.AudioCore().GetAudioManager().SetEvent(Event::Type::AudioOutManager, true); + } else { + system.AudioCore().GetAudioManager().SetEvent(Event::Type::AudioInManager, true); } - return 0; + return std::nullopt; +} + +void DeviceSession::SetRingSize(u32 ring_size) { + stream->SetRingSize(ring_size); } } // namespace AudioCore diff --git a/src/audio_core/device/device_session.h b/src/audio_core/device/device_session.h index 4a031b765..74f4dc085 100644 --- a/src/audio_core/device/device_session.h +++ b/src/audio_core/device/device_session.h @@ -3,6 +3,9 @@ #pragma once +#include <chrono> +#include <memory> +#include <optional> #include <span> #include "audio_core/common/common.h" @@ -11,9 +14,13 @@ namespace Core { class System; -} +namespace Timing { +struct EventType; +} // namespace Timing +} // namespace Core namespace AudioCore { + namespace Sink { class SinkStream; struct SinkBuffer; @@ -55,22 +62,23 @@ public: * * @param buffers - The buffers to play. */ - void AppendBuffers(std::span<AudioBuffer> buffers) const; + void AppendBuffers(std::span<const AudioBuffer> buffers) const; /** * (Audio In only) Pop samples from the backend, and write them back to this buffer's address. * * @param buffer - The buffer to write to. */ - void ReleaseBuffer(AudioBuffer& buffer) const; + void ReleaseBuffer(const AudioBuffer& buffer) const; /** * Check if the buffer for the given tag has been consumed by the backend. * - * @param tag - Unqiue tag of the buffer to check. + * @param buffer - the buffer to check. + * * @return true if the buffer has been consumed, otherwise false. */ - bool IsBufferConsumed(u64 tag) const; + bool IsBufferConsumed(const AudioBuffer& buffer) const; /** * Start this device session, starting the backend stream. 
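The ThreadFunc added above replaces backend-reported progress with a CoreTiming-driven counter: every 5 ms it advances played_sample_count by however many samples a 48 kHz stream produces in that window. A self-contained sketch of just the arithmetic (standard <chrono> only):

#include <chrono>

int main() {
    using namespace std::literals;
    constexpr auto INCREMENT_TIME{5ms};
    // 48,000 samples/second * 0.005 s = 240 samples advanced per callback.
    constexpr auto samples_per_tick = 48'000 * INCREMENT_TIME / 1s;
    static_assert(samples_per_tick == 240);
}

Driving the counter from the scheduler rather than from the sink appears to be the point of the change: buffer release timing no longer depends on how quickly the host audio backend drains its queue.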
@@ -96,6 +104,16 @@ public: */ u64 GetPlayedSampleCount() const; + /* + * CoreTiming callback to increment played_sample_count over time. + */ + std::optional<std::chrono::nanoseconds> ThreadFunc(); + + /* + * Set the size of the ring buffer. + */ + void SetRingSize(u32 ring_size); + private: /// System Core::System& system; @@ -118,9 +136,13 @@ private: /// Applet resource user id of this device session u64 applet_resource_user_id{}; /// Total number of samples played by this device session - u64 played_sample_count{}; + std::atomic<u64> played_sample_count{}; + /// Event increasing the played sample count every 5ms + std::shared_ptr<Core::Timing::EventType> thread_event; /// Is this session initialised? bool initialized{}; + /// Buffer queue + std::vector<AudioBuffer> buffer_queue{}; }; } // namespace AudioCore diff --git a/src/audio_core/in/audio_in.cpp b/src/audio_core/in/audio_in.cpp index c946895d6..91ccd5ad7 100644 --- a/src/audio_core/in/audio_in.cpp +++ b/src/audio_core/in/audio_in.cpp @@ -72,7 +72,7 @@ Kernel::KReadableEvent& In::GetBufferEvent() { return event->GetReadableEvent(); } -f32 In::GetVolume() { +f32 In::GetVolume() const { std::scoped_lock l{parent_mutex}; return system.GetVolume(); } @@ -82,17 +82,17 @@ void In::SetVolume(f32 volume) { system.SetVolume(volume); } -bool In::ContainsAudioBuffer(u64 tag) { +bool In::ContainsAudioBuffer(u64 tag) const { std::scoped_lock l{parent_mutex}; return system.ContainsAudioBuffer(tag); } -u32 In::GetBufferCount() { +u32 In::GetBufferCount() const { std::scoped_lock l{parent_mutex}; return system.GetBufferCount(); } -u64 In::GetPlayedSampleCount() { +u64 In::GetPlayedSampleCount() const { std::scoped_lock l{parent_mutex}; return system.GetPlayedSampleCount(); } diff --git a/src/audio_core/in/audio_in.h b/src/audio_core/in/audio_in.h index 6253891d5..092ab7236 100644 --- a/src/audio_core/in/audio_in.h +++ b/src/audio_core/in/audio_in.h @@ -102,7 +102,7 @@ public: * * @return The current volume. */ - f32 GetVolume(); + f32 GetVolume() const; /** * Set the system volume. @@ -117,21 +117,21 @@ public: * @param tag - The tag to search for. * @return True if the buffer is in the system, otherwise false. */ - bool ContainsAudioBuffer(u64 tag); + bool ContainsAudioBuffer(u64 tag) const; /** * Get the maximum number of buffers. * * @return The maximum number of buffers. */ - u32 GetBufferCount(); + u32 GetBufferCount() const; /** * Get the total played sample count for this audio in. * * @return The played sample count. 
*/ - u64 GetPlayedSampleCount(); + u64 GetPlayedSampleCount() const; private: /// The AudioIn::Manager this audio in is registered with diff --git a/src/audio_core/in/audio_in_system.cpp b/src/audio_core/in/audio_in_system.cpp index ec5d37ed4..6b7e6715c 100644 --- a/src/audio_core/in/audio_in_system.cpp +++ b/src/audio_core/in/audio_in_system.cpp @@ -23,7 +23,7 @@ System::~System() { void System::Finalize() { Stop(); session->Finalize(); - buffer_event->GetWritableEvent().Signal(); + buffer_event->Signal(); } void System::StartSession() { @@ -34,16 +34,16 @@ size_t System::GetSessionId() const { return session_id; } -std::string_view System::GetDefaultDeviceName() { +std::string_view System::GetDefaultDeviceName() const { return "BuiltInHeadset"; } -std::string_view System::GetDefaultUacDeviceName() { +std::string_view System::GetDefaultUacDeviceName() const { return "Uac"; } Result System::IsConfigValid(const std::string_view device_name, - const AudioInParameter& in_params) { + const AudioInParameter& in_params) const { if ((device_name.size() > 0) && (device_name != GetDefaultDeviceName() && device_name != GetDefaultUacDeviceName())) { return Service::Audio::ERR_INVALID_DEVICE_NAME; @@ -93,6 +93,7 @@ Result System::Start() { std::vector<AudioBuffer> buffers_to_flush{}; buffers.RegisterBuffers(buffers_to_flush); session->AppendBuffers(buffers_to_flush); + session->SetRingSize(static_cast<u32>(buffers_to_flush.size())); return ResultSuccess; } @@ -112,8 +113,15 @@ bool System::AppendBuffer(const AudioInBuffer& buffer, const u64 tag) { return false; } - AudioBuffer new_buffer{ - .played_timestamp = 0, .samples = buffer.samples, .tag = tag, .size = buffer.size}; + const auto timestamp{buffers.GetNextTimestamp()}; + const AudioBuffer new_buffer{ + .start_timestamp = timestamp, + .end_timestamp = timestamp + buffer.size / (channel_count * sizeof(s16)), + .played_timestamp = 0, + .samples = buffer.samples, + .tag = tag, + .size = buffer.size, + }; buffers.AppendBuffer(new_buffer); RegisterBuffers(); @@ -134,7 +142,7 @@ void System::ReleaseBuffers() { if (signal) { // Signal if any buffer was released, or if none are registered, we need more. - buffer_event->GetWritableEvent().Signal(); + buffer_event->Signal(); } } @@ -151,7 +159,7 @@ bool System::FlushAudioInBuffers() { buffers.FlushBuffers(buffers_released); if (buffers_released > 0) { - buffer_event->GetWritableEvent().Signal(); + buffer_event->Signal(); } return true; } @@ -194,11 +202,11 @@ void System::SetVolume(const f32 volume_) { session->SetVolume(volume_); } -bool System::ContainsAudioBuffer(const u64 tag) { +bool System::ContainsAudioBuffer(const u64 tag) const { return buffers.ContainsBuffer(tag); } -u32 System::GetBufferCount() { +u32 System::GetBufferCount() const { return buffers.GetAppendedRegisteredCount(); } diff --git a/src/audio_core/in/audio_in_system.h b/src/audio_core/in/audio_in_system.h index 165e35d83..b9dc0e60f 100644 --- a/src/audio_core/in/audio_in_system.h +++ b/src/audio_core/in/audio_in_system.h @@ -68,7 +68,7 @@ public: * * @return The default audio input device name. */ - std::string_view GetDefaultDeviceName(); + std::string_view GetDefaultDeviceName() const; /** * Get the default USB audio input device name. @@ -77,7 +77,7 @@ public: * * @return The default USB audio input device name. */ - std::string_view GetDefaultUacDeviceName(); + std::string_view GetDefaultUacDeviceName() const; /** * Is the given initialize config valid? 
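With the AppendBuffer hunk above, every queued buffer now carries a sample-denominated lifetime: it starts where the newest queued buffer ends (GetNextTimestamp) and spans size / (channel_count * sizeof(s16)) sample frames of 16-bit PCM. Restated as a sketch, names taken from the hunk:

// frames = bytes / (channels * bytes-per-sample); timestamps are in samples.
const auto start{buffers.GetNextTimestamp()};
const auto frames{buffer.size / (channel_count * sizeof(s16))};
const AudioBuffer new_buffer{
    .start_timestamp = start,
    .end_timestamp = start + frames,
    .played_timestamp = 0,
    .samples = buffer.samples,
    .tag = tag,
    .size = buffer.size,
};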
@@ -86,7 +86,7 @@ public: * @param in_params - Input parameters, see AudioInParameter. * @return Result code. */ - Result IsConfigValid(std::string_view device_name, const AudioInParameter& in_params); + Result IsConfigValid(std::string_view device_name, const AudioInParameter& in_params) const; /** * Initialize this system. @@ -208,7 +208,7 @@ public: /** * Set this system's current volume. * - * @param The new volume. + * @param volume The new volume. */ void SetVolume(f32 volume); @@ -218,14 +218,14 @@ public: * @param tag - Unique tag to search for. * @return True if the buffer is in the system, otherwise false. */ - bool ContainsAudioBuffer(u64 tag); + bool ContainsAudioBuffer(u64 tag) const; /** * Get the maximum number of usable buffers (default 32). * * @return The number of buffers. */ - u32 GetBufferCount(); + u32 GetBufferCount() const; /** * Get the total number of samples played by this system. diff --git a/src/audio_core/out/audio_out.cpp b/src/audio_core/out/audio_out.cpp index 9a8d8a742..d3ee4f0eb 100644 --- a/src/audio_core/out/audio_out.cpp +++ b/src/audio_core/out/audio_out.cpp @@ -72,7 +72,7 @@ Kernel::KReadableEvent& Out::GetBufferEvent() { return event->GetReadableEvent(); } -f32 Out::GetVolume() { +f32 Out::GetVolume() const { std::scoped_lock l{parent_mutex}; return system.GetVolume(); } @@ -82,17 +82,17 @@ void Out::SetVolume(const f32 volume) { system.SetVolume(volume); } -bool Out::ContainsAudioBuffer(const u64 tag) { +bool Out::ContainsAudioBuffer(const u64 tag) const { std::scoped_lock l{parent_mutex}; return system.ContainsAudioBuffer(tag); } -u32 Out::GetBufferCount() { +u32 Out::GetBufferCount() const { std::scoped_lock l{parent_mutex}; return system.GetBufferCount(); } -u64 Out::GetPlayedSampleCount() { +u64 Out::GetPlayedSampleCount() const { std::scoped_lock l{parent_mutex}; return system.GetPlayedSampleCount(); } diff --git a/src/audio_core/out/audio_out.h b/src/audio_core/out/audio_out.h index f6b921645..946f345c6 100644 --- a/src/audio_core/out/audio_out.h +++ b/src/audio_core/out/audio_out.h @@ -102,7 +102,7 @@ public: * * @return The current volume. */ - f32 GetVolume(); + f32 GetVolume() const; /** * Set the system volume. @@ -117,21 +117,21 @@ public: * @param tag - The tag to search for. * @return True if the buffer is in the system, otherwise false. */ - bool ContainsAudioBuffer(u64 tag); + bool ContainsAudioBuffer(u64 tag) const; /** * Get the maximum number of buffers. * * @return The maximum number of buffers. */ - u32 GetBufferCount(); + u32 GetBufferCount() const; /** * Get the total played sample count for this audio out. * * @return The played sample count. 
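Tying the last two sketches together: with buffers stamped in sample units and played_sample_count advancing 240 samples per 5 ms tick, the consumption test from the device_session.cpp hunk earlier reduces to a single comparison.

// A buffer counts as consumed once the timer-driven counter passes its end.
bool DeviceSession::IsBufferConsumed(const AudioBuffer& buffer) const {
    return played_sample_count >= buffer.end_timestamp;
}

The backend's real drain rate no longer gates release; the new SetRingSize calls in the Start() hunks appear to be what bounds how much queued audio the sink holds instead.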
*/ - u64 GetPlayedSampleCount(); + u64 GetPlayedSampleCount() const; private: /// The AudioOut::Manager this audio out is registered with diff --git a/src/audio_core/out/audio_out_system.cpp b/src/audio_core/out/audio_out_system.cpp index 35afddf06..48a801923 100644 --- a/src/audio_core/out/audio_out_system.cpp +++ b/src/audio_core/out/audio_out_system.cpp @@ -24,14 +24,15 @@ System::~System() { void System::Finalize() { Stop(); session->Finalize(); - buffer_event->GetWritableEvent().Signal(); + buffer_event->Signal(); } -std::string_view System::GetDefaultOutputDeviceName() { +std::string_view System::GetDefaultOutputDeviceName() const { return "DeviceOut"; } -Result System::IsConfigValid(std::string_view device_name, const AudioOutParameter& in_params) { +Result System::IsConfigValid(std::string_view device_name, + const AudioOutParameter& in_params) const { if ((device_name.size() > 0) && (device_name != GetDefaultOutputDeviceName())) { return Service::Audio::ERR_INVALID_DEVICE_NAME; } @@ -92,6 +93,7 @@ Result System::Start() { std::vector<AudioBuffer> buffers_to_flush{}; buffers.RegisterBuffers(buffers_to_flush); session->AppendBuffers(buffers_to_flush); + session->SetRingSize(static_cast<u32>(buffers_to_flush.size())); return ResultSuccess; } @@ -111,8 +113,15 @@ bool System::AppendBuffer(const AudioOutBuffer& buffer, u64 tag) { return false; } - AudioBuffer new_buffer{ - .played_timestamp = 0, .samples = buffer.samples, .tag = tag, .size = buffer.size}; + const auto timestamp{buffers.GetNextTimestamp()}; + const AudioBuffer new_buffer{ + .start_timestamp = timestamp, + .end_timestamp = timestamp + buffer.size / (channel_count * sizeof(s16)), + .played_timestamp = 0, + .samples = buffer.samples, + .tag = tag, + .size = buffer.size, + }; buffers.AppendBuffer(new_buffer); RegisterBuffers(); @@ -132,7 +141,7 @@ void System::ReleaseBuffers() { bool signal{buffers.ReleaseBuffers(system.CoreTiming(), *session)}; if (signal) { // Signal if any buffer was released, or if none are registered, we need more. - buffer_event->GetWritableEvent().Signal(); + buffer_event->Signal(); } } @@ -149,7 +158,7 @@ bool System::FlushAudioOutBuffers() { buffers.FlushBuffers(buffers_released); if (buffers_released > 0) { - buffer_event->GetWritableEvent().Signal(); + buffer_event->Signal(); } return true; } @@ -192,11 +201,11 @@ void System::SetVolume(const f32 volume_) { session->SetVolume(volume_); } -bool System::ContainsAudioBuffer(const u64 tag) { +bool System::ContainsAudioBuffer(const u64 tag) const { return buffers.ContainsBuffer(tag); } -u32 System::GetBufferCount() { +u32 System::GetBufferCount() const { return buffers.GetAppendedRegisteredCount(); } diff --git a/src/audio_core/out/audio_out_system.h b/src/audio_core/out/audio_out_system.h index 4ca2f3417..0817b2f37 100644 --- a/src/audio_core/out/audio_out_system.h +++ b/src/audio_core/out/audio_out_system.h @@ -68,7 +68,7 @@ public: * * @return The default audio output device name. */ - std::string_view GetDefaultOutputDeviceName(); + std::string_view GetDefaultOutputDeviceName() const; /** * Is the given initialize config valid? @@ -77,7 +77,7 @@ public: * @param in_params - Input parameters, see AudioOutParameter. * @return Result code. */ - Result IsConfigValid(std::string_view device_name, const AudioOutParameter& in_params); + Result IsConfigValid(std::string_view device_name, const AudioOutParameter& in_params) const; /** * Initialize this system. @@ -199,7 +199,7 @@ public: /** * Set this system's current volume. * - * @param The new volume. 
+ * @param volume The new volume. */ void SetVolume(f32 volume); @@ -209,14 +209,14 @@ public: * @param tag - Unique tag to search for. * @return True if the buffer is in the system, otherwise false. */ - bool ContainsAudioBuffer(u64 tag); + bool ContainsAudioBuffer(u64 tag) const; /** * Get the maximum number of usable buffers (default 32). * * @return The number of buffers. */ - u32 GetBufferCount(); + u32 GetBufferCount() const; /** * Get the total number of samples played by this system. diff --git a/src/audio_core/renderer/adsp/adsp.cpp b/src/audio_core/renderer/adsp/adsp.cpp index e05a22d86..a28395663 100644 --- a/src/audio_core/renderer/adsp/adsp.cpp +++ b/src/audio_core/renderer/adsp/adsp.cpp @@ -50,7 +50,7 @@ u32 ADSP::GetRemainCommandCount(const u32 session_id) const { return render_mailbox.GetRemainCommandCount(session_id); } -void ADSP::SendCommandBuffer(const u32 session_id, CommandBuffer& command_buffer) { +void ADSP::SendCommandBuffer(const u32 session_id, const CommandBuffer& command_buffer) { render_mailbox.SetCommandBuffer(session_id, command_buffer); } diff --git a/src/audio_core/renderer/adsp/adsp.h b/src/audio_core/renderer/adsp/adsp.h index 4dfcef4a5..f7a2f25e4 100644 --- a/src/audio_core/renderer/adsp/adsp.h +++ b/src/audio_core/renderer/adsp/adsp.h @@ -63,8 +63,6 @@ public: /** * Stop the ADSP. - * - * @return True if started or already running, otherwise false. */ void Stop(); @@ -133,7 +131,7 @@ public: * @param session_id - The session id to check (0 or 1). * @param command_buffer - The command buffer to process. */ - void SendCommandBuffer(u32 session_id, CommandBuffer& command_buffer); + void SendCommandBuffer(u32 session_id, const CommandBuffer& command_buffer); /** * Clear the command buffers (does not clear the time taken or the remaining command count) diff --git a/src/audio_core/renderer/adsp/audio_renderer.cpp b/src/audio_core/renderer/adsp/audio_renderer.cpp index 3967ccfe6..d982ef630 100644 --- a/src/audio_core/renderer/adsp/audio_renderer.cpp +++ b/src/audio_core/renderer/adsp/audio_renderer.cpp @@ -47,11 +47,11 @@ RenderMessage AudioRenderer_Mailbox::ADSPWaitMessage() { return msg; } -CommandBuffer& AudioRenderer_Mailbox::GetCommandBuffer(const s32 session_id) { +CommandBuffer& AudioRenderer_Mailbox::GetCommandBuffer(const u32 session_id) { return command_buffers[session_id]; } -void AudioRenderer_Mailbox::SetCommandBuffer(const u32 session_id, CommandBuffer& buffer) { +void AudioRenderer_Mailbox::SetCommandBuffer(const u32 session_id, const CommandBuffer& buffer) { command_buffers[session_id] = buffer; } @@ -106,9 +106,6 @@ void AudioRenderer::Start(AudioRenderer_Mailbox* mailbox_) { mailbox = mailbox_; thread = std::thread(&AudioRenderer::ThreadFunc, this); - for (auto& stream : streams) { - stream->Start(); - } running = true; } @@ -130,11 +127,12 @@ void AudioRenderer::CreateSinkStreams() { std::string name{fmt::format("ADSP_RenderStream-{}", i)}; streams[i] = sink.AcquireSinkStream(system, channels, name, ::AudioCore::Sink::StreamType::Render); + streams[i]->SetRingSize(4); } } void AudioRenderer::ThreadFunc() { - constexpr char name[]{"yuzu:AudioRenderer"}; + constexpr char name[]{"AudioRenderer"}; MicroProfileOnThreadCreate(name); Common::SetCurrentThreadName(name); Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical); @@ -198,11 +196,6 @@ void AudioRenderer::ThreadFunc() { command_list_processor.Process(index) - start_time; } - if (index == 0) { - auto stream{command_list_processor.GetOutputSinkStream()}; - 
system.AudioCore().SetStreamQueue(stream->GetQueueSize()); - } - const auto end_time{system.CoreTiming().GetClockTicks()}; command_buffer.remaining_command_count = diff --git a/src/audio_core/renderer/adsp/audio_renderer.h b/src/audio_core/renderer/adsp/audio_renderer.h index b6ced9d2b..151f38c1b 100644 --- a/src/audio_core/renderer/adsp/audio_renderer.h +++ b/src/audio_core/renderer/adsp/audio_renderer.h @@ -52,7 +52,7 @@ public: /** * Send a message from the host to the AudioRenderer. * - * @param message_ - The message to send to the AudioRenderer. + * @param message - The message to send to the AudioRenderer. */ void HostSendMessage(RenderMessage message); @@ -66,7 +66,7 @@ public: /** * Send a message from the AudioRenderer to the host. * - * @param message_ - The message to send to the host. + * @param message - The message to send to the host. */ void ADSPSendMessage(RenderMessage message); @@ -83,7 +83,7 @@ public: * @param session_id - The session id to get (0 or 1). * @return The command buffer. */ - CommandBuffer& GetCommandBuffer(s32 session_id); + CommandBuffer& GetCommandBuffer(u32 session_id); /** * Set the command buffer with the given session id (0 or 1). @@ -91,7 +91,7 @@ public: * @param session_id - The session id to get (0 or 1). * @param buffer - The command buffer to set. */ - void SetCommandBuffer(u32 session_id, CommandBuffer& buffer); + void SetCommandBuffer(u32 session_id, const CommandBuffer& buffer); /** * Get the total render time taken for the last command lists sent. @@ -163,7 +163,7 @@ public: /** * Start the AudioRenderer. * - * @param The mailbox to use for this session. + * @param mailbox The mailbox to use for this session. */ void Start(AudioRenderer_Mailbox* mailbox); diff --git a/src/audio_core/renderer/adsp/command_list_processor.h b/src/audio_core/renderer/adsp/command_list_processor.h index 3f99173e3..d78269e1d 100644 --- a/src/audio_core/renderer/adsp/command_list_processor.h +++ b/src/audio_core/renderer/adsp/command_list_processor.h @@ -33,10 +33,10 @@ public: /** * Initialize the processor. * - * @param system_ - The core system. - * @param buffer - The command buffer to process. - * @param size - The size of the buffer. - * @param stream_ - The stream to be used for sending the samples. + * @param system - The core system. + * @param buffer - The command buffer to process. + * @param size - The size of the buffer. + * @param stream - The stream to be used for sending the samples. */ void Initialize(Core::System& system, CpuAddr buffer, u64 size, Sink::SinkStream* stream); @@ -72,7 +72,8 @@ public: /** * Process the command list. * - * @param index - Index of the current command list. + * @param session_id - Session ID for the commands being processed. + * * @return The time taken to process. 
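One behavioural detail from the audio_renderer.cpp hunk above: render sink streams are no longer started eagerly in Start(); each gets a fixed four-deep ring (SetRingSize(4)), and the device-sink command later in this diff restarts a paused stream when it appends data. A sketch of that restart, following the sink/device.cpp hunk further down:

stream->AppendBuffer(out_buffer, samples);
if (stream->IsPaused()) {
    stream->Start(); // the stream starts on first real data, not at session start
}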
*/ u64 Process(u32 session_id); @@ -89,7 +90,7 @@ public: u8* commands{}; /// The command buffer size u64 commands_buffer_size{}; - /// The maximum processing time alloted + /// The maximum processing time allotted u64 max_process_time{}; /// The number of commands in the buffer u32 command_count{}; diff --git a/src/audio_core/renderer/audio_device.cpp b/src/audio_core/renderer/audio_device.cpp index d5886e55e..0d9d8f6ce 100644 --- a/src/audio_core/renderer/audio_device.cpp +++ b/src/audio_core/renderer/audio_device.cpp @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include <array> +#include <span> + #include "audio_core/audio_core.h" #include "audio_core/common/feature_support.h" #include "audio_core/renderer/audio_device.h" @@ -9,14 +12,33 @@ namespace AudioCore::AudioRenderer { +constexpr std::array usb_device_names{ + AudioDevice::AudioDeviceName{"AudioStereoJackOutput"}, + AudioDevice::AudioDeviceName{"AudioBuiltInSpeakerOutput"}, + AudioDevice::AudioDeviceName{"AudioTvOutput"}, + AudioDevice::AudioDeviceName{"AudioUsbDeviceOutput"}, +}; + +constexpr std::array device_names{ + AudioDevice::AudioDeviceName{"AudioStereoJackOutput"}, + AudioDevice::AudioDeviceName{"AudioBuiltInSpeakerOutput"}, + AudioDevice::AudioDeviceName{"AudioTvOutput"}, +}; + +constexpr std::array output_device_names{ + AudioDevice::AudioDeviceName{"AudioBuiltInSpeakerOutput"}, + AudioDevice::AudioDeviceName{"AudioTvOutput"}, + AudioDevice::AudioDeviceName{"AudioExternalOutput"}, +}; + AudioDevice::AudioDevice(Core::System& system, const u64 applet_resource_user_id_, const u32 revision) : output_sink{system.AudioCore().GetOutputSink()}, applet_resource_user_id{applet_resource_user_id_}, user_revision{revision} {} u32 AudioDevice::ListAudioDeviceName(std::vector<AudioDeviceName>& out_buffer, - const size_t max_count) { - std::span<AudioDeviceName> names{}; + const size_t max_count) const { + std::span<const AudioDeviceName> names{}; if (CheckFeatureSupported(SupportTags::AudioUsbDeviceOutput, user_revision)) { names = usb_device_names; @@ -24,7 +46,7 @@ u32 AudioDevice::ListAudioDeviceName(std::vector<AudioDeviceName>& out_buffer, names = device_names; } - u32 out_count{static_cast<u32>(std::min(max_count, names.size()))}; + const u32 out_count{static_cast<u32>(std::min(max_count, names.size()))}; for (u32 i = 0; i < out_count; i++) { out_buffer.push_back(names[i]); } @@ -32,8 +54,8 @@ u32 AudioDevice::ListAudioDeviceName(std::vector<AudioDeviceName>& out_buffer, } u32 AudioDevice::ListAudioOutputDeviceName(std::vector<AudioDeviceName>& out_buffer, - const size_t max_count) { - u32 out_count{static_cast<u32>(std::min(max_count, output_device_names.size()))}; + const size_t max_count) const { + const u32 out_count{static_cast<u32>(std::min(max_count, output_device_names.size()))}; for (u32 i = 0; i < out_count; i++) { out_buffer.push_back(output_device_names[i]); @@ -45,7 +67,7 @@ void AudioDevice::SetDeviceVolumes(const f32 volume) { output_sink.SetDeviceVolume(volume); } -f32 AudioDevice::GetDeviceVolume([[maybe_unused]] std::string_view name) { +f32 AudioDevice::GetDeviceVolume([[maybe_unused]] std::string_view name) const { return output_sink.GetDeviceVolume(); } diff --git a/src/audio_core/renderer/audio_device.h b/src/audio_core/renderer/audio_device.h index 1f449f261..dd6be70ee 100644 --- a/src/audio_core/renderer/audio_device.h +++ b/src/audio_core/renderer/audio_device.h @@ -3,7 +3,7 @@ #pragma once -#include <span> +#include 
<string_view> #include "audio_core/audio_render_manager.h" @@ -23,21 +23,13 @@ namespace AudioRenderer { class AudioDevice { public: struct AudioDeviceName { - std::array<char, 0x100> name; + std::array<char, 0x100> name{}; - AudioDeviceName(const char* name_) { - std::strncpy(name.data(), name_, name.size()); + constexpr AudioDeviceName(std::string_view name_) { + name_.copy(name.data(), name.size() - 1); } }; - std::array<AudioDeviceName, 4> usb_device_names{"AudioStereoJackOutput", - "AudioBuiltInSpeakerOutput", "AudioTvOutput", - "AudioUsbDeviceOutput"}; - std::array<AudioDeviceName, 3> device_names{"AudioStereoJackOutput", - "AudioBuiltInSpeakerOutput", "AudioTvOutput"}; - std::array<AudioDeviceName, 3> output_device_names{"AudioBuiltInSpeakerOutput", "AudioTvOutput", - "AudioExternalOutput"}; - explicit AudioDevice(Core::System& system, u64 applet_resource_user_id, u32 revision); /** @@ -47,7 +39,7 @@ public: * @param max_count - Maximum number of devices to write (count of out_buffer). * @return Number of device names written. */ - u32 ListAudioDeviceName(std::vector<AudioDeviceName>& out_buffer, size_t max_count); + u32 ListAudioDeviceName(std::vector<AudioDeviceName>& out_buffer, size_t max_count) const; /** * Get a list of the available output devices. @@ -57,7 +49,7 @@ public: * @param max_count - Maximum number of devices to write (count of out_buffer). * @return Number of device names written. */ - u32 ListAudioOutputDeviceName(std::vector<AudioDeviceName>& out_buffer, size_t max_count); + u32 ListAudioOutputDeviceName(std::vector<AudioDeviceName>& out_buffer, size_t max_count) const; /** * Set the volume of all streams in the backend sink. @@ -73,7 +65,7 @@ public: * @param name - Name of the device to check. Unused. * @return Volume of the device. 
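The AudioDeviceName rewrite above also closes a latent termination hole: std::strncpy(name.data(), name_, name.size()) leaves the array unterminated when the source is 0x100 characters or longer, whereas std::string_view::copy into a value-initialized array writes at most 0xFF bytes, so the final byte stays zero. A self-contained sketch (the constexpr check assumes C++20):

#include <array>
#include <string_view>

struct AudioDeviceName {
    std::array<char, 0x100> name{}; // value-initialized: all zeros up front
    constexpr AudioDeviceName(std::string_view name_) {
        // Copies min(name_.size(), 0xFF) characters; index 0xFF is never written.
        name_.copy(name.data(), name.size() - 1);
    }
};

static_assert(AudioDeviceName{"AudioTvOutput"}.name[13] == '\0');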
*/ - f32 GetDeviceVolume(std::string_view name); + f32 GetDeviceVolume(std::string_view name) const; private: /// Backend output sink for the device diff --git a/src/audio_core/renderer/behavior/behavior_info.cpp b/src/audio_core/renderer/behavior/behavior_info.cpp index c5d4d66d8..3d2a91312 100644 --- a/src/audio_core/renderer/behavior/behavior_info.cpp +++ b/src/audio_core/renderer/behavior/behavior_info.cpp @@ -34,7 +34,7 @@ void BehaviorInfo::ClearError() { error_count = 0; } -void BehaviorInfo::AppendError(ErrorInfo& error) { +void BehaviorInfo::AppendError(const ErrorInfo& error) { LOG_ERROR(Service_Audio, "Error during RequestUpdate, reporting code {:04X} address {:08X}", error.error_code.raw, error.address); if (error_count < MaxErrors) { @@ -42,14 +42,16 @@ void BehaviorInfo::AppendError(ErrorInfo& error) { } } -void BehaviorInfo::CopyErrorInfo(std::span<ErrorInfo> out_errors, u32& out_count) { - auto error_count_{std::min(error_count, MaxErrors)}; - std::memset(out_errors.data(), 0, MaxErrors * sizeof(ErrorInfo)); +void BehaviorInfo::CopyErrorInfo(std::span<ErrorInfo> out_errors, u32& out_count) const { + out_count = std::min(error_count, MaxErrors); - for (size_t i = 0; i < error_count_; i++) { - out_errors[i] = errors[i]; + for (size_t i = 0; i < MaxErrors; i++) { + if (i < out_count) { + out_errors[i] = errors[i]; + } else { + out_errors[i] = {}; + } } - out_count = error_count_; } void BehaviorInfo::UpdateFlags(const Flags flags_) { diff --git a/src/audio_core/renderer/behavior/behavior_info.h b/src/audio_core/renderer/behavior/behavior_info.h index 7333c297f..15c948344 100644 --- a/src/audio_core/renderer/behavior/behavior_info.h +++ b/src/audio_core/renderer/behavior/behavior_info.h @@ -94,7 +94,7 @@ public: * * @param error - The new error. */ - void AppendError(ErrorInfo& error); + void AppendError(const ErrorInfo& error); /** * Copy errors to the given output container. @@ -102,7 +102,7 @@ public: * @param out_errors - Output container to receive the errors. * @param out_count - The number of errors written. */ - void CopyErrorInfo(std::span<ErrorInfo> out_errors, u32& out_count); + void CopyErrorInfo(std::span<ErrorInfo> out_errors, u32& out_count) const; /** * Update the behaviour flags. diff --git a/src/audio_core/renderer/behavior/info_updater.cpp b/src/audio_core/renderer/behavior/info_updater.cpp index 06a37e1a6..574cf0982 100644 --- a/src/audio_core/renderer/behavior/info_updater.cpp +++ b/src/audio_core/renderer/behavior/info_updater.cpp @@ -91,7 +91,7 @@ Result InfoUpdater::UpdateVoices(VoiceContext& voice_context, voice_info.Initialize(); for (u32 channel = 0; channel < in_param.channel_count; channel++) { - std::memset(voice_states[channel], 0, sizeof(VoiceState)); + *voice_states[channel] = {}; } } @@ -485,7 +485,7 @@ Result InfoUpdater::UpdateBehaviorInfo(BehaviorInfo& behaviour_) { return ResultSuccess; } -Result InfoUpdater::UpdateErrorInfo(BehaviorInfo& behaviour_) { +Result InfoUpdater::UpdateErrorInfo(const BehaviorInfo& behaviour_) { auto out_params{reinterpret_cast<BehaviorInfo::OutStatus*>(output)}; behaviour_.CopyErrorInfo(out_params->errors, out_params->error_count); diff --git a/src/audio_core/renderer/behavior/info_updater.h b/src/audio_core/renderer/behavior/info_updater.h index f0b445d9c..c817d8d8d 100644 --- a/src/audio_core/renderer/behavior/info_updater.h +++ b/src/audio_core/renderer/behavior/info_updater.h @@ -130,7 +130,7 @@ public: * @param behaviour - Behaviour to update. * @return Result code. 
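On the CopyErrorInfo rewrite above: the old version memset the whole output then copied the valid entries; the new one makes a single pass that either copies a real error or value-initializes the slot. A compressed restatement of the body (errors, error_count, MaxErrors as in the hunk):

out_count = std::min(error_count, MaxErrors);
for (size_t i = 0; i < MaxErrors; i++) {
    out_errors[i] = (i < out_count) ? errors[i] : ErrorInfo{};
}

Same observable result, but it avoids std::memset over a struct type, in line with the *state = {} cleanups elsewhere in this diff.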
*/ - Result UpdateErrorInfo(BehaviorInfo& behaviour); + Result UpdateErrorInfo(const BehaviorInfo& behaviour); /** * Update splitter. diff --git a/src/audio_core/renderer/command/command_buffer.h b/src/audio_core/renderer/command/command_buffer.h index 496b0e50a..162170846 100644 --- a/src/audio_core/renderer/command/command_buffer.h +++ b/src/audio_core/renderer/command/command_buffer.h @@ -191,6 +191,7 @@ public: * @param volume - Current mix volume used for calculating the ramp. * @param prev_volume - Previous mix volume, used for calculating the ramp, * also applied to the input. + * @param prev_samples - Previous sample buffer. Used for depopping. * @param precision - Number of decimal bits for fixed point operations. */ void GenerateMixRampCommand(s32 node_id, s16 buffer_count, s16 input_index, s16 output_index, @@ -208,6 +209,7 @@ public: * @param volumes - Current mix volumes used for calculating the ramp. * @param prev_volumes - Previous mix volumes, used for calculating the ramp, * also applied to the input. + * @param prev_samples - Previous sample buffer. Used for depopping. * @param precision - Number of decimal bits for fixed point operations. */ void GenerateMixRampGroupedCommand(s32 node_id, s16 buffer_count, s16 input_index, @@ -297,11 +299,11 @@ public: /** * Generate a device sink command, adding it to the command list. * - * @param node_id - Node id of the voice this command is generated for. - * @param buffer_offset - Base mix buffer offset to use. - * @param sink_info - The sink_info to generate this command from. - * @session_id - System session id this command is generated from. - * @samples_buffer - The buffer to be sent to the sink if upsampling is not used. + * @param node_id - Node id of the voice this command is generated for. + * @param buffer_offset - Base mix buffer offset to use. + * @param sink_info - The sink_info to generate this command from. + * @param session_id - System session id this command is generated from. + * @param samples_buffer - The buffer to be sent to the sink if upsampling is not used. */ void GenerateDeviceSinkCommand(s32 node_id, s16 buffer_offset, SinkInfoBase& sink_info, u32 session_id, std::span<s32> samples_buffer); diff --git a/src/audio_core/renderer/command/command_generator.h b/src/audio_core/renderer/command/command_generator.h index d80d9b0d8..b3cd7b408 100644 --- a/src/audio_core/renderer/command/command_generator.h +++ b/src/audio_core/renderer/command/command_generator.h @@ -197,9 +197,9 @@ public: /** * Generate an I3DL2 reverb effect command. * - * @param buffer_offset - Base mix buffer offset to use. - * @param effect_info_base - I3DL2Reverb effect info. - * @param node_id - Node id of the mix this command is generated for. + * @param buffer_offset - Base mix buffer offset to use. + * @param effect_info - I3DL2Reverb effect info. + * @param node_id - Node id of the mix this command is generated for. */ void GenerateI3dl2ReverbEffectCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id); @@ -207,18 +207,18 @@ public: /** * Generate an aux effect command. * - * @param buffer_offset - Base mix buffer offset to use. - * @param effect_info_base - Aux effect info. - * @param node_id - Node id of the mix this command is generated for. + * @param buffer_offset - Base mix buffer offset to use. + * @param effect_info - Aux effect info. + * @param node_id - Node id of the mix this command is generated for. 
*/ void GenerateAuxCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id); /** * Generate a biquad filter effect command. * - * @param buffer_offset - Base mix buffer offset to use. - * @param effect_info_base - Aux effect info. - * @param node_id - Node id of the mix this command is generated for. + * @param buffer_offset - Base mix buffer offset to use. + * @param effect_info - Aux effect info. + * @param node_id - Node id of the mix this command is generated for. */ void GenerateBiquadFilterEffectCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id); @@ -226,10 +226,10 @@ public: /** * Generate a light limiter effect command. * - * @param buffer_offset - Base mix buffer offset to use. - * @param effect_info_base - Limiter effect info. - * @param node_id - Node id of the mix this command is generated for. - * @param effect_index - Index for the statistics state. + * @param buffer_offset - Base mix buffer offset to use. + * @param effect_info - Limiter effect info. + * @param node_id - Node id of the mix this command is generated for. + * @param effect_index - Index for the statistics state. */ void GenerateLightLimiterEffectCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id, u32 effect_index); @@ -238,21 +238,20 @@ public: * Generate a capture effect command. * Writes a mix buffer back to game memory. * - * @param buffer_offset - Base mix buffer offset to use. - * @param effect_info_base - Capture effect info. - * @param node_id - Node id of the mix this command is generated for. + * @param buffer_offset - Base mix buffer offset to use. + * @param effect_info - Capture effect info. + * @param node_id - Node id of the mix this command is generated for. */ void GenerateCaptureCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id); /** * Generate a compressor effect command. * - * @param buffer_offset - Base mix buffer offset to use. - * @param effect_info_base - Compressor effect info. - * @param node_id - Node id of the mix this command is generated for. + * @param buffer_offset - Base mix buffer offset to use. + * @param effect_info - Compressor effect info. + * @param node_id - Node id of the mix this command is generated for. */ - void GenerateCompressorCommand(const s16 buffer_offset, EffectInfoBase& effect_info, - const s32 node_id); + void GenerateCompressorCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id); /** * Generate all effect commands for a mix. @@ -318,8 +317,9 @@ public: * Generate a performance command. * Used to report performance metrics of the AudioRenderer back to the game. * - * @param buffer_offset - Base mix buffer offset to use. - * @param sink_info - Sink info to generate the commands from. 
+ * @param node_id - Node ID of the mix this command is generated for + * @param state - Output state of the generated performance command + * @param entry_addresses - Addresses to be written */ void GeneratePerformanceCommand(s32 node_id, PerformanceState state, const PerformanceEntryAddresses& entry_addresses); diff --git a/src/audio_core/renderer/command/effect/biquad_filter.cpp b/src/audio_core/renderer/command/effect/biquad_filter.cpp index 1baae74fd..edb30ce72 100644 --- a/src/audio_core/renderer/command/effect/biquad_filter.cpp +++ b/src/audio_core/renderer/command/effect/biquad_filter.cpp @@ -94,7 +94,7 @@ void BiquadFilterCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor void BiquadFilterCommand::Process(const ADSP::CommandListProcessor& processor) { auto state_{reinterpret_cast<VoiceState::BiquadFilterState*>(state)}; if (needs_init) { - std::memset(state_, 0, sizeof(VoiceState::BiquadFilterState)); + *state_ = {}; } auto input_buffer{ diff --git a/src/audio_core/renderer/command/effect/compressor.cpp b/src/audio_core/renderer/command/effect/compressor.cpp index 2ebc140f1..7229618e8 100644 --- a/src/audio_core/renderer/command/effect/compressor.cpp +++ b/src/audio_core/renderer/command/effect/compressor.cpp @@ -11,7 +11,7 @@ namespace AudioCore::AudioRenderer { -static void SetCompressorEffectParameter(CompressorInfo::ParameterVersion2& params, +static void SetCompressorEffectParameter(const CompressorInfo::ParameterVersion2& params, CompressorInfo::State& state) { const auto ratio{1.0f / params.compressor_ratio}; auto makeup_gain{0.0f}; @@ -31,9 +31,9 @@ static void SetCompressorEffectParameter(CompressorInfo::ParameterVersion2& para state.unk_20 = c; } -static void InitializeCompressorEffect(CompressorInfo::ParameterVersion2& params, +static void InitializeCompressorEffect(const CompressorInfo::ParameterVersion2& params, CompressorInfo::State& state) { - std::memset(&state, 0, sizeof(CompressorInfo::State)); + state = {}; state.unk_00 = 0; state.unk_04 = 1.0f; @@ -42,7 +42,7 @@ static void InitializeCompressorEffect(CompressorInfo::ParameterVersion2& params SetCompressorEffectParameter(params, state); } -static void ApplyCompressorEffect(CompressorInfo::ParameterVersion2& params, +static void ApplyCompressorEffect(const CompressorInfo::ParameterVersion2& params, CompressorInfo::State& state, bool enabled, std::vector<std::span<const s32>> input_buffers, std::vector<std::span<s32>> output_buffers, u32 sample_count) { @@ -103,8 +103,7 @@ static void ApplyCompressorEffect(CompressorInfo::ParameterVersion2& params, } else { for (s16 channel = 0; channel < params.channel_count; channel++) { if (params.inputs[channel] != params.outputs[channel]) { - std::memcpy((char*)output_buffers[channel].data(), - (char*)input_buffers[channel].data(), + std::memcpy(output_buffers[channel].data(), input_buffers[channel].data(), output_buffers[channel].size_bytes()); } } diff --git a/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp b/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp index b3c3ba4ba..48a7cba8a 100644 --- a/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp +++ b/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp @@ -30,7 +30,7 @@ void MultiTapBiquadFilterCommand::Process(const ADSP::CommandListProcessor& proc for (u32 i = 0; i < filter_tap_count; i++) { auto state{reinterpret_cast<VoiceState::BiquadFilterState*>(states[i])}; if (needs_init[i]) { - std::memset(state, 0, 
sizeof(VoiceState::BiquadFilterState)); + *state = {}; } ApplyBiquadFilterFloat(output_buffer, input_buffer, biquads[i].b, biquads[i].a, *state, diff --git a/src/audio_core/renderer/command/mix/mix_ramp.cpp b/src/audio_core/renderer/command/mix/mix_ramp.cpp index ffdafa1c8..d67123cd8 100644 --- a/src/audio_core/renderer/command/mix/mix_ramp.cpp +++ b/src/audio_core/renderer/command/mix/mix_ramp.cpp @@ -7,17 +7,7 @@ #include "common/logging/log.h" namespace AudioCore::AudioRenderer { -/** - * Mix input mix buffer into output mix buffer, with volume applied to the input. - * - * @tparam Q - Number of bits for fixed point operations. - * @param output - Output mix buffer. - * @param input - Input mix buffer. - * @param volume - Volume applied to the input. - * @param ramp - Ramp applied to volume every sample. - * @param sample_count - Number of samples to process. - * @return The final gained input sample, used for depopping. - */ + template <size_t Q> s32 ApplyMixRamp(std::span<s32> output, std::span<const s32> input, const f32 volume_, const f32 ramp_, const u32 sample_count) { @@ -40,10 +30,8 @@ s32 ApplyMixRamp(std::span<s32> output, std::span<const s32> input, const f32 vo return sample.to_int(); } -template s32 ApplyMixRamp<15>(std::span<s32>, std::span<const s32>, const f32, const f32, - const u32); -template s32 ApplyMixRamp<23>(std::span<s32>, std::span<const s32>, const f32, const f32, - const u32); +template s32 ApplyMixRamp<15>(std::span<s32>, std::span<const s32>, f32, f32, u32); +template s32 ApplyMixRamp<23>(std::span<s32>, std::span<const s32>, f32, f32, u32); void MixRampCommand::Dump(const ADSP::CommandListProcessor& processor, std::string& string) { const auto ramp{(volume - prev_volume) / static_cast<f32>(processor.sample_count)}; diff --git a/src/audio_core/renderer/command/mix/mix_ramp.h b/src/audio_core/renderer/command/mix/mix_ramp.h index 770f57e80..52f74a273 100644 --- a/src/audio_core/renderer/command/mix/mix_ramp.h +++ b/src/audio_core/renderer/command/mix/mix_ramp.h @@ -61,13 +61,13 @@ struct MixRampCommand : ICommand { * @tparam Q - Number of bits for fixed point operations. * @param output - Output mix buffer. * @param input - Input mix buffer. - * @param volume - Volume applied to the input. - * @param ramp - Ramp applied to volume every sample. + * @param volume_ - Volume applied to the input. + * @param ramp_ - Ramp applied to volume every sample. * @param sample_count - Number of samples to process. * @return The final gained input sample, used for depopping. 
*/ template <size_t Q> -s32 ApplyMixRamp(std::span<s32> output, std::span<const s32> input, const f32 volume_, - const f32 ramp_, const u32 sample_count); +s32 ApplyMixRamp(std::span<s32> output, std::span<const s32> input, f32 volume_, f32 ramp_, + u32 sample_count); } // namespace AudioCore::AudioRenderer diff --git a/src/audio_core/renderer/command/mix/mix_ramp_grouped.h b/src/audio_core/renderer/command/mix/mix_ramp_grouped.h index 027276e5a..3b0ce67ef 100644 --- a/src/audio_core/renderer/command/mix/mix_ramp_grouped.h +++ b/src/audio_core/renderer/command/mix/mix_ramp_grouped.h @@ -50,9 +50,9 @@ struct MixRampGroupedCommand : ICommand { std::array<s16, MaxMixBuffers> inputs; /// Output mix buffer indexes for each mix buffer std::array<s16, MaxMixBuffers> outputs; - /// Previous mix vloumes for each mix buffer + /// Previous mix volumes for each mix buffer std::array<f32, MaxMixBuffers> prev_volumes; - /// Current mix vloumes for each mix buffer + /// Current mix volumes for each mix buffer std::array<f32, MaxMixBuffers> volumes; /// Pointer to the previous sample buffer, used for depop CpuAddr previous_samples; diff --git a/src/audio_core/renderer/command/sink/device.cpp b/src/audio_core/renderer/command/sink/device.cpp index 47e0c6722..e88372a75 100644 --- a/src/audio_core/renderer/command/sink/device.cpp +++ b/src/audio_core/renderer/command/sink/device.cpp @@ -46,6 +46,10 @@ void DeviceSinkCommand::Process(const ADSP::CommandListProcessor& processor) { out_buffer.tag = reinterpret_cast<u64>(samples.data()); stream->AppendBuffer(out_buffer, samples); + + if (stream->IsPaused()) { + stream->Start(); + } } bool DeviceSinkCommand::Verify(const ADSP::CommandListProcessor& processor) { diff --git a/src/audio_core/renderer/effect/effect_context.h b/src/audio_core/renderer/effect/effect_context.h index 85955bd9c..8f6d6e7d8 100644 --- a/src/audio_core/renderer/effect/effect_context.h +++ b/src/audio_core/renderer/effect/effect_context.h @@ -15,15 +15,15 @@ class EffectContext { public: /** * Initialize the effect context - * @param effect_infos List of effect infos for this context - * @param effect_count The number of effects in the list - * @param result_states_cpu The workbuffer of result states for the CPU for this context - * @param result_states_dsp The workbuffer of result states for the DSP for this context - * @param state_count The number of result states + * @param effect_infos_ - List of effect infos for this context + * @param effect_count_ - The number of effects in the list + * @param result_states_cpu_ - The workbuffer of result states for the CPU for this context + * @param result_states_dsp_ - The workbuffer of result states for the DSP for this context + * @param dsp_state_count - The number of result states */ - void Initialize(std::span<EffectInfoBase> effect_infos_, const u32 effect_count_, + void Initialize(std::span<EffectInfoBase> effect_infos_, u32 effect_count_, std::span<EffectResultState> result_states_cpu_, - std::span<EffectResultState> result_states_dsp_, const size_t dsp_state_count); + std::span<EffectResultState> result_states_dsp_, size_t dsp_state_count); /** * Get the EffectInfo for a given index diff --git a/src/audio_core/renderer/effect/effect_info_base.h b/src/audio_core/renderer/effect/effect_info_base.h index 8c9583878..8525fde05 100644 --- a/src/audio_core/renderer/effect/effect_info_base.h +++ b/src/audio_core/renderer/effect/effect_info_base.h @@ -291,7 +291,7 @@ public: * Update the info with new parameters, version 1. 
* * @param error_info - Used to write call result code. - * @param in_params - New parameters to update the info with. + * @param params - New parameters to update the info with. * @param pool_mapper - Pool for mapping buffers. */ virtual void Update(BehaviorInfo::ErrorInfo& error_info, @@ -305,7 +305,7 @@ public: * Update the info with new parameters, version 2. * * @param error_info - Used to write call result code. - * @param in_params - New parameters to update the info with. + * @param params - New parameters to update the info with. * @param pool_mapper - Pool for mapping buffers. */ virtual void Update(BehaviorInfo::ErrorInfo& error_info, diff --git a/src/audio_core/renderer/effect/i3dl2.h b/src/audio_core/renderer/effect/i3dl2.h index 7a088a627..1ebbc5c4c 100644 --- a/src/audio_core/renderer/effect/i3dl2.h +++ b/src/audio_core/renderer/effect/i3dl2.h @@ -99,7 +99,7 @@ public: return out_sample; } - Common::FixedPoint<50, 14> Read() { + Common::FixedPoint<50, 14> Read() const { return *output; } @@ -110,7 +110,7 @@ public: } } - Common::FixedPoint<50, 14> TapOut(const s32 index) { + Common::FixedPoint<50, 14> TapOut(const s32 index) const { auto out{input - (index + 1)}; if (out < buffer.data()) { out += max_delay + 1; diff --git a/src/audio_core/renderer/effect/reverb.h b/src/audio_core/renderer/effect/reverb.h index b4df9f6ef..a72475c3c 100644 --- a/src/audio_core/renderer/effect/reverb.h +++ b/src/audio_core/renderer/effect/reverb.h @@ -95,7 +95,7 @@ public: return out_sample; } - Common::FixedPoint<50, 14> Read() { + Common::FixedPoint<50, 14> Read() const { return *output; } @@ -106,7 +106,7 @@ public: } } - Common::FixedPoint<50, 14> TapOut(const s32 index) { + Common::FixedPoint<50, 14> TapOut(const s32 index) const { auto out{input - (index + 1)}; if (out < buffer.data()) { out += sample_count; diff --git a/src/audio_core/renderer/memory/address_info.h b/src/audio_core/renderer/memory/address_info.h index 4cfefea8e..bb5c930e1 100644 --- a/src/audio_core/renderer/memory/address_info.h +++ b/src/audio_core/renderer/memory/address_info.h @@ -19,8 +19,8 @@ public: /** * Setup a new AddressInfo. * - * @param cpu_address - The CPU address of this region. - * @param size - The size of this region. + * @param cpu_address_ - The CPU address of this region. + * @param size_ - The size of this region. */ void Setup(CpuAddr cpu_address_, u64 size_) { cpu_address = cpu_address_; @@ -42,7 +42,6 @@ public: * Assign this region to a memory pool. * * @param memory_pool_ - Memory pool to assign. - * @return The CpuAddr address of this region. */ void SetPool(MemoryPoolInfo* memory_pool_) { memory_pool = memory_pool_; diff --git a/src/audio_core/renderer/nodes/node_states.h b/src/audio_core/renderer/nodes/node_states.h index a1e0958a2..94b1d1254 100644 --- a/src/audio_core/renderer/nodes/node_states.h +++ b/src/audio_core/renderer/nodes/node_states.h @@ -56,7 +56,7 @@ class NodeStates { * * @return The current stack position. */ - u32 Count() { + u32 Count() const { return pos; } @@ -83,7 +83,7 @@ class NodeStates { * * @return The node on the top of the stack. */ - u32 top() { + u32 top() const { return stack[pos - 1]; } @@ -112,11 +112,11 @@ public: /** * Initialize the node states. * - * @param buffer - The workbuffer to use. Unused. + * @param buffer_ - The workbuffer to use. Unused. * @param node_buffer_size - The size of the workbuffer. Unused. * @param count - The number of nodes in the graph. 
*/ - void Initialize(std::span<u8> nodes, u64 node_buffer_size, u32 count); + void Initialize(std::span<u8> buffer_, u64 node_buffer_size, u32 count); /** * Sort the graph. Only calls DepthFirstSearch. diff --git a/src/audio_core/renderer/performance/performance_manager.h b/src/audio_core/renderer/performance/performance_manager.h index b82176bef..b65caa9b6 100644 --- a/src/audio_core/renderer/performance/performance_manager.h +++ b/src/audio_core/renderer/performance/performance_manager.h @@ -73,7 +73,8 @@ public: * Calculate the required size for the performance workbuffer. * * @param behavior - Check which version is supported. - * @param params - Input parameters. + * @param params - Input parameters. + * * @return Required workbuffer size. */ static u64 GetRequiredBufferSizeForPerformanceMetricsPerFrame( @@ -104,7 +105,7 @@ public: * @param workbuffer - Workbuffer to use for performance frames. * @param workbuffer_size - Size of the workbuffer. * @param params - Input parameters. - * @param behavior - Behaviour to check version and data format. + * @param behavior - Behaviour to check version and data format. * @param memory_pool - Used to translate the workbuffer address for the DSP. */ virtual void Initialize(std::span<u8> workbuffer, u64 workbuffer_size, @@ -160,7 +161,8 @@ public: * workbuffer, to be written by the AudioRenderer. * * @param addresses - Filled with pointers to the new detail, which should be passed - * to the AudioRenderer with Performance commands to be written. + * to the AudioRenderer with Performance commands to be written. + * @param detail_type - Performance detail type. * @param entry_type - The type of this detail. See PerformanceEntryType * @param node_id - Node id for this detail. * @return True if a new detail was created and the offsets are valid, otherwise false. 
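A pattern worth noting across the effect hunks above (biquad_filter.cpp, multi_tap_biquad_filter.cpp, compressor.cpp): std::memset resets of effect state are replaced with value-initialization, e.g. *state_ = {};. A minimal sketch of the difference, using a hypothetical FilterState rather than the emulator's own types:

#include <cstring>

struct FilterState {
    float s0;
    float s1;
    int sample_count;
};

// memset is only well-defined while FilterState stays trivially copyable;
// adding a member with a constructor later silently makes this undefined.
void ResetWithMemset(FilterState& state) {
    std::memset(&state, 0, sizeof(FilterState));
}

// Value-initialization zeroes every member through the type system, keeps
// working if the struct gains non-trivial members, and typically compiles
// to the same code for trivial types.
void ResetWithValueInit(FilterState& state) {
    state = {};
}

It also removes the risk of the sizeof argument drifting out of sync with the pointed-to type, which is presumably why these hunks prefer the assignment form.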
diff --git a/src/audio_core/renderer/system.cpp b/src/audio_core/renderer/system.cpp index 7a217969e..4fac30c7c 100644 --- a/src/audio_core/renderer/system.cpp +++ b/src/audio_core/renderer/system.cpp @@ -98,9 +98,8 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_) : core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {} Result System::Initialize(const AudioRendererParameterInternal& params, - Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size, - const u32 process_handle_, const u64 applet_resource_user_id_, - const s32 session_id_) { + Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size, + u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) { if (!CheckValidRevision(params.revision)) { return Service::Audio::ERR_INVALID_REVISION; } @@ -354,6 +353,8 @@ Result System::Initialize(const AudioRendererParameterInternal& params, render_time_limit_percent = 100; drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto; + drop_voice_param = 1.0f; + num_voices_dropped = 0; allocator.Align(0x40); command_workbuffer_size = allocator.GetRemainingSize(); @@ -534,7 +535,7 @@ Result System::Update(std::span<const u8> input, std::span<u8> performance, std: return result; } - adsp_rendered_event->GetWritableEvent().Clear(); + adsp_rendered_event->Clear(); num_times_updated++; const auto end_time{core.CoreTiming().GetClockTicks()}; @@ -547,7 +548,7 @@ u32 System::GetRenderingTimeLimit() const { return render_time_limit_percent; } -void System::SetRenderingTimeLimit(const u32 limit) { +void System::SetRenderingTimeLimit(u32 limit) { render_time_limit_percent = limit; } @@ -625,7 +626,7 @@ void System::SendCommandToDsp() { reset_command_buffers = false; command_buffer_size = command_size; if (remaining_command_count == 0) { - adsp_rendered_event->GetWritableEvent().Signal(); + adsp_rendered_event->Signal(); } } else { adsp.ClearRemainCount(session_id); @@ -635,7 +636,7 @@ void System::SendCommandToDsp() { } u64 System::GenerateCommand(std::span<u8> in_command_buffer, - [[maybe_unused]] const u64 command_buffer_size_) { + [[maybe_unused]] u64 command_buffer_size_) { PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count); const auto start_time{core.CoreTiming().GetClockTicks()}; @@ -693,7 +694,8 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer, voice_context.SortInfo(); - const auto start_estimated_time{command_buffer.estimated_process_time}; + const auto start_estimated_time{drop_voice_param * + static_cast<f32>(command_buffer.estimated_process_time)}; command_generator.GenerateVoiceCommands(); command_generator.GenerateSubMixCommands(); @@ -712,11 +714,16 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer, render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported(); time_limit_percent = 70.0f; } + + const auto end_estimated_time{drop_voice_param * + static_cast<f32>(command_buffer.estimated_process_time)}; + const auto estimated_time{start_estimated_time - end_estimated_time}; + const auto time_limit{static_cast<u32>( - static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) + - (((time_limit_percent / 100.0f) * 2'880'000.0) * - (static_cast<f32>(render_time_limit_percent) / 100.0f)))}; - num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit); + estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) * + (static_cast<f32>(render_time_limit_percent) 
/ 100.0f)))}; + num_voices_dropped = + DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit); } command_list_header->buffer_size = command_buffer.size; @@ -737,24 +744,33 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer, return command_buffer.size; } -u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time, - const u32 time_limit) { +f32 System::GetVoiceDropParameter() const { + return drop_voice_param; +} + +void System::SetVoiceDropParameter(f32 voice_drop_) { + drop_voice_param = voice_drop_; +} + +u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) { u32 i{0}; auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)}; - ICommand* cmd{}; + ICommand* cmd{nullptr}; - for (; i < command_buffer.count; i++) { + // Find a first valid voice to drop + while (i < command_buffer.count) { cmd = reinterpret_cast<ICommand*>(command_list); - if (cmd->type != CommandId::Performance && - cmd->type != CommandId::DataSourcePcmInt16Version1 && - cmd->type != CommandId::DataSourcePcmInt16Version2 && - cmd->type != CommandId::DataSourcePcmFloatVersion1 && - cmd->type != CommandId::DataSourcePcmFloatVersion2 && - cmd->type != CommandId::DataSourceAdpcmVersion1 && - cmd->type != CommandId::DataSourceAdpcmVersion2) { + if (cmd->type == CommandId::Performance || + cmd->type == CommandId::DataSourcePcmInt16Version1 || + cmd->type == CommandId::DataSourcePcmInt16Version2 || + cmd->type == CommandId::DataSourcePcmFloatVersion1 || + cmd->type == CommandId::DataSourcePcmFloatVersion2 || + cmd->type == CommandId::DataSourceAdpcmVersion1 || + cmd->type == CommandId::DataSourceAdpcmVersion2) { break; } command_list += cmd->size; + i++; } if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) { @@ -767,6 +783,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces const auto node_id_type{cmd->node_id >> 28}; const auto node_id_base{cmd->node_id & 0xFFF}; + // If the new estimated process time falls below the limit, we're done dropping. if (estimated_process_time <= time_limit) { break; } @@ -775,6 +792,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces break; } + // Don't drop voices marked with the highest priority. auto& voice_info{voice_context.GetInfo(node_id_base)}; if (voice_info.priority == HighestVoicePriority) { break; @@ -783,18 +801,23 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces voices_dropped++; voice_info.voice_dropped = true; - if (i < command_buffer.count) { - while (cmd->node_id == node_id) { - if (cmd->type == CommandId::DepopPrepare) { - cmd->enabled = true; - } else if (cmd->type == CommandId::Performance || !cmd->enabled) { - cmd->enabled = false; - } - i++; - command_list += cmd->size; - cmd = reinterpret_cast<ICommand*>(command_list); + // First iteration should drop the voice, and then iterate through all of the commands tied + // to the voice. We don't need reverb on a voice which we've just removed, for example. + // Depops can't be removed otherwise we'll introduce audio popping, and we don't + // remove perf commands. Lower the estimated time for each command dropped. 
+ while (i < command_buffer.count && cmd->node_id == node_id) { + if (cmd->type == CommandId::DepopPrepare) { + cmd->enabled = true; + } else if (cmd->enabled && cmd->type != CommandId::Performance) { + cmd->enabled = false; + estimated_process_time -= static_cast<u32>( + drop_voice_param * static_cast<f32>(cmd->estimated_process_time)); } + command_list += cmd->size; + cmd = reinterpret_cast<ICommand*>(command_list); + i++; } + i++; } return voices_dropped; } diff --git a/src/audio_core/renderer/system.h b/src/audio_core/renderer/system.h index bcbe65b07..429196e41 100644 --- a/src/audio_core/renderer/system.h +++ b/src/audio_core/renderer/system.h @@ -196,6 +196,20 @@ public: */ u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit); + /** + * Get the current voice drop parameter. + * + * @return The current voice drop. + */ + f32 GetVoiceDropParameter() const; + + /** + * Set the voice drop parameter. + * + * @param voice_drop - The new voice drop. + */ + void SetVoiceDropParameter(f32 voice_drop); + private: /// Core system Core::System& core; @@ -301,6 +315,8 @@ private: u32 num_voices_dropped{}; /// Tick that rendering started u64 render_start_tick{}; + /// Parameter to control the threshold for dropping voices if the audio graph gets too large + f32 drop_voice_param{1.0f}; }; } // namespace AudioRenderer diff --git a/src/audio_core/renderer/system_manager.cpp b/src/audio_core/renderer/system_manager.cpp index b326819ed..f66b2b890 100644 --- a/src/audio_core/renderer/system_manager.cpp +++ b/src/audio_core/renderer/system_manager.cpp @@ -15,17 +15,14 @@ MICROPROFILE_DEFINE(Audio_RenderSystemManager, "Audio", "Render System Manager", MP_RGB(60, 19, 97)); namespace AudioCore::AudioRenderer { -constexpr std::chrono::nanoseconds BaseRenderTime{5'000'000UL}; -constexpr std::chrono::nanoseconds RenderTimeOffset{400'000UL}; +constexpr std::chrono::nanoseconds RENDER_TIME{5'000'000UL}; SystemManager::SystemManager(Core::System& core_) : core{core_}, adsp{core.AudioCore().GetADSP()}, mailbox{adsp.GetRenderMailbox()}, thread_event{Core::Timing::CreateEvent( "AudioRendererSystemManager", [this](std::uintptr_t, s64 time, std::chrono::nanoseconds) { return ThreadFunc2(time); - })} { - core.CoreTiming().RegisterPauseCallback([this](bool paused) { PauseCallback(paused); }); -} + })} {} SystemManager::~SystemManager() { Stop(); @@ -36,8 +33,8 @@ bool SystemManager::InitializeUnsafe() { if (adsp.Start()) { active = true; thread = std::jthread([this](std::stop_token stop_token) { ThreadFunc(); }); - core.CoreTiming().ScheduleLoopingEvent(std::chrono::nanoseconds(0), - BaseRenderTime - RenderTimeOffset, thread_event); + core.CoreTiming().ScheduleLoopingEvent(std::chrono::nanoseconds(0), RENDER_TIME, + thread_event); } } @@ -97,7 +94,7 @@ bool SystemManager::Remove(System& system_) { } void SystemManager::ThreadFunc() { - constexpr char name[]{"yuzu:AudioRenderSystemManager"}; + constexpr char name[]{"AudioRenderSystemManager"}; MicroProfileOnThreadCreate(name); Common::SetCurrentThreadName(name); Common::SetCurrentThreadPriority(Common::ThreadPriority::High); @@ -121,42 +118,9 @@ void SystemManager::ThreadFunc() { } std::optional<std::chrono::nanoseconds> SystemManager::ThreadFunc2(s64 time) { - std::optional<std::chrono::nanoseconds> new_schedule_time{std::nullopt}; - const auto queue_size{core.AudioCore().GetStreamQueue()}; - switch (state) { - case StreamState::Filling: - if (queue_size >= 5) { - new_schedule_time = BaseRenderTime; - state = StreamState::Steady; - } - break; - 
case StreamState::Steady: - if (queue_size <= 2) { - new_schedule_time = BaseRenderTime - RenderTimeOffset; - state = StreamState::Filling; - } else if (queue_size > 5) { - new_schedule_time = BaseRenderTime + RenderTimeOffset; - state = StreamState::Draining; - } - break; - case StreamState::Draining: - if (queue_size <= 5) { - new_schedule_time = BaseRenderTime; - state = StreamState::Steady; - } - break; - } - update.store(true); update.notify_all(); - return new_schedule_time; -} - -void SystemManager::PauseCallback(bool paused) { - if (paused && core.IsPoweredOn() && core.IsShuttingDown()) { - update.store(true); - update.notify_all(); - } + return std::nullopt; } } // namespace AudioCore::AudioRenderer diff --git a/src/audio_core/renderer/system_manager.h b/src/audio_core/renderer/system_manager.h index 1291e9e0e..81457a3a1 100644 --- a/src/audio_core/renderer/system_manager.h +++ b/src/audio_core/renderer/system_manager.h @@ -73,13 +73,6 @@ private: */ std::optional<std::chrono::nanoseconds> ThreadFunc2(s64 time); - /** - * Callback from core timing when pausing, used to detect shutdowns and stop ThreadFunc. - * - * @param paused - Are we pausing or resuming? - */ - void PauseCallback(bool paused); - enum class StreamState { Filling, Steady, @@ -106,8 +99,6 @@ private: std::shared_ptr<Core::Timing::EventType> thread_event; /// Atomic for main thread to wait on std::atomic<bool> update{}; - /// Current state of the streams - StreamState state{StreamState::Filling}; }; } // namespace AudioCore::AudioRenderer diff --git a/src/audio_core/renderer/upsampler/upsampler_manager.h b/src/audio_core/renderer/upsampler/upsampler_manager.h index 70cd42b08..83c697c0c 100644 --- a/src/audio_core/renderer/upsampler/upsampler_manager.h +++ b/src/audio_core/renderer/upsampler/upsampler_manager.h @@ -27,7 +27,7 @@ public: /** * Free the given upsampler. * - * @param The upsampler to be freed. + * @param info The upsampler to be freed. */ void Free(UpsamplerInfo* info); diff --git a/src/audio_core/renderer/voice/voice_context.cpp b/src/audio_core/renderer/voice/voice_context.cpp index eafb51b01..a501a677d 100644 --- a/src/audio_core/renderer/voice/voice_context.cpp +++ b/src/audio_core/renderer/voice/voice_context.cpp @@ -74,8 +74,8 @@ void VoiceContext::SortInfo() { } std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) { - return a->priority != b->priority ? a->priority < b->priority - : a->sort_order < b->sort_order; + return a->priority != b->priority ? a->priority > b->priority + : a->sort_order > b->sort_order; }); } diff --git a/src/audio_core/renderer/voice/voice_info.h b/src/audio_core/renderer/voice/voice_info.h index 896723e0c..930180895 100644 --- a/src/audio_core/renderer/voice/voice_info.h +++ b/src/audio_core/renderer/voice/voice_info.h @@ -185,7 +185,8 @@ public: /** * Does this voice need an update? * - * @param params - Input parametetrs to check matching. + * @param params - Input parameters to check matching. + * + * @return True if this voice needs an update, otherwise false. */ bool ShouldUpdateParameters(const InParameter& params) const; @@ -194,9 +195,9 @@ public: * Update the parameters of this voice. * * @param error_info - Output error code. - * @param params - Input parametters to udpate from. + * @param params - Input parameters to update from. * @param pool_mapper - Used to map buffers. - * @param behavior - behavior to check supported features. + * @param behavior - behavior to check supported features. 
*/ void UpdateParameters(BehaviorInfo::ErrorInfo& error_info, const InParameter& params, const PoolMapper& pool_mapper, const BehaviorInfo& behavior); @@ -218,12 +219,12 @@ public: /** * Update all wavebuffers. * - * @param error_infos - Output 2D array of errors, 2 per wavebuffer. - * @param error_count - Number of errors provided. Unused. - * @param params - Input parametters to be used for the update. + * @param error_infos - Output 2D array of errors, 2 per wavebuffer. + * @param error_count - Number of errors provided. Unused. + * @param params - Input parameters to be used for the update. * @param voice_states - The voice states for each channel in this voice to be updated. - * @param pool_mapper - Used to map the wavebuffers. - * @param behavior - Used to check for supported features. + * @param pool_mapper - Used to map the wavebuffers. + * @param behavior - Used to check for supported features. */ void UpdateWaveBuffers(std::span<std::array<BehaviorInfo::ErrorInfo, 2>> error_infos, u32 error_count, const InParameter& params, @@ -233,13 +234,13 @@ public: /** * Update a wavebuffer. * - * @param error_infos - Output array of errors. + * @param error_info - Output array of errors. * @param wave_buffer - The wavebuffer to be updated. * @param wave_buffer_internal - Input parameters to be used for the update. * @param sample_format - Sample format of the wavebuffer. * @param valid - Is this wavebuffer valid? * @param pool_mapper - Used to map the wavebuffers. - * @param behavior - Used to check for supported features. + * @param behavior - Used to check for supported features. */ void UpdateWaveBuffer(std::span<BehaviorInfo::ErrorInfo> error_info, WaveBuffer& wave_buffer, const WaveBufferInternal& wave_buffer_internal, @@ -276,7 +277,7 @@ public: /** * Check if this voice has any mixing connections. * - * @return True if this voice participes in mixing, otherwise false. + * @return True if this voice participates in mixing, otherwise false. */ bool HasAnyConnection() const; @@ -301,7 +302,8 @@ public: /** * Update this voice on command generation. * - * @param voice_states - Voice states for these wavebuffers. + * @param voice_context - Voice context for these wavebuffers. + * + * @return True if this voice should be generated, otherwise false. */ bool UpdateForCommandGeneration(VoiceContext& voice_context); diff --git a/src/audio_core/sink/cubeb_sink.cpp b/src/audio_core/sink/cubeb_sink.cpp index 90d049e8e..32c1b1cb3 100644 --- a/src/audio_core/sink/cubeb_sink.cpp +++ b/src/audio_core/sink/cubeb_sink.cpp @@ -1,21 +1,13 @@ // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later -#include <algorithm> -#include <atomic> #include <span> +#include <vector> -#include "audio_core/audio_core.h" -#include "audio_core/audio_event.h" -#include "audio_core/audio_manager.h" +#include "audio_core/common/common.h" #include "audio_core/sink/cubeb_sink.h" #include "audio_core/sink/sink_stream.h" -#include "common/assert.h" -#include "common/fixed_point.h" #include "common/logging/log.h" -#include "common/reader_writer_queue.h" -#include "common/ring_buffer.h" -#include "common/settings.h" #include "core/core.h" #ifdef _WIN32 @@ -42,10 +34,10 @@ public: * @param system_ - Core system. * @param event - Event used only for audio renderer, signalled on buffer consume. 
*/ - CubebSinkStream(cubeb* ctx_, const u32 device_channels_, const u32 system_channels_, + CubebSinkStream(cubeb* ctx_, u32 device_channels_, u32 system_channels_, cubeb_devid output_device, cubeb_devid input_device, const std::string& name_, - const StreamType type_, Core::System& system_) - : ctx{ctx_}, type{type_}, system{system_} { + StreamType type_, Core::System& system_) + : SinkStream(system_, type_), ctx{ctx_} { #ifdef _WIN32 CoInitializeEx(nullptr, COINIT_MULTITHREADED); #endif @@ -74,17 +66,15 @@ public: const auto latency_error = cubeb_get_min_latency(ctx, &params, &minimum_latency); if (latency_error != CUBEB_OK) { LOG_CRITICAL(Audio_Sink, "Error getting minimum latency, error: {}", latency_error); - minimum_latency = 256U; + minimum_latency = TargetSampleCount * 2; } - minimum_latency = std::max(minimum_latency, 256u); + minimum_latency = std::max(minimum_latency, TargetSampleCount * 2); - playing_buffer.consumed = true; - - LOG_DEBUG(Service_Audio, - "Opening cubeb stream {} type {} with: rate {} channels {} (system channels {}) " - "latency {}", - name, type, params.rate, params.channels, system_channels, minimum_latency); + LOG_INFO(Service_Audio, + "Opening cubeb stream {} type {} with: rate {} channels {} (system channels {}) " + "latency {}", + name, type, params.rate, params.channels, system_channels, minimum_latency); auto init_error{0}; if (type == StreamType::In) { @@ -111,6 +101,8 @@ public: ~CubebSinkStream() override { LOG_DEBUG(Service_Audio, "Destructing cubeb stream {}", name); + Unstall(); + if (!ctx) { return; } @@ -136,21 +128,14 @@ public: * @param resume - Set to true if this is resuming a previously-active stream. * Default false. */ - void Start(const bool resume = false) override { - if (!ctx) { + void Start(bool resume = false) override { + if (!ctx || !paused) { return; } - if (resume && was_playing) { - if (cubeb_stream_start(stream_backend) != CUBEB_OK) { - LOG_CRITICAL(Audio_Sink, "Error starting cubeb stream"); - } - paused = false; - } else if (!resume) { - if (cubeb_stream_start(stream_backend) != CUBEB_OK) { - LOG_CRITICAL(Audio_Sink, "Error starting cubeb stream"); - } - paused = false; + paused = false; + if (cubeb_stream_start(stream_backend) != CUBEB_OK) { + LOG_CRITICAL(Audio_Sink, "Error starting cubeb stream"); } } @@ -158,207 +143,20 @@ public: /** * Stop the sink stream. */ void Stop() override { - if (!ctx) { + Unstall(); + + if (!ctx || paused) { return; } + paused = true; if (cubeb_stream_stop(stream_backend) != CUBEB_OK) { LOG_CRITICAL(Audio_Sink, "Error stopping cubeb stream"); } - - was_playing.store(!paused); - paused = true; - } - - /** - * Append a new buffer and its samples to a waiting queue to play. - * - * @param buffer - Audio buffer information to be queued. - * @param samples - The s16 samples to be queue for playback. - */ - void AppendBuffer(::AudioCore::Sink::SinkBuffer& buffer, std::vector<s16>& samples) override { - if (type == StreamType::In) { - queue.enqueue(buffer); - queued_buffers++; - } else { - constexpr s32 min{std::numeric_limits<s16>::min()}; - constexpr s32 max{std::numeric_limits<s16>::max()}; - - auto yuzu_volume{Settings::Volume()}; - if (yuzu_volume > 1.0f) { - yuzu_volume = 0.6f + 20 * std::log10(yuzu_volume); - } - auto volume{system_volume * device_volume * yuzu_volume}; - - if (system_channels == 6 && device_channels == 2) { - // We're given 6 channels, but our device only outputs 2, so downmix. 
- constexpr std::array<f32, 4> down_mix_coeff{1.0f, 0.707f, 0.251f, 0.707f}; - - for (u32 read_index = 0, write_index = 0; read_index < samples.size(); - read_index += system_channels, write_index += device_channels) { - const auto left_sample{ - ((Common::FixedPoint<49, 15>( - samples[read_index + static_cast<u32>(Channels::FrontLeft)]) * - down_mix_coeff[0] + - samples[read_index + static_cast<u32>(Channels::Center)] * - down_mix_coeff[1] + - samples[read_index + static_cast<u32>(Channels::LFE)] * - down_mix_coeff[2] + - samples[read_index + static_cast<u32>(Channels::BackLeft)] * - down_mix_coeff[3]) * - volume) - .to_int()}; - - const auto right_sample{ - ((Common::FixedPoint<49, 15>( - samples[read_index + static_cast<u32>(Channels::FrontRight)]) * - down_mix_coeff[0] + - samples[read_index + static_cast<u32>(Channels::Center)] * - down_mix_coeff[1] + - samples[read_index + static_cast<u32>(Channels::LFE)] * - down_mix_coeff[2] + - samples[read_index + static_cast<u32>(Channels::BackRight)] * - down_mix_coeff[3]) * - volume) - .to_int()}; - - samples[write_index + static_cast<u32>(Channels::FrontLeft)] = - static_cast<s16>(std::clamp(left_sample, min, max)); - samples[write_index + static_cast<u32>(Channels::FrontRight)] = - static_cast<s16>(std::clamp(right_sample, min, max)); - } - - samples.resize(samples.size() / system_channels * device_channels); - - } else if (system_channels == 2 && device_channels == 6) { - // We need moar samples! Not all games will provide 6 channel audio. - // TODO: Implement some upmixing here. Currently just passthrough, with other - // channels left as silence. - std::vector<s16> new_samples(samples.size() / system_channels * device_channels, 0); - - for (u32 read_index = 0, write_index = 0; read_index < samples.size(); - read_index += system_channels, write_index += device_channels) { - const auto left_sample{static_cast<s16>(std::clamp( - static_cast<s32>( - static_cast<f32>( - samples[read_index + static_cast<u32>(Channels::FrontLeft)]) * - volume), - min, max))}; - - new_samples[write_index + static_cast<u32>(Channels::FrontLeft)] = left_sample; - - const auto right_sample{static_cast<s16>(std::clamp( - static_cast<s32>( - static_cast<f32>( - samples[read_index + static_cast<u32>(Channels::FrontRight)]) * - volume), - min, max))}; - - new_samples[write_index + static_cast<u32>(Channels::FrontRight)] = - right_sample; - } - samples = std::move(new_samples); - - } else if (volume != 1.0f) { - for (u32 i = 0; i < samples.size(); i++) { - samples[i] = static_cast<s16>(std::clamp( - static_cast<s32>(static_cast<f32>(samples[i]) * volume), min, max)); - } - } - - samples_buffer.Push(samples); - queue.enqueue(buffer); - queued_buffers++; - } - } - - /** - * Release a buffer. Audio In only, will fill a buffer with recorded samples. - * - * @param num_samples - Maximum number of samples to receive. - * @return Vector of recorded samples. May have fewer than num_samples. - */ - std::vector<s16> ReleaseBuffer(const u64 num_samples) override { - static constexpr s32 min = std::numeric_limits<s16>::min(); - static constexpr s32 max = std::numeric_limits<s16>::max(); - - auto samples{samples_buffer.Pop(num_samples)}; - - // TODO: Up-mix to 6 channels if the game expects it. - // For audio input this is unlikely to ever be the case though. - - // Incoming mic volume seems to always be very quiet, so multiply by an additional 8 here. - // TODO: Play with this and find something that works better. 
- auto volume{system_volume * device_volume * 8}; - for (u32 i = 0; i < samples.size(); i++) { - samples[i] = static_cast<s16>( - std::clamp(static_cast<s32>(static_cast<f32>(samples[i]) * volume), min, max)); - } - - if (samples.size() < num_samples) { - samples.resize(num_samples, 0); - } - return samples; - } - - /** - * Check if a certain buffer has been consumed (fully played). - * - * @param tag - Unique tag of a buffer to check for. - * @return True if the buffer has been played, otherwise false. - */ - bool IsBufferConsumed(const u64 tag) override { - if (released_buffer.tag == 0) { - if (!released_buffers.try_dequeue(released_buffer)) { - return false; - } - } - - if (released_buffer.tag == tag) { - released_buffer.tag = 0; - return true; - } - return false; - } - - /** - * Empty out the buffer queue. - */ - void ClearQueue() override { - samples_buffer.Pop(); - while (queue.pop()) { - } - while (released_buffers.pop()) { - } - queued_buffers = 0; - released_buffer = {}; - playing_buffer = {}; - playing_buffer.consumed = true; } private: /** - * Signal events back to the audio system that a buffer was played/can be filled. - * - * @param buffer - Consumed audio buffer to be released. - */ - void SignalEvent(const ::AudioCore::Sink::SinkBuffer& buffer) { - auto& manager{system.AudioCore().GetAudioManager()}; - switch (type) { - case StreamType::Out: - released_buffers.enqueue(buffer); - manager.SetEvent(Event::Type::AudioOutManager, true); - break; - case StreamType::In: - released_buffers.enqueue(buffer); - manager.SetEvent(Event::Type::AudioInManager, true); - break; - case StreamType::Render: - break; - } - } - - /** * Main callback from Cubeb. Either expects samples from us (audio render/audio out), or will * provide samples to be copied (audio in). * @@ -378,106 +176,15 @@ private: const std::size_t num_channels = impl->GetDeviceChannels(); const std::size_t frame_size = num_channels; - const std::size_t frame_size_bytes = frame_size * sizeof(s16); const std::size_t num_frames{static_cast<size_t>(num_frames_)}; - size_t frames_written{0}; - [[maybe_unused]] bool underrun{false}; if (impl->type == StreamType::In) { - // INPUT std::span<const s16> input_buffer{reinterpret_cast<const s16*>(in_buff), num_frames * frame_size}; - - while (frames_written < num_frames) { - auto& playing_buffer{impl->playing_buffer}; - - // If the playing buffer has been consumed or has no frames, we need a new one - if (playing_buffer.consumed || playing_buffer.frames == 0) { - if (!impl->queue.try_dequeue(impl->playing_buffer)) { - // If no buffer was available we've underrun, just push the samples and - // continue. - underrun = true; - impl->samples_buffer.Push(&input_buffer[frames_written * frame_size], - (num_frames - frames_written) * frame_size); - frames_written = num_frames; - continue; - } else { - // Successfully got a new buffer, mark the old one as consumed and signal. 
- impl->queued_buffers--; - impl->SignalEvent(impl->playing_buffer); - } - } - - // Get the minimum frames available between the currently playing buffer, and the - // amount we have left to fill - size_t frames_available{ - std::min(playing_buffer.frames - playing_buffer.frames_played, - num_frames - frames_written)}; - - impl->samples_buffer.Push(&input_buffer[frames_written * frame_size], - frames_available * frame_size); - - frames_written += frames_available; - playing_buffer.frames_played += frames_available; - - // If that's all the frames in the current buffer, add its samples and mark it as - // consumed - if (playing_buffer.frames_played >= playing_buffer.frames) { - impl->AddPlayedSampleCount(playing_buffer.frames_played * num_channels); - impl->playing_buffer.consumed = true; - } - } - - std::memcpy(&impl->last_frame[0], &input_buffer[(frames_written - 1) * frame_size], - frame_size_bytes); + impl->ProcessAudioIn(input_buffer, num_frames); } else { - // OUTPUT std::span<s16> output_buffer{reinterpret_cast<s16*>(out_buff), num_frames * frame_size}; - - while (frames_written < num_frames) { - auto& playing_buffer{impl->playing_buffer}; - - // If the playing buffer has been consumed or has no frames, we need a new one - if (playing_buffer.consumed || playing_buffer.frames == 0) { - if (!impl->queue.try_dequeue(impl->playing_buffer)) { - // If no buffer was available we've underrun, fill the remaining buffer with - // the last written frame and continue. - underrun = true; - for (size_t i = frames_written; i < num_frames; i++) { - std::memcpy(&output_buffer[i * frame_size], &impl->last_frame[0], - frame_size_bytes); - } - frames_written = num_frames; - continue; - } else { - // Successfully got a new buffer, mark the old one as consumed and signal. - impl->queued_buffers--; - impl->SignalEvent(impl->playing_buffer); - } - } - - // Get the minimum frames available between the currently playing buffer, and the - // amount we have left to fill - size_t frames_available{ - std::min(playing_buffer.frames - playing_buffer.frames_played, - num_frames - frames_written)}; - - impl->samples_buffer.Pop(&output_buffer[frames_written * frame_size], - frames_available * frame_size); - - frames_written += frames_available; - playing_buffer.frames_played += frames_available; - - // If that's all the frames in the current buffer, add its samples and mark it as - // consumed - if (playing_buffer.frames_played >= playing_buffer.frames) { - impl->AddPlayedSampleCount(playing_buffer.frames_played * num_channels); - impl->playing_buffer.consumed = true; - } - } - - std::memcpy(&impl->last_frame[0], &output_buffer[(frames_written - 1) * frame_size], - frame_size_bytes); + impl->ProcessAudioOutAndRender(output_buffer, num_frames); } return num_frames_; @@ -490,32 +197,12 @@ private: * @param user_data - Custom data pointer passed along, points to a CubebSinkStream. * @param state - New state of the device. 
*/ - static void StateCallback([[maybe_unused]] cubeb_stream* stream, - [[maybe_unused]] void* user_data, - [[maybe_unused]] cubeb_state state) {} + static void StateCallback(cubeb_stream*, void*, cubeb_state) {} /// Main Cubeb context cubeb* ctx{}; /// Cubeb stream backend cubeb_stream* stream_backend{}; - /// Name of this stream - std::string name{}; - /// Type of this stream - StreamType type; - /// Core system - Core::System& system; - /// Ring buffer of the samples waiting to be played or consumed - Common::RingBuffer<s16, 0x10000> samples_buffer; - /// Audio buffers queued and waiting to play - Common::ReaderWriterQueue<::AudioCore::Sink::SinkBuffer> queue; - /// The currently-playing audio buffer - ::AudioCore::Sink::SinkBuffer playing_buffer{}; - /// Audio buffers which have been played and are in queue to be released by the audio system - Common::ReaderWriterQueue<::AudioCore::Sink::SinkBuffer> released_buffers{}; - /// Currently released buffer waiting to be taken by the audio system - ::AudioCore::Sink::SinkBuffer released_buffer{}; - /// The last played (or received) frame of audio, used when the callback underruns - std::array<s16, MaxChannels> last_frame{}; }; CubebSink::CubebSink(std::string_view target_device_name) { @@ -569,15 +256,15 @@ CubebSink::~CubebSink() { #endif } -SinkStream* CubebSink::AcquireSinkStream(Core::System& system, const u32 system_channels, - const std::string& name, const StreamType type) { +SinkStream* CubebSink::AcquireSinkStream(Core::System& system, u32 system_channels, + const std::string& name, StreamType type) { SinkStreamPtr& stream = sink_streams.emplace_back(std::make_unique<CubebSinkStream>( ctx, device_channels, system_channels, output_device, input_device, name, type, system)); return stream.get(); } -void CubebSink::CloseStream(const SinkStream* stream) { +void CubebSink::CloseStream(SinkStream* stream) { for (size_t i = 0; i < sink_streams.size(); i++) { if (sink_streams[i].get() == stream) { sink_streams[i].reset(); @@ -591,18 +278,6 @@ void CubebSink::CloseStreams() { sink_streams.clear(); } -void CubebSink::PauseStreams() { - for (auto& stream : sink_streams) { - stream->Stop(); - } -} - -void CubebSink::UnpauseStreams() { - for (auto& stream : sink_streams) { - stream->Start(true); - } -} - f32 CubebSink::GetDeviceVolume() const { if (sink_streams.empty()) { return 1.0f; @@ -611,19 +286,19 @@ f32 CubebSink::GetDeviceVolume() const { return sink_streams[0]->GetDeviceVolume(); } -void CubebSink::SetDeviceVolume(const f32 volume) { +void CubebSink::SetDeviceVolume(f32 volume) { for (auto& stream : sink_streams) { stream->SetDeviceVolume(volume); } } -void CubebSink::SetSystemVolume(const f32 volume) { +void CubebSink::SetSystemVolume(f32 volume) { for (auto& stream : sink_streams) { stream->SetSystemVolume(volume); } } -std::vector<std::string> ListCubebSinkDevices(const bool capture) { +std::vector<std::string> ListCubebSinkDevices(bool capture) { std::vector<std::string> device_list; cubeb* ctx; @@ -651,4 +326,31 @@ std::vector<std::string> ListCubebSinkDevices(const bool capture) { return device_list; } +u32 GetCubebLatency() { + cubeb* ctx; + + if (cubeb_init(&ctx, "yuzu Latency Getter", nullptr) != CUBEB_OK) { + LOG_CRITICAL(Audio_Sink, "cubeb_init failed"); + // Return a large latency so we choose SDL instead. 
+ return 10000u; + } + + cubeb_stream_params params{}; + params.rate = TargetSampleRate; + params.channels = 2; + params.format = CUBEB_SAMPLE_S16LE; + params.prefs = CUBEB_STREAM_PREF_NONE; + params.layout = CUBEB_LAYOUT_STEREO; + + u32 latency{0}; + const auto latency_error = cubeb_get_min_latency(ctx, &params, &latency); + if (latency_error != CUBEB_OK) { + LOG_CRITICAL(Audio_Sink, "Error getting minimum latency, error: {}", latency_error); + latency = TargetSampleCount * 2; + } + latency = std::max(latency, TargetSampleCount * 2); + cubeb_destroy(ctx); + return latency; +} + } // namespace AudioCore::Sink diff --git a/src/audio_core/sink/cubeb_sink.h b/src/audio_core/sink/cubeb_sink.h index f0f43dfa1..3302cb98d 100644 --- a/src/audio_core/sink/cubeb_sink.h +++ b/src/audio_core/sink/cubeb_sink.h @@ -34,8 +34,7 @@ public: * May differ from the device's channel count. * @param name - Name of this stream. * @param type - Type of this stream, render/in/out. - * @param event - Audio render only, a signal used to prevent the renderer running too - * fast. + * + * @return A pointer to the created SinkStream */ SinkStream* AcquireSinkStream(Core::System& system, u32 system_channels, @@ -46,7 +45,7 @@ public: * * @param stream - The stream to close. */ - void CloseStream(const SinkStream* stream) override; + void CloseStream(SinkStream* stream) override; /** * Close all streams. @@ -54,16 +53,6 @@ public: void CloseStreams() override; /** - * Pause all streams. - */ - void PauseStreams() override; - - /** - * Unpause all streams. - */ - void UnpauseStreams() override; - - /** * Get the device volume. Set from calls to the IAudioDevice service. * * @return Volume of the device. @@ -101,10 +90,17 @@ private: }; /** - * Get a list of conencted devices from Cubeb. + * Get a list of connected devices from Cubeb. * * @param capture - Return input (capture) devices if true, otherwise output devices. */ std::vector<std::string> ListCubebSinkDevices(bool capture); +/** + * Get the reported latency for this sink. + * + * @return Minimum latency for this sink. + */ +u32 GetCubebLatency(); + } // namespace AudioCore::Sink diff --git a/src/audio_core/sink/null_sink.h b/src/audio_core/sink/null_sink.h index 47a342171..1215d3cd2 100644 --- a/src/audio_core/sink/null_sink.h +++ b/src/audio_core/sink/null_sink.h @@ -3,10 +3,29 @@ #pragma once +#include <string> +#include <string_view> +#include <vector> + #include "audio_core/sink/sink.h" #include "audio_core/sink/sink_stream.h" +namespace Core { +class System; +} // namespace Core + namespace AudioCore::Sink { +class NullSinkStreamImpl final : public SinkStream { +public: + explicit NullSinkStreamImpl(Core::System& system_, StreamType type_) + : SinkStream{system_, type_} {} + ~NullSinkStreamImpl() override {} + void AppendBuffer(SinkBuffer&, std::vector<s16>&) override {} + std::vector<s16> ReleaseBuffer(u64) override { + return {}; + } +}; + /** * A no-op sink for when no audio out is wanted. 
*/ @@ -15,17 +34,16 @@ public: explicit NullSink(std::string_view) {} ~NullSink() override = default; - SinkStream* AcquireSinkStream([[maybe_unused]] Core::System& system, - [[maybe_unused]] u32 system_channels, - [[maybe_unused]] const std::string& name, - [[maybe_unused]] StreamType type) override { - return &null_sink_stream; + SinkStream* AcquireSinkStream(Core::System& system, u32, const std::string&, + StreamType type) override { + if (null_sink == nullptr) { + null_sink = std::make_unique<NullSinkStreamImpl>(system, type); + } + return null_sink.get(); } - void CloseStream([[maybe_unused]] const SinkStream* stream) override {} + void CloseStream(SinkStream*) override {} void CloseStreams() override {} - void PauseStreams() override {} - void UnpauseStreams() override {} f32 GetDeviceVolume() const override { return 1.0f; } @@ -33,20 +51,7 @@ public: void SetSystemVolume(f32 volume) override {} private: - struct NullSinkStreamImpl final : SinkStream { - void Finalize() override {} - void Start(bool resume = false) override {} - void Stop() override {} - void AppendBuffer([[maybe_unused]] ::AudioCore::Sink::SinkBuffer& buffer, - [[maybe_unused]] std::vector<s16>& samples) override {} - std::vector<s16> ReleaseBuffer([[maybe_unused]] u64 num_samples) override { - return {}; - } - bool IsBufferConsumed([[maybe_unused]] const u64 tag) { - return true; - } - void ClearQueue() override {} - } null_sink_stream; + SinkStreamPtr null_sink{}; }; } // namespace AudioCore::Sink diff --git a/src/audio_core/sink/sdl2_sink.cpp b/src/audio_core/sink/sdl2_sink.cpp index d6c9ec90d..c138dc628 100644 --- a/src/audio_core/sink/sdl2_sink.cpp +++ b/src/audio_core/sink/sdl2_sink.cpp @@ -1,20 +1,13 @@ // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later -#include <algorithm> -#include <atomic> +#include <span> +#include <vector> -#include "audio_core/audio_core.h" -#include "audio_core/audio_event.h" -#include "audio_core/audio_manager.h" +#include "audio_core/common/common.h" #include "audio_core/sink/sdl2_sink.h" #include "audio_core/sink/sink_stream.h" -#include "common/assert.h" -#include "common/fixed_point.h" #include "common/logging/log.h" -#include "common/reader_writer_queue.h" -#include "common/ring_buffer.h" -#include "common/settings.h" #include "core/core.h" // Ignore -Wimplicit-fallthrough due to https://github.com/libsdl-org/SDL/issues/4307 @@ -44,10 +37,9 @@ public: * @param system_ - Core system. * @param event - Event used only for audio renderer, signalled on buffer consume. 
*/ - SDLSinkStream(u32 device_channels_, const u32 system_channels_, - const std::string& output_device, const std::string& input_device, - const StreamType type_, Core::System& system_) - : type{type_}, system{system_} { + SDLSinkStream(u32 device_channels_, u32 system_channels_, const std::string& output_device, + const std::string& input_device, StreamType type_, Core::System& system_) + : SinkStream{system_, type_} { system_channels = system_channels_; device_channels = device_channels_; @@ -55,16 +47,10 @@ spec.freq = TargetSampleRate; spec.channels = static_cast<u8>(device_channels); spec.format = AUDIO_S16SYS; - if (type == StreamType::Render) { - spec.samples = TargetSampleCount; - } else { - spec.samples = 1024; - } + spec.samples = TargetSampleCount * 2; spec.callback = &SDLSinkStream::DataCallback; spec.userdata = this; - playing_buffer.consumed = true; - std::string device_name{output_device}; bool capture{false}; if (type == StreamType::In) { @@ -84,31 +70,30 @@ return; } - LOG_DEBUG(Service_Audio, - "Opening sdl stream {} with: rate {} channels {} (system channels {}) " - " samples {}", - device, obtained.freq, obtained.channels, system_channels, obtained.samples); + LOG_INFO(Service_Audio, + "Opening SDL stream {} with: rate {} channels {} (system channels {}) " + " samples {}", + device, obtained.freq, obtained.channels, system_channels, obtained.samples); } /** * Destroy the sink stream. */ ~SDLSinkStream() override { - if (device == 0) { - return; - } - - SDL_CloseAudioDevice(device); + LOG_DEBUG(Service_Audio, "Destructing SDL stream {}", name); + Finalize(); } /** * Finalize the sink stream. */ void Finalize() override { + Unstall(); if (device == 0) { return; } + Stop(); SDL_CloseAudioDevice(device); } @@ -118,217 +103,29 @@ public: * Start the sink stream. * * @param resume - Set to true if this is resuming a previously-active stream. * Default false. */ - void Start(const bool resume = false) override { - if (device == 0) { + void Start(bool resume = false) override { + if (device == 0 || !paused) { return; } - if (resume && was_playing) { - SDL_PauseAudioDevice(device, 0); - paused = false; - } else if (!resume) { - SDL_PauseAudioDevice(device, 0); - paused = false; - } + paused = false; + SDL_PauseAudioDevice(device, 0); } /** * Stop the sink stream. */ - void Stop() { - if (device == 0) { + void Stop() override { + Unstall(); + if (device == 0 || paused) { return; } - SDL_PauseAudioDevice(device, 1); paused = true; - } - - /** - * Append a new buffer and its samples to a waiting queue to play. - * - * @param buffer - Audio buffer information to be queued. - * @param samples - The s16 samples to be queue for playback. - */ - void AppendBuffer(::AudioCore::Sink::SinkBuffer& buffer, std::vector<s16>& samples) override { - if (type == StreamType::In) { - queue.enqueue(buffer); - queued_buffers++; - } else { - constexpr s32 min = std::numeric_limits<s16>::min(); - constexpr s32 max = std::numeric_limits<s16>::max(); - - auto yuzu_volume{Settings::Volume()}; - auto volume{system_volume * device_volume * yuzu_volume}; - - if (system_channels == 6 && device_channels == 2) { - // We're given 6 channels, but our device only outputs 2, so downmix. 
- constexpr std::array<f32, 4> down_mix_coeff{1.0f, 0.707f, 0.251f, 0.707f}; - - for (u32 read_index = 0, write_index = 0; read_index < samples.size(); - read_index += system_channels, write_index += device_channels) { - const auto left_sample{ - ((Common::FixedPoint<49, 15>( - samples[read_index + static_cast<u32>(Channels::FrontLeft)]) * - down_mix_coeff[0] + - samples[read_index + static_cast<u32>(Channels::Center)] * - down_mix_coeff[1] + - samples[read_index + static_cast<u32>(Channels::LFE)] * - down_mix_coeff[2] + - samples[read_index + static_cast<u32>(Channels::BackLeft)] * - down_mix_coeff[3]) * - volume) - .to_int()}; - - const auto right_sample{ - ((Common::FixedPoint<49, 15>( - samples[read_index + static_cast<u32>(Channels::FrontRight)]) * - down_mix_coeff[0] + - samples[read_index + static_cast<u32>(Channels::Center)] * - down_mix_coeff[1] + - samples[read_index + static_cast<u32>(Channels::LFE)] * - down_mix_coeff[2] + - samples[read_index + static_cast<u32>(Channels::BackRight)] * - down_mix_coeff[3]) * - volume) - .to_int()}; - - samples[write_index + static_cast<u32>(Channels::FrontLeft)] = - static_cast<s16>(std::clamp(left_sample, min, max)); - samples[write_index + static_cast<u32>(Channels::FrontRight)] = - static_cast<s16>(std::clamp(right_sample, min, max)); - } - - samples.resize(samples.size() / system_channels * device_channels); - - } else if (system_channels == 2 && device_channels == 6) { - // We need moar samples! Not all games will provide 6 channel audio. - // TODO: Implement some upmixing here. Currently just passthrough, with other - // channels left as silence. - std::vector<s16> new_samples(samples.size() / system_channels * device_channels, 0); - - for (u32 read_index = 0, write_index = 0; read_index < samples.size(); - read_index += system_channels, write_index += device_channels) { - const auto left_sample{static_cast<s16>(std::clamp( - static_cast<s32>( - static_cast<f32>( - samples[read_index + static_cast<u32>(Channels::FrontLeft)]) * - volume), - min, max))}; - - new_samples[write_index + static_cast<u32>(Channels::FrontLeft)] = left_sample; - - const auto right_sample{static_cast<s16>(std::clamp( - static_cast<s32>( - static_cast<f32>( - samples[read_index + static_cast<u32>(Channels::FrontRight)]) * - volume), - min, max))}; - - new_samples[write_index + static_cast<u32>(Channels::FrontRight)] = - right_sample; - } - samples = std::move(new_samples); - - } else if (volume != 1.0f) { - for (u32 i = 0; i < samples.size(); i++) { - samples[i] = static_cast<s16>(std::clamp( - static_cast<s32>(static_cast<f32>(samples[i]) * volume), min, max)); - } - } - - samples_buffer.Push(samples); - queue.enqueue(buffer); - queued_buffers++; - } - } - - /** - * Release a buffer. Audio In only, will fill a buffer with recorded samples. - * - * @param num_samples - Maximum number of samples to receive. - * @return Vector of recorded samples. May have fewer than num_samples. - */ - std::vector<s16> ReleaseBuffer(const u64 num_samples) override { - static constexpr s32 min = std::numeric_limits<s16>::min(); - static constexpr s32 max = std::numeric_limits<s16>::max(); - - auto samples{samples_buffer.Pop(num_samples)}; - - // TODO: Up-mix to 6 channels if the game expects it. - // For audio input this is unlikely to ever be the case though. - - // Incoming mic volume seems to always be very quiet, so multiply by an additional 8 here. - // TODO: Play with this and find something that works better. 
- auto volume{system_volume * device_volume * 8}; - for (u32 i = 0; i < samples.size(); i++) { - samples[i] = static_cast<s16>( - std::clamp(static_cast<s32>(static_cast<f32>(samples[i]) * volume), min, max)); - } - - if (samples.size() < num_samples) { - samples.resize(num_samples, 0); - } - return samples; - } - - /** - * Check if a certain buffer has been consumed (fully played). - * - * @param tag - Unique tag of a buffer to check for. - * @return True if the buffer has been played, otherwise false. - */ - bool IsBufferConsumed(const u64 tag) override { - if (released_buffer.tag == 0) { - if (!released_buffers.try_dequeue(released_buffer)) { - return false; - } - } - - if (released_buffer.tag == tag) { - released_buffer.tag = 0; - return true; - } - return false; - } - - /** - * Empty out the buffer queue. - */ - void ClearQueue() override { - samples_buffer.Pop(); - while (queue.pop()) { - } - while (released_buffers.pop()) { - } - released_buffer = {}; - playing_buffer = {}; - playing_buffer.consumed = true; - queued_buffers = 0; + SDL_PauseAudioDevice(device, 1); } private: /** - * Signal events back to the audio system that a buffer was played/can be filled. - * - * @param buffer - Consumed audio buffer to be released. - */ - void SignalEvent(const ::AudioCore::Sink::SinkBuffer& buffer) { - auto& manager{system.AudioCore().GetAudioManager()}; - switch (type) { - case StreamType::Out: - released_buffers.enqueue(buffer); - manager.SetEvent(Event::Type::AudioOutManager, true); - break; - case StreamType::In: - released_buffers.enqueue(buffer); - manager.SetEvent(Event::Type::AudioInManager, true); - break; - case StreamType::Render: - break; - } - } - - /** * Main callback from SDL. Either expects samples from us (audio render/audio out), or will * provide samples to be copied (audio in). * @@ -345,122 +142,20 @@ private: const std::size_t num_channels = impl->GetDeviceChannels(); const std::size_t frame_size = num_channels; - const std::size_t frame_size_bytes = frame_size * sizeof(s16); const std::size_t num_frames{len / num_channels / sizeof(s16)}; - size_t frames_written{0}; - [[maybe_unused]] bool underrun{false}; if (impl->type == StreamType::In) { - std::span<s16> input_buffer{reinterpret_cast<s16*>(stream), num_frames * frame_size}; - - while (frames_written < num_frames) { - auto& playing_buffer{impl->playing_buffer}; - - // If the playing buffer has been consumed or has no frames, we need a new one - if (playing_buffer.consumed || playing_buffer.frames == 0) { - if (!impl->queue.try_dequeue(impl->playing_buffer)) { - // If no buffer was available we've underrun, just push the samples and - // continue. 
- underrun = true; - impl->samples_buffer.Push(&input_buffer[frames_written * frame_size], - (num_frames - frames_written) * frame_size); - frames_written = num_frames; - continue; - } else { - impl->queued_buffers--; - impl->SignalEvent(impl->playing_buffer); - } - } - - // Get the minimum frames available between the currently playing buffer, and the - // amount we have left to fill - size_t frames_available{ - std::min(playing_buffer.frames - playing_buffer.frames_played, - num_frames - frames_written)}; - - impl->samples_buffer.Push(&input_buffer[frames_written * frame_size], - frames_available * frame_size); - - frames_written += frames_available; - playing_buffer.frames_played += frames_available; - - // If that's all the frames in the current buffer, add its samples and mark it as - // consumed - if (playing_buffer.frames_played >= playing_buffer.frames) { - impl->AddPlayedSampleCount(playing_buffer.frames_played * num_channels); - impl->playing_buffer.consumed = true; - } - } - - std::memcpy(&impl->last_frame[0], &input_buffer[(frames_written - 1) * frame_size], - frame_size_bytes); + std::span<const s16> input_buffer{reinterpret_cast<const s16*>(stream), + num_frames * frame_size}; + impl->ProcessAudioIn(input_buffer, num_frames); } else { std::span<s16> output_buffer{reinterpret_cast<s16*>(stream), num_frames * frame_size}; - - while (frames_written < num_frames) { - auto& playing_buffer{impl->playing_buffer}; - - // If the playing buffer has been consumed or has no frames, we need a new one - if (playing_buffer.consumed || playing_buffer.frames == 0) { - if (!impl->queue.try_dequeue(impl->playing_buffer)) { - // If no buffer was available we've underrun, fill the remaining buffer with - // the last written frame and continue. - underrun = true; - for (size_t i = frames_written; i < num_frames; i++) { - std::memcpy(&output_buffer[i * frame_size], &impl->last_frame[0], - frame_size_bytes); - } - frames_written = num_frames; - continue; - } else { - impl->queued_buffers--; - impl->SignalEvent(impl->playing_buffer); - } - } - - // Get the minimum frames available between the currently playing buffer, and the - // amount we have left to fill - size_t frames_available{ - std::min(playing_buffer.frames - playing_buffer.frames_played, - num_frames - frames_written)}; - - impl->samples_buffer.Pop(&output_buffer[frames_written * frame_size], - frames_available * frame_size); - - frames_written += frames_available; - playing_buffer.frames_played += frames_available; - - // If that's all the frames in the current buffer, add its samples and mark it as - // consumed - if (playing_buffer.frames_played >= playing_buffer.frames) { - impl->AddPlayedSampleCount(playing_buffer.frames_played * num_channels); - impl->playing_buffer.consumed = true; - } - } - - std::memcpy(&impl->last_frame[0], &output_buffer[(frames_written - 1) * frame_size], - frame_size_bytes); + impl->ProcessAudioOutAndRender(output_buffer, num_frames); } } /// SDL device id of the opened input/output device SDL_AudioDeviceID device{}; - /// Type of this stream - StreamType type; - /// Core system - Core::System& system; - /// Ring buffer of the samples waiting to be played or consumed - Common::RingBuffer<s16, 0x10000> samples_buffer; - /// Audio buffers queued and waiting to play - Common::ReaderWriterQueue<::AudioCore::Sink::SinkBuffer> queue; - /// The currently-playing audio buffer - ::AudioCore::Sink::SinkBuffer playing_buffer{}; - /// Audio buffers which have been played and are in queue to be released by the audio system 
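// On output underrun the callback repeats the last written frame rather than
// emitting silence, which avoids a hard click at the discontinuity. A minimal
// sketch of that fill, assuming stereo frames:

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>

void FillUnderrun(int16_t* out, std::size_t frames_left,
                  const std::array<int16_t, 2>& last_frame) {
    for (std::size_t i = 0; i < frames_left; ++i) {
        // Copy one whole frame (all channels) per remaining output slot.
        std::memcpy(&out[i * 2], last_frame.data(), sizeof(int16_t) * last_frame.size());
    }
}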
- Common::ReaderWriterQueue<::AudioCore::Sink::SinkBuffer> released_buffers{}; - /// Currently released buffer waiting to be taken by the audio system - ::AudioCore::Sink::SinkBuffer released_buffer{}; - /// The last played (or received) frame of audio, used when the callback underruns - std::array<s16, MaxChannels> last_frame{}; }; SDLSink::SDLSink(std::string_view target_device_name) { @@ -482,14 +177,14 @@ SDLSink::SDLSink(std::string_view target_device_name) { SDLSink::~SDLSink() = default; -SinkStream* SDLSink::AcquireSinkStream(Core::System& system, const u32 system_channels, - const std::string&, const StreamType type) { +SinkStream* SDLSink::AcquireSinkStream(Core::System& system, u32 system_channels, + const std::string&, StreamType type) { SinkStreamPtr& stream = sink_streams.emplace_back(std::make_unique<SDLSinkStream>( device_channels, system_channels, output_device, input_device, type, system)); return stream.get(); } -void SDLSink::CloseStream(const SinkStream* stream) { +void SDLSink::CloseStream(SinkStream* stream) { for (size_t i = 0; i < sink_streams.size(); i++) { if (sink_streams[i].get() == stream) { sink_streams[i].reset(); @@ -503,18 +198,6 @@ void SDLSink::CloseStreams() { sink_streams.clear(); } -void SDLSink::PauseStreams() { - for (auto& stream : sink_streams) { - stream->Stop(); - } -} - -void SDLSink::UnpauseStreams() { - for (auto& stream : sink_streams) { - stream->Start(); - } -} - f32 SDLSink::GetDeviceVolume() const { if (sink_streams.empty()) { return 1.0f; @@ -523,19 +206,19 @@ f32 SDLSink::GetDeviceVolume() const { return sink_streams[0]->GetDeviceVolume(); } -void SDLSink::SetDeviceVolume(const f32 volume) { +void SDLSink::SetDeviceVolume(f32 volume) { for (auto& stream : sink_streams) { stream->SetDeviceVolume(volume); } } -void SDLSink::SetSystemVolume(const f32 volume) { +void SDLSink::SetSystemVolume(f32 volume) { for (auto& stream : sink_streams) { stream->SetSystemVolume(volume); } } -std::vector<std::string> ListSDLSinkDevices(const bool capture) { +std::vector<std::string> ListSDLSinkDevices(bool capture) { std::vector<std::string> device_list; if (!SDL_WasInit(SDL_INIT_AUDIO)) { @@ -547,10 +230,16 @@ std::vector<std::string> ListSDLSinkDevices(const bool capture) { const int device_count = SDL_GetNumAudioDevices(capture); for (int i = 0; i < device_count; ++i) { - device_list.emplace_back(SDL_GetAudioDeviceName(i, 0)); + if (const char* name = SDL_GetAudioDeviceName(i, capture)) { + device_list.emplace_back(name); + } } return device_list; } +u32 GetSDLLatency() { + return TargetSampleCount * 2; +} + } // namespace AudioCore::Sink diff --git a/src/audio_core/sink/sdl2_sink.h b/src/audio_core/sink/sdl2_sink.h index 186bc2fa3..27ed1ab94 100644 --- a/src/audio_core/sink/sdl2_sink.h +++ b/src/audio_core/sink/sdl2_sink.h @@ -32,8 +32,7 @@ public: * May differ from the device's channel count. * @param name - Name of this stream. * @param type - Type of this stream, render/in/out. - * @param event - Audio render only, a signal used to prevent the renderer running too - * fast. + * * @return A pointer to the created SinkStream */ SinkStream* AcquireSinkStream(Core::System& system, u32 system_channels, @@ -44,7 +43,7 @@ public: * * @param stream - The stream to close. */ - void CloseStream(const SinkStream* stream) override; + void CloseStream(SinkStream* stream) override; /** * Close all streams. @@ -52,16 +51,6 @@ public: void CloseStreams() override; /** - * Pause all streams. - */ - void PauseStreams() override; - - /** - * Unpause all streams. 
- */ - void UnpauseStreams() override; - - /** * Get the device volume. Set from calls to the IAudioDevice service. * * @return Volume of the device. @@ -92,10 +81,17 @@ private: }; /** - * Get a list of conencted devices from Cubeb. + * Get a list of connected devices from SDL. * * @param capture - Return input (capture) devices if true, otherwise output devices. */ std::vector<std::string> ListSDLSinkDevices(bool capture); +/** + * Get the reported latency for this sink. + * + * @return Minimum latency for this sink. + */ +u32 GetSDLLatency(); + } // namespace AudioCore::Sink diff --git a/src/audio_core/sink/sink.h b/src/audio_core/sink/sink.h index 91fe455e4..f28c6d126 100644 --- a/src/audio_core/sink/sink.h +++ b/src/audio_core/sink/sink.h @@ -32,7 +32,7 @@ public: * * @param stream - The stream to close. */ - virtual void CloseStream(const SinkStream* stream) = 0; + virtual void CloseStream(SinkStream* stream) = 0; /** * Close all streams. @@ -40,16 +40,6 @@ public: virtual void CloseStreams() = 0; /** - * Pause all streams. - */ - virtual void PauseStreams() = 0; - - /** - * Unpause all streams. - */ - virtual void UnpauseStreams() = 0; - - /** * Create a new sink stream, kept within this sink, with a pointer returned for use. * Do not free the returned pointer. When done with the stream, call CloseStream on the sink. * @@ -58,8 +48,7 @@ public: * May differ from the device's channel count. * @param name - Name of this stream. * @param type - Type of this stream, render/in/out. - * @param event - Audio render only, a signal used to prevent the renderer running too - * fast. + * * @return A pointer to the created SinkStream */ virtual SinkStream* AcquireSinkStream(Core::System& system, u32 system_channels, diff --git a/src/audio_core/sink/sink_details.cpp b/src/audio_core/sink/sink_details.cpp index 253c0fd1e..39ea6d91b 100644 --- a/src/audio_core/sink/sink_details.cpp +++ b/src/audio_core/sink/sink_details.cpp @@ -5,7 +5,7 @@ #include <memory> #include <string> #include <vector> -#include "audio_core/sink/null_sink.h" + #include "audio_core/sink/sink_details.h" #ifdef HAVE_CUBEB #include "audio_core/sink/cubeb_sink.h" @@ -13,6 +13,7 @@ #ifdef HAVE_SDL2 #include "audio_core/sink/sdl2_sink.h" #endif +#include "audio_core/sink/null_sink.h" #include "common/logging/log.h" namespace AudioCore::Sink { @@ -20,59 +21,80 @@ namespace { struct SinkDetails { using FactoryFn = std::unique_ptr<Sink> (*)(std::string_view); using ListDevicesFn = std::vector<std::string> (*)(bool); + using LatencyFn = u32 (*)(); /// Name for this sink. - const char* id; + std::string_view id; /// A method to call to construct an instance of this type of sink. FactoryFn factory; /// A method to call to list available devices. ListDevicesFn list_devices; + /// Method to get the latency of this backend. + LatencyFn latency; }; // sink_details is ordered in terms of desirability, with the best choice at the top. 
constexpr SinkDetails sink_details[] = { #ifdef HAVE_CUBEB - SinkDetails{"cubeb", - [](std::string_view device_id) -> std::unique_ptr<Sink> { - return std::make_unique<CubebSink>(device_id); - }, - &ListCubebSinkDevices}, + SinkDetails{ + "cubeb", + [](std::string_view device_id) -> std::unique_ptr<Sink> { + return std::make_unique<CubebSink>(device_id); + }, + &ListCubebSinkDevices, + &GetCubebLatency, + }, #endif #ifdef HAVE_SDL2 - SinkDetails{"sdl2", - [](std::string_view device_id) -> std::unique_ptr<Sink> { - return std::make_unique<SDLSink>(device_id); - }, - &ListSDLSinkDevices}, + SinkDetails{ + "sdl2", + [](std::string_view device_id) -> std::unique_ptr<Sink> { + return std::make_unique<SDLSink>(device_id); + }, + &ListSDLSinkDevices, + &GetSDLLatency, + }, #endif SinkDetails{"null", [](std::string_view device_id) -> std::unique_ptr<Sink> { return std::make_unique<NullSink>(device_id); }, - [](bool capture) { return std::vector<std::string>{"null"}; }}, + [](bool capture) { return std::vector<std::string>{"null"}; }, []() { return 0u; }}, }; const SinkDetails& GetOutputSinkDetails(std::string_view sink_id) { - auto iter = - std::find_if(std::begin(sink_details), std::end(sink_details), - [sink_id](const auto& sink_detail) { return sink_detail.id == sink_id; }); - - if (sink_id == "auto" || iter == std::end(sink_details)) { - if (sink_id != "auto") { - LOG_ERROR(Audio, "AudioCore::Sink::GetOutputSinkDetails given invalid sink_id {}", - sink_id); + const auto find_backend{[](std::string_view id) { + return std::find_if(std::begin(sink_details), std::end(sink_details), + [&id](const auto& sink_detail) { return sink_detail.id == id; }); + }}; + + auto iter = find_backend(sink_id); + + if (sink_id == "auto") { + // Auto-select a backend. Prefer CubeB, but it may report a large minimum latency which + // causes audio issues, in that case go with SDL. +#if defined(HAVE_CUBEB) && defined(HAVE_SDL2) + iter = find_backend("cubeb"); + if (iter->latency() > TargetSampleCount * 3) { + iter = find_backend("sdl2"); } - // Auto-select. - // sink_details is ordered in terms of desirability, with the best choice at the front. +#else iter = std::begin(sink_details); +#endif + LOG_INFO(Service_Audio, "Auto-selecting the {} backend", iter->id); + } + + if (iter == std::end(sink_details)) { + LOG_ERROR(Audio, "Invalid sink_id {}", sink_id); + iter = find_backend("null"); } return *iter; } } // Anonymous namespace -std::vector<const char*> GetSinkIDs() { - std::vector<const char*> sink_ids(std::size(sink_details)); +std::vector<std::string_view> GetSinkIDs() { + std::vector<std::string_view> sink_ids(std::size(sink_details)); std::transform(std::begin(sink_details), std::end(sink_details), std::begin(sink_ids), [](const auto& sink) { return sink.id; }); diff --git a/src/audio_core/sink/sink_details.h b/src/audio_core/sink/sink_details.h index 3ebdb1e30..e75932898 100644 --- a/src/audio_core/sink/sink_details.h +++ b/src/audio_core/sink/sink_details.h @@ -19,7 +19,7 @@ class Sink; * * @return Vector of available sink names. */ -std::vector<const char*> GetSinkIDs(); +std::vector<std::string_view> GetSinkIDs(); /** * Gets the list of devices for a particular sink identified by the given ID. 
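The auto-selection policy above can be read in isolation: prefer cubeb, but fall back to SDL2 when cubeb reports a minimum latency above three target-sized buffers, and use the null sink for unknown ids. A condensed sketch, assuming TargetSampleCount is 240 (the buffer size declared in audio_core's common header) and with plain booleans standing in for the HAVE_CUBEB/HAVE_SDL2 compile-time checks:

#include <string_view>

constexpr unsigned TargetSampleCount = 240; // assumed value from audio_core/common/common.h

std::string_view ChooseBackend(bool have_cubeb, bool have_sdl2, unsigned cubeb_latency) {
    if (have_cubeb && have_sdl2) {
        // A large reported minimum latency on cubeb causes audio issues; prefer SDL2 then.
        return cubeb_latency > TargetSampleCount * 3 ? "sdl2" : "cubeb";
    }
    if (have_cubeb) {
        return "cubeb";
    }
    if (have_sdl2) {
        return "sdl2";
    }
    return "null"; // the null sink is always compiled in
}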
diff --git a/src/audio_core/sink/sink_stream.cpp b/src/audio_core/sink/sink_stream.cpp new file mode 100644 index 000000000..849f862b0 --- /dev/null +++ b/src/audio_core/sink/sink_stream.cpp @@ -0,0 +1,284 @@ +// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <array> +#include <atomic> +#include <memory> +#include <span> +#include <vector> + +#include "audio_core/audio_core.h" +#include "audio_core/common/common.h" +#include "audio_core/sink/sink_stream.h" +#include "common/common_types.h" +#include "common/fixed_point.h" +#include "common/settings.h" +#include "core/core.h" + +namespace AudioCore::Sink { + +void SinkStream::AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples) { + if (type == StreamType::In) { + queue.enqueue(buffer); + queued_buffers++; + return; + } + + constexpr s32 min{std::numeric_limits<s16>::min()}; + constexpr s32 max{std::numeric_limits<s16>::max()}; + + auto yuzu_volume{Settings::Volume()}; + if (yuzu_volume > 1.0f) { + yuzu_volume = 0.6f + 20 * std::log10(yuzu_volume); + } + auto volume{system_volume * device_volume * yuzu_volume}; + + if (system_channels == 6 && device_channels == 2) { + // We're given 6 channels, but our device only outputs 2, so downmix. + constexpr std::array<f32, 4> down_mix_coeff{1.0f, 0.707f, 0.251f, 0.707f}; + + for (u32 read_index = 0, write_index = 0; read_index < samples.size(); + read_index += system_channels, write_index += device_channels) { + const auto left_sample{ + ((Common::FixedPoint<49, 15>( + samples[read_index + static_cast<u32>(Channels::FrontLeft)]) * + down_mix_coeff[0] + + samples[read_index + static_cast<u32>(Channels::Center)] * down_mix_coeff[1] + + samples[read_index + static_cast<u32>(Channels::LFE)] * down_mix_coeff[2] + + samples[read_index + static_cast<u32>(Channels::BackLeft)] * down_mix_coeff[3]) * + volume) + .to_int()}; + + const auto right_sample{ + ((Common::FixedPoint<49, 15>( + samples[read_index + static_cast<u32>(Channels::FrontRight)]) * + down_mix_coeff[0] + + samples[read_index + static_cast<u32>(Channels::Center)] * down_mix_coeff[1] + + samples[read_index + static_cast<u32>(Channels::LFE)] * down_mix_coeff[2] + + samples[read_index + static_cast<u32>(Channels::BackRight)] * down_mix_coeff[3]) * + volume) + .to_int()}; + + samples[write_index + static_cast<u32>(Channels::FrontLeft)] = + static_cast<s16>(std::clamp(left_sample, min, max)); + samples[write_index + static_cast<u32>(Channels::FrontRight)] = + static_cast<s16>(std::clamp(right_sample, min, max)); + } + + samples.resize(samples.size() / system_channels * device_channels); + + } else if (system_channels == 2 && device_channels == 6) { + // We need moar samples! Not all games will provide 6 channel audio. + // TODO: Implement some upmixing here. Currently just passthrough, with other + // channels left as silence. 
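// Aside on the volume mapping near the top of AppendBuffer above: slider values up
// to 1.0 scale samples linearly, while values above 1.0 (over 100%) switch to a
// log10-scale boost. An equivalent sketch:

#include <cmath>

float EffectiveVolume(float slider) {
    return slider > 1.0f ? 0.6f + 20.0f * std::log10(slider) : slider;
}
// e.g. EffectiveVolume(2.0f) == 0.6 + 20 * log10(2) ~= 6.62, so a 200% slider
// applies roughly a 6.6x linear gain before the usual s16 clamp.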
+ std::vector<s16> new_samples(samples.size() / system_channels * device_channels, 0); + + for (u32 read_index = 0, write_index = 0; read_index < samples.size(); + read_index += system_channels, write_index += device_channels) { + const auto left_sample{static_cast<s16>(std::clamp( + static_cast<s32>( + static_cast<f32>(samples[read_index + static_cast<u32>(Channels::FrontLeft)]) * + volume), + min, max))}; + + new_samples[write_index + static_cast<u32>(Channels::FrontLeft)] = left_sample; + + const auto right_sample{static_cast<s16>(std::clamp( + static_cast<s32>( + static_cast<f32>(samples[read_index + static_cast<u32>(Channels::FrontRight)]) * + volume), + min, max))}; + + new_samples[write_index + static_cast<u32>(Channels::FrontRight)] = right_sample; + } + samples = std::move(new_samples); + + } else if (volume != 1.0f) { + for (u32 i = 0; i < samples.size(); i++) { + samples[i] = static_cast<s16>( + std::clamp(static_cast<s32>(static_cast<f32>(samples[i]) * volume), min, max)); + } + } + + samples_buffer.Push(samples); + queue.enqueue(buffer); + queued_buffers++; +} + +std::vector<s16> SinkStream::ReleaseBuffer(u64 num_samples) { + constexpr s32 min = std::numeric_limits<s16>::min(); + constexpr s32 max = std::numeric_limits<s16>::max(); + + auto samples{samples_buffer.Pop(num_samples)}; + + // TODO: Up-mix to 6 channels if the game expects it. + // For audio input this is unlikely to ever be the case though. + + // Incoming mic volume seems to always be very quiet, so multiply by an additional 8 here. + // TODO: Play with this and find something that works better. + auto volume{system_volume * device_volume * 8}; + for (u32 i = 0; i < samples.size(); i++) { + samples[i] = static_cast<s16>( + std::clamp(static_cast<s32>(static_cast<f32>(samples[i]) * volume), min, max)); + } + + if (samples.size() < num_samples) { + samples.resize(num_samples, 0); + } + return samples; +} + +void SinkStream::ClearQueue() { + samples_buffer.Pop(); + while (queue.pop()) { + } + queued_buffers = 0; + playing_buffer = {}; + playing_buffer.consumed = true; +} + +void SinkStream::ProcessAudioIn(std::span<const s16> input_buffer, std::size_t num_frames) { + const std::size_t num_channels = GetDeviceChannels(); + const std::size_t frame_size = num_channels; + const std::size_t frame_size_bytes = frame_size * sizeof(s16); + size_t frames_written{0}; + + // If we're paused or going to shut down, we don't want to consume buffers as coretiming is + // paused and we'll desync, so just return. + if (system.IsPaused() || system.IsShuttingDown()) { + return; + } + + if (queued_buffers > max_queue_size) { + Stall(); + } + + while (frames_written < num_frames) { + // If the playing buffer has been consumed or has no frames, we need a new one + if (playing_buffer.consumed || playing_buffer.frames == 0) { + if (!queue.try_dequeue(playing_buffer)) { + // If no buffer was available we've underrun, just push the samples and + // continue. + samples_buffer.Push(&input_buffer[frames_written * frame_size], + (num_frames - frames_written) * frame_size); + frames_written = num_frames; + continue; + } + // Successfully dequeued a new buffer. 
+ queued_buffers--; + } + + // Get the minimum frames available between the currently playing buffer, and the + // amount we have left to fill + size_t frames_available{std::min(playing_buffer.frames - playing_buffer.frames_played, + num_frames - frames_written)}; + + samples_buffer.Push(&input_buffer[frames_written * frame_size], + frames_available * frame_size); + + frames_written += frames_available; + playing_buffer.frames_played += frames_available; + + // If that's all the frames in the current buffer, add its samples and mark it as + // consumed + if (playing_buffer.frames_played >= playing_buffer.frames) { + playing_buffer.consumed = true; + } + } + + std::memcpy(&last_frame[0], &input_buffer[(frames_written - 1) * frame_size], frame_size_bytes); + + if (queued_buffers <= max_queue_size) { + Unstall(); + } +} + +void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::size_t num_frames) { + const std::size_t num_channels = GetDeviceChannels(); + const std::size_t frame_size = num_channels; + const std::size_t frame_size_bytes = frame_size * sizeof(s16); + size_t frames_written{0}; + + // If we're paused or going to shut down, we don't want to consume buffers as coretiming is + // paused and we'll desync, so just play silence. + if (system.IsPaused() || system.IsShuttingDown()) { + constexpr std::array<s16, 6> silence{}; + for (size_t i = frames_written; i < num_frames; i++) { + std::memcpy(&output_buffer[i * frame_size], &silence[0], frame_size_bytes); + } + return; + } + + // Due to many frames being queued up with nvdec (5 frames or so?), a lot of buffers also get + // queued up (30+) but not all at once, which causes constant stalling here, so just let the + // video play out without attempting to stall. + // Can hopefully remove this later with a more complete NVDEC implementation. + const auto nvdec_active{system.AudioCore().IsNVDECActive()}; + + // Core timing cannot be paused in single-core mode, so Stall ends up being called over and over + // and never recovers to a normal state, so just skip attempting to sync things on single-core. + if (system.IsMulticore() && !nvdec_active && queued_buffers > max_queue_size) { + Stall(); + } else if (system.IsMulticore() && queued_buffers <= max_queue_size) { + Unstall(); + } + + while (frames_written < num_frames) { + // If the playing buffer has been consumed or has no frames, we need a new one + if (playing_buffer.consumed || playing_buffer.frames == 0) { + if (!queue.try_dequeue(playing_buffer)) { + // If no buffer was available we've underrun, fill the remaining buffer with + // the last written frame and continue. + for (size_t i = frames_written; i < num_frames; i++) { + std::memcpy(&output_buffer[i * frame_size], &last_frame[0], frame_size_bytes); + } + frames_written = num_frames; + continue; + } + // Successfully dequeued a new buffer. 
+ queued_buffers--; + } + + // Get the minimum frames available between the currently playing buffer, and the + // amount we have left to fill + size_t frames_available{std::min(playing_buffer.frames - playing_buffer.frames_played, + num_frames - frames_written)}; + + samples_buffer.Pop(&output_buffer[frames_written * frame_size], + frames_available * frame_size); + + frames_written += frames_available; + playing_buffer.frames_played += frames_available; + + // If that's all the frames in the current buffer, add its samples and mark it as + // consumed + if (playing_buffer.frames_played >= playing_buffer.frames) { + playing_buffer.consumed = true; + } + } + + std::memcpy(&last_frame[0], &output_buffer[(frames_written - 1) * frame_size], + frame_size_bytes); + + if (system.IsMulticore() && queued_buffers <= max_queue_size) { + Unstall(); + } +} + +void SinkStream::Stall() { + if (stalled) { + return; + } + stalled = true; + system.StallProcesses(); +} + +void SinkStream::Unstall() { + if (!stalled) { + return; + } + system.UnstallProcesses(); + stalled = false; +} + +} // namespace AudioCore::Sink diff --git a/src/audio_core/sink/sink_stream.h b/src/audio_core/sink/sink_stream.h index 17ed6593f..38a4b2f51 100644 --- a/src/audio_core/sink/sink_stream.h +++ b/src/audio_core/sink/sink_stream.h @@ -3,12 +3,20 @@ #pragma once +#include <array> #include <atomic> #include <memory> +#include <span> #include <vector> #include "audio_core/common/common.h" #include "common/common_types.h" +#include "common/reader_writer_queue.h" +#include "common/ring_buffer.h" + +namespace Core { +class System; +} // namespace Core namespace AudioCore::Sink { @@ -34,20 +42,24 @@ struct SinkBuffer { * You should regularly call IsBufferConsumed with the unique SinkBuffer tag to check if the buffer * has been consumed. * - * Since these are a FIFO queue, always check IsBufferConsumed in the same order you appended the - * buffers, skipping a buffer will result in all following buffers to never release. + * Since these are a FIFO queue, IsBufferConsumed must be checked in the same order buffers were + * appended, skipping a buffer will result in the queue getting stuck, and all following buffers to + * never release. * * If the buffers appear to be stuck, you can stop and re-open an IAudioIn/IAudioOut service (this * is what games do), or call ClearQueue to flush all of the buffers without a full restart. */ class SinkStream { public: - virtual ~SinkStream() = default; + explicit SinkStream(Core::System& system_, StreamType type_) : system{system_}, type{type_} {} + virtual ~SinkStream() { + Unstall(); + } /** * Finalize the sink stream. */ - virtual void Finalize() = 0; + virtual void Finalize() {} /** * Start the sink stream. @@ -55,48 +67,19 @@ public: * @param resume - Set to true if this is resuming the stream a previously-active stream. * Default false. */ - virtual void Start(bool resume = false) = 0; + virtual void Start(bool resume = false) {} /** * Stop the sink stream. */ - virtual void Stop() = 0; - - /** - * Append a new buffer and its samples to a waiting queue to play. - * - * @param buffer - Audio buffer information to be queued. - * @param samples - The s16 samples to be queue for playback. - */ - virtual void AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples) = 0; - - /** - * Release a buffer. Audio In only, will fill a buffer with recorded samples. - * - * @param num_samples - Maximum number of samples to receive. - * @return Vector of recorded samples. May have fewer than num_samples. 
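// Stall/Unstall above implement simple backpressure between the guest and the
// audio thread: once more than max_queue_size buffers pile up, guest processes
// are stalled until the device callback drains the queue. A reduced sketch, with
// a stand-in Core type whose StallProcesses/UnstallProcesses mirror the
// Core::System calls used above:

struct Core { // stand-in for Core::System
    void StallProcesses() {}
    void UnstallProcesses() {}
};

class BackpressureGate {
public:
    explicit BackpressureGate(Core& core_, unsigned max_queue_)
        : core{core_}, max_queue{max_queue_} {}

    // Called from the device callback with the current queue depth.
    void Update(unsigned queued_buffers) {
        if (queued_buffers > max_queue) {
            Stall();
        } else {
            Unstall();
        }
    }

private:
    void Stall() {
        if (!stalled) {
            stalled = true;
            core.StallProcesses();
        }
    }
    void Unstall() {
        if (stalled) {
            core.UnstallProcesses();
            stalled = false;
        }
    }

    Core& core;
    unsigned max_queue;
    bool stalled{false};
};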
- */ - virtual std::vector<s16> ReleaseBuffer(u64 num_samples) = 0; - - /** - * Check if a certain buffer has been consumed (fully played). - * - * @param tag - Unique tag of a buffer to check for. - * @return True if the buffer has been played, otherwise false. - */ - virtual bool IsBufferConsumed(u64 tag) = 0; - - /** - * Empty out the buffer queue. - */ - virtual void ClearQueue() = 0; + virtual void Stop() {} /** * Check if the stream is paused. * * @return True if paused, otherwise false. */ - bool IsPaused() { + bool IsPaused() const { return paused; } @@ -128,34 +111,6 @@ public: } /** - * Get the total number of samples played by this stream. - * - * @return Number of samples played. - */ - u64 GetPlayedSampleCount() const { - return played_sample_count; - } - - /** - * Set the number of samples played. - * This is started and stopped on system start/stop. - * - * @param played_sample_count_ - Number of samples to set. - */ - void SetPlayedSampleCount(u64 played_sample_count_) { - played_sample_count = played_sample_count_; - } - - /** - * Add to the played sample count. - * - * @param num_samples - Number of samples to add. - */ - void AddPlayedSampleCount(u64 num_samples) { - played_sample_count += num_samples; - } - - /** * Get the system volume. * * @return The current system volume. @@ -196,27 +151,97 @@ public: * * @return The number of queued buffers. */ - u32 GetQueueSize() { + u32 GetQueueSize() const { return queued_buffers.load(); } + /** + * Set the maximum buffer queue size. + */ + void SetRingSize(u32 ring_size) { + max_queue_size = ring_size; + } + + /** + * Append a new buffer and its samples to a waiting queue to play. + * + * @param buffer - Audio buffer information to be queued. + * @param samples - The s16 samples to be queue for playback. + */ + virtual void AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples); + + /** + * Release a buffer. Audio In only, will fill a buffer with recorded samples. + * + * @param num_samples - Maximum number of samples to receive. + * @return Vector of recorded samples. May have fewer than num_samples. + */ + virtual std::vector<s16> ReleaseBuffer(u64 num_samples); + + /** + * Empty out the buffer queue. + */ + void ClearQueue(); + + /** + * Callback for AudioIn. + * + * @param input_buffer - Input buffer to be filled with samples. + * @param num_frames - Number of frames to be filled. + */ + void ProcessAudioIn(std::span<const s16> input_buffer, std::size_t num_frames); + + /** + * Callback for AudioOut and AudioRenderer. + * + * @param output_buffer - Output buffer to be filled with samples. + * @param num_frames - Number of frames to be filled. + */ + void ProcessAudioOutAndRender(std::span<s16> output_buffer, std::size_t num_frames); + + /** + * Stall core processes if the audio thread falls too far behind. + */ + void Stall(); + + /** + * Unstall core processes. + */ + void Unstall(); + protected: - /// Number of buffers waiting to be played - std::atomic<u32> queued_buffers{}; - /// Total samples played by this stream - std::atomic<u64> played_sample_count{}; + /// Core system + Core::System& system; + /// Type of this stream + StreamType type; /// Set by the audio render/in/out system which uses this stream - f32 system_volume{1.0f}; - /// Set via IAudioDevice service calls - f32 device_volume{1.0f}; - /// Set by the audio render/in/out systen which uses this stream u32 system_channels{2}; /// Channels supported by hardware u32 device_channels{2}; /// Is this stream currently paused? 
std::atomic<bool> paused{true}; - /// Was this stream previously playing? - std::atomic<bool> was_playing{false}; + /// Name of this stream + std::string name{}; + +private: + /// Ring buffer of the samples waiting to be played or consumed + Common::RingBuffer<s16, 0x10000> samples_buffer; + /// Audio buffers queued and waiting to play + Common::ReaderWriterQueue<SinkBuffer> queue; + /// The currently-playing audio buffer + SinkBuffer playing_buffer{}; + /// The last played (or received) frame of audio, used when the callback underruns + std::array<s16, MaxChannels> last_frame{}; + /// Number of buffers waiting to be played + std::atomic<u32> queued_buffers{}; + /// The ring size for audio out buffers (usually 4, rarely 2 or 8) + u32 max_queue_size{}; + /// Set by the audio render/in/out system which uses this stream + f32 system_volume{1.0f}; + /// Set via IAudioDevice service calls + f32 device_volume{1.0f}; + /// True if coretiming has been stalled + bool stalled{false}; }; using SinkStreamPtr = std::unique_ptr<SinkStream>; diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 635fb85c8..c0555f840 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -14,34 +14,11 @@ if (DEFINED ENV{DISPLAYVERSION}) set(DISPLAY_VERSION $ENV{DISPLAYVERSION}) endif () -# Pass the path to git to the GenerateSCMRev.cmake as well -find_package(Git QUIET) - -add_custom_command(OUTPUT scm_rev.cpp - COMMAND ${CMAKE_COMMAND} - -DSRC_DIR=${CMAKE_SOURCE_DIR} - -DBUILD_REPOSITORY=${BUILD_REPOSITORY} - -DTITLE_BAR_FORMAT_IDLE=${TITLE_BAR_FORMAT_IDLE} - -DTITLE_BAR_FORMAT_RUNNING=${TITLE_BAR_FORMAT_RUNNING} - -DBUILD_TAG=${BUILD_TAG} - -DBUILD_ID=${DISPLAY_VERSION} - -DGIT_REF_SPEC=${GIT_REF_SPEC} - -DGIT_REV=${GIT_REV} - -DGIT_DESC=${GIT_DESC} - -DGIT_BRANCH=${GIT_BRANCH} - -DBUILD_FULLNAME=${BUILD_FULLNAME} - -DGIT_EXECUTABLE=${GIT_EXECUTABLE} - -P ${CMAKE_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake - DEPENDS - # Check that the scm_rev files haven't changed - "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.cpp.in" - "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.h" - # technically we should regenerate if the git version changed, but its not worth the effort imo - "${CMAKE_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake" - VERBATIM -) +include(GenerateSCMRev) add_library(common STATIC + address_space.cpp + address_space.h algorithm.h alignment.h announce_multiplayer_room.h @@ -106,6 +83,8 @@ add_library(common STATIC microprofile.cpp microprofile.h microprofileui.h + multi_level_page_table.cpp + multi_level_page_table.h nvidia_flags.cpp nvidia_flags.h page_table.cpp @@ -117,7 +96,7 @@ add_library(common STATIC quaternion.h reader_writer_queue.h ring_buffer.h - scm_rev.cpp + ${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp scm_rev.h scope_exit.h settings.cpp @@ -166,6 +145,7 @@ if(ARCHITECTURE_x86_64) x64/xbyak_abi.h x64/xbyak_util.h ) + target_link_libraries(common PRIVATE xbyak) endif() if (MSVC) @@ -176,12 +156,13 @@ if (MSVC) ) target_compile_options(common PRIVATE /W4 - /WX + + /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data + /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data + /we4800 # Implicit conversion from 'type' to bool. 
Possible information loss ) else() target_compile_options(common PRIVATE - -Werror - $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation> ) endif() @@ -189,7 +170,11 @@ endif() create_target_directory_groups(common) target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads) -target_link_libraries(common PRIVATE lz4::lz4 xbyak) +if (TARGET lz4::lz4) + target_link_libraries(common PRIVATE lz4::lz4) +else() + target_link_libraries(common PRIVATE LZ4::lz4_shared) +endif() if (TARGET zstd::zstd) target_link_libraries(common PRIVATE zstd::zstd) else() diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp new file mode 100644 index 000000000..866e78dbe --- /dev/null +++ b/src/common/address_space.cpp @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common/address_space.inc" + +namespace Common { + +template class Common::FlatAllocator<u32, 0, 32>; + +} diff --git a/src/common/address_space.h b/src/common/address_space.h new file mode 100644 index 000000000..9222b2fdc --- /dev/null +++ b/src/common/address_space.h @@ -0,0 +1,150 @@ +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <concepts> +#include <functional> +#include <mutex> +#include <vector> + +#include "common/common_types.h" + +namespace Common { +template <typename VaType, size_t AddressSpaceBits> +concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits; + +struct EmptyStruct {}; + +/** + * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector + */ +template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, + bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct> +requires AddressSpaceValid<VaType, AddressSpaceBits> +class FlatAddressSpaceMap { +public: + /// The maximum VA that this AS can technically reach + static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + + ((1ULL << (AddressSpaceBits - 1)) - 1)}; + + explicit FlatAddressSpaceMap(VaType va_limit, + std::function<void(VaType, VaType)> unmap_callback = {}); + + FlatAddressSpaceMap() = default; + + void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) { + std::scoped_lock lock(block_mutex); + MapLocked(virt, phys, size, extra_info); + } + + void Unmap(VaType virt, VaType size) { + std::scoped_lock lock(block_mutex); + UnmapLocked(virt, size); + } + + VaType GetVALimit() const { + return va_limit; + } + +protected: + /** + * @brief Represents a block of memory in the AS, the physical mapping is contiguous until + * another block with a different phys address is hit + */ + struct Block { + /// VA of the block + VaType virt{UnmappedVa}; + /// PA of the block, will increase 1-1 with VA until a new block is encountered + PaType phys{UnmappedPa}; + [[no_unique_address]] ExtraBlockInfo extra_info; + + Block() = default; + + Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_) + : virt(virt_), phys(phys_), extra_info(extra_info_) {} + + bool Valid() const { + return virt != UnmappedVa; + } + + bool Mapped() const { + return phys != UnmappedPa; + } + + bool Unmapped() const { + return phys == UnmappedPa; + } + + bool operator<(const VaType& p_virt) const { + return virt < p_virt; + } + }; + + /** + * @brief Maps a PA range into the given AS region + * @note block_mutex MUST be 
locked when calling this
+     */
+    void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info);
+
+    /**
+     * @brief Unmaps the given range and merges it with other unmapped regions
+     * @note block_mutex MUST be locked when calling this
+     */
+    void UnmapLocked(VaType virt, VaType size);
+
+    std::mutex block_mutex;
+    std::vector<Block> blocks{Block{}};
+
+    /// A soft limit on the maximum VA of the AS
+    VaType va_limit{VaMaximum};
+
+private:
+    /// Callback called when the mappings in a region have changed
+    std::function<void(VaType, VaType)> unmap_callback{};
+};
+
+/**
+ * @brief FlatAllocator specialises FlatAddressSpaceMap to work as an allocator, with an
+ * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
+ */
+template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAllocator
+    : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
+private:
+    using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;
+
+public:
+    explicit FlatAllocator(VaType virt_start, VaType va_limit = Base::VaMaximum);
+
+    /**
+     * @brief Allocates a region in the AS of the given size and returns its address
+     */
+    VaType Allocate(VaType size);
+
+    /**
+     * @brief Marks the given region in the AS as allocated
+     */
+    void AllocateFixed(VaType virt, VaType size);
+
+    /**
+     * @brief Frees an AS region so it can be used again
+     */
+    void Free(VaType virt, VaType size);
+
+    VaType GetVAStart() const {
+        return virt_start;
+    }
+
+private:
+    /// The base VA of the allocator, no allocations will be below this
+    VaType virt_start;
+
+    /**
+     * The end address for the initial linear allocation pass
+     * Once this reaches the AS limit the slower allocation path will be used
+     */
+    VaType current_linear_alloc_end;
+};
+} // namespace Common
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..2195dabd5
--- /dev/null
+++ b/src/common/address_space.inc
@@ -0,0 +1,366 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/address_space.h"
+#include "common/assert.h"
+
+#define MAP_MEMBER(returnType) \
+    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \
+        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+#define MAP_MEMBER_CONST() \
+    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \
+        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+
+#define MM_MEMBER(returnType) \
+    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+    FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>
+
+#define ALLOC_MEMBER(returnType) \
+    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+#define ALLOC_MEMBER_CONST() \
+    
template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \ + requires AddressSpaceValid<VaType, AddressSpaceBits> \ + FlatAllocator<VaType, UnmappedVa, AddressSpaceBits> + +namespace Common { +MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_, + std::function<void(VaType, VaType)> unmap_callback_) + : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} { + if (va_limit > VaMaximum) { + ASSERT_MSG(false, "Invalid VA limit!"); + } +} + +MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) { + VaType virt_end{virt + size}; + + if (virt_end > va_limit) { + ASSERT_MSG(false, + "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", + virt_end, va_limit); + } + + auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; + if (block_end_successor == blocks.begin()) { + ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end); + } + + auto block_end_predecessor{std::prev(block_end_successor)}; + + if (block_end_successor != blocks.end()) { + // We have blocks in front of us, if one is directly in front then we don't have to add a + // tail + if (block_end_successor->virt != virt_end) { + PaType tailPhys{[&]() -> PaType { + if constexpr (!PaContigSplit) { + // Always propagate unmapped regions rather than calculating offset + return block_end_predecessor->phys; + } else { + if (block_end_predecessor->Unmapped()) { + // Always propagate unmapped regions rather than calculating offset + return block_end_predecessor->phys; + } else { + return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; + } + } + }()}; + + if (block_end_predecessor->virt >= virt) { + // If this block's start would be overlapped by the map then reuse it as a tail + // block + block_end_predecessor->virt = virt_end; + block_end_predecessor->phys = tailPhys; + block_end_predecessor->extra_info = block_end_predecessor->extra_info; + + // No longer predecessor anymore + block_end_successor = block_end_predecessor--; + } else { + // Else insert a new one and we're done + blocks.insert(block_end_successor, + {Block(virt, phys, extra_info), + Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); + if (unmap_callback) { + unmap_callback(virt, size); + } + + return; + } + } + } else { + // block_end_predecessor will always be unmapped as blocks has to be terminated by an + // unmapped chunk + if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) { + // Move the unmapped block start backwards + block_end_predecessor->virt = virt_end; + + // No longer predecessor anymore + block_end_successor = block_end_predecessor--; + } else { + // Else insert a new one and we're done + blocks.insert(block_end_successor, + {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})}); + if (unmap_callback) { + unmap_callback(virt, size); + } + + return; + } + } + + auto block_start_successor{block_end_successor}; + + // Walk the block vector to find the start successor as this is more efficient than another + // binary search in most scenarios + while (std::prev(block_start_successor)->virt >= virt) { + block_start_successor--; + } + + // Check that the start successor is either the end block or something in between + if (block_start_successor->virt > virt_end) { + ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); + } else if (block_start_successor->virt == virt_end) { + // We need to create a new 
block as there are none spare that we would overwrite + blocks.insert(block_start_successor, Block(virt, phys, extra_info)); + } else { + // Erase overwritten blocks + if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { + blocks.erase(eraseStart, block_end_successor); + } + + // Reuse a block that would otherwise be overwritten as a start block + block_start_successor->virt = virt; + block_start_successor->phys = phys; + block_start_successor->extra_info = extra_info; + } + + if (unmap_callback) { + unmap_callback(virt, size); + } +} + +MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { + VaType virt_end{virt + size}; + + if (virt_end > va_limit) { + ASSERT_MSG(false, + "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", + virt_end, va_limit); + } + + auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; + if (block_end_successor == blocks.begin()) { + ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}", + virt_end); + } + + auto block_end_predecessor{std::prev(block_end_successor)}; + + auto walk_back_to_predecessor{[&](auto iter) { + while (iter->virt >= virt) { + iter--; + } + + return iter; + }}; + + auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) { + auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)}; + auto block_start_successor{std::next(block_start_predecessor)}; + + auto eraseEnd{[&]() { + if (block_start_predecessor->Unmapped()) { + // If the start predecessor is unmapped then we can erase everything in our region + // and be done + return std::next(unmappedEnd); + } else { + // Else reuse the end predecessor as the start of our unmapped region then erase all + // up to it + unmappedEnd->virt = virt; + return unmappedEnd; + } + }()}; + + // We can't have two unmapped regions after each other + if (eraseEnd != blocks.end() && + (eraseEnd == block_start_successor || + (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) { + ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!"); + } + + blocks.erase(block_start_successor, eraseEnd); + }}; + + // We can avoid any splitting logic if these are the case + if (block_end_predecessor->Unmapped()) { + if (block_end_predecessor->virt > virt) { + erase_blocks_with_end_unmapped(block_end_predecessor); + } + + if (unmap_callback) { + unmap_callback(virt, size); + } + + return; // The region is unmapped, bail out early + } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) { + erase_blocks_with_end_unmapped(block_end_successor); + + if (unmap_callback) { + unmap_callback(virt, size); + } + + return; // The region is unmapped here and doesn't need splitting, bail out early + } else if (block_end_successor == blocks.end()) { + // This should never happen as the end should always follow an unmapped block + ASSERT_MSG(false, "Unexpected Memory Manager state!"); + } else if (block_end_successor->virt != virt_end) { + // If one block is directly in front then we don't have to add a tail + + // The previous block is mapped so we will need to add a tail with an offset + PaType tailPhys{[&]() { + if constexpr (PaContigSplit) { + return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; + } else { + return block_end_predecessor->phys; + } + }()}; + + if (block_end_predecessor->virt >= virt) { + // If this block's start would be overlapped by the unmap then reuse it as a tail block + block_end_predecessor->virt = 
virt_end;
+            block_end_predecessor->phys = tailPhys;
+
+            // No longer predecessor anymore
+            block_end_successor = block_end_predecessor--;
+        } else {
+            blocks.insert(block_end_successor,
+                          {Block(virt, UnmappedPa, {}),
+                           Block(virt_end, tailPhys, block_end_predecessor->extra_info)});
+            if (unmap_callback) {
+                unmap_callback(virt, size);
+            }
+
+            // The previous block is mapped and ends before the region being unmapped
+            return;
+        }
+    }
+
+    // Walk the block vector to find the start predecessor as this is more efficient than another
+    // binary search in most scenarios
+    auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)};
+    auto block_start_successor{std::next(block_start_predecessor)};
+
+    if (block_start_successor->virt > virt_end) {
+        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+    } else if (block_start_successor->virt == virt_end) {
+        // There are no blocks between the start and the end that would let us skip inserting a new
+        // one for the head
+
+        // The previous block may be unmapped; if so we don't need to insert any unmaps after it
+        if (block_start_predecessor->Mapped()) {
+            blocks.insert(block_start_successor, Block(virt, UnmappedPa, {}));
+        }
+    } else if (block_start_predecessor->Unmapped()) {
+        // If the previous block is unmapped, this unmap merges into it; just erase the blocks in
+        // between
+        blocks.erase(block_start_successor, block_end_predecessor);
+    } else {
+        // Erase overwritten blocks, skipping the first one as we have written the unmapped start
+        // block there
+        if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) {
+            blocks.erase(eraseStart, block_end_successor);
+        }
+
+        // Add in the unmapped block header
+        block_start_successor->virt = virt;
+        block_start_successor->phys = UnmappedPa;
+    }
+
+    if (unmap_callback)
+        unmap_callback(virt, size);
+}
+
+ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_)
+    : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {}
+
+ALLOC_MEMBER(VaType)::Allocate(VaType size) {
+    std::scoped_lock lock(this->block_mutex);
+
+    VaType alloc_start{UnmappedVa};
+    VaType alloc_end{current_linear_alloc_end + size};
+
+    // Avoid searching backwards in the address space if possible
+    if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) {
+        auto alloc_end_successor{
+            std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
+        if (alloc_end_successor == this->blocks.begin()) {
+            ASSERT_MSG(false, "First block in AS map is invalid!");
+        }
+
+        auto alloc_end_predecessor{std::prev(alloc_end_successor)};
+        if (alloc_end_predecessor->virt <= current_linear_alloc_end) {
+            alloc_start = current_linear_alloc_end;
+        } else {
+            // Skip over any fixed mappings in front of us
+            while (alloc_end_successor != this->blocks.end()) {
+                if (alloc_end_successor->virt - alloc_end_predecessor->virt < size ||
+                    alloc_end_predecessor->Mapped()) {
+                    alloc_start = alloc_end_predecessor->virt;
+                    break;
+                }
+
+                alloc_end_predecessor = alloc_end_successor++;
+
+                // Use the VA limit to calculate if we can fit in the final block since it has no
+                // successor
+                if (alloc_end_successor == this->blocks.end()) {
+                    alloc_end = alloc_end_predecessor->virt + size;
+
+                    if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) {
+                        alloc_start = alloc_end_predecessor->virt;
+                    }
+                }
+            }
+        }
+    }
+
+    if (alloc_start != UnmappedVa) {
+        current_linear_alloc_end = alloc_start + size;
+    } else { // If linear allocation overflows the AS then find a gap
+        if
(this->blocks.size() <= 2) { + ASSERT_MSG(false, "Unexpected allocator state!"); + } + + auto search_predecessor{this->blocks.begin()}; + auto search_successor{std::next(search_predecessor)}; + + while (search_successor != this->blocks.end() && + (search_successor->virt - search_predecessor->virt < size || + search_predecessor->Mapped())) { + search_predecessor = search_successor++; + } + + if (search_successor != this->blocks.end()) { + alloc_start = search_predecessor->virt; + } else { + return {}; // AS is full + } + } + + this->MapLocked(alloc_start, true, size, {}); + return alloc_start; +} + +ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) { + this->Map(virt, true, size); +} + +ALLOC_MEMBER(void)::Free(VaType virt, VaType size) { + this->Unmap(virt, size); +} +} // namespace Common diff --git a/src/common/algorithm.h b/src/common/algorithm.h index 9ddfd637b..c27c9241d 100644 --- a/src/common/algorithm.h +++ b/src/common/algorithm.h @@ -24,4 +24,12 @@ template <class ForwardIt, class T, class Compare = std::less<>> return first != last && !comp(value, *first) ? first : last; } +template <typename T, typename Func, typename... Args> +T FoldRight(T initial_value, Func&& func, Args&&... args) { + T value{initial_value}; + const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); }; + (std::invoke(high_func, std::forward<Args>(args)), ...); + return value; +} + } // namespace Common diff --git a/src/common/bit_field.h b/src/common/bit_field.h index 7e1df62b1..e4e58ea45 100644 --- a/src/common/bit_field.h +++ b/src/common/bit_field.h @@ -141,10 +141,6 @@ public: constexpr BitField(BitField&&) noexcept = default; constexpr BitField& operator=(BitField&&) noexcept = default; - [[nodiscard]] constexpr operator T() const { - return Value(); - } - constexpr void Assign(const T& value) { #ifdef _MSC_VER storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value)); @@ -162,6 +158,17 @@ public: return ExtractValue(storage); } + template <typename ConvertedToType> + [[nodiscard]] constexpr ConvertedToType As() const { + static_assert(!std::is_same_v<T, ConvertedToType>, + "Unnecessary cast. 
Use Value() instead."); + return static_cast<ConvertedToType>(Value()); + } + + [[nodiscard]] constexpr operator T() const { + return Value(); + } + [[nodiscard]] constexpr explicit operator bool() const { return Value() != 0; } diff --git a/src/common/bounded_threadsafe_queue.h b/src/common/bounded_threadsafe_queue.h index 7e465549b..21217801e 100644 --- a/src/common/bounded_threadsafe_queue.h +++ b/src/common/bounded_threadsafe_queue.h @@ -21,11 +21,6 @@ constexpr size_t hardware_interference_size = std::hardware_destructive_interfer constexpr size_t hardware_interference_size = 64; #endif -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4324) -#endif - template <typename T, size_t capacity = 0x400> class MPSCQueue { public: @@ -160,8 +155,4 @@ private: static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible"); }; -#ifdef _MSC_VER -#pragma warning(pop) -#endif - } // namespace Common diff --git a/src/common/concepts.h b/src/common/concepts.h index a97555f6a..e8ce30dfe 100644 --- a/src/common/concepts.h +++ b/src/common/concepts.h @@ -34,4 +34,12 @@ concept DerivedFrom = requires { template <typename From, typename To> concept ConvertibleTo = std::is_convertible_v<From, To>; +// No equivalents in the stdlib + +template <typename T> +concept IsArithmetic = std::is_arithmetic_v<T>; + +template <typename T> +concept IsIntegral = std::is_integral_v<T>; + } // namespace Common diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h index 4a0f72cc9..f899b0d54 100644 --- a/src/common/fixed_point.h +++ b/src/common/fixed_point.h @@ -4,14 +4,7 @@ // From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h // See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math -#ifndef FIXED_H_ -#define FIXED_H_ - -#if __cplusplus >= 201402L -#define CONSTEXPR14 constexpr -#else -#define CONSTEXPR14 -#endif +#pragma once #include <cstddef> // for size_t #include <cstdint> @@ -19,6 +12,8 @@ #include <ostream> #include <type_traits> +#include <common/concepts.h> + namespace Common { template <size_t I, size_t F> @@ -57,8 +52,8 @@ struct type_from_size<64> { static constexpr size_t size = 64; using value_type = int64_t; - using unsigned_type = std::make_unsigned<value_type>::type; - using signed_type = std::make_signed<value_type>::type; + using unsigned_type = std::make_unsigned_t<value_type>; + using signed_type = std::make_signed_t<value_type>; using next_size = type_from_size<128>; }; @@ -68,8 +63,8 @@ struct type_from_size<32> { static constexpr size_t size = 32; using value_type = int32_t; - using unsigned_type = std::make_unsigned<value_type>::type; - using signed_type = std::make_signed<value_type>::type; + using unsigned_type = std::make_unsigned_t<value_type>; + using signed_type = std::make_signed_t<value_type>; using next_size = type_from_size<64>; }; @@ -79,8 +74,8 @@ struct type_from_size<16> { static constexpr size_t size = 16; using value_type = int16_t; - using unsigned_type = std::make_unsigned<value_type>::type; - using signed_type = std::make_signed<value_type>::type; + using unsigned_type = std::make_unsigned_t<value_type>; + using signed_type = std::make_signed_t<value_type>; using next_size = type_from_size<32>; }; @@ -90,8 +85,8 @@ struct type_from_size<8> { static constexpr size_t size = 8; using value_type = int8_t; - using unsigned_type = std::make_unsigned<value_type>::type; - using signed_type = std::make_signed<value_type>::type; + using unsigned_type = 
std::make_unsigned_t<value_type>; + using signed_type = std::make_signed_t<value_type>; using next_size = type_from_size<16>; }; @@ -106,9 +101,9 @@ constexpr B next_to_base(N rhs) { struct divide_by_zero : std::exception {}; template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> divide( +constexpr FixedPoint<I, F> divide( FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder, - typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { + std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) { using next_type = typename FixedPoint<I, F>::next_type; using base_type = typename FixedPoint<I, F>::base_type; @@ -126,9 +121,9 @@ CONSTEXPR14 FixedPoint<I, F> divide( } template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> divide( +constexpr FixedPoint<I, F> divide( FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder, - typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { + std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) { using unsigned_type = typename FixedPoint<I, F>::unsigned_type; @@ -196,9 +191,9 @@ CONSTEXPR14 FixedPoint<I, F> divide( // this is the usual implementation of multiplication template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> multiply( +constexpr FixedPoint<I, F> multiply( FixedPoint<I, F> lhs, FixedPoint<I, F> rhs, - typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { + std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) { using next_type = typename FixedPoint<I, F>::next_type; using base_type = typename FixedPoint<I, F>::base_type; @@ -215,9 +210,9 @@ CONSTEXPR14 FixedPoint<I, F> multiply( // it is slightly slower, but is more robust since it doesn't // require an upgraded type template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> multiply( +constexpr FixedPoint<I, F> multiply( FixedPoint<I, F> lhs, FixedPoint<I, F> rhs, - typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { + std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) { using base_type = typename FixedPoint<I, F>::base_type; @@ -272,19 +267,20 @@ public: static constexpr base_type one = base_type(1) << fractional_bits; public: // constructors - FixedPoint() = default; - FixedPoint(const FixedPoint&) = default; - FixedPoint(FixedPoint&&) = default; - FixedPoint& operator=(const FixedPoint&) = default; + constexpr FixedPoint() = default; + + constexpr FixedPoint(const FixedPoint&) = default; + constexpr FixedPoint& operator=(const FixedPoint&) = default; + + constexpr FixedPoint(FixedPoint&&) noexcept = default; + constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default; - template <class Number> - constexpr FixedPoint( - Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr) - : data_(static_cast<base_type>(n * one)) {} + template <IsArithmetic Number> + constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {} public: // conversion template <size_t I2, size_t F2> - CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) { + constexpr explicit FixedPoint(FixedPoint<I2, F2> other) { static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types"); using T = FixedPoint<I2, F2>; @@ -308,36 +304,14 @@ public: } public: // comparison operators - constexpr bool operator==(FixedPoint rhs) const
{ - return data_ == rhs.data_; - } - - constexpr bool operator!=(FixedPoint rhs) const { - return data_ != rhs.data_; - } - - constexpr bool operator<(FixedPoint rhs) const { - return data_ < rhs.data_; - } - - constexpr bool operator>(FixedPoint rhs) const { - return data_ > rhs.data_; - } - - constexpr bool operator<=(FixedPoint rhs) const { - return data_ <= rhs.data_; - } - - constexpr bool operator>=(FixedPoint rhs) const { - return data_ >= rhs.data_; - } + friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default; public: // unary operators - constexpr bool operator!() const { + [[nodiscard]] constexpr bool operator!() const { return !data_; } - constexpr FixedPoint operator~() const { + [[nodiscard]] constexpr FixedPoint operator~() const { // NOTE(eteran): this will often appear to "just negate" the value // that is not an error, it is because -x == (~x+1) // and that "+1" is adding an infinitesimally small fraction to the @@ -345,89 +319,87 @@ public: // unary operators return FixedPoint::from_base(~data_); } - constexpr FixedPoint operator-() const { + [[nodiscard]] constexpr FixedPoint operator-() const { return FixedPoint::from_base(-data_); } - constexpr FixedPoint operator+() const { + [[nodiscard]] constexpr FixedPoint operator+() const { return FixedPoint::from_base(+data_); } - CONSTEXPR14 FixedPoint& operator++() { + constexpr FixedPoint& operator++() { data_ += one; return *this; } - CONSTEXPR14 FixedPoint& operator--() { + constexpr FixedPoint& operator--() { data_ -= one; return *this; } - CONSTEXPR14 FixedPoint operator++(int) { + constexpr FixedPoint operator++(int) { FixedPoint tmp(*this); data_ += one; return tmp; } - CONSTEXPR14 FixedPoint operator--(int) { + constexpr FixedPoint operator--(int) { FixedPoint tmp(*this); data_ -= one; return tmp; } public: // basic math operators - CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) { + constexpr FixedPoint& operator+=(FixedPoint n) { data_ += n.data_; return *this; } - CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) { + constexpr FixedPoint& operator-=(FixedPoint n) { data_ -= n.data_; return *this; } - CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) { + constexpr FixedPoint& operator*=(FixedPoint n) { return assign(detail::multiply(*this, n)); } - CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) { + constexpr FixedPoint& operator/=(FixedPoint n) { FixedPoint temp; return assign(detail::divide(*this, n, temp)); } private: - CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) { + constexpr FixedPoint& assign(FixedPoint rhs) { data_ = rhs.data_; return *this; } public: // binary math operators; these affect the underlying bit pattern, since they // don't typically make sense for non-integer values - CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) { + constexpr FixedPoint& operator&=(FixedPoint n) { data_ &= n.data_; return *this; } - CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) { + constexpr FixedPoint& operator|=(FixedPoint n) { data_ |= n.data_; return *this; } - CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) { + constexpr FixedPoint& operator^=(FixedPoint n) { data_ ^= n.data_; return *this; } - template <class Integer, - class = typename std::enable_if<std::is_integral<Integer>::value>::type> - CONSTEXPR14 FixedPoint& operator>>=(Integer n) { + template <IsIntegral Integer> + constexpr FixedPoint& operator>>=(Integer n) { data_ >>= n; return *this; } - template <class Integer, - class = typename std::enable_if<std::is_integral<Integer>::value>::type> - CONSTEXPR14 FixedPoint&
operator<<=(Integer n) { + template <IsIntegral Integer> + constexpr FixedPoint& operator<<=(Integer n) { data_ <<= n; return *this; } @@ -437,42 +409,42 @@ public: // conversion to basic types data_ += (data_ & fractional_mask) >> 1; } - constexpr int to_int() { + [[nodiscard]] constexpr int to_int() { round_up(); return static_cast<int>((data_ & integer_mask) >> fractional_bits); } - constexpr unsigned int to_uint() const { + [[nodiscard]] constexpr unsigned int to_uint() { round_up(); return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits); } - constexpr int64_t to_long() { + [[nodiscard]] constexpr int64_t to_long() { round_up(); return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits); } - constexpr int to_int_floor() const { + [[nodiscard]] constexpr int to_int_floor() const { return static_cast<int>((data_ & integer_mask) >> fractional_bits); } - constexpr int64_t to_long_floor() { + [[nodiscard]] constexpr int64_t to_long_floor() const { return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits); } - constexpr unsigned int to_uint_floor() const { + [[nodiscard]] constexpr unsigned int to_uint_floor() const { return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits); } - constexpr float to_float() const { + [[nodiscard]] constexpr float to_float() const { return static_cast<float>(data_) / FixedPoint::one; } - constexpr double to_double() const { + [[nodiscard]] constexpr double to_double() const { return static_cast<double>(data_) / FixedPoint::one; } - constexpr base_type to_raw() const { + [[nodiscard]] constexpr base_type to_raw() const { return data_; } @@ -480,27 +452,27 @@ public: // conversion to basic types data_ &= fractional_mask; } - constexpr base_type get_frac() const { + [[nodiscard]] constexpr base_type get_frac() const { return data_ & fractional_mask; } public: - CONSTEXPR14 void swap(FixedPoint& rhs) { + constexpr void swap(FixedPoint& rhs) noexcept { using std::swap; swap(data_, rhs.data_); } public: - base_type data_; + base_type data_{}; }; // if we have the same fractional portion, but differing integer portions, we trivially upgrade the // smaller type template <size_t I1, size_t I2, size_t F> -CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type -operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { +constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+( + FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { - using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; + using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>; const T l = T::from_base(lhs.to_raw()); const T r = T::from_base(rhs.to_raw()); @@ -508,10 +480,10 @@ operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { } template <size_t I1, size_t I2, size_t F> -CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type -operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { +constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-( + FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { - using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; + using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>; const T l = T::from_base(lhs.to_raw()); const T r = T::from_base(rhs.to_raw()); @@ -519,10 +491,10 @@ operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { } template <size_t I1, size_t I2, 
size_t F> -CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type -operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { +constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*( + FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { - using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; + using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>; const T l = T::from_base(lhs.to_raw()); const T r = T::from_base(rhs.to_raw()); @@ -530,10 +502,10 @@ operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { } template <size_t I1, size_t I2, size_t F> -CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type -operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { +constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/( + FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { - using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; + using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>; const T l = T::from_base(lhs.to_raw()); const T r = T::from_base(rhs.to_raw()); @@ -548,159 +520,133 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) { // basic math operators template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { +constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { lhs += rhs; return lhs; } template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { +constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { lhs -= rhs; return lhs; } template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { +constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { lhs *= rhs; return lhs; } template <size_t I, size_t F> -CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { +constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { lhs /= rhs; return lhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) { +template <size_t I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) { lhs += FixedPoint<I, F>(rhs); return lhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) { +template <size_t I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) { lhs -= FixedPoint<I, F>(rhs); return lhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) { +template <size_t I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) { lhs *= FixedPoint<I, F>(rhs); return lhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) { +template <size_t 
I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) { lhs /= FixedPoint<I, F>(rhs); return lhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) { +template <size_t I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) { FixedPoint<I, F> tmp(lhs); tmp += rhs; return tmp; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) { +template <size_t I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) { FixedPoint<I, F> tmp(lhs); tmp -= rhs; return tmp; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) { +template <size_t I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) { FixedPoint<I, F> tmp(lhs); tmp *= rhs; return tmp; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) { +template <size_t I, size_t F, IsArithmetic Number> +constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) { FixedPoint<I, F> tmp(lhs); tmp /= rhs; return tmp; } // shift operators -template <size_t I, size_t F, class Integer, - class = typename std::enable_if<std::is_integral<Integer>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) { +template <size_t I, size_t F, IsIntegral Integer> +constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) { lhs <<= rhs; return lhs; } -template <size_t I, size_t F, class Integer, - class = typename std::enable_if<std::is_integral<Integer>::value>::type> -CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) { +template <size_t I, size_t F, IsIntegral Integer> +constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) { lhs >>= rhs; return lhs; } // comparison operators -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) { return lhs > FixedPoint<I, F>(rhs); } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) { return lhs < FixedPoint<I, F>(rhs); } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) { return lhs >= FixedPoint<I, F>(rhs); } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) { return lhs <= FixedPoint<I, F>(rhs); } -template <size_t I, size_t F, class Number, - class = 
typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) { return lhs == FixedPoint<I, F>(rhs); } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) { return lhs != FixedPoint<I, F>(rhs); } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) { return FixedPoint<I, F>(lhs) > rhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) { return FixedPoint<I, F>(lhs) < rhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) { return FixedPoint<I, F>(lhs) >= rhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) { return FixedPoint<I, F>(lhs) <= rhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) { return FixedPoint<I, F>(lhs) == rhs; } -template <size_t I, size_t F, class Number, - class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> +template <size_t I, size_t F, IsArithmetic Number> constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) { return FixedPoint<I, F>(lhs) != rhs; } } // namespace Common - -#undef CONSTEXPR14 - -#endif diff --git a/src/common/hash.h b/src/common/hash.h index b6f3e6d6f..e8fe78b07 100644 --- a/src/common/hash.h +++ b/src/common/hash.h @@ -18,4 +18,11 @@ struct PairHash { } }; +template <typename T> +struct IdentityHash { + [[nodiscard]] size_t operator()(T value) const noexcept { + return static_cast<size_t>(value); + } +}; + } // namespace Common diff --git a/src/common/input.h b/src/common/input.h index 825b0d650..b533f3844 100644 --- a/src/common/input.h +++ b/src/common/input.h @@ -76,6 +76,19 @@ enum class PollingError { Unknown, }; +// Nfc reply from the controller +enum class NfcState { + Success, + NewAmiibo, + WaitingForAmiibo, + AmiiboRemoved, + NotAnAmiibo, + NotSupported, + WrongDeviceState, + WriteFailed, + Unknown, +}; + // Ir camera reply from the controller enum class CameraError { None, @@ -202,6 +215,11 @@ struct CameraStatus { std::vector<u8> data{}; }; +struct NfcStatus { + NfcState state{}; + std::vector<u8> data{}; +}; + // List of buttons to be passed to Qt that can be translated enum class ButtonNames { Undefined, @@ -259,7 +277,9 @@ struct CallbackStatus { BodyColorStatus color_status{}; BatteryStatus battery_status{}; VibrationStatus vibration_status{}; - CameraStatus camera_status{}; + CameraFormat camera_status{CameraFormat::None}; + NfcState nfc_status{NfcState::Unknown}; + std::vector<u8> raw_data{}; }; // Triggered 
once every input change @@ -312,6 +332,14 @@ public: virtual CameraError SetCameraFormat([[maybe_unused]] CameraFormat camera_format) { return CameraError::NotSupported; } + + virtual NfcState SupportsNfc() const { + return NfcState::NotSupported; + } + + virtual NfcState WriteNfcData([[maybe_unused]] const std::vector<u8>& data) { + return NfcState::NotSupported; + } }; /// An abstract class template for a factory that can create input devices. diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp index 8ce1c2fd1..15d92505e 100644 --- a/src/common/logging/backend.cpp +++ b/src/common/logging/backend.cpp @@ -219,7 +219,7 @@ private: void StartBackendThread() { backend_thread = std::jthread([this](std::stop_token stop_token) { - Common::SetCurrentThreadName("yuzu:Log"); + Common::SetCurrentThreadName("Logger"); Entry entry; const auto write_logs = [this, &entry]() { ForEachBackend([&entry](Backend& backend) { backend.Write(entry); }); diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp new file mode 100644 index 000000000..46e362f3b --- /dev/null +++ b/src/common/multi_level_page_table.cpp @@ -0,0 +1,9 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "common/multi_level_page_table.inc" + +namespace Common { +template class Common::MultiLevelPageTable<u64>; +template class Common::MultiLevelPageTable<u32>; +} // namespace Common diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h new file mode 100644 index 000000000..31f6676a0 --- /dev/null +++ b/src/common/multi_level_page_table.h @@ -0,0 +1,79 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <type_traits> +#include <utility> +#include <vector> + +#include "common/common_types.h" + +namespace Common { + +template <typename BaseAddr> +class MultiLevelPageTable final { +public: + constexpr MultiLevelPageTable() = default; + explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits, + std::size_t page_bits); + + ~MultiLevelPageTable() noexcept; + + MultiLevelPageTable(const MultiLevelPageTable&) = delete; + MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete; + + MultiLevelPageTable(MultiLevelPageTable&& other) noexcept + : address_space_bits{std::exchange(other.address_space_bits, 0)}, + first_level_bits{std::exchange(other.first_level_bits, 0)}, page_bits{std::exchange( + other.page_bits, 0)}, + first_level_shift{std::exchange(other.first_level_shift, 0)}, + first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)}, + alloc_size{std::exchange(other.alloc_size, 0)}, + first_level_map{std::move(other.first_level_map)}, base_ptr{std::exchange(other.base_ptr, + nullptr)} {} + + MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept { + address_space_bits = std::exchange(other.address_space_bits, 0); + first_level_bits = std::exchange(other.first_level_bits, 0); + page_bits = std::exchange(other.page_bits, 0); + first_level_shift = std::exchange(other.first_level_shift, 0); + first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0); + alloc_size = std::exchange(other.alloc_size, 0); + first_level_map = std::move(other.first_level_map); + base_ptr = std::exchange(other.base_ptr, nullptr); + return *this; + } + + void ReserveRange(u64 start, std::size_t size); + + [[nodiscard]] const BaseAddr& operator[](std::size_t index) const { + return base_ptr[index]; + } + +
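// A rough usage sketch of this interface (the parameter values are
// illustrative, not taken from a real caller): the constructor reserves one
// table entry per page for the whole address space, and ReserveRange()
// commits the first-level chunks that cover [start, start + size):
//
//     Common::MultiLevelPageTable<u64> table{34, 10, 12};
//     table.ReserveRange(0, 1ULL << 32);  // commit chunks for the first 4 GiB
//     table[addr >> 12] = entry;          // entries are indexed by page number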
[[nodiscard]] BaseAddr& operator[](std::size_t index) { + return base_ptr[index]; + } + + [[nodiscard]] BaseAddr* data() { + return base_ptr; + } + + [[nodiscard]] const BaseAddr* data() const { + return base_ptr; + } + +private: + void AllocateLevel(u64 level); + + std::size_t address_space_bits{}; + std::size_t first_level_bits{}; + std::size_t page_bits{}; + std::size_t first_level_shift{}; + std::size_t first_level_chunk_size{}; + std::size_t alloc_size{}; + std::vector<void*> first_level_map{}; + BaseAddr* base_ptr{}; +}; + +} // namespace Common diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc new file mode 100644 index 000000000..8ac506fa0 --- /dev/null +++ b/src/common/multi_level_page_table.inc @@ -0,0 +1,84 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifdef _WIN32 +#include <windows.h> +#else +#include <sys/mman.h> +#endif + +#include "common/assert.h" +#include "common/multi_level_page_table.h" + +namespace Common { + +template <typename BaseAddr> +MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, + std::size_t first_level_bits_, + std::size_t page_bits_) + : address_space_bits{address_space_bits_}, + first_level_bits{first_level_bits_}, page_bits{page_bits_} { + if (page_bits == 0) { + return; + } + first_level_shift = address_space_bits - first_level_bits; + first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr); + alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr); + std::size_t first_level_size = 1ULL << first_level_bits; + first_level_map.resize(first_level_size, nullptr); +#ifdef _WIN32 + void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)}; +#else + void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; + + if (base == MAP_FAILED) { + base = nullptr; + } +#endif + + ASSERT(base); + base_ptr = reinterpret_cast<BaseAddr*>(base); +} + +template <typename BaseAddr> +MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept { + if (!base_ptr) { + return; + } +#ifdef _WIN32 + ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE)); +#else + ASSERT(munmap(base_ptr, alloc_size) == 0); +#endif +} + +template <typename BaseAddr> +void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) { + const u64 new_start = start >> first_level_shift; + const u64 new_end = (start + size) >> first_level_shift; + for (u64 i = new_start; i <= new_end; i++) { + if (!first_level_map[i]) { + AllocateLevel(i); + } + } +} + +template <typename BaseAddr> +void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) { + void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size; +#ifdef _WIN32 + void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)}; +#else + void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)}; + + if (base == MAP_FAILED) { + base = nullptr; + } +#endif + ASSERT(base); + + first_level_map[level] = base; +} + +} // namespace Common diff --git a/src/common/settings.h b/src/common/settings.h index 13651de57..0eb98939c 100644 --- a/src/common/settings.h +++ b/src/common/settings.h @@ -431,7 +431,7 @@ struct Values { FullscreenMode::Exclusive, #endif FullscreenMode::Borderless, FullscreenMode::Exclusive, "fullscreen_mode"}; - SwitchableSetting<int, true> aspect_ratio{0, 0, 3, "aspect_ratio"}; + SwitchableSetting<int, 
true> aspect_ratio{0, 0, 4, "aspect_ratio"}; SwitchableSetting<int, true> max_anisotropy{0, 0, 5, "max_anisotropy"}; SwitchableSetting<bool> use_speed_limit{true, "use_speed_limit"}; SwitchableSetting<u16, true> speed_limit{100, 0, 9999, "speed_limit"}; @@ -530,6 +530,8 @@ struct Values { Setting<bool> use_debug_asserts{false, "use_debug_asserts"}; Setting<bool> use_auto_stub{false, "use_auto_stub"}; Setting<bool> enable_all_controllers{false, "enable_all_controllers"}; + Setting<bool> create_crash_dumps{false, "create_crash_dumps"}; + Setting<bool> perform_vulkan_check{true, "perform_vulkan_check"}; // Miscellaneous Setting<std::string> log_filter{"*:Info", "log_filter"}; diff --git a/src/common/thread.h b/src/common/thread.h index 1552f58e0..e17a7850f 100644 --- a/src/common/thread.h +++ b/src/common/thread.h @@ -54,6 +54,10 @@ public: is_set = false; } + [[nodiscard]] bool IsSet() { + return is_set; + } + private: std::condition_variable condvar; std::mutex mutex; diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 806e7ff6c..113e663b5 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -4,12 +4,6 @@ add_library(core STATIC arm/arm_interface.h arm/arm_interface.cpp - arm/dynarmic/arm_dynarmic_32.cpp - arm/dynarmic/arm_dynarmic_32.h - arm/dynarmic/arm_dynarmic_64.cpp - arm/dynarmic/arm_dynarmic_64.h - arm/dynarmic/arm_dynarmic_cp15.cpp - arm/dynarmic/arm_dynarmic_cp15.h arm/dynarmic/arm_exclusive_monitor.cpp arm/dynarmic/arm_exclusive_monitor.h arm/exclusive_monitor.cpp @@ -144,8 +138,6 @@ add_library(core STATIC frontend/emu_window.h frontend/framebuffer_layout.cpp frontend/framebuffer_layout.h - hardware_interrupt_manager.cpp - hardware_interrupt_manager.h hid/emulated_console.cpp hid/emulated_console.h hid/emulated_controller.cpp @@ -198,6 +190,9 @@ add_library(core STATIC hle/kernel/k_code_memory.h hle/kernel/k_condition_variable.cpp hle/kernel/k_condition_variable.h + hle/kernel/k_dynamic_page_manager.h + hle/kernel/k_dynamic_resource_manager.h + hle/kernel/k_dynamic_slab_heap.h hle/kernel/k_event.cpp hle/kernel/k_event.h hle/kernel/k_handle_table.cpp @@ -248,6 +243,8 @@ add_library(core STATIC hle/kernel/k_server_session.h hle/kernel/k_session.cpp hle/kernel/k_session.h + hle/kernel/k_session_request.cpp + hle/kernel/k_session_request.h hle/kernel/k_shared_memory.cpp hle/kernel/k_shared_memory.h hle/kernel/k_shared_memory_info.h @@ -269,8 +266,6 @@ add_library(core STATIC hle/kernel/k_worker_task.h hle/kernel/k_worker_task_manager.cpp hle/kernel/k_worker_task_manager.h - hle/kernel/k_writable_event.cpp - hle/kernel/k_writable_event.h hle/kernel/kernel.cpp hle/kernel/kernel.h hle/kernel/memory_types.h @@ -466,6 +461,8 @@ add_library(core STATIC hle/service/hid/controllers/mouse.h hle/service/hid/controllers/npad.cpp hle/service/hid/controllers/npad.h + hle/service/hid/controllers/palma.cpp + hle/service/hid/controllers/palma.h hle/service/hid/controllers/stubbed.cpp hle/service/hid/controllers/stubbed.h hle/service/hid/controllers/touchscreen.cpp @@ -500,6 +497,8 @@ add_library(core STATIC hle/service/jit/jit.h hle/service/lbl/lbl.cpp hle/service/lbl/lbl.h + hle/service/ldn/lan_discovery.cpp + hle/service/ldn/lan_discovery.h hle/service/ldn/ldn_results.h hle/service/ldn/ldn.cpp hle/service/ldn/ldn.h @@ -525,8 +524,14 @@ add_library(core STATIC hle/service/ncm/ncm.h hle/service/nfc/nfc.cpp hle/service/nfc/nfc.h + hle/service/nfp/amiibo_crypto.cpp + hle/service/nfp/amiibo_crypto.h hle/service/nfp/nfp.cpp hle/service/nfp/nfp.h + 
hle/service/nfp/nfp_device.cpp + hle/service/nfp/nfp_device.h + hle/service/nfp/nfp_result.h + hle/service/nfp/nfp_types.h hle/service/nfp/nfp_user.cpp hle/service/nfp/nfp_user.h hle/service/ngct/ngct.cpp @@ -546,6 +551,12 @@ add_library(core STATIC hle/service/ns/ns.h hle/service/ns/pdm_qry.cpp hle/service/ns/pdm_qry.h + hle/service/nvdrv/core/container.cpp + hle/service/nvdrv/core/container.h + hle/service/nvdrv/core/nvmap.cpp + hle/service/nvdrv/core/nvmap.h + hle/service/nvdrv/core/syncpoint_manager.cpp + hle/service/nvdrv/core/syncpoint_manager.h hle/service/nvdrv/devices/nvdevice.h hle/service/nvdrv/devices/nvdisp_disp0.cpp hle/service/nvdrv/devices/nvdisp_disp0.h @@ -574,8 +585,6 @@ add_library(core STATIC hle/service/nvdrv/nvdrv_interface.h hle/service/nvdrv/nvmemp.cpp hle/service/nvdrv/nvmemp.h - hle/service/nvdrv/syncpoint_manager.cpp - hle/service/nvdrv/syncpoint_manager.h hle/service/nvflinger/binder.h hle/service/nvflinger/buffer_item.h hle/service/nvflinger/buffer_item_consumer.cpp @@ -765,19 +774,15 @@ if (MSVC) /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data + /we4800 # Implicit conversion from 'type' to bool. Possible information loss ) else() target_compile_options(core PRIVATE -Werror=conversion - -Werror=ignored-qualifiers - $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess> - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable> + -Wno-sign-conversion $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation> - - -Wno-sign-conversion ) endif() diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index 953d96439..29ba562dc 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp @@ -134,6 +134,14 @@ void ARM_Interface::Run() { } system.ExitDynarmicProfile(); + // If the thread is scheduled for termination, exit the thread. + if (current_thread->HasDpc()) { + if (current_thread->IsTerminationRequested()) { + current_thread->Exit(); + UNREACHABLE(); + } + } + // Notify the debugger and go to sleep if a breakpoint was hit, // or if the thread is unable to continue for any reason. 
if (Has(hr, breakpoint) || Has(hr, no_execute)) { diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp index 1d46f6d40..22b5d5656 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp @@ -111,6 +111,7 @@ public: LOG_ERROR(Core_ARM, "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc, num_instructions, memory.Read32(pc)); + ReturnException(pc, ARM_Interface::no_execute); } void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op, diff --git a/src/core/core.cpp b/src/core/core.cpp index e651ce100..7fb8bc019 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -27,7 +27,6 @@ #include "core/file_sys/savedata_factory.h" #include "core/file_sys/vfs_concat.h" #include "core/file_sys/vfs_real.h" -#include "core/hardware_interrupt_manager.h" #include "core/hid/hid_core.h" #include "core/hle/kernel/k_memory_manager.h" #include "core/hle/kernel/k_process.h" @@ -51,6 +50,7 @@ #include "core/telemetry_session.h" #include "core/tools/freezer.h" #include "network/network.h" +#include "video_core/host1x/host1x.h" #include "video_core/renderer_base.h" #include "video_core/video_core.h" @@ -133,6 +133,50 @@ struct System::Impl { : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{}, cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {} + void Initialize(System& system) { + device_memory = std::make_unique<Core::DeviceMemory>(); + + is_multicore = Settings::values.use_multi_core.GetValue(); + + core_timing.SetMulticore(is_multicore); + core_timing.Initialize([&system]() { system.RegisterHostThread(); }); + + const auto posix_time = std::chrono::system_clock::now().time_since_epoch(); + const auto current_time = + std::chrono::duration_cast<std::chrono::seconds>(posix_time).count(); + Settings::values.custom_rtc_differential = + Settings::values.custom_rtc.value_or(current_time) - current_time; + + // Create a default fs if one doesn't already exist. + if (virtual_filesystem == nullptr) { + virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>(); + } + if (content_provider == nullptr) { + content_provider = std::make_unique<FileSys::ContentProviderUnion>(); + } + + // Create default implementations of applets if one is not provided. 
+ applet_manager.SetDefaultAppletsIfMissing(); + + is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); + + kernel.SetMulticore(is_multicore); + cpu_manager.SetMulticore(is_multicore); + cpu_manager.SetAsyncGpu(is_async_gpu); + } + + void ReinitializeIfNecessary(System& system) { + if (is_multicore == Settings::values.use_multi_core.GetValue()) { + return; + } + + LOG_DEBUG(Kernel, "Re-initializing"); + + is_multicore = Settings::values.use_multi_core.GetValue(); + + Initialize(system); + } + SystemResultStatus Run() { std::unique_lock<std::mutex> lk(suspend_guard); status = SystemResultStatus::Success; @@ -141,8 +185,6 @@ struct System::Impl { core_timing.SyncPause(false); is_paused = false; - audio_core->PauseSinks(false); - return status; } @@ -150,8 +192,6 @@ struct System::Impl { std::unique_lock<std::mutex> lk(suspend_guard); status = SystemResultStatus::Success; - audio_core->PauseSinks(true); - core_timing.SyncPause(true); kernel.Suspend(true); is_paused = true; @@ -182,43 +222,21 @@ struct System::Impl { debugger = std::make_unique<Debugger>(system, port); } - SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { + SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) { LOG_DEBUG(Core, "initialized OK"); - device_memory = std::make_unique<Core::DeviceMemory>(); - - is_multicore = Settings::values.use_multi_core.GetValue(); - is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); - - kernel.SetMulticore(is_multicore); - cpu_manager.SetMulticore(is_multicore); - cpu_manager.SetAsyncGpu(is_async_gpu); - core_timing.SetMulticore(is_multicore); + // Setting changes may require a full system reinitialization (e.g., disabling multicore). + ReinitializeIfNecessary(system); kernel.Initialize(); cpu_manager.Initialize(); - core_timing.Initialize([&system]() { system.RegisterHostThread(); }); - - const auto posix_time = std::chrono::system_clock::now().time_since_epoch(); - const auto current_time = - std::chrono::duration_cast<std::chrono::seconds>(posix_time).count(); - Settings::values.custom_rtc_differential = - Settings::values.custom_rtc.value_or(current_time) - current_time; - - // Create a default fs if one doesn't already exist. - if (virtual_filesystem == nullptr) - virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>(); - if (content_provider == nullptr) - content_provider = std::make_unique<FileSys::ContentProviderUnion>(); - - /// Create default implementations of applets if one is not provided. 
- applet_manager.SetDefaultAppletsIfMissing(); /// Reset all glue registrations arp_manager.ResetAll(); telemetry_session = std::make_unique<Core::TelemetrySession>(); + host1x_core = std::make_unique<Tegra::Host1x::Host1x>(system); gpu_core = VideoCore::CreateGPU(emu_window, system); if (!gpu_core) { return SystemResultStatus::ErrorVideoCore; } @@ -228,7 +246,6 @@ struct System::Impl { service_manager = std::make_shared<Service::SM::ServiceManager>(kernel); services = std::make_unique<Service::Services>(service_manager, system); - interrupt_manager = std::make_unique<Hardware::InterruptManager>(system); // Initialize time manager, which must happen after kernel is created time_manager.Initialize(); @@ -257,11 +274,11 @@ struct System::Impl { return SystemResultStatus::ErrorGetLoader; } - SystemResultStatus init_result{Init(system, emu_window)}; + SystemResultStatus init_result{SetupForMainProcess(system, emu_window)}; if (init_result != SystemResultStatus::Success) { LOG_CRITICAL(Core, "Failed to initialize system (Error {})!", static_cast<int>(init_result)); - Shutdown(); + ShutdownMainProcess(); return init_result; } @@ -280,7 +297,7 @@ struct System::Impl { const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); if (load_result != Loader::ResultStatus::Success) { LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result); - Shutdown(); + ShutdownMainProcess(); return static_cast<SystemResultStatus>( static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result)); @@ -339,7 +356,7 @@ struct System::Impl { return status; } - void Shutdown() { + void ShutdownMainProcess() { SetShuttingDown(true); // Log last frame performance stats if game was loaded @@ -373,14 +390,14 @@ struct System::Impl { cheat_engine.reset(); telemetry_session.reset(); time_manager.Shutdown(); - core_timing.Shutdown(); + core_timing.ClearPendingEvents(); app_loader.reset(); audio_core.reset(); gpu_core.reset(); + host1x_core.reset(); perf_stats.reset(); kernel.Shutdown(); memory.Reset(); - applet_manager.ClearAll(); if (auto room_member = room_network.GetRoomMember().lock()) { Network::GameInfo game_info{}; @@ -454,7 +471,7 @@ struct System::Impl { /// AppLoader used to load the current executing application std::unique_ptr<Loader::AppLoader> app_loader; std::unique_ptr<Tegra::GPU> gpu_core; - std::unique_ptr<Hardware::InterruptManager> interrupt_manager; + std::unique_ptr<Tegra::Host1x::Host1x> host1x_core; std::unique_ptr<Core::DeviceMemory> device_memory; std::unique_ptr<AudioCore::AudioCore> audio_core; Core::Memory::Memory memory; @@ -523,6 +540,10 @@ const CpuManager& System::GetCpuManager() const { return impl->cpu_manager; } +void System::Initialize() { + impl->Initialize(*this); +} + SystemResultStatus System::Run() { return impl->Run(); } @@ -543,8 +564,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) { impl->kernel.InvalidateCpuInstructionCacheRange(addr, size); } -void System::Shutdown() { - impl->Shutdown(); +void System::ShutdownMainProcess() { + impl->ShutdownMainProcess(); } bool System::IsShuttingDown() const { @@ -672,12 +693,12 @@ const Tegra::GPU& System::GPU() const { return *impl->gpu_core; } -Core::Hardware::InterruptManager& System::InterruptManager() { - return *impl->interrupt_manager; +Tegra::Host1x::Host1x& System::Host1x() { + return *impl->host1x_core; } -const Core::Hardware::InterruptManager& System::InterruptManager() const { - return *impl->interrupt_manager; +const Tegra::Host1x::Host1x&
System::Host1x() const { + return *impl->host1x_core; } VideoCore::RendererBase& System::Renderer() { diff --git a/src/core/core.h b/src/core/core.h index 0ce3b1d60..4ebedffd9 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -74,6 +74,9 @@ class TimeManager; namespace Tegra { class DebugContext; class GPU; +namespace Host1x { +class Host1x; +} // namespace Host1x } // namespace Tegra namespace VideoCore { @@ -88,10 +91,6 @@ namespace Core::Timing { class CoreTiming; } -namespace Core::Hardware { -class InterruptManager; -} - namespace Core::HID { class HIDCore; } @@ -144,6 +143,12 @@ public: System& operator=(System&&) = delete; /** + * Initializes the system + * This function will initialize core functionality used for system emulation + */ + void Initialize(); + + /** * Run the OS and Application * This function will start emulation and run the relevant devices */ @@ -167,8 +172,8 @@ public: void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); - /// Shutdown the emulated system. - void Shutdown(); + /// Shutdown the main emulated process. + void ShutdownMainProcess(); /// Check if the core is shutting down. [[nodiscard]] bool IsShuttingDown() const; @@ -260,6 +265,12 @@ public: /// Gets an immutable reference to the GPU interface. [[nodiscard]] const Tegra::GPU& GPU() const; + /// Gets a mutable reference to the Host1x interface + [[nodiscard]] Tegra::Host1x::Host1x& Host1x(); + + /// Gets an immutable reference to the Host1x interface. + [[nodiscard]] const Tegra::Host1x::Host1x& Host1x() const; + /// Gets a mutable reference to the renderer. [[nodiscard]] VideoCore::RendererBase& Renderer(); @@ -296,12 +307,6 @@ public: /// Provides a constant reference to the core timing instance. [[nodiscard]] const Timing::CoreTiming& CoreTiming() const; - /// Provides a reference to the interrupt manager instance. - [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager(); - - /// Provides a constant reference to the interrupt manager instance. - [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const; - /// Provides a reference to the kernel instance.
[[nodiscard]] Kernel::KernelCore& Kernel(); diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 2dbb99c8b..0e7b5f943 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp @@ -40,10 +40,12 @@ struct CoreTiming::Event { CoreTiming::CoreTiming() : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {} -CoreTiming::~CoreTiming() = default; +CoreTiming::~CoreTiming() { + Reset(); +} void CoreTiming::ThreadEntry(CoreTiming& instance) { - constexpr char name[] = "yuzu:HostTiming"; + constexpr char name[] = "HostTiming"; MicroProfileOnThreadCreate(name); Common::SetCurrentThreadName(name); Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical); @@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) { } void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { + Reset(); on_thread_init = std::move(on_thread_init_); event_fifo_id = 0; shutting_down = false; @@ -65,18 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { } } -void CoreTiming::Shutdown() { - paused = true; - shutting_down = true; - pause_event.Set(); - event.Set(); - if (timer_thread) { - timer_thread->join(); - } - pause_callbacks.clear(); - ClearPendingEvents(); - timer_thread.reset(); - has_started = false; +void CoreTiming::ClearPendingEvents() { + event_queue.clear(); } void CoreTiming::Pause(bool is_paused) { @@ -86,10 +79,6 @@ void CoreTiming::Pause(bool is_paused) { if (!is_paused) { pause_end_time = GetGlobalTimeNs().count(); } - - for (auto& cb : pause_callbacks) { - cb(is_paused); - } } void CoreTiming::SyncPause(bool is_paused) { @@ -110,10 +99,6 @@ void CoreTiming::SyncPause(bool is_paused) { if (!is_paused) { pause_end_time = GetGlobalTimeNs().count(); } - - for (auto& cb : pause_callbacks) { - cb(is_paused); - } } bool CoreTiming::IsRunning() const { @@ -143,13 +128,17 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time, std::chrono::nanoseconds resched_time, const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data, bool absolute_time) { - std::scoped_lock scope{basic_lock}; - const auto next_time{absolute_time ? start_time : GetGlobalTimeNs() + start_time}; + { + std::scoped_lock scope{basic_lock}; + const auto next_time{absolute_time ? 
start_time : GetGlobalTimeNs() + start_time}; - event_queue.emplace_back( - Event{next_time.count(), event_fifo_id++, user_data, event_type, resched_time.count()}); + event_queue.emplace_back( + Event{next_time.count(), event_fifo_id++, user_data, event_type, resched_time.count()}); + + std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); + } - std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); + event.Set(); } void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, @@ -201,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const { return CpuCyclesToClockCycles(ticks); } -void CoreTiming::ClearPendingEvents() { - event_queue.clear(); -} - void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { std::scoped_lock lock{basic_lock}; @@ -219,11 +204,6 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { } } -void CoreTiming::RegisterPauseCallback(PauseCallback&& callback) { - std::scoped_lock lock{basic_lock}; - pause_callbacks.emplace_back(std::move(callback)); -} - std::optional<s64> CoreTiming::Advance() { std::scoped_lock lock{advance_lock, basic_lock}; global_timer = GetGlobalTimeNs().count(); @@ -243,17 +223,17 @@ std::optional<s64> CoreTiming::Advance() { basic_lock.lock(); if (evt.reschedule_time != 0) { + const auto next_schedule_time{new_schedule_time.has_value() + ? new_schedule_time.value().count() + : evt.reschedule_time}; + // If this event was scheduled into a pause, its time now is going to be way behind. // Re-set this event to continue from the end of the pause. - auto next_time{evt.time + evt.reschedule_time}; + auto next_time{evt.time + next_schedule_time}; if (evt.time < pause_end_time) { - next_time = pause_end_time + evt.reschedule_time; + next_time = pause_end_time + next_schedule_time; } - const auto next_schedule_time{new_schedule_time.has_value() - ? new_schedule_time.value().count() - : evt.reschedule_time}; - event_queue.emplace_back( Event{next_time, event_fifo_id++, evt.user_data, evt.type, next_schedule_time}); std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); @@ -264,8 +244,7 @@ std::optional<s64> CoreTiming::Advance() { } if (!event_queue.empty()) { - const s64 next_time = event_queue.front().time - global_timer; - return next_time; + return event_queue.front().time; } else { return std::nullopt; } @@ -278,11 +257,33 @@ void CoreTiming::ThreadLoop() { paused_set = false; const auto next_time = Advance(); if (next_time) { - if (*next_time > 0) { - std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time); - event.WaitFor(next_time_ns); + // There are more events left in the queue, wait until the next event. + const auto wait_time = *next_time - GetGlobalTimeNs().count(); + if (wait_time > 0) { +#ifdef _WIN32 + // Assume a timer resolution of 1ms. + static constexpr s64 TimerResolutionNS = 1000000; + + // Sleep in discrete intervals of the timer resolution, and spin the rest. + const auto sleep_time = wait_time - (wait_time % TimerResolutionNS); + if (sleep_time > 0) { + event.WaitFor(std::chrono::nanoseconds(sleep_time)); + } + + while (!paused && !event.IsSet() && GetGlobalTimeNs().count() < *next_time) { + // Yield to reduce thread starvation. + std::this_thread::yield(); + } + + if (event.IsSet()) { + event.Reset(); + } +#else + event.WaitFor(std::chrono::nanoseconds(wait_time)); +#endif } } else { + // Queue is empty, wait until another event is scheduled and signals us to continue. 
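// (A worked example of the Windows path above, assuming the default ~1 ms
// timer resolution: for wait_time = 2'500'000 ns, sleep_time becomes
// 2'500'000 - (2'500'000 % 1'000'000) = 2'000'000 ns, so the thread sleeps
// 2 ms and spins the remaining ~0.5 ms. Sleeping only whole multiples of the
// resolution avoids oversleeping past the next event, at the cost of a brief
// busy-wait. The unconditional event.Set() added to ScheduleLoopingEvent()
// above is also what wakes the no-deadline wait below once the queue is
// empty.)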
wait_set = true; event.Wait(); } @@ -296,6 +297,18 @@ void CoreTiming::ThreadLoop() { } } +void CoreTiming::Reset() { + paused = true; + shutting_down = true; + pause_event.Set(); + event.Set(); + if (timer_thread) { + timer_thread->join(); + } + timer_thread.reset(); + has_started = false; +} + std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const { if (is_multicore) { return clock->GetTimeNS(); diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 6aa3ae923..b5925193c 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -22,7 +22,6 @@ namespace Core::Timing { /// A callback that may be scheduled for a particular core timing event. using TimedCallback = std::function<std::optional<std::chrono::nanoseconds>( std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)>; -using PauseCallback = std::function<void(bool paused)>; /// Contains the characteristics of a particular event. struct EventType { @@ -62,19 +61,14 @@ public: /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. void Initialize(std::function<void()>&& on_thread_init_); - /// Tears down all timing related functionality. - void Shutdown(); + /// Clear all pending events. This should ONLY be done on exit. + void ClearPendingEvents(); /// Sets if emulation is multicore or single core, must be set before Initialize void SetMulticore(bool is_multicore_) { is_multicore = is_multicore_; } - /// Check if it's using host timing. - bool IsHostTiming() const { - return is_multicore; - } - /// Pauses/Unpauses the execution of the timer thread. void Pause(bool is_paused); @@ -134,18 +128,14 @@ public: /// Checks for events manually and returns time in nanoseconds for next event, threadsafe. std::optional<s64> Advance(); - /// Register a callback function to be called when coretiming pauses. - void RegisterPauseCallback(PauseCallback&& callback); - private: struct Event; - /// Clear all pending events. This should ONLY be done on exit. - void ClearPendingEvents(); - static void ThreadEntry(CoreTiming& instance); void ThreadLoop(); + void Reset(); + std::unique_ptr<Common::WallClock> clock; s64 global_timer = 0; @@ -176,8 +166,6 @@ private: /// Cycle timing u64 ticks{}; s64 downcount{}; - - std::vector<PauseCallback> pause_callbacks{}; }; /// Creates a core timing event with the given name and callback. diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp index 9b1565ae1..0dd4c2196 100644 --- a/src/core/cpu_manager.cpp +++ b/src/core/cpu_manager.cpp @@ -189,9 +189,9 @@ void CpuManager::RunThread(std::size_t core) { system.RegisterCoreThread(core); std::string name; if (is_multicore) { - name = "yuzu:CPUCore_" + std::to_string(core); + name = "CPUCore_" + std::to_string(core); } else { - name = "yuzu:CPUThread"; + name = "CPUThread"; } MicroProfileOnThreadCreate(name.c_str()); Common::SetCurrentThreadName(name.c_str()); diff --git a/src/core/debugger/debugger.cpp b/src/core/debugger/debugger.cpp index e42bdd17d..339f971e6 100644 --- a/src/core/debugger/debugger.cpp +++ b/src/core/debugger/debugger.cpp @@ -140,7 +140,7 @@ private: } void ThreadLoop(std::stop_token stop_token) { - Common::SetCurrentThreadName("yuzu:Debugger"); + Common::SetCurrentThreadName("Debugger"); // Set up the client signals for new data. 
AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); }); diff --git a/src/core/device_memory.h b/src/core/device_memory.h index df61b0c0b..90510733c 100644 --- a/src/core/device_memory.h +++ b/src/core/device_memory.h @@ -31,12 +31,14 @@ public: DramMemoryMap::Base; } - u8* GetPointer(PAddr addr) { - return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); + template <typename T> + T* GetPointer(PAddr addr) { + return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); } - const u8* GetPointer(PAddr addr) const { - return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); + template <typename T> + const T* GetPointer(PAddr addr) const { + return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); } Common::HostMemory buffer; diff --git a/src/core/file_sys/card_image.cpp b/src/core/file_sys/card_image.cpp index f23d9373b..5d02865f4 100644 --- a/src/core/file_sys/card_image.cpp +++ b/src/core/file_sys/card_image.cpp @@ -232,8 +232,8 @@ const std::vector<std::shared_ptr<NCA>>& XCI::GetNCAs() const { std::shared_ptr<NCA> XCI::GetNCAByType(NCAContentType type) const { const auto program_id = secure_partition->GetProgramTitleID(); - const auto iter = std::find_if( - ncas.begin(), ncas.end(), [this, type, program_id](const std::shared_ptr<NCA>& nca) { + const auto iter = + std::find_if(ncas.begin(), ncas.end(), [type, program_id](const std::shared_ptr<NCA>& nca) { return nca->GetType() == type && nca->GetTitleId() == program_id; }); return iter == ncas.end() ? nullptr : *iter; diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp index e0cdf3520..f00479bd3 100644 --- a/src/core/file_sys/program_metadata.cpp +++ b/src/core/file_sys/program_metadata.cpp @@ -33,11 +33,55 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) { return Loader::ResultStatus::ErrorBadACIHeader; } - if (sizeof(FileAccessControl) != file->ReadObject(&acid_file_access, acid_header.fac_offset)) { + // Load acid_file_access per-component instead of the entire struct, since this struct does not + // reflect the layout of the real data.
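// (Background, inferred from the comment above and the matching header change
// further down: the old layout used #pragma pack(1), which places the u64
// permissions field at byte offset 4, and a single ReadObject() into that
// struct let references bind to a misaligned address, which is undefined
// behavior. Reading field by field into the now-unpacked struct keeps the
// on-disk offsets while giving each member its natural alignment.)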
+ std::size_t current_offset = acid_header.fac_offset; + if (sizeof(FileAccessControl::version) != file->ReadBytes(&acid_file_access.version, + sizeof(FileAccessControl::version), + current_offset)) { + return Loader::ResultStatus::ErrorBadFileAccessControl; + } + if (sizeof(FileAccessControl::permissions) != + file->ReadBytes(&acid_file_access.permissions, sizeof(FileAccessControl::permissions), + current_offset += sizeof(FileAccessControl::version) + 3)) { + return Loader::ResultStatus::ErrorBadFileAccessControl; + } + if (sizeof(FileAccessControl::unknown) != + file->ReadBytes(&acid_file_access.unknown, sizeof(FileAccessControl::unknown), + current_offset + sizeof(FileAccessControl::permissions))) { return Loader::ResultStatus::ErrorBadFileAccessControl; } - if (sizeof(FileAccessHeader) != file->ReadObject(&aci_file_access, aci_header.fah_offset)) { + // Load aci_file_access per-component instead of the entire struct, same as acid_file_access + current_offset = aci_header.fah_offset; + if (sizeof(FileAccessHeader::version) != file->ReadBytes(&aci_file_access.version, + sizeof(FileAccessHeader::version), + current_offset)) { + return Loader::ResultStatus::ErrorBadFileAccessHeader; + } + if (sizeof(FileAccessHeader::permissions) != + file->ReadBytes(&aci_file_access.permissions, sizeof(FileAccessHeader::permissions), + current_offset += sizeof(FileAccessHeader::version) + 3)) { + return Loader::ResultStatus::ErrorBadFileAccessHeader; + } + if (sizeof(FileAccessHeader::unk_offset) != + file->ReadBytes(&aci_file_access.unk_offset, sizeof(FileAccessHeader::unk_offset), + current_offset += sizeof(FileAccessHeader::permissions))) { + return Loader::ResultStatus::ErrorBadFileAccessHeader; + } + if (sizeof(FileAccessHeader::unk_size) != + file->ReadBytes(&aci_file_access.unk_size, sizeof(FileAccessHeader::unk_size), + current_offset += sizeof(FileAccessHeader::unk_offset))) { + return Loader::ResultStatus::ErrorBadFileAccessHeader; + } + if (sizeof(FileAccessHeader::unk_offset_2) != + file->ReadBytes(&aci_file_access.unk_offset_2, sizeof(FileAccessHeader::unk_offset_2), + current_offset += sizeof(FileAccessHeader::unk_size))) { + return Loader::ResultStatus::ErrorBadFileAccessHeader; + } + if (sizeof(FileAccessHeader::unk_size_2) != + file->ReadBytes(&aci_file_access.unk_size_2, sizeof(FileAccessHeader::unk_size_2), + current_offset + sizeof(FileAccessHeader::unk_offset_2))) { return Loader::ResultStatus::ErrorBadFileAccessHeader; } @@ -83,7 +127,7 @@ void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address } bool ProgramMetadata::Is64BitProgram() const { - return npdm_header.has_64_bit_instructions; + return npdm_header.has_64_bit_instructions.As<bool>(); } ProgramAddressSpaceType ProgramMetadata::GetAddressSpaceType() const { @@ -152,9 +196,7 @@ void ProgramMetadata::Print() const { LOG_DEBUG(Service_FS, " > Is Retail: {}", acid_header.is_retail ? 
"YES" : "NO"); LOG_DEBUG(Service_FS, "Title ID Min: 0x{:016X}", acid_header.title_id_min); LOG_DEBUG(Service_FS, "Title ID Max: 0x{:016X}", acid_header.title_id_max); - u64_le permissions_l; // local copy to fix alignment error - std::memcpy(&permissions_l, &acid_file_access.permissions, sizeof(permissions_l)); - LOG_DEBUG(Service_FS, "Filesystem Access: 0x{:016X}\n", permissions_l); + LOG_DEBUG(Service_FS, "Filesystem Access: 0x{:016X}\n", acid_file_access.permissions); // Begin ACI0 printing (actual perms, unsigned) LOG_DEBUG(Service_FS, "Magic: {:.4}", aci_header.magic.data()); diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h index e8fb4e27f..2e8960b07 100644 --- a/src/core/file_sys/program_metadata.h +++ b/src/core/file_sys/program_metadata.h @@ -144,20 +144,18 @@ private: static_assert(sizeof(AciHeader) == 0x40, "ACI0 header structure size is wrong"); -#pragma pack(push, 1) - + // FileAccessControl and FileAccessHeader need loaded per-component: this layout does not + // reflect the real layout to avoid reference binding to misaligned addresses struct FileAccessControl { u8 version; - INSERT_PADDING_BYTES(3); + // 3 padding bytes u64_le permissions; std::array<u8, 0x20> unknown; }; - static_assert(sizeof(FileAccessControl) == 0x2C, "FS access control structure size is wrong"); - struct FileAccessHeader { u8 version; - INSERT_PADDING_BYTES(3); + // 3 padding bytes u64_le permissions; u32_le unk_offset; u32_le unk_size; @@ -165,10 +163,6 @@ private: u32_le unk_size_2; }; - static_assert(sizeof(FileAccessHeader) == 0x1C, "FS access header structure size is wrong"); - -#pragma pack(pop) - Header npdm_header; AciHeader aci_header; AcidHeader acid_header; diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp index 8c1b2523c..1567da231 100644 --- a/src/core/file_sys/savedata_factory.cpp +++ b/src/core/file_sys/savedata_factory.cpp @@ -5,6 +5,7 @@ #include "common/assert.h" #include "common/common_types.h" #include "common/logging/log.h" +#include "common/uuid.h" #include "core/core.h" #include "core/file_sys/savedata_factory.h" #include "core/file_sys/vfs.h" @@ -59,6 +60,36 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA attr.title_id == 0 && attr.save_id == 0); } +std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id, + u128 user_id) { + // Only detect nand user saves. + const auto space_id_path = [space_id]() -> std::string_view { + switch (space_id) { + case SaveDataSpaceId::NandUser: + return "/user/save"; + default: + return ""; + } + }(); + + if (space_id_path.empty()) { + return ""; + } + + Common::UUID uuid; + std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID)); + + // Only detect account/device saves from the future location. 
+ switch (type) { + case SaveDataType::SaveData: + return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id); + case SaveDataType::DeviceSaveData: + return fmt::format("{}/device/{:016X}/1", space_id_path, title_id); + default: + return ""; + } +} + } // Anonymous namespace std::string SaveDataAttribute::DebugInfo() const { @@ -82,7 +113,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space, PrintSaveDataAttributeWarnings(meta); const auto save_directory = - GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id); + GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id); auto out = dir->CreateDirectoryRelative(save_directory); @@ -99,7 +130,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space, const SaveDataAttribute& meta) const { const auto save_directory = - GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id); + GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id); auto out = dir->GetDirectoryRelative(save_directory); @@ -134,9 +165,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) { } } -std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space, - SaveDataType type, u64 title_id, u128 user_id, - u64 save_id) { +std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir, + SaveDataSpaceId space, SaveDataType type, u64 title_id, + u128 user_id, u64 save_id) { // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should // be interpreted as the title id of the current process. if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) { @@ -145,6 +176,17 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s } } + // For compat with a future impl. + if (std::string future_path = + GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id); + !future_path.empty()) { + // Check if this location exists, and prefer it over the old. 
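    // (Lookup order implemented here, relying on GetDirectoryRelative
    // returning nullptr for a missing directory: probe the new-style path
    // built above, for which the low byte of title_id was masked off at the
    // call site, and only fall through to the legacy path construction below
    // when the new-style directory does not already exist.)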
+ if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) { + LOG_INFO(Service_FS, "Using save at new location: {}", future_path); + return future_path; + } + } + std::string out = GetSaveDataSpaceIdPath(space); switch (type) { @@ -167,7 +209,8 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const { - const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); + const auto path = + GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME); @@ -185,7 +228,8 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id, void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, SaveDataSize new_value) const { - const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); + const auto path = + GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME); diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h index a763b94c8..d3633ef03 100644 --- a/src/core/file_sys/savedata_factory.h +++ b/src/core/file_sys/savedata_factory.h @@ -95,8 +95,8 @@ public: VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const; static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space); - static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type, - u64 title_id, u128 user_id, u64 save_id); + static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space, + SaveDataType type, u64 title_id, u128 user_id, u64 save_id); SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const; void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp index 90dd68ff1..b4081fc39 100644 --- a/src/core/frontend/framebuffer_layout.cpp +++ b/src/core/frontend/framebuffer_layout.cpp @@ -67,6 +67,8 @@ float EmulationAspectRatio(AspectRatio aspect, float window_aspect_ratio) { return 3.0f / 4.0f; case AspectRatio::R21_9: return 9.0f / 21.0f; + case AspectRatio::R16_10: + return 10.0f / 16.0f; case AspectRatio::StretchToWindow: return window_aspect_ratio; default: diff --git a/src/core/frontend/framebuffer_layout.h b/src/core/frontend/framebuffer_layout.h index 1561d994e..94683b30f 100644 --- a/src/core/frontend/framebuffer_layout.h +++ b/src/core/frontend/framebuffer_layout.h @@ -27,6 +27,7 @@ enum class AspectRatio { Default, R4_3, R21_9, + R16_10, StretchToWindow, }; diff --git a/src/core/hardware_interrupt_manager.cpp b/src/core/hardware_interrupt_manager.cpp deleted file mode 100644 index d08cc3315..000000000 --- a/src/core/hardware_interrupt_manager.cpp +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#include "core/core.h" -#include "core/core_timing.h" -#include "core/hardware_interrupt_manager.h" -#include "core/hle/service/nvdrv/nvdrv_interface.h" -#include "core/hle/service/sm/sm.h" - -namespace 
Core::Hardware { - -InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) { - gpu_interrupt_event = Core::Timing::CreateEvent( - "GPUInterrupt", - [this](std::uintptr_t message, u64 time, - std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> { - auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv"); - const u32 syncpt = static_cast<u32>(message >> 32); - const u32 value = static_cast<u32>(message); - nvdrv->SignalGPUInterruptSyncpt(syncpt, value); - return std::nullopt; - }); -} - -InterruptManager::~InterruptManager() = default; - -void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) { - const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value; - system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg); -} - -} // namespace Core::Hardware diff --git a/src/core/hardware_interrupt_manager.h b/src/core/hardware_interrupt_manager.h deleted file mode 100644 index 5665c5918..000000000 --- a/src/core/hardware_interrupt_manager.h +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#pragma once - -#include <memory> - -#include "common/common_types.h" - -namespace Core { -class System; -} - -namespace Core::Timing { -struct EventType; -} - -namespace Core::Hardware { - -class InterruptManager { -public: - explicit InterruptManager(Core::System& system); - ~InterruptManager(); - - void GPUInterruptSyncpt(u32 syncpoint_id, u32 value); - -private: - Core::System& system; - std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event; -}; - -} // namespace Core::Hardware diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp index 01c43be93..57eff72fe 100644 --- a/src/core/hid/emulated_controller.cpp +++ b/src/core/hid/emulated_controller.cpp @@ -93,7 +93,7 @@ void EmulatedController::ReloadFromSettings() { .body = GetNpadColor(player.body_color_left), .button = GetNpadColor(player.button_color_left), }; - controller.colors_state.left = { + controller.colors_state.right = { .body = GetNpadColor(player.body_color_right), .button = GetNpadColor(player.button_color_right), }; @@ -131,13 +131,16 @@ void EmulatedController::LoadDevices() { battery_params[RightIndex].Set("battery", true); camera_params = Common::ParamPackage{"engine:camera,camera:1"}; + nfc_params = Common::ParamPackage{"engine:virtual_amiibo,nfc:1"}; output_params[LeftIndex] = left_joycon; output_params[RightIndex] = right_joycon; output_params[2] = camera_params; + output_params[3] = nfc_params; output_params[LeftIndex].Set("output", true); output_params[RightIndex].Set("output", true); output_params[2].Set("output", true); + output_params[3].Set("output", true); LoadTASParams(); @@ -155,6 +158,7 @@ void EmulatedController::LoadDevices() { std::transform(battery_params.begin(), battery_params.end(), battery_devices.begin(), Common::Input::CreateDevice<Common::Input::InputDevice>); camera_devices = Common::Input::CreateDevice<Common::Input::InputDevice>(camera_params); + nfc_devices = Common::Input::CreateDevice<Common::Input::InputDevice>(nfc_params); std::transform(output_params.begin(), output_params.end(), output_devices.begin(), Common::Input::CreateDevice<Common::Input::OutputDevice>); @@ -284,6 +288,16 @@ void EmulatedController::ReloadInput() { camera_devices->ForceUpdate(); } + if (nfc_devices) { + if (npad_id_type == NpadIdType::Handheld || npad_id_type == 
NpadIdType::Player1) { + nfc_devices->SetCallback({ + .on_change = + [this](const Common::Input::CallbackStatus& callback) { SetNfc(callback); }, + }); + nfc_devices->ForceUpdate(); + } + } + // Use a common UUID for TAS static constexpr Common::UUID TAS_UUID = Common::UUID{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xA5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}; @@ -339,6 +353,8 @@ void EmulatedController::UnloadInput() { for (auto& stick : tas_stick_devices) { stick.reset(); } + camera_devices.reset(); + nfc_devices.reset(); } void EmulatedController::EnableConfiguration() { @@ -903,6 +919,25 @@ void EmulatedController::SetCamera(const Common::Input::CallbackStatus& callback TriggerOnChange(ControllerTriggerType::IrSensor, true); } +void EmulatedController::SetNfc(const Common::Input::CallbackStatus& callback) { + std::unique_lock lock{mutex}; + controller.nfc_values = TransformToNfc(callback); + + if (is_configuring) { + lock.unlock(); + TriggerOnChange(ControllerTriggerType::Nfc, false); + return; + } + + controller.nfc_state = { + controller.nfc_values.state, + controller.nfc_values.data, + }; + + lock.unlock(); + TriggerOnChange(ControllerTriggerType::Nfc, true); +} + bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue vibration) { if (device_index >= output_devices.size()) { return false; @@ -980,7 +1015,13 @@ bool EmulatedController::TestVibration(std::size_t device_index) { bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) { LOG_INFO(Service_HID, "Set polling mode {}", polling_mode); auto& output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)]; - return output_device->SetPollingMode(polling_mode) == Common::Input::PollingError::None; + auto& nfc_output_device = output_devices[3]; + + const auto virtual_nfc_result = nfc_output_device->SetPollingMode(polling_mode); + const auto mapped_nfc_result = output_device->SetPollingMode(polling_mode); + + return virtual_nfc_result == Common::Input::PollingError::None || + mapped_nfc_result == Common::Input::PollingError::None; } bool EmulatedController::SetCameraFormat( @@ -1000,6 +1041,32 @@ bool EmulatedController::SetCameraFormat( camera_format)) == Common::Input::CameraError::None; } +bool EmulatedController::HasNfc() const { + const auto& nfc_output_device = output_devices[3]; + + switch (npad_type) { + case NpadStyleIndex::JoyconRight: + case NpadStyleIndex::JoyconDual: + case NpadStyleIndex::ProController: + break; + default: + return false; + } + + const bool has_virtual_nfc = + npad_id_type == NpadIdType::Player1 || npad_id_type == NpadIdType::Handheld; + const bool is_virtual_nfc_supported = + nfc_output_device->SupportsNfc() != Common::Input::NfcState::NotSupported; + + return is_connected && (has_virtual_nfc && is_virtual_nfc_supported); +} + +bool EmulatedController::WriteNfc(const std::vector<u8>& data) { + auto& nfc_output_device = output_devices[3]; + + return nfc_output_device->WriteNfcData(data) == Common::Input::NfcState::Success; +} + void EmulatedController::SetLedPattern() { for (auto& device : output_devices) { if (!device) { @@ -1091,27 +1158,27 @@ bool EmulatedController::IsControllerSupported(bool use_temporary_value) const { const auto type = is_configuring && use_temporary_value ? 
tmp_npad_type : npad_type; switch (type) { case NpadStyleIndex::ProController: - return supported_style_tag.fullkey; + return supported_style_tag.fullkey.As<bool>(); case NpadStyleIndex::Handheld: - return supported_style_tag.handheld; + return supported_style_tag.handheld.As<bool>(); case NpadStyleIndex::JoyconDual: - return supported_style_tag.joycon_dual; + return supported_style_tag.joycon_dual.As<bool>(); case NpadStyleIndex::JoyconLeft: - return supported_style_tag.joycon_left; + return supported_style_tag.joycon_left.As<bool>(); case NpadStyleIndex::JoyconRight: - return supported_style_tag.joycon_right; + return supported_style_tag.joycon_right.As<bool>(); case NpadStyleIndex::GameCube: - return supported_style_tag.gamecube; + return supported_style_tag.gamecube.As<bool>(); case NpadStyleIndex::Pokeball: - return supported_style_tag.palma; + return supported_style_tag.palma.As<bool>(); case NpadStyleIndex::NES: - return supported_style_tag.lark; + return supported_style_tag.lark.As<bool>(); case NpadStyleIndex::SNES: - return supported_style_tag.lucia; + return supported_style_tag.lucia.As<bool>(); case NpadStyleIndex::N64: - return supported_style_tag.lagoon; + return supported_style_tag.lagoon.As<bool>(); case NpadStyleIndex::SegaGenesis: - return supported_style_tag.lager; + return supported_style_tag.lager.As<bool>(); default: return false; } @@ -1363,6 +1430,11 @@ const CameraState& EmulatedController::GetCamera() const { return controller.camera_state; } +const NfcState& EmulatedController::GetNfc() const { + std::scoped_lock lock{mutex}; + return controller.nfc_state; +} + NpadColor EmulatedController::GetNpadColor(u32 color) { return { .r = static_cast<u8>((color >> 16) & 0xFF), diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h index c3aa8f9d3..319226bf8 100644 --- a/src/core/hid/emulated_controller.h +++ b/src/core/hid/emulated_controller.h @@ -20,7 +20,7 @@ namespace Core::HID { const std::size_t max_emulated_controllers = 2; -const std::size_t output_devices = 3; +const std::size_t output_devices_size = 4; struct ControllerMotionInfo { Common::Input::MotionStatus raw_status{}; MotionInput emulated{}; @@ -37,7 +37,8 @@ using TriggerDevices = using BatteryDevices = std::array<std::unique_ptr<Common::Input::InputDevice>, max_emulated_controllers>; using CameraDevices = std::unique_ptr<Common::Input::InputDevice>; -using OutputDevices = std::array<std::unique_ptr<Common::Input::OutputDevice>, output_devices>; +using NfcDevices = std::unique_ptr<Common::Input::InputDevice>; +using OutputDevices = std::array<std::unique_ptr<Common::Input::OutputDevice>, output_devices_size>; using ButtonParams = std::array<Common::ParamPackage, Settings::NativeButton::NumButtons>; using StickParams = std::array<Common::ParamPackage, Settings::NativeAnalog::NumAnalogs>; @@ -45,7 +46,8 @@ using ControllerMotionParams = std::array<Common::ParamPackage, Settings::Native using TriggerParams = std::array<Common::ParamPackage, Settings::NativeTrigger::NumTriggers>; using BatteryParams = std::array<Common::ParamPackage, max_emulated_controllers>; using CameraParams = Common::ParamPackage; -using OutputParams = std::array<Common::ParamPackage, output_devices>; +using NfcParams = Common::ParamPackage; +using OutputParams = std::array<Common::ParamPackage, output_devices_size>; using ButtonValues = std::array<Common::Input::ButtonStatus, Settings::NativeButton::NumButtons>; using SticksValues = std::array<Common::Input::StickStatus, Settings::NativeAnalog::NumAnalogs>; 
@@ -55,6 +57,7 @@ using ControllerMotionValues = std::array<ControllerMotionInfo, Settings::Native using ColorValues = std::array<Common::Input::BodyColorStatus, max_emulated_controllers>; using BatteryValues = std::array<Common::Input::BatteryStatus, max_emulated_controllers>; using CameraValues = Common::Input::CameraStatus; +using NfcValues = Common::Input::NfcStatus; using VibrationValues = std::array<Common::Input::VibrationStatus, max_emulated_controllers>; struct AnalogSticks { @@ -80,6 +83,11 @@ struct CameraState { std::size_t sample{}; }; +struct NfcState { + Common::Input::NfcState state{}; + std::vector<u8> data{}; +}; + struct ControllerMotion { Common::Vec3f accel{}; Common::Vec3f gyro{}; @@ -107,6 +115,7 @@ struct ControllerStatus { BatteryValues battery_values{}; VibrationValues vibration_values{}; CameraValues camera_values{}; + NfcValues nfc_values{}; // Data for HID services HomeButtonState home_button_state{}; @@ -119,6 +128,7 @@ struct ControllerStatus { ControllerColors colors_state{}; BatteryLevelState battery_state{}; CameraState camera_state{}; + NfcState nfc_state{}; }; enum class ControllerTriggerType { @@ -130,6 +140,7 @@ enum class ControllerTriggerType { Battery, Vibration, IrSensor, + Nfc, Connected, Disconnected, Type, @@ -315,6 +326,9 @@ public: /// Returns the latest camera status from the controller const CameraState& GetCamera() const; + /// Returns the latest ntag status from the controller + const NfcState& GetNfc() const; + /** * Sends a specific vibration to the output device * @return true if vibration had no errors @@ -341,6 +355,12 @@ public: */ bool SetCameraFormat(Core::IrSensor::ImageTransferProcessorFormat camera_format); + /// Returns true if the device has nfc support + bool HasNfc() const; + + /// Returns true if the nfc tag was written + bool WriteNfc(const std::vector<u8>& data); + /// Returns the led pattern corresponding to this emulated controller LedPattern GetLedPattern() const; @@ -425,6 +445,12 @@ private: void SetCamera(const Common::Input::CallbackStatus& callback); /** + * Updates the nfc status of the controller + * @param callback A CallbackStatus containing the nfc status + */ + void SetNfc(const Common::Input::CallbackStatus& callback); + + /** * Converts a color format from bgra to rgba * @param color in bgra format * @return NpadColor in rgba format @@ -458,6 +484,7 @@ private: TriggerParams trigger_params; BatteryParams battery_params; CameraParams camera_params; + NfcParams nfc_params; OutputParams output_params; ButtonDevices button_devices; @@ -466,6 +493,7 @@ private: StickDevices stick_devices; ControllerMotionDevices motion_devices; TriggerDevices trigger_devices; BatteryDevices battery_devices; CameraDevices camera_devices; + NfcDevices nfc_devices; OutputDevices output_devices; // TAS related variables diff --git a/src/core/hid/input_converter.cpp b/src/core/hid/input_converter.cpp index 52fb69e9c..5d8b75b50 100644 --- a/src/core/hid/input_converter.cpp +++ b/src/core/hid/input_converter.cpp @@ -277,7 +277,10 @@ Common::Input::CameraStatus TransformToCamera(const Common::Input::CallbackStatu Common::Input::CameraStatus camera{}; switch (callback.type) { case Common::Input::InputType::IrSensor: - camera = callback.camera_status; + camera = { + .format = callback.camera_status, + .data = callback.raw_data, + }; break; default: LOG_ERROR(Input, "Conversion from type {} to camera not implemented", callback.type); @@ -287,6 +290,23 @@ Common::Input::CameraStatus TransformToCamera(const Common::Input::CallbackStatu return camera; } +Common::Input::NfcStatus TransformToNfc(const
Common::Input::CallbackStatus& callback) { + Common::Input::NfcStatus nfc{}; + switch (callback.type) { + case Common::Input::InputType::Nfc: + nfc = { + .state = callback.nfc_status, + .data = callback.raw_data, + }; + break; + default: + LOG_ERROR(Input, "Conversion from type {} to NFC not implemented", callback.type); + break; + } + + return nfc; +} + void SanitizeAnalog(Common::Input::AnalogStatus& analog, bool clamp_value) { const auto& properties = analog.properties; float& raw_value = analog.raw_value; diff --git a/src/core/hid/input_converter.h b/src/core/hid/input_converter.h index 143c50cc0..b7eb6e660 100644 --- a/src/core/hid/input_converter.h +++ b/src/core/hid/input_converter.h @@ -85,6 +85,14 @@ Common::Input::AnalogStatus TransformToAnalog(const Common::Input::CallbackStatu Common::Input::CameraStatus TransformToCamera(const Common::Input::CallbackStatus& callback); /** + * Converts raw input data into a valid nfc status. + * + * @param callback Supported callbacks: Nfc. + * @return A valid NfcStatus object. + */ +Common::Input::NfcStatus TransformToNfc(const Common::Input::CallbackStatus& callback); + +/** * Converts raw analog data into a valid analog value * @param analog An analog object containing raw data and properties * @param clamp_value determines if the value needs to be clamped between -1.0f and 1.0f. diff --git a/src/core/hid/irs_types.h b/src/core/hid/irs_types.h index 88c5b016d..0d1bfe53f 100644 --- a/src/core/hid/irs_types.h +++ b/src/core/hid/irs_types.h @@ -14,7 +14,7 @@ enum class CameraAmbientNoiseLevel : u32 { Low, Medium, High, - Unkown3, // This level can't be reached + Unknown3, // This level can't be reached }; // This is nn::irsensor::CameraLightTarget @@ -75,9 +75,9 @@ enum class IrCameraStatus : u32 { enum class IrCameraInternalStatus : u32 { Stopped, FirmwareUpdateNeeded, - Unkown2, - Unkown3, - Unkown4, + Unknown2, + Unknown3, + Unknown4, FirmwareVersionRequested, FirmwareVersionIsInvalid, Ready, @@ -121,20 +121,20 @@ enum class IrSensorFunctionLevel : u8 { // This is nn::irsensor::MomentProcessorPreprocess enum class MomentProcessorPreprocess : u32 { - Unkown0, - Unkown1, + Unknown0, + Unknown1, }; // This is nn::irsensor::PackedMomentProcessorPreprocess enum class PackedMomentProcessorPreprocess : u8 { - Unkown0, - Unkown1, + Unknown0, + Unknown1, }; // This is nn::irsensor::PointingStatus enum class PointingStatus : u32 { - Unkown0, - Unkown1, + Unknown0, + Unknown1, }; struct IrsRect { diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index d631c0357..18fde8bd6 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h @@ -86,13 +86,13 @@ public: u32 num_domain_objects{}; const bool always_move_handles{ (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0}; - if (!ctx.Session()->IsDomain() || always_move_handles) { + if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) { num_handles_to_move = num_objects_to_move; } else { num_domain_objects = num_objects_to_move; } - if (ctx.Session()->IsDomain()) { + if (ctx.Session()->GetSessionRequestManager()->IsDomain()) { raw_data_size += static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects); ctx.write_size += num_domain_objects; @@ -125,7 +125,8 @@ public: if (!ctx.IsTipc()) { AlignWithPadding(); - if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) { + if (ctx.Session()->GetSessionRequestManager()->IsDomain() && + ctx.HasDomainMessageHeader()) { IPC::DomainMessageHeader
domain_header{}; domain_header.num_objects = num_domain_objects; PushRaw(domain_header); @@ -145,14 +146,15 @@ public: template <class T> void PushIpcInterface(std::shared_ptr<T> iface) { - if (context->Session()->IsDomain()) { + if (context->Session()->GetSessionRequestManager()->IsDomain()) { context->AddDomainObject(std::move(iface)); } else { kernel.CurrentProcess()->GetResourceLimit()->Reserve( Kernel::LimitableResource::Sessions, 1); auto* session = Kernel::KSession::Create(kernel); - session->Initialize(nullptr, iface->GetServiceName()); + session->Initialize(nullptr, iface->GetServiceName(), + std::make_shared<Kernel::SessionRequestManager>(kernel)); context->AddMoveObject(&session->GetClientSession()); iface->ClientConnected(&session->GetServerSession()); @@ -385,7 +387,7 @@ public: template <class T> std::weak_ptr<T> PopIpcInterface() { - ASSERT(context->Session()->IsDomain()); + ASSERT(context->Session()->GetSessionRequestManager()->IsDomain()); ASSERT(context->GetDomainMessageHeader().input_object_count > 0); return context->GetDomainHandler<T>(Pop<u32>() - 1); } @@ -404,7 +406,7 @@ inline s32 RequestParser::Pop() { } // Ignore the -Wclass-memaccess warning on memcpy for non-trivially default constructible objects. -#if defined(__GNUC__) +#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wclass-memaccess" #endif @@ -415,7 +417,7 @@ void RequestParser::PopRaw(T& value) { std::memcpy(&value, cmdbuf + index, sizeof(T)); index += (sizeof(T) + 3) / 4; // round up to word length } -#if defined(__GNUC__) +#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) #pragma GCC diagnostic pop #endif diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 5b3feec66..e4f43a053 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -19,6 +19,7 @@ #include "core/hle/kernel/k_server_session.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/service_thread.h" #include "core/memory.h" namespace Kernel { @@ -56,16 +57,103 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co } } +Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session, + HLERequestContext& context) { + Result result = ResultSuccess; + + // If the session has been converted to a domain, handle the domain request + if (this->HasSessionRequestHandler(context)) { + if (IsDomain() && context.HasDomainMessageHeader()) { + result = HandleDomainSyncRequest(server_session, context); + // If there is no domain header, the regular session handler is used + } else if (this->HasSessionHandler()) { + // If this manager has an associated HLE handler, forward the request to it. 
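    // (For a non-domain session there is exactly one HLE interface, installed
    // by SessionRequestHandler::ClientConnected() via SetSessionHandler();
    // domain sessions instead carry a table of handlers and route by the
    // object_id in the domain message header, see HandleDomainSyncRequest().)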
+ result = this->SessionHandler().HandleSyncRequest(*server_session, context); + } + } else { + ASSERT_MSG(false, "Session handler is invalid, stubbing response!"); + IPC::ResponseBuilder rb(context, 2); + rb.Push(ResultSuccess); + } + + if (convert_to_domain) { + ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance."); + this->ConvertToDomain(); + convert_to_domain = false; + } + + return result; +} + +Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session, + HLERequestContext& context) { + if (!context.HasDomainMessageHeader()) { + return ResultSuccess; + } + + // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs + context.SetSessionRequestManager(server_session->GetSessionRequestManager()); + + // If there is a DomainMessageHeader, then this is CommandType "Request" + const auto& domain_message_header = context.GetDomainMessageHeader(); + const u32 object_id{domain_message_header.object_id}; + switch (domain_message_header.command) { + case IPC::DomainMessageHeader::CommandType::SendMessage: + if (object_id > this->DomainHandlerCount()) { + LOG_CRITICAL(IPC, + "object_id {} is too big! This probably means a recent service call " + "needed to return a new interface!", + object_id); + ASSERT(false); + return ResultSuccess; // Ignore error if asserts are off + } + if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) { + return strong_ptr->HandleSyncRequest(*server_session, context); + } else { + ASSERT(false); + return ResultSuccess; + } + + case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { + LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); + + this->CloseDomainHandler(object_id - 1); + + IPC::ResponseBuilder rb{context, 2}; + rb.Push(ResultSuccess); + return ResultSuccess; + } + } + + LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); + ASSERT(false); + return ResultSuccess; +} + +Result SessionRequestManager::QueueSyncRequest(KSession* parent, + std::shared_ptr<HLERequestContext>&& context) { + // Ensure we have a session request handler + if (this->HasSessionRequestHandler(*context)) { + if (auto strong_ptr = this->GetServiceThread().lock()) { + strong_ptr->QueueSyncRequest(*parent, std::move(context)); + } else { + ASSERT_MSG(false, "strong_ptr is nullptr!"); + } + } else { + ASSERT_MSG(false, "handler is invalid!"); + } + + return ResultSuccess; +} + void SessionRequestHandler::ClientConnected(KServerSession* session) { - session->ClientConnected(shared_from_this()); + session->GetSessionRequestManager()->SetSessionHandler(shared_from_this()); // Ensure our server session is tracked globally. 
kernel.RegisterServerObject(session); } -void SessionRequestHandler::ClientDisconnected(KServerSession* session) { - session->ClientDisconnected(); -} +void SessionRequestHandler::ClientDisconnected(KServerSession* session) {} HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_, KServerSession* server_session_, KThread* thread_) @@ -126,7 +214,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32 // Padding to align to 16 bytes rp.AlignWithPadding(); - if (Session()->IsDomain() && + if (Session()->GetSessionRequestManager()->IsDomain() && ((command_header->type == IPC::CommandType::Request || command_header->type == IPC::CommandType::RequestWithContext) || !incoming)) { @@ -135,7 +223,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32 if (incoming || domain_message_header) { domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>(); } else { - if (Session()->IsDomain()) { + if (Session()->GetSessionRequestManager()->IsDomain()) { LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); } } @@ -228,12 +316,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa // Write the domain objects to the command buffer, these go after the raw untranslated data. // TODO(Subv): This completely ignores C buffers. - if (Session()->IsDomain()) { + if (server_session->GetSessionRequestManager()->IsDomain()) { current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size()); - for (const auto& object : outgoing_domain_objects) { - server_session->AppendDomainHandler(object); - cmd_buf[current_offset++] = - static_cast<u32_le>(server_session->NumDomainRequestHandlers()); + for (auto& object : outgoing_domain_objects) { + server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object)); + cmd_buf[current_offset++] = static_cast<u32_le>( + server_session->GetSessionRequestManager()->DomainHandlerCount()); } } diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index 99265ce90..a0522bca0 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -43,13 +43,13 @@ class Domain; class HLERequestContext; class KAutoObject; class KernelCore; +class KEvent; class KHandleTable; class KProcess; class KServerSession; class KThread; class KReadableEvent; class KSession; -class KWritableEvent; class ServiceThread; enum class ThreadWakeupReason; @@ -121,6 +121,10 @@ public: is_domain = true; } + void ConvertToDomainOnRequestEnd() { + convert_to_domain = true; + } + std::size_t DomainHandlerCount() const { return domain_handlers.size(); } @@ -164,7 +168,12 @@ public: bool HasSessionRequestHandler(const HLERequestContext& context) const; + Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context); + Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context); + Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context); + private: + bool convert_to_domain{}; bool is_domain{}; SessionRequestHandlerPtr session_handler; std::vector<SessionRequestHandlerPtr> domain_handlers; diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index 9b6b284d0..477e4e407 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -18,6 +18,7 @@ #include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" 
#include "core/hle/kernel/k_session.h" +#include "core/hle/kernel/k_session_request.h" #include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_shared_memory_info.h" #include "core/hle/kernel/k_system_control.h" @@ -34,6 +35,7 @@ namespace Kernel::Init { HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \ HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ + HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \ HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \ HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ @@ -94,8 +96,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd // TODO(bunnei): Fix this once we support the kernel virtual memory layout. if (size > 0) { - void* backing_kernel_memory{ - system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; + void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>( + TranslateSlabAddrToPhysical(memory_layout, start))}; const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); ASSERT(region != nullptr); @@ -181,7 +183,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) { ASSERT(slab_address != 0); // Initialize the slabheap. - KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), + KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), slab_size); } diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp index cc2a0f7ca..10265c23c 100644 --- a/src/core/hle/kernel/k_class_token.cpp +++ b/src/core/hle/kernel/k_class_token.cpp @@ -18,7 +18,6 @@ #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_transfer_memory.h" -#include "core/hle/kernel/k_writable_event.h" namespace Kernel { @@ -42,13 +41,12 @@ static_assert(ClassToken<KPort> == 0b10000101'00000000); static_assert(ClassToken<KSession> == 0b00011001'00000000); static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000); static_assert(ClassToken<KEvent> == 0b01001001'00000000); -static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000); // static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000); // static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000); -static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000); +static_assert(ClassToken<KTransferMemory> == 0b01010001'00000000); // static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000); // static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000); -static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000); +static_assert(ClassToken<KCodeMemory> == 0b10100001'00000000); // Ensure that the token hierarchy is correct. 
@@ -73,13 +71,12 @@ static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>) static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>)); static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>)); static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>)); -static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | ClassToken<KAutoObject>)); -static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KTransferMemory> == ((0b01010001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); -static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KCodeMemory> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); // Ensure that the token hierarchy reflects the class hierarchy. @@ -110,7 +107,6 @@ static_assert(std::is_final_v<KPort> && std::is_base_of_v<KAutoObject, KPort>); static_assert(std::is_final_v<KSession> && std::is_base_of_v<KAutoObject, KSession>); static_assert(std::is_final_v<KSharedMemory> && std::is_base_of_v<KAutoObject, KSharedMemory>); static_assert(std::is_final_v<KEvent> && std::is_base_of_v<KAutoObject, KEvent>); -static_assert(std::is_final_v<KWritableEvent> && std::is_base_of_v<KAutoObject, KWritableEvent>); // static_assert(std::is_final_v<KLightClientSession> && // std::is_base_of_v<KAutoObject, KLightClientSession>); // static_assert(std::is_final_v<KLightServerSession> && diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h index c9001ae3d..ab20e00ff 100644 --- a/src/core/hle/kernel/k_class_token.h +++ b/src/core/hle/kernel/k_class_token.h @@ -101,7 +101,6 @@ public: KSession, KSharedMemory, KEvent, - KWritableEvent, KLightClientSession, KLightServerSession, KTransferMemory, diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp index b2a887b14..b4197a8d5 100644 --- a/src/core/hle/kernel/k_client_session.cpp +++ b/src/core/hle/kernel/k_client_session.cpp @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include "common/scope_exit.h" #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/k_client_session.h" #include "core/hle/kernel/k_server_session.h" @@ -10,6 +11,8 @@ namespace Kernel { +static constexpr u32 MessageBufferSize = 0x100; + KClientSession::KClientSession(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} KClientSession::~KClientSession() = default; @@ -21,10 +24,17 @@ void KClientSession::Destroy() { void KClientSession::OnServerClosed() {} -Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing) { - // Signal the server session that new data is available - return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing); +Result KClientSession::SendSyncRequest() { + // Create a session request. 
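    // (KSessionRequest is a slab-allocated, reference-counted bookkeeping
    // object: Create() returns it with one reference held, R_UNLESS below maps
    // an allocation failure to ResultOutOfResource, and the SCOPE_EXIT drops
    // this function's reference after the server side has taken whatever
    // references it needs. Initialize() records the client thread's TLS block
    // as the message buffer, which is where the IPC command data lives.)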
+ KSessionRequest* request = KSessionRequest::Create(kernel); + R_UNLESS(request != nullptr, ResultOutOfResource); + SCOPE_EXIT({ request->Close(); }); + + // Initialize the request. + request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize); + + // Send the request. + return parent->GetServerSession().OnRequest(request); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h index 0c750d756..b4a19c546 100644 --- a/src/core/hle/kernel/k_client_session.h +++ b/src/core/hle/kernel/k_client_session.h @@ -46,8 +46,7 @@ public: return parent; } - Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing); + Result SendSyncRequest(); void OnServerClosed(); diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp index da57ceb21..4b1c134d4 100644 --- a/src/core/hle/kernel/k_code_memory.cpp +++ b/src/core/hle/kernel/k_code_memory.cpp @@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si // Clear the memory. for (const auto& block : m_page_group.Nodes()) { - std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); + std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize()); } // Set remaining tracking members. diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h new file mode 100644 index 000000000..9076c8fa3 --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_page_manager.h @@ -0,0 +1,136 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/alignment.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_page_bitmap.h" +#include "core/hle/kernel/k_spin_lock.h" +#include "core/hle/kernel/memory_types.h" +#include "core/hle/kernel/svc_results.h" + +namespace Kernel { + +class KDynamicPageManager { +public: + class PageBuffer { + private: + u8 m_buffer[PageSize]; + }; + static_assert(sizeof(PageBuffer) == PageSize); + +public: + KDynamicPageManager() = default; + + template <typename T> + T* GetPointer(VAddr addr) { + return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); + } + + template <typename T> + const T* GetPointer(VAddr addr) const { + return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address)); + } + + Result Initialize(VAddr addr, size_t sz) { + // We need to have positive size. + R_UNLESS(sz > 0, ResultOutOfMemory); + m_backing_memory.resize(sz); + + // Calculate management overhead. + const size_t management_size = + KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer)); + const size_t allocatable_size = sz - management_size; + + // Set tracking fields. + m_address = addr; + m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer)); + m_count = allocatable_size / sizeof(PageBuffer); + R_UNLESS(m_count > 0, ResultOutOfMemory); + + // Clear the management region. + u64* management_ptr = GetPointer<u64>(m_address + allocatable_size); + std::memset(management_ptr, 0, management_size); + + // Initialize the bitmap. + m_page_bitmap.Initialize(management_ptr, m_count); + + // Free the pages to the bitmap. + for (size_t i = 0; i < m_count; i++) { + // Ensure the freed page is all-zero. + std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); + + // Set the bit for the free page.
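    // (Bitmap convention used throughout this class: a set bit marks a free
    // page and a cleared bit an allocated one, so Allocate() scans for a set
    // bit with FindFreeBlock() and clears it, while Free() sets it again.)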
+ m_page_bitmap.SetBit(i); + } + + R_SUCCEED(); + } + + VAddr GetAddress() const { + return m_address; + } + size_t GetSize() const { + return m_size; + } + size_t GetUsed() const { + return m_used; + } + size_t GetPeak() const { + return m_peak; + } + size_t GetCount() const { + return m_count; + } + + PageBuffer* Allocate() { + // Take the lock. + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + KScopedSpinLock lk(m_lock); + + // Find a random free block. + s64 soffset = m_page_bitmap.FindFreeBlock(true); + if (soffset < 0) [[unlikely]] { + return nullptr; + } + + const size_t offset = static_cast<size_t>(soffset); + + // Update our tracking. + m_page_bitmap.ClearBit(offset); + m_peak = std::max(m_peak, (++m_used)); + + return GetPointer<PageBuffer>(m_address) + offset; + } + + void Free(PageBuffer* pb) { + // Ensure all pages in the heap are zero. + std::memset(pb, 0, PageSize); + + // Take the lock. + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + KScopedSpinLock lk(m_lock); + + // Set the bit for the free page. + size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer); + m_page_bitmap.SetBit(offset); + + // Decrement our used count. + --m_used; + } + +private: + KSpinLock m_lock; + KPageBitmap m_page_bitmap; + size_t m_used{}; + size_t m_peak{}; + size_t m_count{}; + VAddr m_address{}; + size_t m_size{}; + + // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. + std::vector<u8> m_backing_memory; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h new file mode 100644 index 000000000..1ce517e8e --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_resource_manager.h @@ -0,0 +1,58 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/common_funcs.h" +#include "core/hle/kernel/k_dynamic_slab_heap.h" +#include "core/hle/kernel/k_memory_block.h" + +namespace Kernel { + +template <typename T, bool ClearNode = false> +class KDynamicResourceManager { + YUZU_NON_COPYABLE(KDynamicResourceManager); + YUZU_NON_MOVEABLE(KDynamicResourceManager); + +public: + using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>; + +public: + constexpr KDynamicResourceManager() = default; + + constexpr size_t GetSize() const { + return m_slab_heap->GetSize(); + } + constexpr size_t GetUsed() const { + return m_slab_heap->GetUsed(); + } + constexpr size_t GetPeak() const { + return m_slab_heap->GetPeak(); + } + constexpr size_t GetCount() const { + return m_slab_heap->GetCount(); + } + + void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) { + m_page_allocator = page_allocator; + m_slab_heap = slab_heap; + } + + T* Allocate() const { + return m_slab_heap->Allocate(m_page_allocator); + } + + void Free(T* t) const { + m_slab_heap->Free(t); + } + +private: + KDynamicPageManager* m_page_allocator{}; + DynamicSlabType* m_slab_heap{}; +}; + +class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; + +using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h new file mode 100644 index 000000000..3a0ddd050 --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_slab_heap.h @@ -0,0 +1,122 @@ +// SPDX-FileCopyrightText: Copyright 2022 
yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <atomic> + +#include "common/common_funcs.h" +#include "core/hle/kernel/k_dynamic_page_manager.h" +#include "core/hle/kernel/k_slab_heap.h" + +namespace Kernel { + +template <typename T, bool ClearNode = false> +class KDynamicSlabHeap : protected impl::KSlabHeapImpl { + YUZU_NON_COPYABLE(KDynamicSlabHeap); + YUZU_NON_MOVEABLE(KDynamicSlabHeap); + +public: + constexpr KDynamicSlabHeap() = default; + + constexpr VAddr GetAddress() const { + return m_address; + } + constexpr size_t GetSize() const { + return m_size; + } + constexpr size_t GetUsed() const { + return m_used.load(); + } + constexpr size_t GetPeak() const { + return m_peak.load(); + } + constexpr size_t GetCount() const { + return m_count.load(); + } + + constexpr bool IsInRange(VAddr addr) const { + return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1; + } + + void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) { + ASSERT(page_allocator != nullptr); + + // Initialize members. + m_address = page_allocator->GetAddress(); + m_size = page_allocator->GetSize(); + + // Initialize the base allocator. + KSlabHeapImpl::Initialize(); + + // Allocate until we have the correct number of objects. + while (m_count.load() < num_objects) { + auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate()); + ASSERT(allocated != nullptr); + + for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) { + KSlabHeapImpl::Free(allocated + i); + } + + m_count += sizeof(PageBuffer) / sizeof(T); + } + } + + T* Allocate(KDynamicPageManager* page_allocator) { + T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate()); + + // If we successfully allocated and we should clear the node, do so. + if constexpr (ClearNode) { + if (allocated != nullptr) [[likely]] { + reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr; + } + } + + // If we fail to allocate, try to get a new page from our next allocator. + if (allocated == nullptr) [[unlikely]] { + if (page_allocator != nullptr) { + allocated = reinterpret_cast<T*>(page_allocator->Allocate()); + if (allocated != nullptr) { + // If we succeeded in getting a page, free the rest to our slab. + for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) { + KSlabHeapImpl::Free(allocated + i); + } + m_count += sizeof(PageBuffer) / sizeof(T); + } + } + } + + if (allocated != nullptr) [[likely]] { + // Construct the object. + std::construct_at(allocated); + + // Update our tracking. 
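    // (The loop below is a standard atomic-maximum update: compare_exchange_weak
    // reloads peak when it fails, so the loop terminates either by storing the
    // new maximum or by observing that another thread has already recorded a
    // value at least as large.)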
+ const size_t used = ++m_used; + size_t peak = m_peak.load(); + while (peak < used) { + if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { + break; + } + } + } + + return allocated; + } + + void Free(T* t) { + KSlabHeapImpl::Free(t); + --m_used; + } + +private: + using PageBuffer = KDynamicPageManager::PageBuffer; + +private: + std::atomic<size_t> m_used{}; + std::atomic<size_t> m_peak{}; + std::atomic<size_t> m_count{}; + VAddr m_address{}; + size_t m_size{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index e52fafbc7..78ca59463 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp @@ -8,39 +8,45 @@ namespace Kernel { KEvent::KEvent(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, readable_event{kernel_}, writable_event{ - kernel_} {} + : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {} KEvent::~KEvent() = default; -void KEvent::Initialize(std::string&& name_, KProcess* owner_) { - // Increment reference count. - // Because reference count is one on creation, this will result - // in a reference count of two. Thus, when both readable and - // writable events are closed this object will be destroyed. - Open(); +void KEvent::Initialize(KProcess* owner) { + // Create our readable event. + KAutoObject::Create(std::addressof(m_readable_event)); - // Create our sub events. - KAutoObject::Create(std::addressof(readable_event)); - KAutoObject::Create(std::addressof(writable_event)); - - // Initialize our sub sessions. - readable_event.Initialize(this, name_ + ":Readable"); - writable_event.Initialize(this, name_ + ":Writable"); + // Initialize our readable event. + m_readable_event.Initialize(this); // Set our owner process. - owner = owner_; - owner->Open(); + m_owner = owner; + m_owner->Open(); // Mark initialized. - name = std::move(name_); - initialized = true; + m_initialized = true; } void KEvent::Finalize() { KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize(); } +Result KEvent::Signal() { + KScopedSchedulerLock sl{kernel}; + + R_SUCCEED_IF(m_readable_event_destroyed); + + return m_readable_event.Signal(); +} + +Result KEvent::Clear() { + KScopedSchedulerLock sl{kernel}; + + R_SUCCEED_IF(m_readable_event_destroyed); + + return m_readable_event.Clear(); +} + void KEvent::PostDestroy(uintptr_t arg) { // Release the event count resource the owner process holds. 
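    // (The owner process arrives through the uintptr_t argument rather than a
    // member variable because PostDestroy() runs after the KEvent itself has
    // been finalized; GetPostDestroyArgument() in k_event.h returns the owner
    // pointer cast to uintptr_t for exactly this purpose.)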
KProcess* owner = reinterpret_cast<KProcess*>(arg); diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h index 2ff828feb..48ce7d9a0 100644 --- a/src/core/hle/kernel/k_event.h +++ b/src/core/hle/kernel/k_event.h @@ -4,14 +4,12 @@ #pragma once #include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/slab_helpers.h" namespace Kernel { class KernelCore; class KReadableEvent; -class KWritableEvent; class KProcess; class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> { @@ -21,37 +19,40 @@ public: explicit KEvent(KernelCore& kernel_); ~KEvent() override; - void Initialize(std::string&& name, KProcess* owner_); + void Initialize(KProcess* owner); void Finalize() override; bool IsInitialized() const override { - return initialized; + return m_initialized; } uintptr_t GetPostDestroyArgument() const override { - return reinterpret_cast<uintptr_t>(owner); + return reinterpret_cast<uintptr_t>(m_owner); } KProcess* GetOwner() const override { - return owner; + return m_owner; } KReadableEvent& GetReadableEvent() { - return readable_event; - } - - KWritableEvent& GetWritableEvent() { - return writable_event; + return m_readable_event; } static void PostDestroy(uintptr_t arg); + Result Signal(); + Result Clear(); + + void OnReadableEventDestroyed() { + m_readable_event_destroyed = true; + } + private: - KReadableEvent readable_event; - KWritableEvent writable_event; - KProcess* owner{}; - bool initialized{}; + KReadableEvent m_readable_event; + KProcess* m_owner{}; + bool m_initialized{}; + bool m_readable_event_destroyed{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp index 1b577a5b3..4a6b60d26 100644 --- a/src/core/hle/kernel/k_interrupt_manager.cpp +++ b/src/core/hle/kernel/k_interrupt_manager.cpp @@ -11,29 +11,34 @@ namespace Kernel::KInterruptManager { void HandleInterrupt(KernelCore& kernel, s32 core_id) { - auto* process = kernel.CurrentProcess(); - if (!process) { - return; - } - // Acknowledge the interrupt. kernel.PhysicalCore(core_id).ClearInterrupt(); auto& current_thread = GetCurrentThread(kernel); - // If the user disable count is set, we may need to pin the current thread. - if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { - KScopedSchedulerLock sl{kernel}; + if (auto* process = kernel.CurrentProcess(); process) { + // If the user disable count is set, we may need to pin the current thread. + if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { + KScopedSchedulerLock sl{kernel}; - // Pin the current thread. - process->PinCurrentThread(core_id); + // Pin the current thread. + process->PinCurrentThread(core_id); - // Set the interrupt flag for the thread. - GetCurrentThread(kernel).SetInterruptFlag(); + // Set the interrupt flag for the thread. + GetCurrentThread(kernel).SetInterruptFlag(); + } } // Request interrupt scheduling. 
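    // (Scheduling is requested unconditionally: even with no current process,
    // the acknowledged interrupt may have readied a thread, so the scheduler
    // always gets a chance to run; only the thread-pinning step above depends
    // on a process being present.)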
kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); } +void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) { + for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { + if (core_mask & (1ULL << core_id)) { + kernel.PhysicalCore(core_id).Interrupt(); + } + } +} + } // namespace Kernel::KInterruptManager diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h index f103dfe3f..803dc9211 100644 --- a/src/core/hle/kernel/k_interrupt_manager.h +++ b/src/core/hle/kernel/k_interrupt_manager.h @@ -11,6 +11,8 @@ class KernelCore; namespace KInterruptManager { void HandleInterrupt(KernelCore& kernel, s32 core_id); -} +void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask); + +} // namespace KInterruptManager } // namespace Kernel diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h index 78859ced3..29ebd16b7 100644 --- a/src/core/hle/kernel/k_linked_list.h +++ b/src/core/hle/kernel/k_linked_list.h @@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>, public KSlabAllocated<KLinkedListNode> { public: + explicit KLinkedListNode(KernelCore&) {} KLinkedListNode() = default; void Initialize(void* it) { diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index 18df1f836..9444f6bd2 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h @@ -6,6 +6,7 @@ #include "common/alignment.h" #include "common/assert.h" #include "common/common_types.h" +#include "common/intrusive_red_black_tree.h" #include "core/hle/kernel/memory_types.h" #include "core/hle/kernel/svc_types.h" @@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per enum class KMemoryAttribute : u8 { None = 0x00, - Mask = 0x7F, - All = Mask, - DontCareMask = 0x80, + All = 0xFF, + UserMask = All, Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), @@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 { Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), SetMask = Uncached, - - IpcAndDeviceMapped = IpcLocked | DeviceShared, - LockedAndIpcLocked = Locked | IpcLocked, - DeviceSharedAndUncached = DeviceShared | Uncached }; DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); -static_assert((static_cast<u8>(KMemoryAttribute::Mask) & - static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0); +enum class KMemoryBlockDisableMergeAttribute : u8 { + None = 0, + Normal = (1u << 0), + DeviceLeft = (1u << 1), + IpcLeft = (1u << 2), + Locked = (1u << 3), + DeviceRight = (1u << 4), + + AllLeft = Normal | DeviceLeft | IpcLeft | Locked, + AllRight = DeviceRight, +}; +DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute); struct KMemoryInfo { - VAddr addr{}; - std::size_t size{}; - KMemoryState state{}; - KMemoryPermission perm{}; - KMemoryAttribute attribute{}; - KMemoryPermission original_perm{}; - u16 ipc_lock_count{}; - u16 device_use_count{}; + uintptr_t m_address; + size_t m_size; + KMemoryState m_state; + u16 m_device_disable_merge_left_count; + u16 m_device_disable_merge_right_count; + u16 m_ipc_lock_count; + u16 m_device_use_count; + u16 m_ipc_disable_merge_count; + KMemoryPermission m_permission; + KMemoryAttribute m_attribute; + KMemoryPermission m_original_permission; + KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { return { - addr, - 
size, - static_cast<Svc::MemoryState>(state & KMemoryState::Mask), - static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask), - static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask), - ipc_lock_count, - device_use_count, + .addr = m_address, + .size = m_size, + .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), + .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), + .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), + .ipc_refcount = m_ipc_lock_count, + .device_refcount = m_device_use_count, + .padding = {}, }; } - constexpr VAddr GetAddress() const { - return addr; + constexpr uintptr_t GetAddress() const { + return m_address; + } + + constexpr size_t GetSize() const { + return m_size; } - constexpr std::size_t GetSize() const { - return size; + + constexpr size_t GetNumPages() const { + return this->GetSize() / PageSize; } - constexpr std::size_t GetNumPages() const { - return GetSize() / PageSize; + + constexpr uintptr_t GetEndAddress() const { + return this->GetAddress() + this->GetSize(); } - constexpr VAddr GetEndAddress() const { - return GetAddress() + GetSize(); + + constexpr uintptr_t GetLastAddress() const { + return this->GetEndAddress() - 1; } - constexpr VAddr GetLastAddress() const { - return GetEndAddress() - 1; + + constexpr u16 GetIpcLockCount() const { + return m_ipc_lock_count; } + + constexpr u16 GetIpcDisableMergeCount() const { + return m_ipc_disable_merge_count; + } + constexpr KMemoryState GetState() const { - return state; + return m_state; + } + + constexpr KMemoryPermission GetPermission() const { + return m_permission; } + + constexpr KMemoryPermission GetOriginalPermission() const { + return m_original_permission; + } + constexpr KMemoryAttribute GetAttribute() const { - return attribute; + return m_attribute; } - constexpr KMemoryPermission GetPermission() const { - return perm; + + constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { + return m_disable_merge_attribute; } }; -class KMemoryBlock final { - friend class KMemoryBlockManager; - +class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> { private: - VAddr addr{}; - std::size_t num_pages{}; - KMemoryState state{KMemoryState::None}; - u16 ipc_lock_count{}; - u16 device_use_count{}; - KMemoryPermission perm{KMemoryPermission::None}; - KMemoryPermission original_perm{KMemoryPermission::None}; - KMemoryAttribute attribute{KMemoryAttribute::None}; + u16 m_device_disable_merge_left_count; + u16 m_device_disable_merge_right_count; + VAddr m_address; + size_t m_num_pages; + KMemoryState m_memory_state; + u16 m_ipc_lock_count; + u16 m_device_use_count; + u16 m_ipc_disable_merge_count; + KMemoryPermission m_permission; + KMemoryPermission m_original_permission; + KMemoryAttribute m_attribute; + KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; public: static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { @@ -261,113 +297,349 @@ public: } public: - constexpr KMemoryBlock() = default; - constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_, - KMemoryPermission perm_, KMemoryAttribute attribute_) - : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {} - constexpr VAddr GetAddress() const { - return addr; + return m_address; } - constexpr std::size_t GetNumPages() const { - return num_pages; + constexpr size_t GetNumPages() const { + return 
m_num_pages; } - constexpr std::size_t GetSize() const { - return GetNumPages() * PageSize; + constexpr size_t GetSize() const { + return this->GetNumPages() * PageSize; } constexpr VAddr GetEndAddress() const { - return GetAddress() + GetSize(); + return this->GetAddress() + this->GetSize(); } constexpr VAddr GetLastAddress() const { - return GetEndAddress() - 1; + return this->GetEndAddress() - 1; + } + + constexpr u16 GetIpcLockCount() const { + return m_ipc_lock_count; + } + + constexpr u16 GetIpcDisableMergeCount() const { + return m_ipc_disable_merge_count; + } + + constexpr KMemoryPermission GetPermission() const { + return m_permission; + } + + constexpr KMemoryPermission GetOriginalPermission() const { + return m_original_permission; + } + + constexpr KMemoryAttribute GetAttribute() const { + return m_attribute; } constexpr KMemoryInfo GetMemoryInfo() const { return { - GetAddress(), GetSize(), state, perm, - attribute, original_perm, ipc_lock_count, device_use_count, + .m_address = this->GetAddress(), + .m_size = this->GetSize(), + .m_state = m_memory_state, + .m_device_disable_merge_left_count = m_device_disable_merge_left_count, + .m_device_disable_merge_right_count = m_device_disable_merge_right_count, + .m_ipc_lock_count = m_ipc_lock_count, + .m_device_use_count = m_device_use_count, + .m_ipc_disable_merge_count = m_ipc_disable_merge_count, + .m_permission = m_permission, + .m_attribute = m_attribute, + .m_original_permission = m_original_permission, + .m_disable_merge_attribute = m_disable_merge_attribute, }; } - void ShareToDevice(KMemoryPermission /*new_perm*/) { - ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || - device_use_count == 0); - attribute |= KMemoryAttribute::DeviceShared; - const u16 new_use_count{++device_use_count}; - ASSERT(new_use_count > 0); +public: + explicit KMemoryBlock() = default; + + constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, + KMemoryAttribute attr) + : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), + m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), + m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), + m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), + m_original_permission(KMemoryPermission::None), m_attribute(attr), + m_disable_merge_attribute() {} + + constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, + KMemoryAttribute attr) { + m_device_disable_merge_left_count = 0; + m_device_disable_merge_right_count = 0; + m_address = addr; + m_num_pages = np; + m_memory_state = ms; + m_ipc_lock_count = 0; + m_device_use_count = 0; + m_permission = p; + m_original_permission = KMemoryPermission::None; + m_attribute = attr; + m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None; + } + + constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { + constexpr auto AttributeIgnoreMask = + KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; + return m_memory_state == s && m_permission == p && + (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + } + + constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { + return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission && + m_original_permission == rhs.m_original_permission && + m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count && + m_device_use_count == rhs.m_device_use_count; + } + + 
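HasProperties() above compares state and permission exactly but treats IpcLocked and DeviceShared as don't-care bits: OR-ing the same ignore mask into both operands forces those bits equal before the comparison happens. A minimal standalone sketch of the idiom (Attr and its bit values are illustrative, not the kernel's):

#include <cstdint>

enum class Attr : std::uint8_t {
    None = 0,
    Locked = 1 << 0,
    IpcLocked = 1 << 1,
    DeviceShared = 1 << 2,
    Uncached = 1 << 3,
};

constexpr Attr operator|(Attr a, Attr b) {
    return static_cast<Attr>(static_cast<std::uint8_t>(a) | static_cast<std::uint8_t>(b));
}

// OR-ing the ignore mask into both sides forces the ignored bits equal,
// so only the remaining bits take part in the comparison.
constexpr bool EqualIgnoring(Attr lhs, Attr rhs, Attr ignore) {
    return (lhs | ignore) == (rhs | ignore);
}

static_assert(EqualIgnoring(Attr::Locked | Attr::IpcLocked, Attr::Locked,
                            Attr::IpcLocked | Attr::DeviceShared));
static_assert(!EqualIgnoring(Attr::Uncached, Attr::None,
                             Attr::IpcLocked | Attr::DeviceShared));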
constexpr bool CanMergeWith(const KMemoryBlock& rhs) const { + return this->HasSameProperties(rhs) && + (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) == + KMemoryBlockDisableMergeAttribute::None && + (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) == + KMemoryBlockDisableMergeAttribute::None; } - void UnshareToDevice(KMemoryPermission /*new_perm*/) { - ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); - const u16 prev_use_count{device_use_count--}; - ASSERT(prev_use_count > 0); - if (prev_use_count == 1) { - attribute &= ~KMemoryAttribute::DeviceShared; + constexpr bool Contains(VAddr addr) const { + return this->GetAddress() <= addr && addr <= this->GetEndAddress(); + } + + constexpr void Add(const KMemoryBlock& added_block) { + ASSERT(added_block.GetNumPages() > 0); + ASSERT(this->GetAddress() + added_block.GetSize() - 1 < + this->GetEndAddress() + added_block.GetSize() - 1); + + m_num_pages += added_block.GetNumPages(); + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | added_block.m_disable_merge_attribute); + m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count; + } + + constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a, + bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) { + ASSERT(m_original_permission == KMemoryPermission::None); + ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); + + m_memory_state = s; + m_permission = p; + m_attribute = static_cast<KMemoryAttribute>( + a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); + + if (set_disable_merge_attr && set_mask != 0) { + m_disable_merge_attribute = m_disable_merge_attribute | + static_cast<KMemoryBlockDisableMergeAttribute>(set_mask); + } + if (clear_mask != 0) { + m_disable_merge_attribute = m_disable_merge_attribute & + static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask); } } -private: - constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { - constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask | - KMemoryAttribute::IpcLocked | - KMemoryAttribute::DeviceShared}; - return state == s && perm == p && - (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + constexpr void Split(KMemoryBlock* block, VAddr addr) { + ASSERT(this->GetAddress() < addr); + ASSERT(this->Contains(addr)); + ASSERT(Common::IsAligned(addr, PageSize)); + + block->m_address = m_address; + block->m_num_pages = (addr - this->GetAddress()) / PageSize; + block->m_memory_state = m_memory_state; + block->m_ipc_lock_count = m_ipc_lock_count; + block->m_device_use_count = m_device_use_count; + block->m_permission = m_permission; + block->m_original_permission = m_original_permission; + block->m_attribute = m_attribute; + block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft); + block->m_ipc_disable_merge_count = m_ipc_disable_merge_count; + block->m_device_disable_merge_left_count = m_device_disable_merge_left_count; + block->m_device_disable_merge_right_count = 0; + + m_address = addr; + m_num_pages -= block->m_num_pages; + + m_ipc_disable_merge_count = 0; + m_device_disable_merge_left_count = 0; + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & 
KMemoryBlockDisableMergeAttribute::AllRight); } - constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { - return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && - attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && - device_use_count == rhs.device_use_count; + constexpr void UpdateDeviceDisableMergeStateForShareLeft( + [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { + if (left) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); + const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count; + ASSERT(new_device_disable_merge_left_count > 0); + } } - constexpr bool Contains(VAddr start) const { - return GetAddress() <= start && start <= GetEndAddress(); + constexpr void UpdateDeviceDisableMergeStateForShareRight( + [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { + if (right) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); + const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count; + ASSERT(new_device_disable_merge_right_count > 0); + } + } + + constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left, + bool right) { + this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right); + this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right); } - constexpr void Add(std::size_t count) { - ASSERT(count > 0); - ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); + constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, + bool right) { + // We must either be shared or have a zero lock count. + ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || + m_device_use_count == 0); - num_pages += count; + // Share. 
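The left/right parameters threaded through these helpers say whether a block sits at the left or right edge of the range being operated on; only edge blocks pick up the DeviceLeft/DeviceRight disable-merge bits, so coalescing is blocked exactly at the mapping's boundaries and nowhere inside it. A hedged sketch of how a caller derives the flags once blocks have been split to the range edges (BlockView and ForEachInRange are invented for illustration):

#include <cstdint>
#include <vector>

// Hypothetical, simplified block view: a [start, end) byte range. Assumes the
// manager has already split blocks so the range edges fall on block edges.
struct BlockView {
    std::uint64_t start;
    std::uint64_t end;
};

template <typename F>
void ForEachInRange(std::vector<BlockView>& blocks, std::uint64_t range_start,
                    std::uint64_t range_end, F&& share) {
    for (BlockView& b : blocks) {
        if (b.end <= range_start || range_end <= b.start) {
            continue; // block lies outside the shared range
        }
        const bool left = (b.start == range_start); // head block of the range
        const bool right = (b.end == range_end);    // tail block of the range
        share(b, left, right);
    }
}

// A range that covers a single block sets both flags on that one block.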
+ const u16 new_count = ++m_device_use_count; + ASSERT(new_count > 0); + + m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared); + + this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right); } - constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm, - KMemoryAttribute new_attribute) { - ASSERT(original_perm == KMemoryPermission::None); - ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); + constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( + [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { - state = new_state; - perm = new_perm; + if (left) { + if (!m_device_disable_merge_left_count) { + return; + } + --m_device_disable_merge_left_count; + } - attribute = static_cast<KMemoryAttribute>( - new_attribute | - (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); + m_device_disable_merge_left_count = + std::min(m_device_disable_merge_left_count, m_device_use_count); + + if (m_device_disable_merge_left_count == 0) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft); + } } - constexpr KMemoryBlock Split(VAddr split_addr) { - ASSERT(GetAddress() < split_addr); - ASSERT(Contains(split_addr)); - ASSERT(Common::IsAligned(split_addr, PageSize)); + constexpr void UpdateDeviceDisableMergeStateForUnshareRight( + [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { + if (right) { + const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; + ASSERT(old_device_disable_merge_right_count > 0); + if (old_device_disable_merge_right_count == 1) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight); + } + } + } - KMemoryBlock block; - block.addr = addr; - block.num_pages = (split_addr - GetAddress()) / PageSize; - block.state = state; - block.ipc_lock_count = ipc_lock_count; - block.device_use_count = device_use_count; - block.perm = perm; - block.original_perm = original_perm; - block.attribute = attribute; + constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left, + bool right) { + this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right); + this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); + } - addr = split_addr; - num_pages -= block.num_pages; + constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, + bool right) { + // We must be shared. + ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); + + // Unshare. + const u16 old_count = m_device_use_count--; + ASSERT(old_count > 0); + + if (old_count == 1) { + m_attribute = + static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared); + } + + this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right); + } + + constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, + bool right) { + + // We must be shared. + ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); + + // Unshare.
+ const u16 old_count = m_device_use_count--; + ASSERT(old_count > 0); + + if (old_count == 1) { + m_attribute = + static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared); + } + + this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); + } + + constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { + // We must either be locked or have a zero lock count. + ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked || + m_ipc_lock_count == 0); + + // Lock. + const u16 new_lock_count = ++m_ipc_lock_count; + ASSERT(new_lock_count > 0); + + // If this is our first lock, update our permissions. + if (new_lock_count == 1) { + ASSERT(m_original_permission == KMemoryPermission::None); + ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) == + (m_permission | KMemoryPermission::NotMapped)); + ASSERT((m_permission & KMemoryPermission::UserExecute) != + KMemoryPermission::UserExecute || + (new_perm == KMemoryPermission::UserRead)); + m_original_permission = m_permission; + m_permission = static_cast<KMemoryPermission>( + (new_perm & KMemoryPermission::IpcLockChangeMask) | + (m_original_permission & ~KMemoryPermission::IpcLockChangeMask)); + } + m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked); + + if (left) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft); + const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count; + ASSERT(new_ipc_disable_merge_count > 0); + } + } + + constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, + [[maybe_unused]] bool right) { + // We must be locked. + ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); + + // Unlock. + const u16 old_lock_count = m_ipc_lock_count--; + ASSERT(old_lock_count > 0); + + // If this is our last unlock, update our permissions. 
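The first LockForIpc stashes the pre-lock permission in m_original_permission and installs the IPC-restricted one; the branch below undoes that only once the lock count drops back to zero. A self-contained toy model of that save/restore lifecycle (IpcLockModel and its bit values are invented; the real code merges permissions through IpcLockChangeMask rather than replacing them wholesale):

#include <cassert>
#include <cstdint>

struct IpcLockModel {
    std::uint16_t lock_count = 0;
    std::uint32_t permission = 0b011;      // e.g. user read/write
    std::uint32_t original_permission = 0; // meaningful only while locked

    void Lock(std::uint32_t ipc_perm) {
        if (++lock_count == 1) {
            original_permission = permission; // save once, on the first lock
            permission = ipc_perm;
        }
    }

    void Unlock() {
        assert(lock_count > 0);
        if (--lock_count == 0) {
            permission = original_permission; // restore once, on the last unlock
            original_permission = 0;
        }
    }
};

// Nested locks keep the restricted permission until the final unlock:
//   IpcLockModel m; m.Lock(0b001); m.Lock(0b001); m.Unlock(); // still 0b001
//   m.Unlock();                                               // back to 0b011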
+ if (old_lock_count == 1) { + ASSERT(m_original_permission != KMemoryPermission::None); + m_permission = m_original_permission; + m_original_permission = KMemoryPermission::None; + m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked); + } + + if (left) { + const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--; + ASSERT(old_ipc_disable_merge_count > 0); + if (old_ipc_disable_merge_count == 1) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft); + } + } + } - return block; + constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { + return m_disable_merge_attribute; } }; static_assert(std::is_trivially_destructible<KMemoryBlock>::value); diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp index 3ddb9984f..cf4c1e371 100644 --- a/src/core/hle/kernel/k_memory_block_manager.cpp +++ b/src/core/hle/kernel/k_memory_block_manager.cpp @@ -2,221 +2,336 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "core/hle/kernel/k_memory_block_manager.h" -#include "core/hle/kernel/memory_types.h" namespace Kernel { -KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) - : start_addr{start_addr_}, end_addr{end_addr_} { - const u64 num_pages{(end_addr - start_addr) / PageSize}; - memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None); -} +KMemoryBlockManager::KMemoryBlockManager() = default; -KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { - auto node{memory_block_tree.begin()}; - while (node != end()) { - const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; - if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { - return node; - } - node = std::next(node); - } - return end(); +Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) { + // Allocate a block to encapsulate the address space, insert it into the tree. + KMemoryBlock* start_block = slab_manager->Allocate(); + R_UNLESS(start_block != nullptr, ResultOutOfResource); + + // Set our start and end. + m_start_address = st; + m_end_address = nd; + ASSERT(Common::IsAligned(m_start_address, PageSize)); + ASSERT(Common::IsAligned(m_end_address, PageSize)); + + // Initialize and insert the block. + start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, + KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None); + m_memory_block_tree.insert(*start_block); + + R_SUCCEED(); } -VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, - std::size_t num_pages, std::size_t align, - std::size_t offset, std::size_t guard_pages) { - if (num_pages == 0) { - return {}; +void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager, + HostUnmapCallback&& host_unmap_callback) { + // Erase every block until we have none left. 
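Because the tree is intrusive, erasing a node does not release its storage; Finalize has to unlink each block, notify the host side, and return the node to the slab itself. A standalone sketch of that teardown order over a std::set of pointers (TearDown, Block, and the two callbacks are invented stand-ins); the sketch reads the node's range before handing it back to the allocator:

#include <cstdint>
#include <functional>
#include <set>

struct Block {
    std::uint64_t address;
    std::uint64_t size;
};

// Stand-ins for the slab manager's Free() and the host unmap callback.
using FreeFn = std::function<void(Block*)>;
using UnmapFn = std::function<void(std::uint64_t, std::uint64_t)>;

void TearDown(std::set<Block*>& tree, const UnmapFn& unmap, const FreeFn& free_block) {
    auto it = tree.begin();
    while (it != tree.end()) {
        Block* block = *it;
        it = tree.erase(it);                // unlink the node from the container first
        unmap(block->address, block->size); // read the node's range...
        free_block(block);                  // ...before returning it to the slab
    }
}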
+ auto it = m_memory_block_tree.begin(); + while (it != m_memory_block_tree.end()) { + KMemoryBlock* block = std::addressof(*it); + it = m_memory_block_tree.erase(it); + slab_manager->Free(block); + host_unmap_callback(block->GetAddress(), block->GetSize()); } - const VAddr region_end{region_start + region_num_pages * PageSize}; - const VAddr region_last{region_end - 1}; - for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { - const auto info{it->GetMemoryInfo()}; - if (region_last < info.GetAddress()) { - break; - } + ASSERT(m_memory_block_tree.empty()); +} - if (info.state != KMemoryState::Free) { - continue; - } +VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages, + size_t num_pages, size_t alignment, size_t offset, + size_t guard_pages) const { + if (num_pages > 0) { + const VAddr region_end = region_start + region_num_pages * PageSize; + const VAddr region_last = region_end - 1; + for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); + it++) { + const KMemoryInfo info = it->GetMemoryInfo(); + if (region_last < info.GetAddress()) { + break; + } + if (info.m_state != KMemoryState::Free) { + continue; + } - VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; - area += guard_pages * PageSize; + VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress(); + area += guard_pages * PageSize; - const VAddr offset_area{Common::AlignDown(area, align) + offset}; - area = (area <= offset_area) ? offset_area : offset_area + align; + const VAddr offset_area = Common::AlignDown(area, alignment) + offset; + area = (area <= offset_area) ? offset_area : offset_area + alignment; - const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; - const VAddr area_last{area_end - 1}; + const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize; + const VAddr area_last = area_end - 1; - if (info.GetAddress() <= area && area < area_last && area_last <= region_last && - area_last <= info.GetLastAddress()) { - return area; + if (info.GetAddress() <= area && area < area_last && area_last <= region_last && + area_last <= info.GetLastAddress()) { + return area; + } } } return {}; } -void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, - KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, - KMemoryState state, KMemoryPermission perm, - KMemoryAttribute attribute) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; +void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, + VAddr address, size_t num_pages) { + // Find the iterator now that we've updated. + iterator it = this->FindIterator(address); + if (address != m_start_address) { + it--; + } - prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped; + // Coalesce blocks that we can. 
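This loop folds the right-hand block of each mergeable neighbouring pair into the left one and returns the absorbed node to the update allocator. A rough standalone sketch of the same merge pass over a plain sorted std::list (Block and Mergeable are invented stand-ins, not kernel types):

#include <cstdint>
#include <list>

struct Block {
    std::uint64_t start;
    std::uint64_t num_pages;
    int state; // placeholder for state/permission/attribute/merge bits
};

inline bool Mergeable(const Block& l, const Block& r) {
    return l.state == r.state; // stand-in for HasSameProperties plus the merge bits
}

// Merge adjacent mergeable blocks in a sorted list, mirroring the pass above.
inline void Coalesce(std::list<Block>& blocks) {
    auto it = blocks.begin();
    while (it != blocks.end()) {
        auto prev = it++;
        if (it == blocks.end()) {
            break;
        }
        if (Mergeable(*prev, *it)) {
            prev->num_pages += it->num_pages; // fold the right block into the left
            blocks.erase(it);                 // drop the absorbed node
            it = prev;                        // re-examine from the merged block
        }
    }
}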
+ while (true) { + iterator prev = it++; + if (it == m_memory_block_tree.end()) { + break; + } - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + if (prev->CanMergeWith(*it)) { + KMemoryBlock* block = std::addressof(*it); + m_memory_block_tree.erase(it); + prev->Add(*block); + allocator->Free(block); + it = prev; + } - if (addr < cur_end_addr && cur_addr < update_end_addr) { - if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { - node = next_node; - continue; - } + if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { + break; + } + } +} - iterator new_node{node}; - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); +void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr, + KMemoryBlockDisableMergeAttribute set_disable_attr, + KMemoryBlockDisableMergeAttribute clear_disable_attr) { + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); + ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == + KMemoryAttribute::None); + + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); + + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); + if (it->HasProperties(state, perm, attr)) { + // If we already have the right properties, just advance. + if (cur_address + remaining_size < cur_info.GetEndAddress()) { + remaining_pages = 0; + cur_address += remaining_size; + } else { + remaining_pages = + (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; + cur_address = cur_info.GetEndAddress(); } + } else { + // If we need to, create a new block before and insert it. + if (cur_info.GetAddress() != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); } - new_node->Update(state, perm, attribute); + // If we need to, create a new block after and insert it. + if (cur_info.GetSize() > remaining_size) { + KMemoryBlock* new_block = allocator->Allocate(); - MergeAdjacent(new_node, next_node); - } + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); - if (cur_end_addr - 1 >= update_end_addr - 1) { - break; - } + cur_info = it->GetMemoryInfo(); + } - node = next_node; + // Update block state. 
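In the Update() call that follows, the fourth argument (cur_address == address) is true only for the first block the walk touches, so the set mask lands solely on the head of the range, while the clear mask is applied to every block. A small illustrative model of that gating (all names here are invented for the sketch):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Flags {
    std::uint8_t disable_merge = 0;
};

// Only the first block of an updated range receives the "set" mask; the
// "clear" mask applies to every block in the range.
inline void ApplyMasks(std::vector<Flags>& range_blocks, std::uint8_t set_mask,
                       std::uint8_t clear_mask) {
    for (std::size_t i = 0; i < range_blocks.size(); ++i) {
        const bool first = (i == 0);
        if (first && set_mask != 0) {
            range_blocks[i].disable_merge |= set_mask;
        }
        range_blocks[i].disable_merge &= static_cast<std::uint8_t>(~clear_mask);
    }
}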
+ it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr), + static_cast<u8>(clear_disable_attr)); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + } + it++; } + + this->CoalesceForUpdate(allocator, address, num_pages); } -void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state, - KMemoryPermission perm, KMemoryAttribute attribute) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; +void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, + VAddr address, size_t num_pages, KMemoryState test_state, + KMemoryPermission test_perm, KMemoryAttribute test_attr, + KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr) { + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); + ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == + KMemoryAttribute::None); + + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); + + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); + if (it->HasProperties(test_state, test_perm, test_attr) && + !it->HasProperties(state, perm, attr)) { + // If we need to, create a new block before and insert it. + if (cur_info.GetAddress() != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; + + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); + } - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + // If we need to, create a new block after and insert it. + if (cur_info.GetSize() > remaining_size) { + KMemoryBlock* new_block = allocator->Allocate(); - if (addr < cur_end_addr && cur_addr < update_end_addr) { - iterator new_node{node}; + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); + cur_info = it->GetMemoryInfo(); } - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); + // Update block state. + it->Update(state, perm, attr, false, 0, 0); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + } else { + // If we already have the right properties, just advance. 
+ if (cur_address + remaining_size < cur_info.GetEndAddress()) { + remaining_pages = 0; + cur_address += remaining_size; + } else { + remaining_pages = + (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; + cur_address = cur_info.GetEndAddress(); } - - new_node->Update(state, perm, attribute); - - MergeAdjacent(new_node, next_node); - } - - if (cur_end_addr - 1 >= update_end_addr - 1) { - break; } - - node = next_node; + it++; } + + this->CoalesceForUpdate(allocator, address, num_pages); } -void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, +void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); - if (addr < cur_end_addr && cur_addr < update_end_addr) { - iterator new_node{node}; + const VAddr end_address = address + (num_pages * PageSize); - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); - } + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); - } + // If we need to, create a new block before and insert it. + if (cur_info.m_address != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); - lock_func(new_node, perm); + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; - MergeAdjacent(new_node, next_node); + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); } - if (cur_end_addr - 1 >= update_end_addr - 1) { - break; + if (cur_info.GetSize() > remaining_size) { + // If we need to, create a new block after and insert it. + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); + + cur_info = it->GetMemoryInfo(); } - node = next_node; + // Call the locked update function. + (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, + cur_info.GetEndAddress() == end_address); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + it++; } -} -void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { - const_iterator it{FindIterator(start)}; - KMemoryInfo info{}; - do { - info = it->GetMemoryInfo(); - func(info); - it = std::next(it); - } while (info.addr + info.size - 1 < end - 1 && it != cend()); + this->CoalesceForUpdate(allocator, address, num_pages); } -void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) { - KMemoryBlock* block{&(*it)}; - - auto EraseIt = [&](const iterator it_to_erase) { - if (next_it == it_to_erase) { - next_it = std::next(next_it); +// Debug. 
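UpdateLock above takes the operation as a pointer to a member function (MemoryBlockLockFunction) and invokes it with ->*, which is how ShareToDevice, UnshareToDevice, LockForIpc, and UnlockForIpc all reuse a single traversal. A compact refresher on that syntax (Counter and Apply are invented):

#include <iostream>

struct Counter {
    int value = 0;
    void Add(int n, bool, bool) { value += n; }
    void Sub(int n, bool, bool) { value -= n; }
};

// Same shape as KMemoryBlockManager::MemoryBlockLockFunction.
using MemberFn = void (Counter::*)(int, bool, bool);

void Apply(Counter* c, MemberFn fn, int n) {
    (c->*fn)(n, true, false); // ->* binds the object, then the call happens
}

int main() {
    Counter c;
    Apply(&c, &Counter::Add, 5);
    Apply(&c, &Counter::Sub, 2);
    std::cout << c.value << '\n'; // prints 3
}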
+bool KMemoryBlockManager::CheckState() const { + // Loop over every block, ensuring that we are sorted and coalesced. + auto it = m_memory_block_tree.cbegin(); + auto prev = it++; + while (it != m_memory_block_tree.cend()) { + const KMemoryInfo prev_info = prev->GetMemoryInfo(); + const KMemoryInfo cur_info = it->GetMemoryInfo(); + + // Sequential blocks which can be merged should be merged. + if (prev->CanMergeWith(*it)) { + return false; } - memory_block_tree.erase(it_to_erase); - }; - if (it != memory_block_tree.begin()) { - KMemoryBlock* prev{&(*std::prev(it))}; - - if (block->HasSameProperties(*prev)) { - const iterator prev_it{std::prev(it)}; + // Sequential blocks should be sequential. + if (prev_info.GetEndAddress() != cur_info.GetAddress()) { + return false; + } - prev->Add(block->GetNumPages()); - EraseIt(it); + // If the block is ipc locked, it must have a count. + if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && + cur_info.m_ipc_lock_count == 0) { + return false; + } - it = prev_it; - block = prev; + // If the block is device shared, it must have a count. + if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && + cur_info.m_device_use_count == 0) { + return false; } + + // Advance the iterator. + prev = it++; } - if (it != cend()) { - const KMemoryBlock* const next{&(*std::next(it))}; + // Our loop will miss checking the last block, potentially, so check it. + if (prev != m_memory_block_tree.cend()) { + const KMemoryInfo prev_info = prev->GetMemoryInfo(); + // If the block is ipc locked, it must have a count. + if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && + prev_info.m_ipc_lock_count == 0) { + return false; + } - if (block->HasSameProperties(*next)) { - block->Add(next->GetNumPages()); - EraseIt(std::next(it)); + // If the block is device shared, it must have a count. + if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && + prev_info.m_device_use_count == 0) { + return false; } } + + return true; } } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h index e14741b89..9b5873883 100644 --- a/src/core/hle/kernel/k_memory_block_manager.h +++ b/src/core/hle/kernel/k_memory_block_manager.h @@ -4,63 +4,154 @@ #pragma once #include <functional> -#include <list> +#include "common/common_funcs.h" #include "common/common_types.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_memory_block.h" namespace Kernel { +class KMemoryBlockManagerUpdateAllocator { +public: + static constexpr size_t MaxBlocks = 2; + +private: + KMemoryBlock* m_blocks[MaxBlocks]; + size_t m_index; + KMemoryBlockSlabManager* m_slab_manager; + +private: + Result Initialize(size_t num_blocks) { + // Check num blocks. + ASSERT(num_blocks <= MaxBlocks); + + // Set index. + m_index = MaxBlocks - num_blocks; + + // Allocate the blocks. 
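This update allocator front-loads failure: Initialize() reserves the worst case of MaxBlocks nodes (two, one per possible split) before any tree surgery starts, so an update can never run out of blocks halfway through, and Free() refills the local cache before spilling back to the slab. The same reserve-then-consume pattern as a generic sketch (ReserveAllocator and Source are invented; like the real code's assertion, it assumes num_blocks <= MaxBlocks):

#include <array>
#include <cstddef>
#include <utility>

template <typename T, typename Source, std::size_t MaxBlocks = 2>
class ReserveAllocator {
public:
    explicit ReserveAllocator(Source& source) : source_(source) {}

    // Reserve everything up front; report failure before any mutation happens.
    bool Initialize(std::size_t num_blocks) {
        index_ = MaxBlocks - num_blocks;
        for (std::size_t i = 0; i < num_blocks; ++i) {
            blocks_[index_ + i] = source_.Allocate();
            if (blocks_[index_ + i] == nullptr) {
                return false; // caller aborts before touching the tree
            }
        }
        return true;
    }

    T* Allocate() {
        return index_ < MaxBlocks ? std::exchange(blocks_[index_++], nullptr) : nullptr;
    }

    void Free(T* block) {
        if (index_ > 0) {
            blocks_[--index_] = block; // refill the local cache first
        } else {
            source_.Free(block); // cache full: spill back to the slab
        }
    }

private:
    std::array<T*, MaxBlocks> blocks_{};
    std::size_t index_ = MaxBlocks;
    Source& source_;
};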
+ for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) { + m_blocks[m_index + i] = m_slab_manager->Allocate(); + R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource); + } + + R_SUCCEED(); + } + +public: + KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm, + size_t num_blocks = MaxBlocks) + : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) { + *out_result = this->Initialize(num_blocks); + } + + ~KMemoryBlockManagerUpdateAllocator() { + for (const auto& block : m_blocks) { + if (block != nullptr) { + m_slab_manager->Free(block); + } + } + } + + KMemoryBlock* Allocate() { + ASSERT(m_index < MaxBlocks); + ASSERT(m_blocks[m_index] != nullptr); + KMemoryBlock* block = nullptr; + std::swap(block, m_blocks[m_index++]); + return block; + } + + void Free(KMemoryBlock* block) { + ASSERT(m_index <= MaxBlocks); + ASSERT(block != nullptr); + if (m_index == 0) { + m_slab_manager->Free(block); + } else { + m_blocks[--m_index] = block; + } + } +}; + class KMemoryBlockManager final { public: - using MemoryBlockTree = std::list<KMemoryBlock>; + using MemoryBlockTree = + Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>; + using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left, + bool right); using iterator = MemoryBlockTree::iterator; using const_iterator = MemoryBlockTree::const_iterator; public: - KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_); + KMemoryBlockManager(); + + using HostUnmapCallback = std::function<void(VAddr, u64)>; + + Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager); + void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback); iterator end() { - return memory_block_tree.end(); + return m_memory_block_tree.end(); } const_iterator end() const { - return memory_block_tree.end(); + return m_memory_block_tree.end(); } const_iterator cend() const { - return memory_block_tree.cend(); + return m_memory_block_tree.cend(); } - iterator FindIterator(VAddr addr); + VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages) const; - VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, - std::size_t align, std::size_t offset, std::size_t guard_pages); + void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, + KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, + KMemoryBlockDisableMergeAttribute set_disable_attr, + KMemoryBlockDisableMergeAttribute clear_disable_attr); + void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, + MemoryBlockLockFunction lock_func, KMemoryPermission perm); - void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, - KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state, - KMemoryPermission perm, KMemoryAttribute attribute); + void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, + KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr); - void Update(VAddr addr, std::size_t num_pages, KMemoryState state, - KMemoryPermission perm = KMemoryPermission::None, - KMemoryAttribute attribute = KMemoryAttribute::None); - - using LockFunc = std::function<void(iterator, KMemoryPermission)>; - void 
UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, - KMemoryPermission perm); + iterator FindIterator(VAddr address) const { + return m_memory_block_tree.find(KMemoryBlock( + address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None)); + } - using IterateFunc = std::function<void(const KMemoryInfo&)>; - void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); + const KMemoryBlock* FindBlock(VAddr address) const { + if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) { + return std::addressof(*it); + } - KMemoryBlock& FindBlock(VAddr addr) { - return *FindIterator(addr); + return nullptr; } + // Debug. + bool CheckState() const; + private: - void MergeAdjacent(iterator it, iterator& next_it); + void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages); - [[maybe_unused]] const VAddr start_addr; - [[maybe_unused]] const VAddr end_addr; + MemoryBlockTree m_memory_block_tree; + VAddr m_start_address{}; + VAddr m_end_address{}; +}; - MemoryBlockTree memory_block_tree; +class KScopedMemoryBlockManagerAuditor { +public: + explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) { + ASSERT(m_manager->CheckState()); + } + explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m) + : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {} + ~KScopedMemoryBlockManagerAuditor() { + ASSERT(m_manager->CheckState()); + } + +private: + KMemoryBlockManager* m_manager; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 5b0a9963a..646711505 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag // Set all the allocated memory. 
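The hunks that follow replace reinterpret_cast at call sites with a templated DeviceMemory::GetPointer<T>, so the pointee type is spelled once at the accessor instead of at every caller. A hedged sketch of such a typed accessor over a flat backing buffer (DeviceMemorySketch, base_, and the address arithmetic are invented; no bounds checking is performed):

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in for a device-memory arena addressed by physical address.
class DeviceMemorySketch {
public:
    explicit DeviceMemorySketch(std::size_t size) : backing_(size) {}

    // Typed accessor: the cast lives in one place instead of at every caller.
    template <typename T>
    T* GetPointer(std::uint64_t phys_addr) {
        return reinterpret_cast<T*>(backing_.data() + (phys_addr - base_));
    }

private:
    static constexpr std::uint64_t base_ = 0x8000'0000; // illustrative base address
    std::vector<std::uint8_t> backing_;
};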
for (const auto& block : out->Nodes()) { - std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern, + std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, block.GetSize()); } diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp index 1a0bf4439..0c16dded4 100644 --- a/src/core/hle/kernel/k_page_buffer.cpp +++ b/src/core/hle/kernel/k_page_buffer.cpp @@ -12,7 +12,7 @@ namespace Kernel { KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) { ASSERT(Common::IsAligned(phys_addr, PageSize)); - return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr)); + return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index 7e50dc1d1..aef06e213 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h @@ -13,6 +13,7 @@ namespace Kernel { class KPageBuffer final : public KSlabAllocated<KPageBuffer> { public: + explicit KPageBuffer(KernelCore&) {} KPageBuffer() = default; static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index d975de844..307e491cb 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -25,7 +25,7 @@ namespace { using namespace Common::Literals; -constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { +constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { switch (as_type) { case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is32BitNoMap: @@ -43,27 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT } // namespace KPageTable::KPageTable(Core::System& system_) - : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {} + : m_general_lock{system_.Kernel()}, + m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {} KPageTable::~KPageTable() = default; Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, std::size_t code_size, + VAddr code_addr, size_t code_size, + KMemoryBlockSlabManager* mem_block_slab_manager, KMemoryManager::Pool pool) { const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); }; const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); }; // Set our width and heap/alias sizes - address_space_width = GetAddressSpaceWidthFromType(as_type); + m_address_space_width = GetAddressSpaceWidthFromType(as_type); const VAddr start = 0; - const VAddr end{1ULL << address_space_width}; - std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; - std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; + const VAddr end{1ULL << m_address_space_width}; + size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; + size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; ASSERT(code_addr < 
code_addr + code_size); ASSERT(code_addr + code_size - 1 <= end - 1); @@ -75,66 +77,65 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Set code regions and determine remaining - constexpr std::size_t RegionAlignment{2_MiB}; + constexpr size_t RegionAlignment{2_MiB}; VAddr process_code_start{}; VAddr process_code_end{}; - std::size_t stack_region_size{}; - std::size_t kernel_map_region_size{}; + size_t stack_region_size{}; + size_t kernel_map_region_size{}; - if (address_space_width == 39) { + if (m_address_space_width == 39) { alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); - alias_code_region_start = code_region_start; - alias_code_region_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = m_code_region_end; process_code_start = Common::AlignDown(code_addr, RegionAlignment); process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); } else { stack_region_size = 0; kernel_map_region_size = 0; - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - stack_region_start = code_region_start; - alias_code_region_start = code_region_start; - alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + - GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); - stack_region_end = code_region_end; - kernel_map_region_start = code_region_start; - kernel_map_region_end = code_region_end; - process_code_start = code_region_start; - process_code_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + m_stack_region_start = m_code_region_start; + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + + GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); + m_stack_region_end = m_code_region_end; + m_kernel_map_region_start = m_code_region_start; + m_kernel_map_region_end = m_code_region_end; + process_code_start = m_code_region_start; + process_code_end = m_code_region_end; } // Set other basic fields - is_aslr_enabled = enable_aslr; - address_space_start = start; - address_space_end = end; - is_kernel = false; + m_enable_aslr = enable_aslr; + m_enable_device_address_space_merge = false; + m_address_space_start = start; + m_address_space_end = end; + m_is_kernel = false; + m_memory_block_slab_manager = mem_block_slab_manager; // Determine the region we can place our undetermineds in VAddr alloc_start{}; - std::size_t alloc_size{}; - if ((process_code_start - code_region_start) >= (end - process_code_end)) { - alloc_start = code_region_start; - alloc_size = process_code_start - code_region_start; + size_t alloc_size{}; + if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { + alloc_start = m_code_region_start; + alloc_size 
= process_code_start - m_code_region_start; } else { alloc_start = process_code_end; alloc_size = end - process_code_end; } - const std::size_t needed_size{ - (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; - if (alloc_size < needed_size) { - ASSERT(false); - return ResultOutOfMemory; - } + const size_t needed_size = + (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); + R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); - const std::size_t remaining_size{alloc_size - needed_size}; + const size_t remaining_size{alloc_size - needed_size}; // Determine random placements for each region - std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; + size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; if (enable_aslr) { alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment; @@ -147,117 +148,130 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Setup heap and alias regions - alias_region_start = alloc_start + alias_rnd; - alias_region_end = alias_region_start + alias_region_size; - heap_region_start = alloc_start + heap_rnd; - heap_region_end = heap_region_start + heap_region_size; + m_alias_region_start = alloc_start + alias_rnd; + m_alias_region_end = m_alias_region_start + alias_region_size; + m_heap_region_start = alloc_start + heap_rnd; + m_heap_region_end = m_heap_region_start + heap_region_size; if (alias_rnd <= heap_rnd) { - heap_region_start += alias_region_size; - heap_region_end += alias_region_size; + m_heap_region_start += alias_region_size; + m_heap_region_end += alias_region_size; } else { - alias_region_start += heap_region_size; - alias_region_end += heap_region_size; + m_alias_region_start += heap_region_size; + m_alias_region_end += heap_region_size; } // Setup stack region if (stack_region_size) { - stack_region_start = alloc_start + stack_rnd; - stack_region_end = stack_region_start + stack_region_size; + m_stack_region_start = alloc_start + stack_rnd; + m_stack_region_end = m_stack_region_start + stack_region_size; if (alias_rnd < stack_rnd) { - stack_region_start += alias_region_size; - stack_region_end += alias_region_size; + m_stack_region_start += alias_region_size; + m_stack_region_end += alias_region_size; } else { - alias_region_start += stack_region_size; - alias_region_end += stack_region_size; + m_alias_region_start += stack_region_size; + m_alias_region_end += stack_region_size; } if (heap_rnd < stack_rnd) { - stack_region_start += heap_region_size; - stack_region_end += heap_region_size; + m_stack_region_start += heap_region_size; + m_stack_region_end += heap_region_size; } else { - heap_region_start += stack_region_size; - heap_region_end += stack_region_size; + m_heap_region_start += stack_region_size; + m_heap_region_end += stack_region_size; } } // Setup kernel map region if (kernel_map_region_size) { - kernel_map_region_start = alloc_start + kmap_rnd; - kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; + m_kernel_map_region_start = alloc_start + kmap_rnd; + m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; if (alias_rnd < kmap_rnd) { - kernel_map_region_start += alias_region_size; - kernel_map_region_end += alias_region_size; + m_kernel_map_region_start += alias_region_size; + m_kernel_map_region_end += alias_region_size; } else { - alias_region_start += kernel_map_region_size; - alias_region_end += kernel_map_region_size; + 
m_alias_region_start += kernel_map_region_size; + m_alias_region_end += kernel_map_region_size; } if (heap_rnd < kmap_rnd) { - kernel_map_region_start += heap_region_size; - kernel_map_region_end += heap_region_size; + m_kernel_map_region_start += heap_region_size; + m_kernel_map_region_end += heap_region_size; } else { - heap_region_start += kernel_map_region_size; - heap_region_end += kernel_map_region_size; + m_heap_region_start += kernel_map_region_size; + m_heap_region_end += kernel_map_region_size; } if (stack_region_size) { if (stack_rnd < kmap_rnd) { - kernel_map_region_start += stack_region_size; - kernel_map_region_end += stack_region_size; + m_kernel_map_region_start += stack_region_size; + m_kernel_map_region_end += stack_region_size; } else { - stack_region_start += kernel_map_region_size; - stack_region_end += kernel_map_region_size; + m_stack_region_start += kernel_map_region_size; + m_stack_region_end += kernel_map_region_size; } } } // Set heap members - current_heap_end = heap_region_start; - max_heap_size = 0; - max_physical_memory_size = 0; + m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_max_physical_memory_size = 0; // Ensure that our regions are inside our address space auto IsInAddressSpace = [&](VAddr addr) { - return address_space_start <= addr && addr <= address_space_end; + return m_address_space_start <= addr && addr <= m_address_space_end; }; - ASSERT(IsInAddressSpace(alias_region_start)); - ASSERT(IsInAddressSpace(alias_region_end)); - ASSERT(IsInAddressSpace(heap_region_start)); - ASSERT(IsInAddressSpace(heap_region_end)); - ASSERT(IsInAddressSpace(stack_region_start)); - ASSERT(IsInAddressSpace(stack_region_end)); - ASSERT(IsInAddressSpace(kernel_map_region_start)); - ASSERT(IsInAddressSpace(kernel_map_region_end)); + ASSERT(IsInAddressSpace(m_alias_region_start)); + ASSERT(IsInAddressSpace(m_alias_region_end)); + ASSERT(IsInAddressSpace(m_heap_region_start)); + ASSERT(IsInAddressSpace(m_heap_region_end)); + ASSERT(IsInAddressSpace(m_stack_region_start)); + ASSERT(IsInAddressSpace(m_stack_region_end)); + ASSERT(IsInAddressSpace(m_kernel_map_region_start)); + ASSERT(IsInAddressSpace(m_kernel_map_region_end)); // Ensure that we selected regions that don't overlap - const VAddr alias_start{alias_region_start}; - const VAddr alias_last{alias_region_end - 1}; - const VAddr heap_start{heap_region_start}; - const VAddr heap_last{heap_region_end - 1}; - const VAddr stack_start{stack_region_start}; - const VAddr stack_last{stack_region_end - 1}; - const VAddr kmap_start{kernel_map_region_start}; - const VAddr kmap_last{kernel_map_region_end - 1}; + const VAddr alias_start{m_alias_region_start}; + const VAddr alias_last{m_alias_region_end - 1}; + const VAddr heap_start{m_heap_region_start}; + const VAddr heap_last{m_heap_region_end - 1}; + const VAddr stack_start{m_stack_region_start}; + const VAddr stack_last{m_stack_region_end - 1}; + const VAddr kmap_start{m_kernel_map_region_start}; + const VAddr kmap_last{m_kernel_map_region_end - 1}; ASSERT(alias_last < heap_start || heap_last < alias_start); ASSERT(alias_last < stack_start || stack_last < alias_start); ASSERT(alias_last < kmap_start || kmap_last < alias_start); ASSERT(heap_last < stack_start || stack_last < heap_start); ASSERT(heap_last < kmap_start || kmap_last < heap_start); - current_heap_end = heap_region_start; - max_heap_size = 0; - mapped_physical_memory_size = 0; - memory_pool = pool; + m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_mapped_physical_memory_size =
0; + m_memory_pool = pool; + + m_page_table_impl = std::make_unique<Common::PageTable>(); + m_page_table_impl->Resize(m_address_space_width, PageBits); + + // Initialize our memory block manager. + R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, + m_memory_block_slab_manager)); +} - page_table_impl.Resize(address_space_width, PageBits); +void KPageTable::Finalize() { + // Finalize memory blocks. + m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { + m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); + }); - return InitializeMemoryLayout(start, end); + // Close the backing page table, as the destructor is not called for guest objects. + m_page_table_impl.reset(); } -Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, +Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) { const u64 size{num_pages * PageSize}; @@ -265,52 +279,76 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the destination memory is unmapped. R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + + // Allocate and open. KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( &pg, num_pages, - KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); + KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); - block_manager->Update(addr, num_pages, state, perm); + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the source memory is normal heap. KMemoryState src_state{}; KMemoryPermission src_perm{}; - std::size_t num_src_allocator_blocks{}; + size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); // Verify that the destination memory is unmapped. 
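These page-table routines lean on the kernel's result macros: R_UNLESS fails with the given code when a guard is false, R_TRY propagates a failing Result from a subcall, and R_SUCCEED returns success explicitly. A rough model of the control flow only (the toy Result type, codes, and Allocate stub are invented; the real macros integrate with the kernel's result framework):

#include <cstdint>

// Toy Result type: 0 means success.
using Result = std::uint32_t;
constexpr Result ResultSuccess = 0;
constexpr Result ResultOutOfMemory = 0xCE01; // illustrative value

// Rough equivalents of the kernel macros, for intuition only.
#define R_SUCCEED() return ResultSuccess
#define R_UNLESS(cond, res) do { if (!(cond)) return (res); } while (0)
#define R_TRY(expr) do { if (Result r_ = (expr); r_ != ResultSuccess) return r_; } while (0)

Result Allocate(std::uint64_t size) {
    return size <= 0x1000 ? ResultSuccess : ResultOutOfMemory; // stub for the sketch
}

Result MapSomething(std::uint64_t size, std::uint64_t limit) {
    R_UNLESS(size <= limit, ResultOutOfMemory); // early-out on a failed guard
    R_TRY(Allocate(size));                      // propagate a failing subcall
    R_SUCCEED();                                // explicit success return
}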
-Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify that the source memory is normal heap.
     KMemoryState src_state{};
     KMemoryPermission src_perm{};
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
                                  src_address, size, KMemoryState::All, KMemoryState::Normal,
                                  KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                  KMemoryAttribute::All, KMemoryAttribute::None));

     // Verify that the destination memory is unmapped.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size,
                                  KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::None,
                                  KMemoryAttribute::None));

+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
     // Map the code memory.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;

         // Create page groups for the memory being mapped.
         KPageGroup pg;
@@ -335,33 +373,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size
         unprot_guard.Cancel();

         // Apply the memory block updates.
-        block_manager->Update(src_address, num_pages, src_state, new_perm,
-                              KMemoryAttribute::Locked);
-        block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
-                              KMemoryAttribute::None);
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_perm, KMemoryAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                      KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+                                      KMemoryBlockDisableMergeAttribute::Normal,
+                                      KMemoryBlockDisableMergeAttribute::None);
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                                    ICacheInvalidationStrategy icache_invalidation_strategy) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify that the source memory is locked normal heap.
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::Locked));

     // Verify that the destination memory is aliasable code.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryStateContiguous(
         std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
         KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
@@ -370,7 +412,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Determine whether any pages being unmapped are code.
     bool any_code_pages = false;
     {
-        KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
         while (true) {
             // Get the memory info.
             const KMemoryInfo info = it->GetMemoryInfo();

@@ -396,9 +438,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     SCOPE_EXIT({
         if (reprotected_pages && any_code_pages) {
             if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
-                system.InvalidateCpuInstructionCacheRange(dst_address, size);
+                m_system.InvalidateCpuInstructionCacheRange(dst_address, size);
             } else {
-                system.InvalidateCpuInstructionCaches();
+                m_system.InvalidateCpuInstructionCaches();
             }
         }
     });

@@ -406,7 +448,21 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Unmap.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;
+
+        // Create an update allocator for the source.
+        Result src_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_src_allocator_blocks);
+        R_TRY(src_allocator_result);
+
+        // Create an update allocator for the destination.
+        Result dst_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_dst_allocator_blocks);
+        R_TRY(dst_allocator_result);

         // Unmap the aliased copy of the pages.
         R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));

@@ -416,73 +472,34 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
                       OperationType::ChangePermissions));

         // Apply the memory block updates.
-        block_manager->Update(dst_address, num_pages, KMemoryState::None);
-        block_manager->Update(src_address, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::UserReadWrite);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);

         // Note that we reprotected pages.
         reprotected_pages = true;
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }
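Both code paths above lean on scope-exit guards (SCOPE_EXIT for the icache flush, unprot_guard for rollback): the guard's lambda runs when the scope unwinds unless Cancel() is called once the operation has fully succeeded. A self-contained sketch of the idiom, assuming the kernel's detail::ScopeExit behaves along these lines:

    #include <utility>

    // Minimal scope-exit guard in the style of detail::ScopeExit.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : m_f(std::forward<F>(f)) {}
        ~ScopeExit() {
            if (m_active) {
                m_f(); // rollback path: runs on early return or exception
            }
        }
        void Cancel() { m_active = false; }

    private:
        F m_f;
        bool m_active = true;
    };

    bool MapWithRollback() {
        bool mapped = true; // pretend the first half of the work succeeded
        ScopeExit guard{[&] { mapped = false; /* undo the partial mapping */ }};
        // ... second half of the operation; any early exit triggers the guard ...
        guard.Cancel(); // everything succeeded, keep the mapping
        return mapped;
    }

The pattern keeps every error path in R_TRY-heavy functions like UnmapCodeMemory from leaking a half-completed mapping.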
-VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                               std::size_t num_pages, std::size_t alignment, std::size_t offset,
-                               std::size_t guard_pages) {
+VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                               size_t alignment, size_t offset, size_t guard_pages) {
     VAddr address = 0;

     if (num_pages <= region_num_pages) {
         if (this->IsAslrEnabled()) {
-            // Try to directly find a free area up to 8 times.
-            for (std::size_t i = 0; i < 8; i++) {
-                const std::size_t random_offset =
-                    KSystemControl::GenerateRandomRange(
-                        0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
-                    alignment;
-                const VAddr candidate =
-                    Common::AlignDown((region_start + random_offset), alignment) + offset;
-
-                KMemoryInfo info = this->QueryInfoImpl(candidate);
-
-                if (info.state != KMemoryState::Free) {
-                    continue;
-                }
-                if (region_start > candidate) {
-                    continue;
-                }
-                if (info.GetAddress() + guard_pages * PageSize > candidate) {
-                    continue;
-                }
-
-                const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
-                if (candidate_end > info.GetLastAddress()) {
-                    continue;
-                }
-                if (candidate_end > region_start + region_num_pages * PageSize - 1) {
-                    continue;
-                }
-
-                address = candidate;
-                break;
-            }
-            // Fall back to finding the first free area with a random offset.
-            if (address == 0) {
-                // NOTE: Nintendo does not account for guard pages here.
-                // This may theoretically cause an offset to be chosen that cannot be mapped. We
-                // will account for guard pages.
-                const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
-                    0, region_num_pages - num_pages - guard_pages);
-                address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
-                                                      region_num_pages - offset_pages, num_pages,
-                                                      alignment, offset, guard_pages);
-            }
+            UNIMPLEMENTED();
         }

-        // Find the first free area.
         if (address == 0) {
-            address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
-                                                  alignment, offset, guard_pages);
+            address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+                                                          alignment, offset, guard_pages);
         }
     }
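The deleted branch tried up to eight random candidates before falling back to a randomized first-fit; the rewrite stubs the ASLR path out with UNIMPLEMENTED() and always takes the deterministic FindFreeArea path. For reference, a condensed model of the removed candidate generation, with GenerateRandomRange replaced by <random> and alignment assumed to be a power of two (this restates the '-' lines above, it is not part of the new code):

    #include <cstdint>
    #include <random>

    using VAddr = std::uint64_t;
    inline constexpr std::uint64_t PageSize = 0x1000;

    VAddr PickRandomCandidate(VAddr region_start, std::uint64_t region_num_pages,
                              std::uint64_t num_pages, std::uint64_t guard_pages,
                              std::uint64_t alignment, std::uint64_t offset,
                              std::mt19937_64& rng) {
        // GenerateRandomRange(0, max) in the deleted code.
        const std::uint64_t max_units =
            (region_num_pages - num_pages - guard_pages) * PageSize / alignment;
        std::uniform_int_distribution<std::uint64_t> dist(0, max_units);
        const std::uint64_t random_offset = dist(rng) * alignment;
        // Common::AlignDown(region_start + random_offset, alignment) + offset,
        // assuming alignment is a power of two.
        return ((region_start + random_offset) & ~(alignment - 1)) + offset;
    }

Each candidate was then rejected unless the containing block was Free and large enough to hold the request plus its guard pages, as the deleted bounds checks show.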
@@ -500,7 +517,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+    R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr),
+             ResultInvalidCurrentMemory);

     // Prepare tracking variables.
     PAddr cur_addr = next_entry.phys_addr;
@@ -508,9 +526,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     size_t tot_size = cur_size;

     // Iterate, adding to group as we go.
-    const auto& memory_layout = system.Kernel().MemoryLayout();
+    const auto& memory_layout = m_system.Kernel().MemoryLayout();
     while (tot_size < size) {
-        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+        R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
                  ResultInvalidCurrentMemory);

         if (next_entry.phys_addr != (cur_addr + cur_size)) {
@@ -538,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
     R_TRY(pg.AddBlock(cur_addr, cur_pages));

-    return ResultSuccess;
+    R_SUCCEED();
 }

 bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
@@ -546,7 +564,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     const size_t size = num_pages * PageSize;
     const auto& pg = pg_ll.Nodes();
-    const auto& memory_layout = system.Kernel().MemoryLayout();
+    const auto& memory_layout = m_system.Kernel().MemoryLayout();

     // Empty groups are necessarily invalid.
     if (pg.empty()) {
@@ -573,7 +591,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+    if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) {
         return false;
     }

@@ -584,7 +602,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     // Iterate, comparing expected to actual.
     while (tot_size < size) {
-        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+        if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
             return false;
         }

@@ -630,11 +648,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
 }

-Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                                       VAddr src_addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

-    const std::size_t num_pages{size / PageSize};
+    const size_t num_pages{size / PageSize};

     // Check that the memory is mapped in the destination process.
     size_t num_allocator_blocks;
@@ -649,43 +667,51 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));

     // Apply the memory block update.
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);

-    system.InvalidateCpuInstructionCaches();
+    m_system.InvalidateCpuInstructionCaches();

-    return ResultSuccess;
+    R_SUCCEED();
 }
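MakePageGroup and IsValidPageGroup both walk the page table with BeginTraversal/ContinueTraversal and coalesce physically contiguous entries before recording a block; the merge test is the next_entry.phys_addr != (cur_addr + cur_size) comparison. A simplified, self-contained version of that coalescing loop (the traversal machinery is reduced to a plain vector of extents):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using PAddr = std::uint64_t;

    struct Extent {
        PAddr addr;
        std::uint64_t size;
    };

    // Extend the current run while entries are physically contiguous; emit a
    // block (pg.AddBlock in the real code) whenever a discontinuity appears.
    std::vector<Extent> Coalesce(const std::vector<Extent>& entries) {
        std::vector<Extent> blocks;
        if (entries.empty()) {
            return blocks;
        }
        PAddr cur_addr = entries[0].addr;
        std::uint64_t cur_size = entries[0].size;
        for (std::size_t i = 1; i < entries.size(); ++i) {
            if (entries[i].addr != cur_addr + cur_size) {
                blocks.push_back({cur_addr, cur_size});
                cur_addr = entries[i].addr;
                cur_size = entries[i].size;
            } else {
                cur_size += entries[i].size;
            }
        }
        blocks.push_back({cur_addr, cur_size});
        return blocks;
    }

IsValidPageGroup runs the same walk but compares each coalesced run against the group's recorded blocks instead of emitting them.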
-Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);

     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;

     // Define iteration variables.
     VAddr cur_address;
-    std::size_t mapped_size;
+    size_t mapped_size;

     // The entire mapping process can be retried.
     while (true) {
         // Check if the memory is already mapped.
         {
             // Lock the table.
-            KScopedLightLock lk(general_lock);
+            KScopedLightLock lk(m_general_lock);

             // Iterate over the memory.
             cur_address = address;
             mapped_size = 0;

-            auto it = block_manager->FindIterator(cur_address);
+            auto it = m_memory_block_manager.FindIterator(cur_address);
             while (true) {
                 // Check that the iterator is valid.
-                ASSERT(it != block_manager->end());
+                ASSERT(it != m_memory_block_manager.end());

                 // Get the memory info.
                 const KMemoryInfo info = it->GetMemoryInfo();

@@ -716,20 +742,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
         {
             // Reserve the memory from the process resource limit.
             KScopedResourceReservation memory_reservation(
-                system.Kernel().CurrentProcess()->GetResourceLimit(),
+                m_system.Kernel().CurrentProcess()->GetResourceLimit(),
                 LimitableResource::PhysicalMemory, size - mapped_size);
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

             // Allocate pages for the new memory.
             KPageGroup pg;
-            R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+            R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
                 &pg, (size - mapped_size) / PageSize,
-                KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+                KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));

             // Map the memory.
             {
                 // Lock the table.
-                KScopedLightLock lk(general_lock);
+                KScopedLightLock lk(m_general_lock);

                 size_t num_allocator_blocks = 0;

@@ -739,10 +765,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                     size_t checked_mapped_size = 0;
                     cur_address = address;

-                    auto it = block_manager->FindIterator(cur_address);
+                    auto it = m_memory_block_manager.FindIterator(cur_address);
                     while (true) {
                         // Check that the iterator is valid.
-                        ASSERT(it != block_manager->end());
+                        ASSERT(it != m_memory_block_manager.end());

                         // Get the memory info.
                         const KMemoryInfo info = it->GetMemoryInfo();

@@ -782,6 +808,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                     }
                 }

+                // Create an update allocator.
+                ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+                Result allocator_result{ResultSuccess};
+                KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                             m_memory_block_slab_manager,
+                                                             num_allocator_blocks);
+                R_TRY(allocator_result);
+
                 // Reset the current tracking address, and make sure we clean up on failure.
                 cur_address = address;
                 auto unmap_guard = detail::ScopeExit([&] {
@@ -791,10 +825,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                         // Iterate, unmapping the pages.
                         cur_address = address;

-                        auto it = block_manager->FindIterator(cur_address);
+                        auto it = m_memory_block_manager.FindIterator(cur_address);
                         while (true) {
                             // Check that the iterator is valid.
-                            ASSERT(it != block_manager->end());
+                            ASSERT(it != m_memory_block_manager.end());

                             // Get the memory info.
                             const KMemoryInfo info = it->GetMemoryInfo();

@@ -830,10 +864,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                     PAddr pg_phys_addr = pg_it->GetAddress();
                     size_t pg_pages = pg_it->GetNumPages();

-                    auto it = block_manager->FindIterator(cur_address);
+                    auto it = m_memory_block_manager.FindIterator(cur_address);
                     while (true) {
                         // Check that the iterator is valid.
-                        ASSERT(it != block_manager->end());
+                        ASSERT(it != m_memory_block_manager.end());

                         // Get the memory info.
                         const KMemoryInfo info = it->GetMemoryInfo();

@@ -886,37 +920,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                 memory_reservation.Commit();

                 // Increase our tracked mapped size.
-                mapped_physical_memory_size += (size - mapped_size);
+                m_mapped_physical_memory_size += (size - mapped_size);

                 // Update the relevant memory blocks.
-                block_manager->Update(address, size / PageSize, KMemoryState::Free,
-                                      KMemoryPermission::None, KMemoryAttribute::None,
-                                      KMemoryState::Normal, KMemoryPermission::UserReadWrite,
-                                      KMemoryAttribute::None);
+                m_memory_block_manager.UpdateIfMatch(
+                    std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+                    KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+                    KMemoryPermission::UserReadWrite, KMemoryAttribute::None);

                 // Cancel our guard.
                 unmap_guard.Cancel();

-                return ResultSuccess;
+                R_SUCCEED();
             }
         }
     }
 }
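The FindIterator loops above all share one invariant: the block manager partitions the entire address space, so advancing block by block from FindIterator(address) must reach a block containing the last address before hitting end(), which is what the repeated ASSERTs encode. A reduced, runnable version of the scan, using a std::map keyed by block base address as a stand-in for the block manager:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <map>

    using VAddr = std::uint64_t;

    struct BlockInfo {
        VAddr last_address; // inclusive, like KMemoryInfo::GetLastAddress()
        bool mapped;
    };

    // Walk the blocks covering [address, last_address], as the diff's
    // FindIterator loops do, summing how many bytes are already mapped.
    std::uint64_t CountMappedBytes(const std::map<VAddr, BlockInfo>& blocks,
                                   VAddr address, VAddr last_address) {
        std::uint64_t mapped_size = 0;
        // FindIterator(address): the block whose range contains address,
        // assuming the map fully covers the address space.
        auto it = blocks.upper_bound(address);
        assert(it != blocks.begin());
        --it;
        while (true) {
            assert(it != blocks.end()); // manager spans the whole space
            const VAddr begin = std::max(it->first, address);
            const VAddr end = std::min(it->second.last_address, last_address);
            if (it->second.mapped) {
                mapped_size += end - begin + 1;
            }
            if (it->second.last_address >= last_address) {
                break; // this block contains our last address; done
            }
            ++it;
        }
        return mapped_size;
    }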
-Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;

     // Define iteration variables.
     VAddr cur_address = 0;
-    std::size_t mapped_size = 0;
-    std::size_t num_allocator_blocks = 0;
+    size_t mapped_size = 0;
+    size_t num_allocator_blocks = 0;

     // Check if the memory is mapped.
     {
@@ -924,10 +958,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
         cur_address = address;
         mapped_size = 0;

-        auto it = block_manager->FindIterator(cur_address);
+        auto it = m_memory_block_manager.FindIterator(cur_address);
         while (true) {
             // Check that the iterator is valid.
-            ASSERT(it != block_manager->end());
+            ASSERT(it != m_memory_block_manager.end());

             // Get the memory info.
             const KMemoryInfo info = it->GetMemoryInfo();

@@ -1022,6 +1056,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     }
     ASSERT(pg.GetNumPages() == mapped_size / PageSize);

+    // Create an update allocator.
+    ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Reset the current tracking address, and make sure we clean up on failure.
     cur_address = address;
     auto remap_guard = detail::ScopeExit([&] {
@@ -1030,7 +1071,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
             cur_address = address;

             // Iterate over the memory we unmapped.
-            auto it = block_manager->FindIterator(cur_address);
+            auto it = m_memory_block_manager.FindIterator(cur_address);
             auto pg_it = pg.Nodes().begin();
             PAddr pg_phys_addr = pg_it->GetAddress();
             size_t pg_pages = pg_it->GetNumPages();

@@ -1085,10 +1126,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     });

     // Iterate over the memory, unmapping as we go.
-    auto it = block_manager->FindIterator(cur_address);
+    auto it = m_memory_block_manager.FindIterator(cur_address);
     while (true) {
         // Check that the iterator is valid.
-        ASSERT(it != block_manager->end());
+        ASSERT(it != m_memory_block_manager.end());

         // Get the memory info.
         const KMemoryInfo info = it->GetMemoryInfo();

@@ -1115,104 +1156,159 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     }

     // Release the memory resource.
-    mapped_physical_memory_size -= mapped_size;
-    auto process{system.Kernel().CurrentProcess()};
+    m_mapped_physical_memory_size -= mapped_size;
+    auto process{m_system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);

     // Update memory blocks.
-    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);

     // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
     // counting for mapped pages. Until then, we must manually close the reference to the page
     // group.
-    system.Kernel().MemoryManager().Close(pg);
+    m_system.Kernel().MemoryManager().Close(pg);

     // We succeeded.
     remap_guard.Cancel();

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+                                 std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));

-    if (IsRegionMapped(dst_addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
+    // Validate that the dst address's state is valid.
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));

+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
+    // Map the memory.
     KPageGroup page_linked_list;
-    const std::size_t num_pages{size / PageSize};
-
-    AddRegionToPages(src_addr, num_pages, page_linked_list);
+    const size_t num_pages{size / PageSize};
+    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+    AddRegionToPages(src_address, num_pages, page_linked_list);
     {
+        // Reprotect the source as kernel-read/not mapped.
         auto block_guard = detail::ScopeExit([&] {
-            Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
+            Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
                     OperationType::ChangePermissions);
         });
-
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
-                             OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite));
+        R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
+        R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));

         block_guard.Cancel();
     }

-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None,
-                          KMemoryAttribute::Locked);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Stack,
-                          KMemoryPermission::UserReadWrite);
-
-    return ResultSuccess;
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
                                  new_src_perm, new_src_attr,
+                                  KMemoryBlockDisableMergeAttribute::Locked,
+                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    R_SUCCEED();
 }

-Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
+Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+        src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+        KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+        KMemoryAttribute::All, KMemoryAttribute::Locked));
+
+    // Validate that the dst address's state is valid.
+    KMemoryPermission dst_perm;
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+        dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));

-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);

-    KMemoryPermission dst_perm{};
-    CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size,
-                                  KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);

     KPageGroup src_pages;
     KPageGroup dst_pages;
-    const std::size_t num_pages{size / PageSize};
+    const size_t num_pages{size / PageSize};

-    AddRegionToPages(src_addr, num_pages, src_pages);
-    AddRegionToPages(dst_addr, num_pages, dst_pages);
+    AddRegionToPages(src_address, num_pages, src_pages);
+    AddRegionToPages(dst_address, num_pages, dst_pages);

-    if (!dst_pages.IsEqual(src_pages)) {
-        return ResultInvalidMemoryRegion;
-    }
+    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);

     {
-        auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });
+        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });

-        CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
-                             OperationType::ChangePermissions));
+        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
+                      OperationType::ChangePermissions));

         block_guard.Cancel();
     }

-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
-
-    return ResultSuccess;
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
+                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Locked);
+    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                  KMemoryState::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    R_SUCCEED();
 }
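Throughout the diff, bare `return ResultSuccess;` and `return result;` statements are funneled through R_SUCCEED() and R_RETURN(), matching the existing R_TRY/R_UNLESS style. The actual macro definitions live in the kernel's result headers and may carry extra bookkeeping; a plausible minimal shape, shown purely as an illustration of the control flow:

    // Illustrative stand-ins only; the project's real macros may differ
    // (for example, they can hook a result-tracing/abort-on-error facility).
    struct Result {
        int code = 0;
        bool IsError() const { return code != 0; }
    };
    constexpr Result ResultSuccess{};

    #define R_RETURN(expr)                                                      \
        do {                                                                    \
            return (expr);                                                      \
        } while (0)

    #define R_SUCCEED() R_RETURN(ResultSuccess)

    #define R_TRY(expr)                                                         \
        do {                                                                    \
            const Result r_try_result = (expr);                                 \
            if (r_try_result.IsError()) {                                       \
                R_RETURN(r_try_result);                                         \
            }                                                                   \
        } while (0)

    #define R_UNLESS(cond, res)                                                 \
        do {                                                                    \
            if (!(cond)) {                                                      \
                R_RETURN(res);                                                  \
            }                                                                   \
        } while (0)

Routing every exit through one macro gives a single point to instrument result propagation, which is presumably why the conversion is applied so uniformly here.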
 Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
@@ -1225,48 +1321,54 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
-            const std::size_t num_pages{(addr - cur_addr) / PageSize};
+            const size_t num_pages{(addr - cur_addr) / PageSize};

             ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());

-            return result;
+            R_RETURN(result);
         }

         cur_addr += node.GetNumPages() * PageSize;
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

 Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
                             KMemoryPermission perm) {
     // Check that the map is in range.
-    const std::size_t num_pages{page_linked_list.GetNumPages()};
-    const std::size_t size{num_pages * PageSize};
+    const size_t num_pages{page_linked_list.GetNumPages()};
+    const size_t size{num_pages * PageSize};
     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Check the memory state.
     R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None));

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+
     // Map the pages.
     R_TRY(MapPages(address, page_linked_list, perm));

     // Update the blocks.
-    block_manager->Update(address, num_pages, state, perm);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
-                            PAddr phys_addr, bool is_pa_valid, VAddr region_start,
-                            std::size_t region_num_pages, KMemoryState state,
-                            KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+                            bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+                            KMemoryState state, KMemoryPermission perm) {
     ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);

     // Ensure this is a valid map request.
@@ -1275,7 +1377,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
     R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Find a random address to map at.
     VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
@@ -1288,6 +1390,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
                                      KMemoryAttribute::None, KMemoryAttribute::None)
                .IsSuccess());

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+
     // Perform mapping operation.
     if (is_pa_valid) {
         R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
@@ -1296,11 +1403,13 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
     }

     // Update the blocks.
-    block_manager->Update(addr, num_pages, state, perm);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);

     // We successfully mapped the pages.
     *out_addr = addr;
-    return ResultSuccess;
+    R_SUCCEED();
 }

 Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
@@ -1312,60 +1421,80 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
-            return result;
+            R_RETURN(result);
         }

         cur_addr += node.GetNumPages() * PageSize;
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
     // Check that the unmap is in range.
-    const std::size_t num_pages{page_linked_list.GetNumPages()};
-    const std::size_t size{num_pages * PageSize};
-    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+    const size_t num_pages{page_linked_list.GetNumPages()};
+    const size_t size{num_pages * PageSize};
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Check the memory state.
-    R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform the unmap.
-    R_TRY(UnmapPages(addr, page_linked_list));
+    R_TRY(UnmapPages(address, page_linked_list));

     // Update the blocks.
-    block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
     // Check that the unmap is in range.
-    const std::size_t size = num_pages * PageSize;
+    const size_t size = num_pages * PageSize;
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Check the memory state.
-    std::size_t num_allocator_blocks{};
+    size_t num_allocator_blocks{};
     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                  KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform the unmap.
     R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));

     // Update the blocks.
-    block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);

-    return ResultSuccess;
+    R_SUCCEED();
 }

 Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -1380,7 +1509,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Check if state allows us to create the group.
     R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
@@ -1390,15 +1519,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     // Create a new page group for the region.
     R_TRY(this->MakePageGroup(*out, address, num_pages));

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
                                               Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -1435,105 +1564,101 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     // Succeed if there's nothing to do.
     R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform mapping operation.
     const auto operation =
         was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
     R_TRY(Operate(addr, num_pages, new_perm, operation));

     // Update the blocks.
-    block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);

     // Ensure cache coherency, if we're setting pages as executable.
     if (is_x) {
-        system.InvalidateCpuInstructionCacheRange(addr, size);
+        m_system.InvalidateCpuInstructionCacheRange(addr, size);
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

-    return block_manager->FindBlock(addr).GetMemoryInfo();
+    return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
 }

 KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     if (!Contains(addr, 1)) {
-        return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible,
-                KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None};
+        return {
+            .m_address = m_address_space_end,
+            .m_size = 0 - m_address_space_end,
+            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+            .m_device_disable_merge_left_count = 0,
+            .m_device_disable_merge_right_count = 0,
+            .m_ipc_lock_count = 0,
+            .m_device_use_count = 0,
+            .m_ipc_disable_merge_count = 0,
+            .m_permission = KMemoryPermission::None,
+            .m_attribute = KMemoryAttribute::None,
+            .m_original_permission = KMemoryPermission::None,
+            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+        };
     }

     return QueryInfoImpl(addr);
 }
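QueryInfo's out-of-range result now names every KMemoryInfo field with C++20 designated initializers instead of a positional brace list. With the struct widened to carry IPC and device bookkeeping, that is the safer form: adding or reordering members cannot silently shift the meaning of the return value. A small standalone illustration of the idiom (Info and its fields are hypothetical stand-ins):

    #include <cstdint>

    struct Info {
        std::uint64_t address;
        std::uint64_t size;
        std::uint32_t ipc_lock_count;
        std::uint32_t device_use_count;
    };

    // Designated initializers: fields are named, must appear in declaration
    // order, and anything omitted is value-initialized.
    Info MakeInaccessibleInfo(std::uint64_t space_end) {
        return {
            .address = space_end,
            // Unsigned wraparound yields the size of the remaining space,
            // the same trick as `0 - m_address_space_end` in the diff.
            .size = 0 - space_end,
            .ipc_lock_count = 0,
            .device_use_count = 0,
        };
    }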
-Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState state{};
-    KMemoryAttribute attribute{};
-
-    R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-                           KMemoryAttribute::Mask, KMemoryAttribute::None,
-                           KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
-
-    return ResultSuccess;
-}
-
-Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState state{};
-
-    R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
-                           KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
-    return ResultSuccess;
-}
-
-Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
-                                       Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify we can change the memory permission.
     KMemoryState old_state;
     KMemoryPermission old_perm;
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size,
-        KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None,
-        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+                                 std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));

     // Determine new perm.
     const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
     R_SUCCEED_IF(old_perm == new_perm);

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform mapping operation.
     R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));

     // Update the blocks.
-    block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
     const size_t num_pages = size / PageSize;
     ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
            KMemoryAttribute::SetMask);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify we can change the memory attribute.
     KMemoryState old_state;
@@ -1548,6 +1673,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
                                  KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None,
                                  KMemoryPermission::None, AttributeTestMask,
                                  KMemoryAttribute::None, ~AttributeTestMask));

+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Determine the new attribute.
     const KMemoryAttribute new_attr =
         static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
@@ -1557,123 +1688,142 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
     this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);

     // Update the blocks.
-    block_manager->Update(addr, num_pages, old_state, old_perm, new_attr);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(size_t size) {
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Only process page tables are allowed to set heap size.
     ASSERT(!this->IsKernel());

-    max_heap_size = size;
+    m_max_heap_size = size;

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     // Lock the physical memory mutex.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);

     // Try to perform a reduction in heap, instead of an extension.
     VAddr cur_address{};
-    std::size_t allocation_size{};
+    size_t allocation_size{};
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);

         // Validate that setting heap size is possible at all.
-        R_UNLESS(!is_kernel, ResultOutOfMemory);
-        R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
                  ResultOutOfMemory);
-        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);

         if (size < GetHeapSize()) {
             // The size being requested is less than the current size, so we need to free the end of
             // the heap.

             // Validate memory state.
-            std::size_t num_allocator_blocks;
+            size_t num_allocator_blocks;
             R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                         heap_region_start + size, GetHeapSize() - size,
+                                         m_heap_region_start + size, GetHeapSize() - size,
                                          KMemoryState::All, KMemoryState::Normal,
                                          KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                          KMemoryAttribute::All, KMemoryAttribute::None));

+            // Create an update allocator.
+            Result allocator_result{ResultSuccess};
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_allocator_blocks);
+            R_TRY(allocator_result);
+
             // Unmap the end of the heap.
             const auto num_pages = (GetHeapSize() - size) / PageSize;
-            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+            R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
                           OperationType::Unmap));

             // Release the memory from the resource limit.
-            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+            m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
                 LimitableResource::PhysicalMemory, num_pages * PageSize);

             // Apply the memory block update.
-            block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryAttribute::None);
+            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
+                                          KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::None,
+                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+                                                    : KMemoryBlockDisableMergeAttribute::None);

             // Update the current heap end.
-            current_heap_end = heap_region_start + size;
+            m_current_heap_end = m_heap_region_start + size;

             // Set the output.
-            *out = heap_region_start;
-            return ResultSuccess;
+            *out = m_heap_region_start;
+            R_SUCCEED();
         } else if (size == GetHeapSize()) {
             // The size requested is exactly the current size.
-            *out = heap_region_start;
-            return ResultSuccess;
+            *out = m_heap_region_start;
+            R_SUCCEED();
         } else {
             // We have to allocate memory. Determine how much to allocate and where while the table
             // is locked.
-            cur_address = current_heap_end;
+            cur_address = m_current_heap_end;
             allocation_size = size - GetHeapSize();
         }
     }

     // Reserve memory for the heap extension.
     KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+        m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
         allocation_size);
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

     // Allocate pages for the heap extension.
     KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
-        KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+        KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));

     // Clear all the newly allocated pages.
     for (const auto& it : pg.Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }

     // Map the pages.
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);

         // Ensure that the heap hasn't changed since we began executing.
-        ASSERT(cur_address == current_heap_end);
+        ASSERT(cur_address == m_current_heap_end);

         // Check the memory state.
-        std::size_t num_allocator_blocks{};
-        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+        size_t num_allocator_blocks{};
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
                                      allocation_size, KMemoryState::All, KMemoryState::Free,
                                      KMemoryPermission::None, KMemoryPermission::None,
                                      KMemoryAttribute::None, KMemoryAttribute::None));

+        // Create an update allocator.
+        Result allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator allocator(
+            std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+        R_TRY(allocator_result);
+
         // Map the pages.
         const auto num_pages = allocation_size / PageSize;
-        R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));
+        R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));

         // Clear all the newly allocated pages.
-        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
-            std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+        for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+            std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
                         PageSize);
         }

@@ -1681,133 +1831,172 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
         memory_reservation.Commit();

         // Apply the memory block update.
-        block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+        m_memory_block_manager.Update(
+            std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+                                                      : KMemoryBlockDisableMergeAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None);

         // Update the current heap end.
-        current_heap_end = heap_region_start + size;
+        m_current_heap_end = m_heap_region_start + size;

         // Set the output.
-        *out = heap_region_start;
-        return ResultSuccess;
+        *out = m_heap_region_start;
+        R_SUCCEED();
     }
 }
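SetHeapSize compares the request against GetHeapSize(), which the surrounding code implies is m_current_heap_end - m_heap_region_start: shrink unmaps the tail of the heap, an equal request is a no-op, and growth allocates only the difference. A worked sketch of just the size bookkeeping (the Heap type and PagesToAdjust helper are hypothetical):

    #include <cstdint>

    using VAddr = std::uint64_t;
    inline constexpr std::uint64_t PageSize = 0x1000;

    struct Heap {
        VAddr region_start;
        VAddr current_end;

        std::uint64_t Size() const { // GetHeapSize() in the diff
            return current_end - region_start;
        }
    };

    // Mirrors the three branches of SetHeapSize: a negative count means pages
    // to unmap from region_start + new_size, positive means pages to map at
    // current_end, and 0 means no change.
    std::int64_t PagesToAdjust(const Heap& heap, std::uint64_t new_size) {
        if (new_size < heap.Size()) {
            return -static_cast<std::int64_t>((heap.Size() - new_size) / PageSize);
        }
        if (new_size == heap.Size()) {
            return 0;
        }
        return static_cast<std::int64_t>((new_size - heap.Size()) / PageSize);
    }

For example, shrinking an 8 MiB heap to 6 MiB yields -512 pages, which the real code unmaps starting at m_heap_region_start + size before releasing the same amount from the process resource limit.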
+ m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return addr; } -Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryPermission perm{}; - if (const Result result{CheckMemoryState( - nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, - KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, - KMemoryAttribute::DeviceSharedAndUncached)}; - result.IsError()) { - return result; - } +Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, + bool is_aligned) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); - block_manager->UpdateLock( - addr, size / PageSize, - [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { - block->ShareToDevice(permission); - }, - perm); + // Lock the table. + KScopedLightLock lk(m_general_lock); - return ResultSuccess; + // Check the memory state. + const auto test_state = + (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, + test_state, perm, perm, + KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, + KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, + &KMemoryBlock::ShareToDevice, KMemoryPermission::None); + + R_SUCCEED(); } -Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryPermission perm{}; - if (const Result result{CheckMemoryState( - nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, - KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, - KMemoryAttribute::DeviceSharedAndUncached)}; - result.IsError()) { - return result; - } +Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); - block_manager->UpdateLock( - addr, size / PageSize, - [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { - block->UnshareToDevice(permission); - }, - perm); + // Lock the table. + KScopedLightLock lk(m_general_lock); - return ResultSuccess; + // Check the memory state. 
+ size_t num_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_allocator_blocks), address, size, + KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. + const KMemoryBlockManager::MemoryBlockLockFunction lock_func = + m_enable_device_address_space_merge + ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare + : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, + KMemoryPermission::None); + + R_SUCCEED(); } -Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) { - return this->LockMemoryAndOpen( +Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. 
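// UnshareToDevice drops one device-share reference on each block in the
// range, clearing the DeviceShared attribute once the last reference is gone.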
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, + &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); + + R_SUCCEED(); +} + +Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { + R_RETURN(this->LockMemoryAndOpen( out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite), - KMemoryAttribute::Locked); + KMemoryAttribute::Locked)); } -Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) { - return this->UnlockMemory( +Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) { + R_RETURN(this->UnlockMemory( addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, - KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); -} - -Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { - block_manager = std::make_unique<KMemoryBlockManager>(start, end); - - return ResultSuccess; -} - -bool KPageTable::IsRegionMapped(VAddr address, u64 size) { - return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, - KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped) - .IsError(); + KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); } bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { - auto start_ptr = system.Memory().GetPointer(addr); + auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr); for (u64 offset{}; offset < size; offset += PageSize) { - if (start_ptr != system.Memory().GetPointer(addr + offset)) { + if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) { return false; } start_ptr += PageSize; @@ -1815,8 +2004,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { return true; } -void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, - KPageGroup& page_linked_list) { +void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) { VAddr addr{start}; while (addr < start + (num_pages * PageSize)) { const PAddr paddr{GetPhysicalAddr(addr)}; @@ -1826,16 +2014,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, } } -VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, - u64 needed_num_pages, std::size_t align) { - if (is_aslr_enabled) { +VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, + size_t align) { + if (m_enable_aslr) { UNIMPLEMENTED(); } - return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, - IsKernel() ? 1 : 4); + return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 
1 : 4); } -Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, +Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, OperationType operation) { ASSERT(this->IsLockedByCurrentThread()); @@ -1844,11 +2032,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& ASSERT(num_pages == page_group.GetNumPages()); for (const auto& node : page_group.Nodes()) { - const std::size_t size{node.GetNumPages() * PageSize}; + const size_t size{node.GetNumPages() * PageSize}; switch (operation) { case OperationType::MapGroup: - system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); + m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); break; default: ASSERT(false); @@ -1857,10 +2045,10 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& addr += size; } - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, +Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, PAddr map_addr) { ASSERT(this->IsLockedByCurrentThread()); @@ -1870,12 +2058,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission switch (operation) { case OperationType::Unmap: - system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); + m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); break; case OperationType::Map: { ASSERT(map_addr); ASSERT(Common::IsAligned(map_addr, PageSize)); - system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); + m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); break; } case OperationType::ChangePermissions: @@ -1884,25 +2072,25 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission default: ASSERT(false); } - return ResultSuccess; + R_SUCCEED(); } VAddr KPageTable::GetRegionAddress(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: - return address_space_start; + return m_address_space_start; case KMemoryState::Normal: - return heap_region_start; + return m_heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return alias_region_start; + return m_alias_region_start; case KMemoryState::Stack: - return stack_region_start; + return m_stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return kernel_map_region_start; + return m_kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -1913,31 +2101,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return alias_code_region_start; + return m_alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return code_region_start; + return m_code_region_start; default: UNREACHABLE(); } } -std::size_t KPageTable::GetRegionSize(KMemoryState state) const { +size_t KPageTable::GetRegionSize(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: - return address_space_end - address_space_start; + return m_address_space_end - m_address_space_start; case KMemoryState::Normal: - return heap_region_end - heap_region_start; + return 
m_heap_region_end - m_heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return alias_region_end - alias_region_start; + return m_alias_region_end - m_alias_region_start; case KMemoryState::Stack: - return stack_region_end - stack_region_start; + return m_stack_region_end - m_stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return kernel_map_region_end - kernel_map_region_start; + return m_kernel_map_region_end - m_kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -1948,16 +2136,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return alias_code_region_end - alias_code_region_start; + return m_alias_code_region_end - m_alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return code_region_end - code_region_start; + return m_code_region_end - m_code_region_start; default: UNREACHABLE(); } } -bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { +bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { const VAddr end = addr + size; const VAddr last = end - 1; @@ -1966,10 +2154,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1; - const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || - heap_region_start == heap_region_end); - const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || - alias_region_start == alias_region_end); + const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || + m_heap_region_start == m_heap_region_end); + const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || + m_alias_region_start == m_alias_region_end); switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: @@ -2008,23 +2196,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_ KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { // Validate the states match expectation. - R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory); - R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory); - R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory); + R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); + R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, - std::size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, - KMemoryPermission perm, KMemoryAttribute attr_mask, +Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) const { ASSERT(this->IsLockedByCurrentThread()); // Get information about the first block. 
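// The loop below walks the block list in address order; every block that
// overlaps [addr, addr + size) must satisfy the masked state, permission,
// and attribute expectations, or the whole check fails.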
const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2042,7 +2230,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA // Advance our iterator. it++; - ASSERT(it != block_manager->cend()); + ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2054,12 +2242,12 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, - VAddr addr, std::size_t size, KMemoryState state_mask, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, + VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { @@ -2067,7 +2255,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2075,14 +2263,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; // Validate all blocks in the range have correct state. - const KMemoryState first_state = info.state; - const KMemoryPermission first_perm = info.perm; - const KMemoryAttribute first_attr = info.attribute; + const KMemoryState first_state = info.m_state; + const KMemoryPermission first_perm = info.m_permission; + const KMemoryAttribute first_attr = info.m_attribute; while (true) { // Validate the current block. - R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory); - R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory); - R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), + R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); + R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), ResultInvalidCurrentMemory); // Validate against the provided masks. @@ -2095,7 +2283,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Advance our iterator. it++; - ASSERT(it != block_manager->cend()); + ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2116,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* if (out_blocks_needed != nullptr) { *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, @@ -2134,7 +2322,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. 
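// KScopedLightLock is an RAII guard, so the table lock is released on every
// early R_UNLESS/R_TRY return path as well as on normal exit.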
- KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check that the output page group is empty, if it exists. if (out_pg) { @@ -2162,6 +2350,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); } + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Decide on new perm and attr. new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); @@ -2172,9 +2366,11 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr } // Apply the memory block updates. - block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, @@ -2191,7 +2387,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the state. KMemoryState old_state{}; @@ -2213,15 +2409,23 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Update permission, if we need to. if (new_perm != old_perm) { R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); } // Apply the memory block updates. 
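// Passing KMemoryBlockDisableMergeAttribute::Locked as the clear mask lets
// the freshly unlocked blocks coalesce with their neighbours again.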
- block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 25774f232..c6aeacd96 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -9,8 +9,10 @@ #include "common/common_types.h" #include "common/page_table.h" #include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" #include "core/hle/result.h" @@ -34,58 +36,66 @@ public: ~KPageTable(); Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); - Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, + VAddr code_addr, size_t code_size, + KMemoryBlockSlabManager* mem_block_slab_manager, + KMemoryManager::Pool pool); + + void Finalize(); + + Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state, KMemoryPermission perm); - Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); - Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, + Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size); + Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, ICacheInvalidationStrategy icache_invalidation_strategy); - Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, + Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, VAddr src_addr); - Result MapPhysicalMemory(VAddr addr, std::size_t size); - Result UnmapPhysicalMemory(VAddr addr, std::size_t size); - Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); - Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + Result MapPhysicalMemory(VAddr addr, size_t size); + Result UnmapPhysicalMemory(VAddr addr, size_t size); + Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size); + Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size); Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, KMemoryPermission perm); - Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, + Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, KMemoryState state, KMemoryPermission perm) { - return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, - this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, - state, perm); + R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, + this->GetRegionAddress(state), + this->GetRegionSize(state) / PageSize, state, perm)); } Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); - Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); - Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); + Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state); 
+ Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm); KMemoryInfo QueryInfo(VAddr addr); - Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); - Result ResetTransferMemory(VAddr addr, std::size_t size); - Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); - Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); - Result SetMaxHeapSize(std::size_t size); - Result SetHeapSize(VAddr* out, std::size_t size); - ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, - bool is_map_only, VAddr region_start, - std::size_t region_num_pages, KMemoryState state, - KMemoryPermission perm, PAddr map_addr = 0); - Result LockForDeviceAddressSpace(VAddr addr, std::size_t size); - Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); - Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size); - Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg); + Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm); + Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr); + Result SetMaxHeapSize(size_t size); + Result SetHeapSize(VAddr* out, size_t size); + ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only, + VAddr region_start, size_t region_num_pages, + KMemoryState state, KMemoryPermission perm, + PAddr map_addr = 0); + + Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, + bool is_aligned); + Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); + + Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); + + Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); + Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr); Common::PageTable& PageTableImpl() { - return page_table_impl; + return *m_page_table_impl; } const Common::PageTable& PageTableImpl() const { - return page_table_impl; + return *m_page_table_impl; } - bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; + bool CanContain(VAddr addr, size_t size, KMemoryState state) const; private: enum class OperationType : u32 { @@ -96,67 +106,65 @@ private: ChangePermissionsAndRefresh, }; - static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask | - KMemoryAttribute::IpcLocked | - KMemoryAttribute::DeviceShared; + static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = + KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; - Result InitializeMemoryLayout(VAddr start, VAddr end); Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); - Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, - bool is_pa_valid, VAddr region_start, std::size_t region_num_pages, + Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, + bool is_pa_valid, VAddr region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); - bool IsRegionMapped(VAddr address, u64 size); bool IsRegionContiguous(VAddr addr, u64 size) const; - 
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list); + void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list); KMemoryInfo QueryInfoImpl(VAddr addr); - VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, - std::size_t align); - Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, + VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, + size_t align); + Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, OperationType operation); - Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, - OperationType operation, PAddr map_addr = 0); + Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, + PAddr map_addr = 0); VAddr GetRegionAddress(KMemoryState state) const; - std::size_t GetRegionSize(KMemoryState state) const; + size_t GetRegionSize(KMemoryState state) const; - VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, - std::size_t alignment, std::size_t offset, std::size_t guard_pages); + VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages); - Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, + Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; - Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, + Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { - return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, - perm, attr_mask, attr); + R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, + perm, attr_mask, attr)); } Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, - std::size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr, + size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; - Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, + Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, - state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); + R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, 
size, + state_mask, state, perm_mask, perm, attr_mask, attr, + ignore_attr)); } - Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, + Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, - attr_mask, attr, ignore_attr); + R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, + attr_mask, attr, ignore_attr)); } Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, @@ -174,13 +182,13 @@ private: bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); bool IsLockedByCurrentThread() const { - return general_lock.IsLockedByCurrentThread(); + return m_general_lock.IsLockedByCurrentThread(); } bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { ASSERT(this->IsLockedByCurrentThread()); - return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); + return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); } bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { @@ -191,95 +199,93 @@ private: return *out != 0; } - mutable KLightLock general_lock; - mutable KLightLock map_physical_memory_lock; - - std::unique_ptr<KMemoryBlockManager> block_manager; + mutable KLightLock m_general_lock; + mutable KLightLock m_map_physical_memory_lock; public: constexpr VAddr GetAddressSpaceStart() const { - return address_space_start; + return m_address_space_start; } constexpr VAddr GetAddressSpaceEnd() const { - return address_space_end; + return m_address_space_end; } - constexpr std::size_t GetAddressSpaceSize() const { - return address_space_end - address_space_start; + constexpr size_t GetAddressSpaceSize() const { + return m_address_space_end - m_address_space_start; } constexpr VAddr GetHeapRegionStart() const { - return heap_region_start; + return m_heap_region_start; } constexpr VAddr GetHeapRegionEnd() const { - return heap_region_end; + return m_heap_region_end; } - constexpr std::size_t GetHeapRegionSize() const { - return heap_region_end - heap_region_start; + constexpr size_t GetHeapRegionSize() const { + return m_heap_region_end - m_heap_region_start; } constexpr VAddr GetAliasRegionStart() const { - return alias_region_start; + return m_alias_region_start; } constexpr VAddr GetAliasRegionEnd() const { - return alias_region_end; + return m_alias_region_end; } - constexpr std::size_t GetAliasRegionSize() const { - return alias_region_end - alias_region_start; + constexpr size_t GetAliasRegionSize() const { + return m_alias_region_end - m_alias_region_start; } constexpr VAddr GetStackRegionStart() const { - return stack_region_start; + return m_stack_region_start; } constexpr VAddr GetStackRegionEnd() const { - return stack_region_end; + return m_stack_region_end; } - constexpr std::size_t GetStackRegionSize() const { - return stack_region_end - stack_region_start; + constexpr size_t GetStackRegionSize() const { + return m_stack_region_end - m_stack_region_start; } constexpr VAddr GetKernelMapRegionStart() const { - return kernel_map_region_start; + return m_kernel_map_region_start; } constexpr VAddr GetKernelMapRegionEnd() const { - return 
kernel_map_region_end; + return m_kernel_map_region_end; } constexpr VAddr GetCodeRegionStart() const { - return code_region_start; + return m_code_region_start; } constexpr VAddr GetCodeRegionEnd() const { - return code_region_end; + return m_code_region_end; } constexpr VAddr GetAliasCodeRegionStart() const { - return alias_code_region_start; + return m_alias_code_region_start; } constexpr VAddr GetAliasCodeRegionSize() const { - return alias_code_region_end - alias_code_region_start; + return m_alias_code_region_end - m_alias_code_region_start; } - std::size_t GetNormalMemorySize() { - KScopedLightLock lk(general_lock); - return GetHeapSize() + mapped_physical_memory_size; + size_t GetNormalMemorySize() { + KScopedLightLock lk(m_general_lock); + return GetHeapSize() + m_mapped_physical_memory_size; } - constexpr std::size_t GetAddressSpaceWidth() const { - return address_space_width; + constexpr size_t GetAddressSpaceWidth() const { + return m_address_space_width; } - constexpr std::size_t GetHeapSize() const { - return current_heap_end - heap_region_start; + constexpr size_t GetHeapSize() const { + return m_current_heap_end - m_heap_region_start; } - constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { - return address_space_start <= address && address + size - 1 <= address_space_end - 1; + constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const { + return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1; } - constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { - return alias_region_start > address || address + size - 1 > alias_region_end - 1; + constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const { + return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1; } - constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { - return stack_region_start > address || address + size - 1 > stack_region_end - 1; + constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const { + return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1; } - constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { + constexpr bool IsInvalidRegion(VAddr address, size_t size) const { return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; } - constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { - return address + size > heap_region_start && heap_region_end > address; + constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const { + return address + size > m_heap_region_start && m_heap_region_end > address; } - constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { - return address + size > alias_region_start && alias_region_end > address; + constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const { + return address + size > m_alias_region_start && m_alias_region_end > address; } - constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { + constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const { if (IsInvalidRegion(address, size)) { return true; } @@ -291,73 +297,78 @@ public: } return {}; } - constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { + constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const { return !IsOutsideASLRRegion(address, size); } - constexpr std::size_t GetNumGuardPages() const { + constexpr size_t GetNumGuardPages() const { return 
IsKernel() ? 1 : 4; } PAddr GetPhysicalAddr(VAddr addr) const { - const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; + const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits]; ASSERT(backing_addr); return backing_addr + addr; } constexpr bool Contains(VAddr addr) const { - return address_space_start <= addr && addr <= address_space_end - 1; + return m_address_space_start <= addr && addr <= m_address_space_end - 1; } - constexpr bool Contains(VAddr addr, std::size_t size) const { - return address_space_start <= addr && addr < addr + size && - addr + size - 1 <= address_space_end - 1; + constexpr bool Contains(VAddr addr, size_t size) const { + return m_address_space_start <= addr && addr < addr + size && + addr + size - 1 <= m_address_space_end - 1; } private: constexpr bool IsKernel() const { - return is_kernel; + return m_is_kernel; } constexpr bool IsAslrEnabled() const { - return is_aslr_enabled; + return m_enable_aslr; } - constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { - return (address_space_start <= addr) && - (num_pages <= (address_space_end - address_space_start) / PageSize) && - (addr + num_pages * PageSize - 1 <= address_space_end - 1); + constexpr bool ContainsPages(VAddr addr, size_t num_pages) const { + return (m_address_space_start <= addr) && + (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && + (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); } private: - VAddr address_space_start{}; - VAddr address_space_end{}; - VAddr heap_region_start{}; - VAddr heap_region_end{}; - VAddr current_heap_end{}; - VAddr alias_region_start{}; - VAddr alias_region_end{}; - VAddr stack_region_start{}; - VAddr stack_region_end{}; - VAddr kernel_map_region_start{}; - VAddr kernel_map_region_end{}; - VAddr code_region_start{}; - VAddr code_region_end{}; - VAddr alias_code_region_start{}; - VAddr alias_code_region_end{}; - - std::size_t mapped_physical_memory_size{}; - std::size_t max_heap_size{}; - std::size_t max_physical_memory_size{}; - std::size_t address_space_width{}; - - bool is_kernel{}; - bool is_aslr_enabled{}; - - u32 heap_fill_value{}; - const KMemoryRegion* cached_physical_heap_region{}; - - KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; - KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; - - Common::PageTable page_table_impl; - - Core::System& system; + VAddr m_address_space_start{}; + VAddr m_address_space_end{}; + VAddr m_heap_region_start{}; + VAddr m_heap_region_end{}; + VAddr m_current_heap_end{}; + VAddr m_alias_region_start{}; + VAddr m_alias_region_end{}; + VAddr m_stack_region_start{}; + VAddr m_stack_region_end{}; + VAddr m_kernel_map_region_start{}; + VAddr m_kernel_map_region_end{}; + VAddr m_code_region_start{}; + VAddr m_code_region_end{}; + VAddr m_alias_code_region_start{}; + VAddr m_alias_code_region_end{}; + + size_t m_mapped_physical_memory_size{}; + size_t m_max_heap_size{}; + size_t m_max_physical_memory_size{}; + size_t m_address_space_width{}; + + KMemoryBlockManager m_memory_block_manager; + + bool m_is_kernel{}; + bool m_enable_aslr{}; + bool m_enable_device_address_space_merge{}; + + KMemoryBlockSlabManager* m_memory_block_slab_manager{}; + + u32 m_heap_fill_value{}; + const KMemoryRegion* m_cached_physical_heap_region{}; + + KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; + KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; + + 
std::unique_ptr<Common::PageTable> m_page_table_impl; + + Core::System& m_system; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index d3e99665f..8c3495e5a 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process->name = std::move(process_name); process->resource_limit = res_limit; - process->status = ProcessStatus::Created; + process->system_resource_address = 0; + process->state = State::Created; process->program_id = 0; process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() : kernel.CreateNewUserProcessID(); @@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process->exception_thread = nullptr; process->is_suspended = false; process->schedule_count = 0; + process->is_handle_table_initialized = false; // Open a reference to the resource limit. process->resource_limit->Open(); - return ResultSuccess; + R_SUCCEED(); } void KProcess::DoWorkerTaskImpl() { @@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() { } } -u64 KProcess::GetTotalPhysicalMemoryAvailable() const { +u64 KProcess::GetTotalPhysicalMemoryAvailable() { const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + - page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size + + page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + main_thread_stack_size}; if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); capacity != pool_size) { @@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const { return memory_usage_capacity; } -u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { +u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); } -u64 KProcess::GetTotalPhysicalMemoryUsed() const { - return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() + +u64 KProcess::GetTotalPhysicalMemoryUsed() { + return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() + GetSystemResourceSize(); } -u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { +u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); } @@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad shmem->Open(); shemen_info->Open(); - return ResultSuccess; + R_SUCCEED(); } void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, @@ -289,12 +291,12 @@ Result KProcess::Reset() { KScopedSchedulerLock sl{kernel}; // Validate that we're in a state that we can reset. - R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); + R_UNLESS(state != State::Terminated, ResultInvalidState); R_UNLESS(is_signaled, ResultInvalidState); // Clear signaled. is_signaled = false; - return ResultSuccess; + R_SUCCEED(); } Result KProcess::SetActivity(ProcessActivity activity) { @@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) { KScopedSchedulerLock sl{kernel}; // Validate our state. 
- R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState); - R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); + R_UNLESS(state != State::Terminating, ResultInvalidState); + R_UNLESS(state != State::Terminated, ResultInvalidState); // Either pause or resume. if (activity == ProcessActivity::Paused) { // Verify that we're not suspended. - if (is_suspended) { - return ResultInvalidState; - } + R_UNLESS(!is_suspended, ResultInvalidState); // Suspend all threads. for (auto* thread : GetThreadList()) { @@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { ASSERT(activity == ProcessActivity::Runnable); // Verify that we're suspended. - if (!is_suspended) { - return ResultInvalidState; - } + R_UNLESS(is_suspended, ResultInvalidState); // Resume all threads. for (auto* thread : GetThreadList()) { @@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { SetSuspended(false); } - return ResultSuccess; + R_SUCCEED(); } Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { @@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: system_resource_size = metadata.GetSystemResourceSize(); image_size = code_size; + // We currently do not support process-specific system resource + UNIMPLEMENTED_IF(system_resource_size != 0); + KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, code_size + system_resource_size); if (!memory_reservation.Succeeded()) { LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", code_size + system_resource_size); - return ResultLimitReached; + R_RETURN(ResultLimitReached); } // Initialize proces address space - if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, - 0x8000000, code_size, - KMemoryManager::Pool::Application)}; + if (const Result result{page_table.InitializeForProcess( + metadata.GetAddressSpaceType(), false, 0x8000000, code_size, + &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; result.IsError()) { - return result; + R_RETURN(result); } // Map process code region - if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(), - code_size / PageSize, KMemoryState::Code, - KMemoryPermission::None)}; + if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(), + code_size / PageSize, KMemoryState::Code, + KMemoryPermission::None)}; result.IsError()) { - return result; + R_RETURN(result); } // Initialize process capabilities const auto& caps{metadata.GetKernelCapabilities()}; if (const Result result{ - capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)}; + capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; result.IsError()) { - return result; + R_RETURN(result); } // Set memory usage capacity @@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is36Bit: case FileSys::ProgramAddressSpaceType::Is39Bit: - memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); + memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart(); break; case FileSys::ProgramAddressSpaceType::Is32BitNoMap: - memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + - 
page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); + memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() + + page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart(); break; default: @@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: } // Create TLS region - R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); + R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); memory_reservation.Commit(); - return handle_table.Initialize(capabilities.GetHandleTableSize()); + R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize())); } void KProcess::Run(s32 main_thread_priority, u64 stack_size) { @@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) { resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; - ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError()); + ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); - ChangeStatus(ProcessStatus::Running); + ChangeState(State::Running); SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); } void KProcess::PrepareForTermination() { - ChangeStatus(ProcessStatus::Exiting); + ChangeState(State::Terminating); const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { for (auto* thread : in_thread_list) { @@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() { stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); - this->DeleteThreadLocalRegion(tls_region_address); - tls_region_address = 0; + this->DeleteThreadLocalRegion(plr_address); + plr_address = 0; if (resource_limit) { resource_limit->Release(LimitableResource::PhysicalMemory, main_thread_stack_size + image_size); } - ChangeStatus(ProcessStatus::Exited); + ChangeState(State::Terminated); } void KProcess::Finalize() { @@ -474,7 +475,7 @@ void KProcess::Finalize() { } // Finalize the page table. - page_table.reset(); + page_table.Finalize(); // Perform inherited finalization. KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); @@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { } *out = tlr; - return ResultSuccess; + R_SUCCEED(); } } @@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { // We succeeded! 
tlp_guard.Cancel(); *out = tlr; - return ResultSuccess; + R_SUCCEED(); } Result KProcess::DeleteThreadLocalRegion(VAddr addr) { @@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { KThreadLocalPage::Free(kernel, page_to_free); } - return ResultSuccess; + R_SUCCEED(); } bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, @@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { const auto ReprotectSegment = [&](const CodeSet::Segment& segment, Svc::MemoryPermission permission) { - page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); + page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); }; kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), @@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const { } KProcess::KProcess(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>( - kernel_.System())}, + : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()}, handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_}, list_lock{kernel_} {} KProcess::~KProcess() = default; -void KProcess::ChangeStatus(ProcessStatus new_status) { - if (status == new_status) { +void KProcess::ChangeState(State new_state) { + if (state == new_state) { return; } - status = new_status; + state = new_state; is_signaled = true; NotifyAvailable(); } @@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { // The kernel always ensures that the given stack size is page aligned. main_thread_stack_size = Common::AlignUp(stack_size, PageSize); - const VAddr start{page_table->GetStackRegionStart()}; - const std::size_t size{page_table->GetStackRegionEnd() - start}; + const VAddr start{page_table.GetStackRegionStart()}; + const std::size_t size{page_table.GetStackRegionEnd() - start}; CASCADE_RESULT(main_thread_stack_top, - page_table->AllocateAndMapMemory( + page_table.AllocateAndMapMemory( main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, KMemoryState::Stack, KMemoryPermission::UserReadWrite)); main_thread_stack_top += main_thread_stack_size; - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index d56d73bab..2e0cc3d0b 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -13,6 +13,7 @@ #include "core/hle/kernel/k_auto_object.h" #include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_handle_table.h" +#include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread_local_page.h" #include "core/hle/kernel/k_worker_task.h" @@ -31,7 +32,6 @@ class ProgramMetadata; namespace Kernel { class KernelCore; -class KPageTable; class KResourceLimit; class KThread; class KSharedMemoryInfo; @@ -45,24 +45,6 @@ enum class MemoryRegion : u16 { BASE = 3, }; -/** - * Indicates the status of a Process instance. - * - * @note These match the values as used by kernel, - * so new entries should only be added if RE - * shows that a new value has been introduced. 
- */ -enum class ProcessStatus { - Created, - CreatedWithDebuggerAttached, - Running, - WaitingForDebuggerToAttach, - DebuggerAttached, - Exiting, - Exited, - DebugBreak, -}; - enum class ProcessActivity : u32 { Runnable, Paused, @@ -89,6 +71,17 @@ public: explicit KProcess(KernelCore& kernel_); ~KProcess() override; + enum class State { + Created = static_cast<u32>(Svc::ProcessState::Created), + CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached), + Running = static_cast<u32>(Svc::ProcessState::Running), + Crashed = static_cast<u32>(Svc::ProcessState::Crashed), + RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached), + Terminating = static_cast<u32>(Svc::ProcessState::Terminating), + Terminated = static_cast<u32>(Svc::ProcessState::Terminated), + DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak), + }; + enum : u64 { /// Lowest allowed process ID for a kernel initial process. InitialKIPIDMin = 1, @@ -114,12 +107,12 @@ public: /// Gets a reference to the process' page table. KPageTable& PageTable() { - return *page_table; + return page_table; } /// Gets const a reference to the process' page table. const KPageTable& PageTable() const { - return *page_table; + return page_table; } /// Gets a reference to the process' handle table. @@ -145,26 +138,25 @@ public: } Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { - return condition_var.Wait(address, cv_key, tag, ns); + R_RETURN(condition_var.Wait(address, cv_key, tag, ns)); } Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { - return address_arbiter.SignalToAddress(address, signal_type, value, count); + R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count)); } Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, s64 timeout) { - return address_arbiter.WaitForAddress(address, arb_type, value, timeout); + R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout)); } - /// Gets the address to the process' dedicated TLS region. - VAddr GetTLSRegionAddress() const { - return tls_region_address; + VAddr GetProcessLocalRegionAddress() const { + return plr_address; } /// Gets the current status of the process - ProcessStatus GetStatus() const { - return status; + State GetState() const { + return state; } /// Gets the unique ID that identifies this particular process. @@ -286,18 +278,18 @@ public: } /// Retrieves the total physical memory available to this process in bytes. - u64 GetTotalPhysicalMemoryAvailable() const; + u64 GetTotalPhysicalMemoryAvailable(); /// Retrieves the total physical memory available to this process in bytes, /// without the size of the personal system resource heap added to it. - u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; + u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource(); /// Retrieves the total physical memory used by this process in bytes. - u64 GetTotalPhysicalMemoryUsed() const; + u64 GetTotalPhysicalMemoryUsed(); /// Retrieves the total physical memory used by this process in bytes, /// without the size of the personal system resource heap added to it. - u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; + u64 GetTotalPhysicalMemoryUsedWithoutSystemResource(); /// Gets the list of all threads created with this process as their owner. std::list<KThread*>& GetThreadList() { @@ -415,19 +407,24 @@ private: pinned_threads[core_id] = nullptr; } - /// Changes the process status. 
If the status is different - /// from the current process status, then this will trigger - /// a process signal. - void ChangeStatus(ProcessStatus new_status); + void FinalizeHandleTable() { + // Finalize the table. + handle_table.Finalize(); + + // Note that the table is finalized. + is_handle_table_initialized = false; + } + + void ChangeState(State new_state); /// Allocates the main thread stack for the process, given the stack size in bytes. Result AllocateMainThreadStack(std::size_t stack_size); /// Memory manager for this process - std::unique_ptr<KPageTable> page_table; + KPageTable page_table; /// Current status of the process - ProcessStatus status{}; + State state{}; /// The ID of this process u64 process_id = 0; @@ -443,6 +440,8 @@ private: /// Resource limit descriptor for this process KResourceLimit* resource_limit{}; + VAddr system_resource_address{}; + /// The ideal CPU core for this process, threads are scheduled on this core by default. u8 ideal_core = 0; @@ -469,7 +468,7 @@ private: KConditionVariable condition_var; /// Address indicating the location of the process' dedicated TLS region. - VAddr tls_region_address = 0; + VAddr plr_address = 0; /// Random values for svcGetInfo RandomEntropy std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; @@ -495,8 +494,12 @@ private: /// Schedule count of this process s64 schedule_count{}; + size_t memory_release_hint{}; + bool is_signaled{}; bool is_suspended{}; + bool is_immortal{}; + bool is_handle_table_initialized{}; bool is_initialized{}; std::atomic<u16> num_running_threads{}; diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp index 94c5464fe..5c942d47c 100644 --- a/src/core/hle/kernel/k_readable_event.cpp +++ b/src/core/hle/kernel/k_readable_event.cpp @@ -15,31 +15,44 @@ KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{ker KReadableEvent::~KReadableEvent() = default; +void KReadableEvent::Initialize(KEvent* parent) { + m_is_signaled = false; + m_parent = parent; + + if (m_parent != nullptr) { + m_parent->Open(); + } +} + bool KReadableEvent::IsSignaled() const { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); - return is_signaled; + return m_is_signaled; } void KReadableEvent::Destroy() { - if (parent) { - parent->Close(); + if (m_parent) { + { + KScopedSchedulerLock sl{kernel}; + m_parent->OnReadableEventDestroyed(); + } + m_parent->Close(); } } Result KReadableEvent::Signal() { KScopedSchedulerLock lk{kernel}; - if (!is_signaled) { - is_signaled = true; - NotifyAvailable(); + if (!m_is_signaled) { + m_is_signaled = true; + this->NotifyAvailable(); } return ResultSuccess; } Result KReadableEvent::Clear() { - Reset(); + this->Reset(); return ResultSuccess; } @@ -47,11 +60,11 @@ Result KReadableEvent::Clear() { Result KReadableEvent::Reset() { KScopedSchedulerLock lk{kernel}; - if (!is_signaled) { + if (!m_is_signaled) { return ResultInvalidState; } - is_signaled = false; + m_is_signaled = false; return ResultSuccess; } diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h index 18dcad289..743f96bf5 100644 --- a/src/core/hle/kernel/k_readable_event.h +++ b/src/core/hle/kernel/k_readable_event.h @@ -20,26 +20,23 @@ public: explicit KReadableEvent(KernelCore& kernel_); ~KReadableEvent() override; - void Initialize(KEvent* parent_event_, std::string&& name_) { - is_signaled = false; - parent = parent_event_; - name = std::move(name_); - } + void 
Initialize(KEvent* parent); KEvent* GetParent() const { - return parent; + return m_parent; } + Result Signal(); + Result Clear(); + bool IsSignaled() const override; void Destroy() override; - Result Signal(); - Result Clear(); Result Reset(); private: - bool is_signaled{}; - KEvent* parent{}; + bool m_is_signaled{}; + KEvent* m_parent{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 802c646a6..faf03fcc8 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -7,6 +7,8 @@ #include "common/assert.h" #include "common/common_types.h" #include "common/logging/log.h" +#include "common/scope_exit.h" +#include "core/core.h" #include "core/core_timing.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/hle_ipc.h" @@ -18,13 +20,16 @@ #include "core/hle/kernel/k_server_session.h" #include "core/hle/kernel/k_session.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/service_thread.h" #include "core/memory.h" namespace Kernel { -KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} +using ThreadQueueImplForKServerSessionRequest = KThreadQueue; + +KServerSession::KServerSession(KernelCore& kernel_) + : KSynchronizationObject{kernel_}, m_lock{kernel_} {} KServerSession::~KServerSession() = default; @@ -33,17 +38,14 @@ void KServerSession::Initialize(KSession* parent_session_, std::string&& name_, // Set member variables. parent = parent_session_; name = std::move(name_); - - if (manager_) { - manager = manager_; - } else { - manager = std::make_shared<SessionRequestManager>(kernel); - } + manager = manager_; } void KServerSession::Destroy() { parent->OnServerClosed(); + this->CleanupRequests(); + parent->Close(); // Release host emulation members. @@ -54,13 +56,13 @@ void KServerSession::Destroy() { } void KServerSession::OnClientClosed() { - if (manager->HasSessionHandler()) { + if (manager && manager->HasSessionHandler()) { manager->SessionHandler().ClientDisconnected(this); } } bool KServerSession::IsSignaled() const { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); // If the client is closed, we're always signaled. if (parent->IsClientClosed()) { @@ -68,114 +70,281 @@ bool KServerSession::IsSignaled() const { } // Otherwise, we're signaled if we have a request and aren't handling one. - return false; + return !m_request_list.empty() && m_current_request == nullptr; } -void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) { - manager->AppendDomainHandler(std::move(handler)); +Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { + u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; + auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread); + + context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); + + return manager->QueueSyncRequest(parent, std::move(context)); } -std::size_t KServerSession::NumDomainRequestHandlers() const { - return manager->DomainHandlerCount(); +Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { + Result result = manager->CompleteSyncRequest(this, context); + + // The calling thread is waiting for this request to complete, so wake it up. 
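
// (This is the server half of the synchronous handshake set up in OnRequest()
// below: the client thread parked itself on a KThreadQueue via BeginWait(),
// and EndWait() hands it the final Result and makes it schedulable again.
// A condensed sketch of the pattern, stitched together from the code in this
// change:)
//
//     // Client side, inside OnRequest():
//     ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
//     GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
//     // ... server side, once the reply is ready:
//     client_thread->EndWait(result);
//     // ... client resumes and returns:
//     return GetCurrentThread(kernel).GetWaitResult();
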
+ context.GetThread().EndWait(result); + + return result; } -Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { - if (!context.HasDomainMessageHeader()) { - return ResultSuccess; - } +Result KServerSession::OnRequest(KSessionRequest* request) { + // Create the wait queue. + ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; - // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs - context.SetSessionRequestManager(manager); - - // If there is a DomainMessageHeader, then this is CommandType "Request" - const auto& domain_message_header = context.GetDomainMessageHeader(); - const u32 object_id{domain_message_header.object_id}; - switch (domain_message_header.command) { - case IPC::DomainMessageHeader::CommandType::SendMessage: - if (object_id > manager->DomainHandlerCount()) { - LOG_CRITICAL(IPC, - "object_id {} is too big! This probably means a recent service call " - "to {} needed to return a new interface!", - object_id, name); - ASSERT(false); - return ResultSuccess; // Ignore error if asserts are off - } - if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) { - return strong_ptr->HandleSyncRequest(*this, context); + { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Ensure that we can handle new requests. + R_UNLESS(!parent->IsServerClosed(), ResultSessionClosed); + + // Check that we're not terminating. + R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); + + if (manager) { + // HLE request. + auto& memory{kernel.System().Memory()}; + this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory); } else { - ASSERT(false); - return ResultSuccess; - } + // Non-HLE request. - case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { - LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); + // Get whether we're empty. + const bool was_empty = m_request_list.empty(); - manager->CloseDomainHandler(object_id - 1); + // Add the request to the list. + request->Open(); + m_request_list.push_back(*request); - IPC::ResponseBuilder rb{context, 2}; - rb.Push(ResultSuccess); - return ResultSuccess; - } + // If we were empty, signal. + if (was_empty) { + this->NotifyAvailable(); + } + } + + // If we have a request event, this is asynchronous, and we don't need to wait. + R_SUCCEED_IF(request->GetEvent() != nullptr); + + // This is a synchronous request, so we should wait for our request to complete. + GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); + GetCurrentThread(kernel).BeginWait(&wait_queue); } - LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); - ASSERT(false); - return ResultSuccess; + return GetCurrentThread(kernel).GetWaitResult(); } -Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { - u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; - auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread); +Result KServerSession::SendReply() { + // Lock the session. + KScopedLightLock lk{m_lock}; - context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); + // Get the request. 
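
// (m_current_request is the request most recently popped by ReceiveRequest()
// and not yet answered. SendReply() claims it under the scheduler lock, so a
// concurrent CleanupRequests() cannot also take it. A rough sketch of the
// intended lifecycle, with a hypothetical server loop:)
//
//     server->ReceiveRequest();  // pops m_request_list, sets m_current_request
//     // ... service the message buffer ...
//     server->SendReply();       // clears m_current_request, wakes the client
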
+ KSessionRequest* request; + { + KScopedSchedulerLock sl{kernel}; - // Ensure we have a session request handler - if (manager->HasSessionRequestHandler(*context)) { - if (auto strong_ptr = manager->GetServiceThread().lock()) { - strong_ptr->QueueSyncRequest(*parent, std::move(context)); - } else { - ASSERT_MSG(false, "strong_ptr is nullptr!"); + // Get the current request. + request = m_current_request; + R_UNLESS(request != nullptr, ResultInvalidState); + + // Clear the current request, since we're processing it. + m_current_request = nullptr; + if (!m_request_list.empty()) { + this->NotifyAvailable(); } - } else { - ASSERT_MSG(false, "handler is invalid!"); } - return ResultSuccess; -} + // Close reference to the request once we're done processing it. + SCOPE_EXIT({ request->Close(); }); -Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { - Result result = ResultSuccess; + // Extract relevant information from the request. + const uintptr_t client_message = request->GetAddress(); + const size_t client_buffer_size = request->GetSize(); + KThread* client_thread = request->GetThread(); + KEvent* event = request->GetEvent(); - // If the session has been converted to a domain, handle the domain request - if (manager->HasSessionRequestHandler(context)) { - if (IsDomain() && context.HasDomainMessageHeader()) { - result = HandleDomainSyncRequest(context); - // If there is no domain header, the regular session handler is used - } else if (manager->HasSessionHandler()) { - // If this ServerSession has an associated HLE handler, forward the request to it. - result = manager->SessionHandler().HandleSyncRequest(*this, context); - } + // Check whether we're closed. + const bool closed = (client_thread == nullptr || parent->IsClientClosed()); + + Result result = ResultSuccess; + if (!closed) { + // If we're not closed, send the reply. + Core::Memory::Memory& memory{kernel.System().Memory()}; + KThread* server_thread{GetCurrentThreadPointer(kernel)}; + UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); + + auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); + auto* dst_msg_buffer = memory.GetPointer(client_message); + std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); } else { - ASSERT_MSG(false, "Session handler is invalid, stubbing response!"); - IPC::ResponseBuilder rb(context, 2); - rb.Push(ResultSuccess); + result = ResultSessionClosed; } - if (convert_to_domain) { - ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance."); - manager->ConvertToDomain(); - convert_to_domain = false; + // Select a result for the client. + Result client_result = result; + if (closed && R_SUCCEEDED(result)) { + result = ResultSessionClosed; + client_result = ResultSessionClosed; + } else { + result = ResultSuccess; } - // The calling thread is waiting for this request to complete, so wake it up. - context.GetThread().EndWait(result); + // If there's a client thread, update it. + if (client_thread != nullptr) { + if (event != nullptr) { + // // Get the client process/page table. + // KProcess *client_process = client_thread->GetOwnerProcess(); + // KPageTable *client_page_table = &client_process->PageTable(); + + // // If we need to, reply with an async error. + // if (R_FAILED(client_result)) { + // ReplyAsyncError(client_process, client_message, client_buffer_size, + // client_result); + // } + + // // Unlock the client buffer. + // // NOTE: Nintendo does not check the result of this. 
+ // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); + + // Signal the event. + event->Signal(); + } else { + // End the client thread's wait. + KScopedSchedulerLock sl{kernel}; + + if (!client_thread->IsTerminationRequested()) { + client_thread->EndWait(client_result); + } + } + } return result; } -Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing) { - return QueueSyncRequest(thread, memory); +Result KServerSession::ReceiveRequest() { + // Lock the session. + KScopedLightLock lk{m_lock}; + + // Get the request and client thread. + KSessionRequest* request; + KThread* client_thread; + + { + KScopedSchedulerLock sl{kernel}; + + // Ensure that we can service the request. + R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed); + + // Ensure we aren't already servicing a request. + R_UNLESS(m_current_request == nullptr, ResultNotFound); + + // Ensure we have a request to service. + R_UNLESS(!m_request_list.empty(), ResultNotFound); + + // Pop the first request from the list. + request = &m_request_list.front(); + m_request_list.pop_front(); + + // Get the thread for the request. + client_thread = request->GetThread(); + R_UNLESS(client_thread != nullptr, ResultSessionClosed); + + // Open the client thread. + client_thread->Open(); + } + + SCOPE_EXIT({ client_thread->Close(); }); + + // Set the request as our current. + m_current_request = request; + + // Get the client address. + uintptr_t client_message = request->GetAddress(); + size_t client_buffer_size = request->GetSize(); + // bool recv_list_broken = false; + + // Receive the message. + Core::Memory::Memory& memory{kernel.System().Memory()}; + KThread* server_thread{GetCurrentThreadPointer(kernel)}; + UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); + + auto* src_msg_buffer = memory.GetPointer(client_message); + auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); + std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); + + // We succeeded. + return ResultSuccess; +} + +void KServerSession::CleanupRequests() { + KScopedLightLock lk(m_lock); + + // Clean up any pending requests. + while (true) { + // Get the next request. + KSessionRequest* request = nullptr; + { + KScopedSchedulerLock sl{kernel}; + + if (m_current_request) { + // Choose the current request if we have one. + request = m_current_request; + m_current_request = nullptr; + } else if (!m_request_list.empty()) { + // Pop the request from the front of the list. + request = &m_request_list.front(); + m_request_list.pop_front(); + } + } + + // If there's no request, we're done. + if (request == nullptr) { + break; + } + + // Close a reference to the request once it's cleaned up. + SCOPE_EXIT({ request->Close(); }); + + // Extract relevant information from the request. + // const uintptr_t client_message = request->GetAddress(); + // const size_t client_buffer_size = request->GetSize(); + KThread* client_thread = request->GetThread(); + KEvent* event = request->GetEvent(); + + // KProcess *server_process = request->GetServerProcess(); + // KProcess *client_process = (client_thread != nullptr) ? + // client_thread->GetOwnerProcess() : nullptr; + // KProcessPageTable *client_page_table = (client_process != nullptr) ? + // &client_process->GetPageTable() : nullptr; + + // Cleanup the mappings. 
+ // Result result = CleanupMap(request, server_process, client_page_table); + + // If there's a client thread, update it. + if (client_thread != nullptr) { + if (event != nullptr) { + // // We need to reply async. + // ReplyAsyncError(client_process, client_message, client_buffer_size, + // (R_SUCCEEDED(result) ? ResultSessionClosed : result)); + + // // Unlock the client buffer. + // NOTE: Nintendo does not check the result of this. + // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); + + // Signal the event. + event->Signal(); + } else { + // End the client thread's wait. + KScopedSchedulerLock sl{kernel}; + + if (!client_thread->IsTerminationRequested()) { + client_thread->EndWait(ResultSessionClosed); + } + } + } + } } } // namespace Kernel diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h index 6d0821945..32135473b 100644 --- a/src/core/hle/kernel/k_server_session.h +++ b/src/core/hle/kernel/k_server_session.h @@ -3,6 +3,7 @@ #pragma once +#include <list> #include <memory> #include <string> #include <utility> @@ -10,6 +11,8 @@ #include <boost/intrusive/list.hpp> #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_light_lock.h" +#include "core/hle/kernel/k_session_request.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/result.h" @@ -55,64 +58,29 @@ public: } bool IsSignaled() const override; - void OnClientClosed(); - void ClientConnected(SessionRequestHandlerPtr handler) { - manager->SetSessionHandler(std::move(handler)); - } - - void ClientDisconnected() { - manager = nullptr; - } - - /** - * Handle a sync request from the emulated application. - * - * @param thread Thread that initiated the request. - * @param memory Memory context to handle the sync request under. - * @param core_timing Core timing context to schedule the request event under. - * - * @returns Result from the operation. - */ - Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing); - - /// Adds a new domain request handler to the collection of request handlers within - /// this ServerSession instance. - void AppendDomainHandler(SessionRequestHandlerPtr handler); - - /// Retrieves the total number of domain request handlers that have been - /// appended to this ServerSession instance. - std::size_t NumDomainRequestHandlers() const; - - /// Returns true if the session has been converted to a domain, otherwise False - bool IsDomain() const { - return manager->IsDomain(); - } - - /// Converts the session to a domain at the end of the current command - void ConvertToDomain() { - convert_to_domain = true; - } - /// Gets the session request manager, which forwards requests to the underlying service std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() { return manager; } + /// TODO: flesh these out to match the real kernel + Result OnRequest(KSessionRequest* request); + Result SendReply(); + Result ReceiveRequest(); + private: + /// Frees up waiting client sessions when this server session is about to die + void CleanupRequests(); + /// Queues a sync request from the emulated application. Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory); /// Completes a sync request from the emulated application. Result CompleteSyncRequest(HLERequestContext& context); - /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an - /// object handle. 
- Result HandleDomainSyncRequest(Kernel::HLERequestContext& context); - - /// This session's HLE request handlers + /// This session's HLE request handlers; if nullptr, this is not an HLE server std::shared_ptr<SessionRequestManager> manager; /// When set to True, converts the session to a domain at the end of the command @@ -120,6 +88,12 @@ private: /// KSession that owns this KServerSession KSession* parent{}; + + /// List of threads which are pending a reply. + boost::intrusive::list<KSessionRequest> m_request_list; + KSessionRequest* m_current_request; + + KLightLock m_lock; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp new file mode 100644 index 000000000..520da6aa7 --- /dev/null +++ b/src/core/hle/kernel/k_session_request.cpp @@ -0,0 +1,61 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/hle/kernel/k_page_buffer.h" +#include "core/hle/kernel/k_session_request.h" + +namespace Kernel { + +Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size, + KMemoryState state, size_t index) { + // At most 15 buffers of each type (4-bit descriptor counts). + ASSERT(index < ((1ul << 4) - 1) * 3); + + // Get the mapping. + Mapping* mapping; + if (index < NumStaticMappings) { + mapping = &m_static_mappings[index]; + } else { + // Allocate a page for the extra mappings. + if (m_mappings == nullptr) { + KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel); + R_UNLESS(page_buffer != nullptr, ResultOutOfMemory); + + m_mappings = reinterpret_cast<Mapping*>(page_buffer); + } + + mapping = &m_mappings[index - NumStaticMappings]; + } + + // Set the mapping. + mapping->Set(client, server, size, state); + + return ResultSuccess; +} + +Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size, + KMemoryState state) { + ASSERT(m_num_recv == 0); + ASSERT(m_num_exch == 0); + return this->PushMap(client, server, size, state, m_num_send++); +} + +Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size, + KMemoryState state) { + ASSERT(m_num_exch == 0); + return this->PushMap(client, server, size, state, m_num_send + m_num_recv++); +} + +Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size, + KMemoryState state) { + return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++); +} + +void KSessionRequest::SessionMappings::Finalize() { + if (m_mappings) { + KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings)); + m_mappings = nullptr; + } +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h new file mode 100644 index 000000000..e5558bc2c --- /dev/null +++ b/src/core/hle/kernel/k_session_request.h @@ -0,0 +1,306 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <array> + +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +class KSessionRequest final : public KSlabAllocated<KSessionRequest>, + public KAutoObject, + public boost::intrusive::list_base_hook<> { + 
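
// (Deriving from boost::intrusive::list_base_hook<> embeds the list node in
// the request object itself, so KServerSession::m_request_list can queue
// requests without allocating. A minimal sketch of the idiom, assuming an
// already-created request:)
//
//     boost::intrusive::list<KSessionRequest> pending;
//     pending.push_back(*request);  // links the embedded hook; no copy
//     KSessionRequest& next = pending.front();
//     pending.pop_front();          // unlinks only; lifetime is ref-counted
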
KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject); + +public: + class SessionMappings { + private: + static constexpr size_t NumStaticMappings = 8; + + class Mapping { + public: + constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) { + m_client_address = c; + m_server_address = s; + m_size = sz; + m_state = st; + } + + constexpr VAddr GetClientAddress() const { + return m_client_address; + } + constexpr VAddr GetServerAddress() const { + return m_server_address; + } + constexpr size_t GetSize() const { + return m_size; + } + constexpr KMemoryState GetMemoryState() const { + return m_state; + } + + private: + VAddr m_client_address; + VAddr m_server_address; + size_t m_size; + KMemoryState m_state; + }; + + public: + explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {} + + void Initialize() {} + void Finalize(); + + size_t GetSendCount() const { + return m_num_send; + } + size_t GetReceiveCount() const { + return m_num_recv; + } + size_t GetExchangeCount() const { + return m_num_exch; + } + + Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state); + Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state); + Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state); + + VAddr GetSendClientAddress(size_t i) const { + return GetSendMapping(i).GetClientAddress(); + } + VAddr GetSendServerAddress(size_t i) const { + return GetSendMapping(i).GetServerAddress(); + } + size_t GetSendSize(size_t i) const { + return GetSendMapping(i).GetSize(); + } + KMemoryState GetSendMemoryState(size_t i) const { + return GetSendMapping(i).GetMemoryState(); + } + + VAddr GetReceiveClientAddress(size_t i) const { + return GetReceiveMapping(i).GetClientAddress(); + } + VAddr GetReceiveServerAddress(size_t i) const { + return GetReceiveMapping(i).GetServerAddress(); + } + size_t GetReceiveSize(size_t i) const { + return GetReceiveMapping(i).GetSize(); + } + KMemoryState GetReceiveMemoryState(size_t i) const { + return GetReceiveMapping(i).GetMemoryState(); + } + + VAddr GetExchangeClientAddress(size_t i) const { + return GetExchangeMapping(i).GetClientAddress(); + } + VAddr GetExchangeServerAddress(size_t i) const { + return GetExchangeMapping(i).GetServerAddress(); + } + size_t GetExchangeSize(size_t i) const { + return GetExchangeMapping(i).GetSize(); + } + KMemoryState GetExchangeMemoryState(size_t i) const { + return GetExchangeMapping(i).GetMemoryState(); + } + + private: + Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index); + + const Mapping& GetSendMapping(size_t i) const { + ASSERT(i < m_num_send); + + const size_t index = i; + if (index < NumStaticMappings) { + return m_static_mappings[index]; + } else { + return m_mappings[index - NumStaticMappings]; + } + } + + const Mapping& GetReceiveMapping(size_t i) const { + ASSERT(i < m_num_recv); + + const size_t index = m_num_send + i; + if (index < NumStaticMappings) { + return m_static_mappings[index]; + } else { + return m_mappings[index - NumStaticMappings]; + } + } + + const Mapping& GetExchangeMapping(size_t i) const { + ASSERT(i < m_num_exch); + + const size_t index = m_num_send + m_num_recv + i; + if (index < NumStaticMappings) { + return m_static_mappings[index]; + } else { + return m_mappings[index - NumStaticMappings]; + } + } + + private: + KernelCore& kernel; + std::array<Mapping, NumStaticMappings> m_static_mappings; + Mapping* m_mappings{}; + u8 m_num_send{}; + u8 m_num_recv{}; + u8 m_num_exch{}; + }; + +public: 
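
// (SessionMappings keeps buffer descriptors in two tiers: the first
// NumStaticMappings (8) live inline in m_static_mappings, and any overflow
// spills into a single KPageBuffer page that PushMap() allocates on demand.
// Every Get*Mapping() accessor reduces to this selection, sketched here with
// a hypothetical flat index:)
//
//     const Mapping& Get(size_t index) const {
//         return index < NumStaticMappings
//                    ? m_static_mappings[index]
//                    : m_mappings[index - NumStaticMappings];
//     }
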
+ explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {} + + static KSessionRequest* Create(KernelCore& kernel) { + KSessionRequest* req = KSessionRequest::Allocate(kernel); + if (req != nullptr) [[likely]] { + KAutoObject::Create(req); + } + return req; + } + + void Destroy() override { + this->Finalize(); + KSessionRequest::Free(kernel, this); + } + + void Initialize(KEvent* event, uintptr_t address, size_t size) { + m_mappings.Initialize(); + + m_thread = GetCurrentThreadPointer(kernel); + m_event = event; + m_address = address; + m_size = size; + + m_thread->Open(); + if (m_event != nullptr) { + m_event->Open(); + } + } + + static void PostDestroy(uintptr_t arg) {} + + KThread* GetThread() const { + return m_thread; + } + KEvent* GetEvent() const { + return m_event; + } + uintptr_t GetAddress() const { + return m_address; + } + size_t GetSize() const { + return m_size; + } + KProcess* GetServerProcess() const { + return m_server; + } + + void SetServerProcess(KProcess* process) { + m_server = process; + m_server->Open(); + } + + void ClearThread() { + m_thread = nullptr; + } + void ClearEvent() { + m_event = nullptr; + } + + size_t GetSendCount() const { + return m_mappings.GetSendCount(); + } + size_t GetReceiveCount() const { + return m_mappings.GetReceiveCount(); + } + size_t GetExchangeCount() const { + return m_mappings.GetExchangeCount(); + } + + Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) { + return m_mappings.PushSend(client, server, size, state); + } + + Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) { + return m_mappings.PushReceive(client, server, size, state); + } + + Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) { + return m_mappings.PushExchange(client, server, size, state); + } + + VAddr GetSendClientAddress(size_t i) const { + return m_mappings.GetSendClientAddress(i); + } + VAddr GetSendServerAddress(size_t i) const { + return m_mappings.GetSendServerAddress(i); + } + size_t GetSendSize(size_t i) const { + return m_mappings.GetSendSize(i); + } + KMemoryState GetSendMemoryState(size_t i) const { + return m_mappings.GetSendMemoryState(i); + } + + VAddr GetReceiveClientAddress(size_t i) const { + return m_mappings.GetReceiveClientAddress(i); + } + VAddr GetReceiveServerAddress(size_t i) const { + return m_mappings.GetReceiveServerAddress(i); + } + size_t GetReceiveSize(size_t i) const { + return m_mappings.GetReceiveSize(i); + } + KMemoryState GetReceiveMemoryState(size_t i) const { + return m_mappings.GetReceiveMemoryState(i); + } + + VAddr GetExchangeClientAddress(size_t i) const { + return m_mappings.GetExchangeClientAddress(i); + } + VAddr GetExchangeServerAddress(size_t i) const { + return m_mappings.GetExchangeServerAddress(i); + } + size_t GetExchangeSize(size_t i) const { + return m_mappings.GetExchangeSize(i); + } + KMemoryState GetExchangeMemoryState(size_t i) const { + return m_mappings.GetExchangeMemoryState(i); + } + +private: + // NOTE: This is public and virtual in Nintendo's kernel. 
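
// (Finalize() below is the teardown half of the reference counting in
// Initialize() and SetServerProcess(): every Open() taken while the request
// was queued must be balanced by exactly one Close() here, or the referenced
// objects leak. Sketch of the pairing, hypothetical call order:)
//
//     request->Initialize(event, address, size);  // Open()s thread and event
//     request->SetServerProcess(process);         // Open()s the process
//     // ... last Close() on the request:
//     request->Close();  // Destroy() -> Finalize() -> Close()s all three
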
+ void Finalize() override { + m_mappings.Finalize(); + + if (m_thread) { + m_thread->Close(); + } + if (m_event) { + m_event->Close(); + } + if (m_server) { + m_server->Close(); + } + } + +private: + SessionMappings m_mappings; + KThread* m_thread{}; + KProcess* m_server{}; + KEvent* m_event{}; + uintptr_t m_address{}; + size_t m_size{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index 8ff1545b6..a039cc591 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp @@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o is_initialized = true; // Clear all pages in the memory. - std::memset(device_memory_.GetPointer(physical_address_), 0, size_); + std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_); return ResultSuccess; } diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h index 34cb98456..5620c3660 100644 --- a/src/core/hle/kernel/k_shared_memory.h +++ b/src/core/hle/kernel/k_shared_memory.h @@ -54,7 +54,7 @@ public: * @return A pointer to the shared memory block from the specified offset */ u8* GetPointer(std::size_t offset = 0) { - return device_memory->GetPointer(physical_address + offset); + return device_memory->GetPointer<u8>(physical_address + offset); } /** @@ -63,7 +63,7 @@ public: * @return A pointer to the shared memory block from the specified offset */ const u8* GetPointer(std::size_t offset = 0) const { - return device_memory->GetPointer(physical_address + offset); + return device_memory->GetPointer<u8>(physical_address + offset); } void Finalize() override; diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h index e43db8515..2bb6b6d08 100644 --- a/src/core/hle/kernel/k_shared_memory_info.h +++ b/src/core/hle/kernel/k_shared_memory_info.h @@ -15,7 +15,8 @@ class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>, public boost::intrusive::list_base_hook<> { public: - explicit KSharedMemoryInfo() = default; + explicit KSharedMemoryInfo(KernelCore&) {} + KSharedMemoryInfo() = default; constexpr void Initialize(KSharedMemory* shmem) { shared_memory = shmem; diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h index 2b303537e..a8c77a7d4 100644 --- a/src/core/hle/kernel/k_slab_heap.h +++ b/src/core/hle/kernel/k_slab_heap.h @@ -8,6 +8,7 @@ #include "common/assert.h" #include "common/common_funcs.h" #include "common/common_types.h" +#include "common/spin_lock.h" namespace Kernel { @@ -36,28 +37,34 @@ public: } void* Allocate() { - Node* ret = m_head.load(); + // KScopedInterruptDisable di; - do { - if (ret == nullptr) { - break; - } - } while (!m_head.compare_exchange_weak(ret, ret->next)); + m_lock.lock(); + + Node* ret = m_head; + if (ret != nullptr) [[likely]] { + m_head = ret->next; + } + m_lock.unlock(); return ret; } void Free(void* obj) { + // KScopedInterruptDisable di; + + m_lock.lock(); + Node* node = static_cast<Node*>(obj); + node->next = m_head; + m_head = node; - Node* cur_head = m_head.load(); - do { - node->next = cur_head; - } while (!m_head.compare_exchange_weak(cur_head, node)); + m_lock.unlock(); } private: std::atomic<Node*> m_head{}; + Common::SpinLock m_lock; }; } // namespace impl diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 174afc80d..b7bfcdce3 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ 
b/src/core/hle/kernel/k_thread.cpp @@ -30,6 +30,7 @@ #include "core/hle/kernel/k_worker_task_manager.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" +#include "core/hle/kernel/svc_types.h" #include "core/hle/result.h" #include "core/memory.h" @@ -38,6 +39,9 @@ #endif namespace { + +constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; + static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, u32 entry_point, u32 arg) { context = {}; @@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack } } - return ResultSuccess; + R_SUCCEED(); } Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, @@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); thread->is_single_core = !Settings::values.use_multi_core.GetValue(); - return ResultSuccess; + R_SUCCEED(); } Result KThread::InitializeDummyThread(KThread* thread) { @@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) { // Initialize emulation parameters. thread->stack_parameters.disable_count = 0; - return ResultSuccess; + R_SUCCEED(); } Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { - return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, - system.GetCpuManager().GetGuestActivateFunc()); + R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, + ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc())); } Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { - return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, - system.GetCpuManager().GetIdleThreadStartFunc()); + R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, + ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc())); } Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, KThreadFunction func, uintptr_t arg, s32 virt_core) { - return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, - system.GetCpuManager().GetShutdownThreadStartFunc()); + R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, + ThreadType::HighPriority, + system.GetCpuManager().GetShutdownThreadStartFunc())); } Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, KProcess* owner) { system.Kernel().GlobalSchedulerContext().AddThread(thread); - return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, - ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); + R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, + ThreadType::User, system.GetCpuManager().GetGuestThreadFunc())); } void KThread::PostDestroy(uintptr_t arg) { @@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { *out_ideal_core = virtual_ideal_core_id; *out_affinity_mask = virtual_affinity_mask; - return ResultSuccess; + R_SUCCEED(); } Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { @@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* 
out_affinity_mask) *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); } - return ResultSuccess; + R_SUCCEED(); } Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { @@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { } while (retry_update); } - return ResultSuccess; + R_SUCCEED(); } void KThread::SetBasePriority(s32 value) { @@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { } while (thread_is_current); } - return ResultSuccess; + R_SUCCEED(); } Result KThread::GetThreadContext3(std::vector<u8>& out) { @@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) { } } - return ResultSuccess; + R_SUCCEED(); } void KThread::AddWaiterImpl(KThread* thread) { @@ -1038,7 +1043,7 @@ Result KThread::Run() { // Set our state and finish. SetState(ThreadState::Runnable); - return ResultSuccess; + R_SUCCEED(); } } @@ -1073,6 +1078,78 @@ void KThread::Exit() { UNREACHABLE_MSG("KThread::Exit() would return"); } +Result KThread::Terminate() { + ASSERT(this != GetCurrentThreadPointer(kernel)); + + // Request the thread terminate if it hasn't already. + if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { + // If the thread isn't terminated, wait for it to terminate. + s32 index; + KSynchronizationObject* objects[] = {this}; + R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, + Svc::WaitInfinite)); + } + + R_SUCCEED(); +} + +ThreadState KThread::RequestTerminate() { + ASSERT(this != GetCurrentThreadPointer(kernel)); + + KScopedSchedulerLock sl{kernel}; + + // Determine if this is the first termination request. + const bool first_request = [&]() -> bool { + // Perform an atomic compare-and-swap from false to true. + bool expected = false; + return termination_requested.compare_exchange_strong(expected, true); + }(); + + // If this is the first request, start termination procedure. + if (first_request) { + // If the thread is in initialized state, just change state to terminated. + if (this->GetState() == ThreadState::Initialized) { + thread_state = ThreadState::Terminated; + return ThreadState::Terminated; + } + + // Register the terminating dpc. + this->RegisterDpc(DpcFlag::Terminating); + + // If the thread is pinned, unpin it. + if (this->GetStackParameters().is_pinned) { + this->GetOwnerProcess()->UnpinThread(this); + } + + // If the thread is suspended, continue it. + if (this->IsSuspended()) { + suspend_allowed_flags = 0; + this->UpdateState(); + } + + // Change the thread's priority to be higher than any system thread's. + if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) { + this->SetBasePriority(TerminatingThreadPriority); + } + + // If the thread is runnable, send a termination interrupt to other cores. + if (this->GetState() == ThreadState::Runnable) { + if (const u64 core_mask = + physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); + core_mask != 0) { + Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); + } + } + + // Wake up the thread. + if (this->GetState() == ThreadState::Waiting) { + wait_queue->CancelWait(this, ResultTerminationRequested, true); + } + } + + return this->GetState(); +} + Result KThread::Sleep(s64 timeout) { ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); ASSERT(this == GetCurrentThreadPointer(kernel)); @@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) { // Check if the thread should terminate. 
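
// (IsTerminationRequested() consults the same termination_requested flag that
// RequestTerminate() above publishes with a compare-and-swap, which is what
// guarantees that only the first requester runs the termination procedure.
// The idiom in isolation:)
//
//     std::atomic<bool> termination_requested{false};
//     bool expected = false;
//     const bool first_request =
//         termination_requested.compare_exchange_strong(expected, true);
//     // true for exactly one caller, false for every later one
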
if (this->IsTerminationRequested()) { slp.CancelSleep(); - return ResultTerminationRequested; + R_THROW(ResultTerminationRequested); } // Wait for the sleep to end. @@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) { SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); } - return ResultSuccess; + R_SUCCEED(); } void KThread::IfDummyThreadTryWait() { diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 9ee20208e..e2a27d603 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -180,6 +180,10 @@ public: void Exit(); + Result Terminate(); + + ThreadState RequestTerminate(); + [[nodiscard]] u32 GetSuspendFlags() const { return suspend_allowed_flags & suspend_request_flags; } diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h index 0a7f22680..5d466ace7 100644 --- a/src/core/hle/kernel/k_thread_local_page.h +++ b/src/core/hle/kernel/k_thread_local_page.h @@ -26,7 +26,7 @@ public: static_assert(RegionsPerPage > 0); public: - constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) { + constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) { m_is_region_free.fill(true); } diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp index 221f341ee..04042bf8f 100644 --- a/src/core/hle/kernel/k_worker_task_manager.cpp +++ b/src/core/hle/kernel/k_worker_task_manager.cpp @@ -23,7 +23,7 @@ void KWorkerTask::DoWorkerTask() { } } -KWorkerTaskManager::KWorkerTaskManager() : m_waiting_thread(1, "yuzu:KWorkerTaskManager") {} +KWorkerTaskManager::KWorkerTaskManager() : m_waiting_thread(1, "KWorkerTaskManager") {} void KWorkerTaskManager::AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task) { ASSERT(type <= WorkerType::Count); diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp deleted file mode 100644 index ff88c5acd..000000000 --- a/src/core/hle/kernel/k_writable_event.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#include "core/hle/kernel/k_event.h" -#include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" - -namespace Kernel { - -KWritableEvent::KWritableEvent(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_} {} - -KWritableEvent::~KWritableEvent() = default; - -void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) { - parent = parent_event_; - name = std::move(name_); - parent->GetReadableEvent().Open(); -} - -Result KWritableEvent::Signal() { - return parent->GetReadableEvent().Signal(); -} - -Result KWritableEvent::Clear() { - return parent->GetReadableEvent().Clear(); -} - -void KWritableEvent::Destroy() { - // Close our references. 
- parent->GetReadableEvent().Close(); - parent->Close(); -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h deleted file mode 100644 index 3fd0c7d0a..000000000 --- a/src/core/hle/kernel/k_writable_event.h +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#pragma once - -#include "core/hle/kernel/k_auto_object.h" -#include "core/hle/kernel/slab_helpers.h" -#include "core/hle/result.h" - -namespace Kernel { - -class KernelCore; -class KEvent; - -class KWritableEvent final - : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> { - KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject); - -public: - explicit KWritableEvent(KernelCore& kernel_); - ~KWritableEvent() override; - - void Destroy() override; - - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} - - void Initialize(KEvent* parent_, std::string&& name_); - Result Signal(); - Result Clear(); - - KEvent* GetParent() const { - return parent; - } - -private: - KEvent* parent{}; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index ce7fa8275..eed2dc9f3 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -24,6 +24,7 @@ #include "core/hardware_properties.h" #include "core/hle/kernel/init/init_slab_setup.h" #include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" @@ -48,7 +49,7 @@ namespace Kernel { struct KernelCore::Impl { explicit Impl(Core::System& system_, KernelCore& kernel_) : time_manager{system_}, - service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {} + service_threads_manager{1, "ServiceThreadsManager"}, system{system_} {} void SetMulticore(bool is_multi) { is_multicore = is_multi; @@ -73,8 +74,16 @@ struct KernelCore::Impl { InitializeMemoryLayout(); Init::InitializeKPageBufferSlabHeap(system); InitializeShutdownThreads(); - InitializePreemption(kernel); InitializePhysicalCores(); + InitializePreemption(kernel); + + // Initialize the Dynamic Slab Heaps. + { + const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); + ASSERT(pt_heap_region.GetEndAddress() != 0); + + InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); + } RegisterHostThread(); } @@ -86,6 +95,15 @@ struct KernelCore::Impl { } } + void CloseCurrentProcess() { + (*current_process).Finalize(); + // current_process->Close(); + // TODO: The current process should be destroyed based on accurate ref counting after + // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. + (*current_process).Destroy(); + current_process = nullptr; + } + void Shutdown() { is_shutting_down.store(true, std::memory_order_relaxed); SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); @@ -99,10 +117,6 @@ struct KernelCore::Impl { next_user_process_id = KProcess::ProcessIDMin; next_thread_id = 1; - for (auto& core : cores) { - core = nullptr; - } - global_handle_table->Finalize(); global_handle_table.reset(); @@ -152,15 +166,7 @@ struct KernelCore::Impl { } } - // Shutdown all processes. 
- if (current_process) { - (*current_process).Finalize(); - // current_process->Close(); - // TODO: The current process should be destroyed based on accurate ref counting after - // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. - (*current_process).Destroy(); - current_process = nullptr; - } + CloseCurrentProcess(); // Track kernel objects that were not freed on shutdown { @@ -257,6 +263,18 @@ struct KernelCore::Impl { system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); } + void InitializeResourceManagers(VAddr address, size_t size) { + dynamic_page_manager = std::make_unique<KDynamicPageManager>(); + memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); + app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); + + dynamic_page_manager->Initialize(address, size); + static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; + memory_block_heap->Initialize(dynamic_page_manager.get(), + ApplicationMemoryBlockSlabHeapSize); + app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); + } + void InitializeShutdownThreads() { for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { shutdown_threads[core_id] = KThread::Create(system.Kernel()); @@ -344,11 +362,6 @@ struct KernelCore::Impl { static inline thread_local KThread* current_thread{nullptr}; KThread* GetCurrentEmuThread() { - // If we are shutting down the kernel, none of this is relevant anymore. - if (IsShuttingDown()) { - return {}; - } - const auto thread_id = GetCurrentHostThreadID(); if (thread_id >= Core::Hardware::NUM_CPU_CORES) { return GetHostDummyThread(); @@ -770,6 +783,11 @@ struct KernelCore::Impl { // Kernel memory management std::unique_ptr<KMemoryManager> memory_manager; + // Dynamic slab managers + std::unique_ptr<KDynamicPageManager> dynamic_page_manager; + std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; + std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; + // Shared memory for services Kernel::KSharedMemory* hid_shared_mem{}; Kernel::KSharedMemory* font_shared_mem{}; @@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const { return impl->current_process; } +void KernelCore::CloseCurrentProcess() { + impl->CloseCurrentProcess(); +} + const std::vector<KProcess*>& KernelCore::GetProcessList() const { return impl->process_list; } @@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const { return *impl->memory_manager; } +KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { + return *impl->app_memory_block_manager; +} + +const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { + return *impl->app_memory_block_manager; +} + Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { return *impl->hid_shared_mem; } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index bcf016a97..266be2bc4 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -37,6 +37,7 @@ class KClientSession; class KEvent; class KHandleTable; class KLinkedListNode; +class KMemoryBlockSlabManager; class KMemoryLayout; class KMemoryManager; class KPageBuffer; @@ -46,13 +47,13 @@ class KResourceLimit; class KScheduler; class KServerSession; class KSession; +class KSessionRequest; class KSharedMemory; class KSharedMemoryInfo; class KThread; class KThreadLocalPage; class KTransferMemory; class KWorkerTaskManager; -class KWritableEvent; class KCodeMemory; class 
PhysicalCore; class ServiceThread; @@ -131,6 +132,9 @@ public: /// Retrieves a const pointer to the current process. const KProcess* CurrentProcess() const; + /// Closes the current process. + void CloseCurrentProcess(); + /// Retrieves the list of processes. const std::vector<KProcess*>& GetProcessList() const; @@ -239,6 +243,12 @@ public: /// Gets the virtual memory manager for the kernel. const KMemoryManager& MemoryManager() const; + /// Gets the application memory block manager for the kernel. + KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); + + /// Gets the application memory block manager for the kernel. + const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; + /// Gets the shared memory object for HID services. Kernel::KSharedMemory& GetHidSharedMem(); @@ -345,14 +355,14 @@ public: return slab_heap_container->thread; } else if constexpr (std::is_same_v<T, KTransferMemory>) { return slab_heap_container->transfer_memory; - } else if constexpr (std::is_same_v<T, KWritableEvent>) { - return slab_heap_container->writeable_event; } else if constexpr (std::is_same_v<T, KCodeMemory>) { return slab_heap_container->code_memory; } else if constexpr (std::is_same_v<T, KPageBuffer>) { return slab_heap_container->page_buffer; } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { return slab_heap_container->thread_local_page; + } else if constexpr (std::is_same_v<T, KSessionRequest>) { + return slab_heap_container->session_request; } } @@ -412,10 +422,10 @@ private: KSlabHeap<KSharedMemoryInfo> shared_memory_info; KSlabHeap<KThread> thread; KSlabHeap<KTransferMemory> transfer_memory; - KSlabHeap<KWritableEvent> writeable_event; KSlabHeap<KCodeMemory> code_memory; KSlabHeap<KPageBuffer> page_buffer; KSlabHeap<KThreadLocalPage> thread_local_page; + KSlabHeap<KSessionRequest> session_request; }; std::unique_ptr<SlabHeapContainer> slab_heap_container; diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index 2e87b4ea4..d23d76706 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -36,7 +36,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std : service_name{name} { for (std::size_t i = 0; i < num_threads; ++i) { threads.emplace_back([this, &kernel](std::stop_token stop_token) { - Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str()); + Common::SetCurrentThreadName(std::string{service_name}.c_str()); // Wait for first request before trying to acquire a render context { diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 299a981a8..06b51e919 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h @@ -24,7 +24,7 @@ public: } static Derived* Allocate(KernelCore& kernel) { - return kernel.SlabHeap<Derived>().Allocate(); + return kernel.SlabHeap<Derived>().Allocate(kernel); } static void Free(KernelCore& kernel, Derived* obj) { diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 27e5a805d..4aca5b27d 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -29,12 +29,12 @@ #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_resource_reservation.h" +#include "core/hle/kernel/k_session.h" #include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" #include 
"core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/k_transfer_memory.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/svc.h" @@ -256,6 +256,93 @@ static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u3 return UnmapMemory(system, dst_addr, src_addr, size); } +template <typename T> +Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) { + auto& process = *system.CurrentProcess(); + auto& handle_table = process.GetHandleTable(); + + // Declare the session we're going to allocate. + T* session; + + // Reserve a new session from the process resource limit. + // FIXME: LimitableResource_SessionCountMax + KScopedResourceReservation session_reservation(&process, LimitableResource::Sessions); + if (session_reservation.Succeeded()) { + session = T::Create(system.Kernel()); + } else { + return ResultLimitReached; + + // // We couldn't reserve a session. Check that we support dynamically expanding the + // // resource limit. + // R_UNLESS(process.GetResourceLimit() == + // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached); + // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached()); + + // // Try to allocate a session from unused slab memory. + // session = T::CreateFromUnusedSlabMemory(); + // R_UNLESS(session != nullptr, ResultLimitReached); + // ON_RESULT_FAILURE { session->Close(); }; + + // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to + // // prevent request exhaustion. + // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's + // // no reason to not do this statically. + // if constexpr (std::same_as<T, KSession>) { + // for (size_t i = 0; i < 2; i++) { + // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory(); + // R_UNLESS(request != nullptr, ResultLimitReached); + // request->Close(); + // } + // } + + // We successfully allocated a session, so add the object we allocated to the resource + // limit. + // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::Sessions, 1); + } + + // Check that we successfully created a session. + R_UNLESS(session != nullptr, ResultOutOfResource); + + // Initialize the session. + session->Initialize(nullptr, fmt::format("{}", name)); + + // Commit the session reservation. + session_reservation.Commit(); + + // Ensure that we clean up the session (and its only references are handle table) on function + // end. + SCOPE_EXIT({ + session->GetClientSession().Close(); + session->GetServerSession().Close(); + }); + + // Register the session. + T::Register(system.Kernel(), session); + + // Add the server session to the handle table. + R_TRY(handle_table.Add(out_server, &session->GetServerSession())); + + // Add the client session to the handle table. + const auto result = handle_table.Add(out_client, &session->GetClientSession()); + + if (!R_SUCCEEDED(result)) { + // Ensure that we maintaing a clean handle state on exit. 
+ handle_table.Remove(*out_server); + } + + return result; +} + +static Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, + u32 is_light, u64 name) { + if (is_light) { + // return CreateSession<KLightSession>(system, out_server, out_client, name); + return ResultUnknown; + } else { + return CreateSession<KSession>(system, out_server, out_client, name); + } +} + /// Connect to an OS service given the port name, returns the handle to the port to out static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) { auto& memory = system.Memory(); @@ -295,7 +382,8 @@ static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_n // Create a session. KClientSession* session{}; - R_TRY(port->CreateSession(std::addressof(session))); + R_TRY(port->CreateSession(std::addressof(session), + std::make_shared<SessionRequestManager>(kernel))); port->Close(); // Register the session in the table, close the extra reference. @@ -313,7 +401,7 @@ static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle, return ConnectToNamedPort(system, out_handle, port_name_address); } -/// Makes a blocking IPC call to an OS service. +/// Makes a blocking IPC call to a service. static Result SendSyncRequest(Core::System& system, Handle handle) { auto& kernel = system.Kernel(); @@ -327,22 +415,75 @@ static Result SendSyncRequest(Core::System& system, Handle handle) { LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); - { - KScopedSchedulerLock lock(kernel); - - // This is a synchronous request, so we should wait for our request to complete. - GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue)); - GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); - session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming()); - } - - return GetCurrentThread(kernel).GetWaitResult(); + return session->SendSyncRequest(); } static Result SendSyncRequest32(Core::System& system, Handle handle) { return SendSyncRequest(system, handle); } +static Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles, + s32 num_handles, Handle reply_target, s64 timeout_ns) { + auto& kernel = system.Kernel(); + auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable(); + + // Convert handle list to object table. + std::vector<KSynchronizationObject*> objs(num_handles); + R_UNLESS( + handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles), + ResultInvalidHandle); + + // Ensure handles are closed when we're done. + SCOPE_EXIT({ + for (auto i = 0; i < num_handles; ++i) { + objs[i]->Close(); + } + }); + + // Reply to the target, if one is specified. + if (reply_target != InvalidHandle) { + KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target); + R_UNLESS(session.IsNotNull(), ResultInvalidHandle); + + // If we fail to reply, we want to set the output index to -1. + // ON_RESULT_FAILURE { *out_index = -1; }; + + // Send the reply. + // R_TRY(session->SendReply()); + + Result rc = session->SendReply(); + if (!R_SUCCEEDED(rc)) { + *out_index = -1; + return rc; + } + } + + // Wait for a message. + while (true) { + // Wait for an object. + s32 index; + Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(), + static_cast<s32>(objs.size()), timeout_ns); + if (result == ResultTimedOut) { + return result; + } + + // Receive the request. 
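
// (A successful wake-up here does not guarantee a message: another server
// thread may have claimed the request first, in which case ReceiveRequest()
// fails with ResultNotFound and the loop simply waits again instead of
// surfacing an error. The retry shape, sketched with hypothetical helpers:)
//
//     while (true) {
//         wait_for_any_signal();
//         if (try_receive() == ResultNotFound) {
//             continue;  // lost the race; go back to waiting
//         }
//         break;  // got a request, or a genuine error to report
//     }
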
+ if (R_SUCCEEDED(result)) { + KServerSession* session = objs[index]->DynamicCast<KServerSession*>(); + if (session != nullptr) { + result = session->ReceiveRequest(); + if (result == ResultNotFound) { + continue; + } + } + } + + *out_index = index; + return result; + } +} + /// Get the ID for the specified thread. static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { // Get the thread from its handle. @@ -610,8 +751,8 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { } system.GetReporter().SaveSvcBreakReport( - static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger, info1, - info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); + static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger.As<bool>(), + info1, info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); if (!break_reason.signal_debugger) { LOG_CRITICAL( @@ -792,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han return ResultSuccess; case GetInfoType::UserExceptionContextAddr: - *result = process->GetTLSRegionAddress(); + *result = process->GetProcessLocalRegionAddress(); return ResultSuccess; case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: @@ -1747,7 +1888,7 @@ static void ExitProcess(Core::System& system) { auto* current_process = system.Kernel().CurrentProcess(); LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); - ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, + ASSERT_MSG(current_process->GetState() == KProcess::State::Running, "Process has already exited"); system.Exit(); @@ -2303,11 +2444,11 @@ static Result SignalEvent(Core::System& system, Handle event_handle) { // Get the current handle table. const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - // Get the writable event. - KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); - R_UNLESS(writable_event.IsNotNull(), ResultInvalidHandle); + // Get the event. + KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); + R_UNLESS(event.IsNotNull(), ResultInvalidHandle); - return writable_event->Signal(); + return event->Signal(); } static Result SignalEvent32(Core::System& system, Handle event_handle) { @@ -2322,9 +2463,9 @@ static Result ClearEvent(Core::System& system, Handle event_handle) { // Try to clear the writable event. { - KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); - if (writable_event.IsNotNull()) { - return writable_event->Clear(); + KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); + if (event.IsNotNull()) { + return event->Clear(); } } @@ -2362,24 +2503,24 @@ static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_r R_UNLESS(event != nullptr, ResultOutOfResource); // Initialize the event. - event->Initialize("CreateEvent", kernel.CurrentProcess()); + event->Initialize(kernel.CurrentProcess()); // Commit the thread reservation. event_reservation.Commit(); // Ensure that we clean up the event (and its only references are handle table) on function end. SCOPE_EXIT({ - event->GetWritableEvent().Close(); event->GetReadableEvent().Close(); + event->Close(); }); // Register the event. KEvent::Register(kernel, event); - // Add the writable event to the handle table. 
- R_TRY(handle_table.Add(out_write, std::addressof(event->GetWritableEvent()))); + // Add the event to the handle table. + R_TRY(handle_table.Add(out_write, event)); - // Add the writable event to the handle table. + // Ensure that we maintain a clean handle state on exit. auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); }); // Add the readable event to the handle table. @@ -2416,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand return ResultInvalidEnumValue; } - *out = static_cast<u64>(process->GetStatus()); + *out = static_cast<u64>(process->GetState()); return ResultSuccess; } @@ -2860,10 +3001,10 @@ static const FunctionDef SVC_Table_64[] = { {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"}, {0x3E, nullptr, "Unknown3e"}, {0x3F, nullptr, "Unknown3f"}, - {0x40, nullptr, "CreateSession"}, + {0x40, SvcWrap64<CreateSession>, "CreateSession"}, {0x41, nullptr, "AcceptSession"}, {0x42, nullptr, "ReplyAndReceiveLight"}, - {0x43, nullptr, "ReplyAndReceive"}, + {0x43, SvcWrap64<ReplyAndReceive>, "ReplyAndReceive"}, {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"}, {0x45, SvcWrap64<CreateEvent>, "CreateEvent"}, {0x46, nullptr, "MapIoRegion"}, diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h index 95750c3eb..85506710e 100644 --- a/src/core/hle/kernel/svc_common.h +++ b/src/core/hle/kernel/svc_common.h @@ -14,8 +14,11 @@ namespace Kernel::Svc { using namespace Common::Literals; -constexpr s32 ArgumentHandleCountMax = 0x40; -constexpr u32 HandleWaitMask{1u << 30}; +constexpr inline s32 ArgumentHandleCountMax = 0x40; + +constexpr inline u32 HandleWaitMask = 1u << 30; + +constexpr inline s64 WaitInfinite = -1; constexpr inline std::size_t HeapSizeAlignment = 2_MiB; diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 79e15183a..abb9847fe 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3; constexpr inline s32 LowestThreadPriority = 63; constexpr inline s32 HighestThreadPriority = 0; +constexpr inline s32 SystemThreadPriorityHighest = 16; + +enum class ProcessState : u32 { + Created = 0, + CreatedAttached = 1, + Running = 2, + Crashed = 3, + RunningAttached = 4, + Terminating = 5, + Terminated = 6, + DebugBreak = 7, +}; + constexpr inline size_t ThreadLocalRegionSize = 0x200; } // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 4bc49087e..272c54cf7 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -8,6 +8,7 @@ #include "core/core.h" #include "core/hle/kernel/svc_types.h" #include "core/hle/result.h" +#include "core/memory.h" namespace Kernel { @@ -346,6 +347,37 @@ void SvcWrap64(Core::System& system) { FuncReturn(system, retval); } +// Used by CreateSession +template <Result func(Core::System&, Handle*, Handle*, u32, u64)> +void SvcWrap64(Core::System& system) { + Handle param_1 = 0; + Handle param_2 = 0; + const u32 retval = func(system, &param_1, &param_2, static_cast<u32>(Param(system, 2)), + Param(system, 3)) + .raw; + + system.CurrentArmInterface().SetReg(1, param_1); + system.CurrentArmInterface().SetReg(2, param_2); + FuncReturn(system, retval); +} + +// Used by ReplyAndReceive +template <Result func(Core::System&, s32*, Handle*, s32, Handle, s64)> +void SvcWrap64(Core::System& system) { + s32 param_1 = 0; + s32 num_handles = static_cast<s32>(Param(system, 2));
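+ // Param(system, n) reads guest register Xn: X1 holds the user pointer to the
+ // handle array, X2 the handle count, X3 the reply target, and X4 the timeout;
+ // the signalled index is written back through X1 below. A hardening sketch
+ // (an assumption, not part of this change) would clamp the count against
+ // Svc::ArgumentHandleCountMax before sizing the vector:
+ //
+ //     if (num_handles < 0 || num_handles > Svc::ArgumentHandleCountMax) {
+ //         FuncReturn(system, ResultOutOfRange.raw);
+ //         return;
+ //     }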
+ + std::vector<Handle> handles(num_handles); + system.Memory().ReadBlock(Param(system, 1), handles.data(), num_handles * sizeof(Handle)); + + const u32 retval = func(system, &param_1, handles.data(), num_handles, + static_cast<s32>(Param(system, 3)), static_cast<s64>(Param(system, 4))) + .raw; + + system.CurrentArmInterface().SetReg(1, param_1); + FuncReturn(system, retval); +} + // Used by WaitForAddress template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)> void SvcWrap64(Core::System& system) { diff --git a/src/core/hle/result.h b/src/core/hle/result.h index 4de44cd06..ef4b2d417 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h @@ -5,6 +5,7 @@ #include "common/assert.h" #include "common/bit_field.h" +#include "common/common_funcs.h" #include "common/common_types.h" #include "common/expected.h" @@ -117,6 +118,7 @@ union Result { BitField<0, 9, ErrorModule> module; BitField<9, 13, u32> description; + Result() = default; constexpr explicit Result(u32 raw_) : raw(raw_) {} constexpr Result(ErrorModule module_, u32 description_) @@ -129,7 +131,20 @@ union Result { [[nodiscard]] constexpr bool IsError() const { return !IsSuccess(); } + + [[nodiscard]] constexpr bool IsFailure() const { + return !IsSuccess(); + } + + [[nodiscard]] constexpr u32 GetInnerValue() const { + return static_cast<u32>(module.Value()) | (description << module.bits); + } + + [[nodiscard]] constexpr bool Includes(Result result) const { + return GetInnerValue() == result.GetInnerValue(); + } }; +static_assert(std::is_trivial_v<Result>); [[nodiscard]] constexpr bool operator==(const Result& a, const Result& b) { return a.raw == b.raw; @@ -347,19 +362,115 @@ private: } \ } while (false) -#define R_SUCCEEDED(res) (res.IsSuccess()) +#define R_SUCCEEDED(res) (static_cast<Result>(res).IsSuccess()) +#define R_FAILED(res) (static_cast<Result>(res).IsFailure()) -/// Evaluates a boolean expression, and succeeds if that expression is true.
-#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) +namespace ResultImpl { +template <auto EvaluateResult, class F> +class ScopedResultGuard { + YUZU_NON_COPYABLE(ScopedResultGuard); + YUZU_NON_MOVEABLE(ScopedResultGuard); + +private: + Result& m_ref; + F m_f; + +public: + constexpr ScopedResultGuard(Result& ref, F f) : m_ref(ref), m_f(std::move(f)) {} + constexpr ~ScopedResultGuard() { + if (EvaluateResult(m_ref)) { + m_f(); + } + } +}; + +template <auto EvaluateResult> +class ResultReferenceForScopedResultGuard { +private: + Result& m_ref; + +public: + constexpr ResultReferenceForScopedResultGuard(Result& r) : m_ref(r) {} + constexpr operator Result&() const { + return m_ref; + } +}; + +template <auto EvaluateResult, typename F> +constexpr ScopedResultGuard<EvaluateResult, F> operator+( + ResultReferenceForScopedResultGuard<EvaluateResult> ref, F&& f) { + return ScopedResultGuard<EvaluateResult, F>(static_cast<Result&>(ref), std::forward<F>(f)); +} + +constexpr bool EvaluateResultSuccess(const Result& r) { + return R_SUCCEEDED(r); +} +constexpr bool EvaluateResultFailure(const Result& r) { + return R_FAILED(r); +} + +template <typename T> +constexpr void UpdateCurrentResultReference(T result_reference, Result result) = delete; +// Intentionally not defined + +template <> +constexpr void UpdateCurrentResultReference<Result&>(Result& result_reference, Result result) { + result_reference = result; +} + +template <> +constexpr void UpdateCurrentResultReference<const Result>(Result result_reference, Result result) {} +} // namespace ResultImpl + +#define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \ + [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \ + std::same_as<decltype(__TmpCurrentResultReference), Result&>; \ + [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \ + [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \ + Result& __TmpCurrentResultReference = \ + HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE + +#define ON_RESULT_RETURN_IMPL(...) \ + static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \ + auto RESULT_GUARD_STATE_##__COUNTER__ = \ + ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \ + __TmpCurrentResultReference) + \ + [&]() + +#define ON_RESULT_FAILURE_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultFailure) + +#define ON_RESULT_FAILURE \ + DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \ + ON_RESULT_FAILURE_2 + +#define ON_RESULT_SUCCESS_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultSuccess) + +#define ON_RESULT_SUCCESS \ + DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \ + ON_RESULT_SUCCESS_2 + +constexpr inline Result __TmpCurrentResultReference = ResultSuccess; + +/// Returns a result. +#define R_RETURN(res_expr) \ + { \ + const Result _tmp_r_throw_rc = (res_expr); \ + ResultImpl::UpdateCurrentResultReference<decltype(__TmpCurrentResultReference)>( \ + __TmpCurrentResultReference, _tmp_r_throw_rc); \ + return _tmp_r_throw_rc; \ + } + +/// Returns ResultSuccess() +#define R_SUCCEED() R_RETURN(ResultSuccess) + +/// Throws a result. +#define R_THROW(res_expr) R_RETURN(res_expr) /// Evaluates a boolean expression, and returns a result unless that expression is true. 
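A minimal usage sketch of the guard and control-flow macros defined above; KObject, its Create/Register helpers, and the exact results are illustrative stand-ins, not names taken from this change:

Result CreateAndRegister(KernelCore& kernel, KObject** out_obj) {
    // Allocate the object, failing with a kernel-style result on exhaustion.
    KObject* obj = KObject::Create(kernel);
    R_UNLESS(obj != nullptr, ResultOutOfResource);

    // From here on, any failing R_TRY/R_THROW runs this cleanup automatically:
    // ON_RESULT_FAILURE captures __TmpCurrentResultReference, which the
    // R_RETURN family updates before returning.
    ON_RESULT_FAILURE { obj->Close(); };

    // Propagate registration failures (and trigger the guard above on error).
    R_TRY(KObject::Register(kernel, obj));

    *out_obj = obj;
    R_SUCCEED();
}

Note also that GetInnerValue()/Includes() compare only the module/description pair (bits 0-21), ignoring the reserved high bits of the raw value.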
#define R_UNLESS(expr, res) \ { \ if (!(expr)) { \ - if (res.IsError()) { \ - LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \ - } \ - return res; \ + R_THROW(res); \ } \ } @@ -367,7 +478,10 @@ private: #define R_TRY(res_expr) \ { \ const auto _tmp_r_try_rc = (res_expr); \ - if (_tmp_r_try_rc.IsError()) { \ - return _tmp_r_try_rc; \ + if (R_FAILED(_tmp_r_try_rc)) { \ + R_THROW(_tmp_r_try_rc); \ } \ } + +/// Evaluates a boolean expression, and succeeds if that expression is true. +#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) diff --git a/src/core/hle/service/acc/async_context.cpp b/src/core/hle/service/acc/async_context.cpp index c85b2e43a..713689d8f 100644 --- a/src/core/hle/service/acc/async_context.cpp +++ b/src/core/hle/service/acc/async_context.cpp @@ -64,7 +64,7 @@ void IAsyncContext::GetResult(Kernel::HLERequestContext& ctx) { void IAsyncContext::MarkComplete() { is_complete.store(true); - completion_event->GetWritableEvent().Signal(); + completion_event->Signal(); } } // namespace Service::Account diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index 6fb7e198e..e55233054 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -316,7 +316,7 @@ ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nv accumulated_suspended_tick_changed_event = service_context.CreateEvent("ISelfController:AccumulatedSuspendedTickChangedEvent"); - accumulated_suspended_tick_changed_event->GetWritableEvent().Signal(); + accumulated_suspended_tick_changed_event->Signal(); } ISelfController::~ISelfController() { @@ -378,7 +378,7 @@ void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) { void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_AM, "(STUBBED) called"); - launchable_event->GetWritableEvent().Signal(); + launchable_event->Signal(); IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(ResultSuccess); @@ -618,18 +618,18 @@ Kernel::KReadableEvent& AppletMessageQueue::GetOperationModeChangedEvent() { void AppletMessageQueue::PushMessage(AppletMessage msg) { messages.push(msg); - on_new_message->GetWritableEvent().Signal(); + on_new_message->Signal(); } AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() { if (messages.empty()) { - on_new_message->GetWritableEvent().Clear(); + on_new_message->Clear(); return AppletMessage::None; } auto msg = messages.front(); messages.pop(); if (messages.empty()) { - on_new_message->GetWritableEvent().Clear(); + on_new_message->Clear(); } return msg; } @@ -653,7 +653,7 @@ void AppletMessageQueue::FocusStateChanged() { void AppletMessageQueue::OperationModeChanged() { PushMessage(AppletMessage::OperationModeChanged); PushMessage(AppletMessage::PerformanceModeChanged); - on_operation_mode_changed->GetWritableEvent().Signal(); + on_operation_mode_changed->Signal(); } ICommonStateGetter::ICommonStateGetter(Core::System& system_, diff --git a/src/core/hle/service/am/applets/applet_mii_edit_types.h b/src/core/hle/service/am/applets/applet_mii_edit_types.h index 1b145b696..4705d019f 100644 --- a/src/core/hle/service/am/applets/applet_mii_edit_types.h +++ b/src/core/hle/service/am/applets/applet_mii_edit_types.h @@ -32,7 +32,7 @@ enum class MiiEditResult : u32 { }; struct MiiEditCharInfo { - Service::Mii::MiiInfo mii_info{}; + Service::Mii::CharInfo mii_info{}; }; static_assert(sizeof(MiiEditCharInfo) == 0x58, "MiiEditCharInfo has incorrect size."); diff --git 
a/src/core/hle/service/am/applets/applets.cpp b/src/core/hle/service/am/applets/applets.cpp index b5b8e4cad..7062df21c 100644 --- a/src/core/hle/service/am/applets/applets.cpp +++ b/src/core/hle/service/am/applets/applets.cpp @@ -65,7 +65,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopNormalDataToGame() { auto out = std::move(out_channel.front()); out_channel.pop_front(); - pop_out_data_event->GetWritableEvent().Clear(); + pop_out_data_event->Clear(); return out; } @@ -84,7 +84,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopInteractiveDataToGame() { auto out = std::move(out_interactive_channel.front()); out_interactive_channel.pop_front(); - pop_interactive_out_data_event->GetWritableEvent().Clear(); + pop_interactive_out_data_event->Clear(); return out; } @@ -103,7 +103,7 @@ void AppletDataBroker::PushNormalDataFromGame(std::shared_ptr<IStorage>&& storag void AppletDataBroker::PushNormalDataFromApplet(std::shared_ptr<IStorage>&& storage) { out_channel.emplace_back(std::move(storage)); - pop_out_data_event->GetWritableEvent().Signal(); + pop_out_data_event->Signal(); } void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage) { @@ -112,11 +112,11 @@ void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& s void AppletDataBroker::PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage) { out_interactive_channel.emplace_back(std::move(storage)); - pop_interactive_out_data_event->GetWritableEvent().Signal(); + pop_interactive_out_data_event->Signal(); } void AppletDataBroker::SignalStateChanged() { - state_changed_event->GetWritableEvent().Signal(); + state_changed_event->Signal(); switch (applet_mode) { case LibraryAppletMode::AllForeground: diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h index e78a57657..12c6a5b1a 100644 --- a/src/core/hle/service/am/applets/applets.h +++ b/src/core/hle/service/am/applets/applets.h @@ -164,7 +164,7 @@ protected: u32_le size; u32_le library_version; u32_le theme_color; - u8 play_startup_sound; + bool play_startup_sound; u64_le system_tick; }; static_assert(sizeof(CommonArguments) == 0x20, "CommonArguments has incorrect size."); diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp index 4a2ae5f88..5abf22ba4 100644 --- a/src/core/hle/service/audio/audctl.cpp +++ b/src/core/hle/service/audio/audctl.cpp @@ -45,9 +45,25 @@ AudCtl::AudCtl(Core::System& system_) : ServiceFramework{system_, "audctl"} { {32, nullptr, "GetActiveOutputTarget"}, {33, nullptr, "GetTargetDeviceInfo"}, {34, nullptr, "AcquireTargetNotification"}, + {35, nullptr, "SetHearingProtectionSafeguardTimerRemainingTimeForDebug"}, + {36, nullptr, "GetHearingProtectionSafeguardTimerRemainingTimeForDebug"}, + {37, nullptr, "SetHearingProtectionSafeguardEnabled"}, + {38, nullptr, "IsHearingProtectionSafeguardEnabled"}, + {39, nullptr, "IsHearingProtectionSafeguardMonitoringOutputForDebug"}, + {40, nullptr, "GetSystemInformationForDebug"}, + {41, nullptr, "SetVolumeButtonLongPressTime"}, + {42, nullptr, "SetNativeVolumeForDebug"}, {10000, nullptr, "NotifyAudioOutputTargetForPlayReport"}, {10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"}, {10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"}, + {10100, nullptr, "GetAudioVolumeDataForPlayReport"}, + {10101, nullptr, "BindAudioVolumeUpdateEventForPlayReport"}, + {10102, nullptr, "BindAudioOutputTargetUpdateEventForPlayReport"}, + {10103, nullptr, 
"GetAudioOutputTargetForPlayReport"}, + {10104, nullptr, "GetAudioOutputChannelCountForPlayReport"}, + {10105, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"}, + {10106, nullptr, "GetDefaultAudioOutputTargetForPlayReport"}, + {50000, nullptr, "SetAnalogInputBoostGainForPrototyping"}, }; // clang-format on diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index a44dd842a..49c092301 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp @@ -246,9 +246,8 @@ void AudOutU::ListAudioOuts(Kernel::HLERequestContext& ctx) { const auto write_count = static_cast<u32>(ctx.GetWriteBufferSize() / sizeof(AudioDevice::AudioDeviceName)); std::vector<AudioDevice::AudioDeviceName> device_names{}; - std::string print_names{}; if (write_count > 0) { - device_names.push_back(AudioDevice::AudioDeviceName("DeviceOut")); + device_names.emplace_back("DeviceOut"); LOG_DEBUG(Service_Audio, "called. \nName=DeviceOut"); } else { LOG_DEBUG(Service_Audio, "called. Empty buffer passed in."); diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index bc69117c6..13423dca6 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp @@ -52,6 +52,8 @@ public: {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"}, {10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"}, {11, nullptr, "ExecuteAudioRendererRendering"}, + {12, &IAudioRenderer::SetVoiceDropParameter, "SetVoiceDropParameter"}, + {13, &IAudioRenderer::GetVoiceDropParameter, "GetVoiceDropParameter"}, }; // clang-format on RegisterHandlers(functions); @@ -205,6 +207,30 @@ private: LOG_DEBUG(Service_Audio, "called"); } + void SetVoiceDropParameter(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_Audio, "called"); + + IPC::RequestParser rp{ctx}; + auto voice_drop_param{rp.Pop<f32>()}; + + auto& system_ = impl->GetSystem(); + system_.SetVoiceDropParameter(voice_drop_param); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); + } + + void GetVoiceDropParameter(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_Audio, "called"); + + auto& system_ = impl->GetSystem(); + auto voice_drop_param{system_.GetVoiceDropParameter()}; + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(voice_drop_param); + } + KernelHelpers::ServiceContext service_context; Kernel::KEvent* rendered_event; Manager& manager; @@ -239,7 +265,7 @@ public: }; RegisterHandlers(functions); - event->GetWritableEvent().Signal(); + event->Signal(); } ~IAudioDevice() override { @@ -252,7 +278,7 @@ private: std::vector<AudioDevice::AudioDeviceName> out_names{}; - u32 out_count = impl->ListAudioDeviceName(out_names, in_count); + const u32 out_count = impl->ListAudioDeviceName(out_names, in_count); std::string out{}; for (u32 i = 0; i < out_count; i++) { @@ -325,7 +351,7 @@ private: void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "(STUBBED) called"); - event->GetWritableEvent().Signal(); + event->Signal(); IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(ResultSuccess); @@ -365,7 +391,7 @@ private: std::vector<AudioDevice::AudioDeviceName> out_names{}; - u32 out_count = impl->ListAudioOutputDeviceName(out_names, in_count); + const u32 out_count = impl->ListAudioOutputDeviceName(out_names, in_count); std::string out{}; for (u32 i = 0; i < out_count; i++) { diff --git a/src/core/hle/service/audio/hwopus.cpp 
b/src/core/hle/service/audio/hwopus.cpp index 4f2ed2d52..8bafc3a98 100644 --- a/src/core/hle/service/audio/hwopus.cpp +++ b/src/core/hle/service/audio/hwopus.cpp @@ -255,6 +255,32 @@ void HwOpus::GetWorkBufferSizeEx(Kernel::HLERequestContext& ctx) { GetWorkBufferSize(ctx); } +void HwOpus::GetWorkBufferSizeForMultiStreamEx(Kernel::HLERequestContext& ctx) { + OpusMultiStreamParametersEx param; + std::memcpy(&param, ctx.ReadBuffer().data(), ctx.GetReadBufferSize()); + + const auto sample_rate = param.sample_rate; + const auto channel_count = param.channel_count; + const auto number_streams = param.number_streams; + const auto number_stereo_streams = param.number_stereo_streams; + + LOG_DEBUG( + Audio, + "called with sample_rate={}, channel_count={}, number_streams={}, number_stereo_streams={}", + sample_rate, channel_count, number_streams, number_stereo_streams); + + ASSERT_MSG(sample_rate == 48000 || sample_rate == 24000 || sample_rate == 16000 || + sample_rate == 12000 || sample_rate == 8000, + "Invalid sample rate"); + + const u32 worker_buffer_sz = + static_cast<u32>(opus_multistream_decoder_get_size(number_streams, number_stereo_streams)); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push<u32>(worker_buffer_sz); +} + void HwOpus::OpenHardwareOpusDecoder(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const auto sample_rate = rp.Pop<u32>(); @@ -335,7 +361,7 @@ HwOpus::HwOpus(Core::System& system_) : ServiceFramework{system_, "hwopus"} { {4, &HwOpus::OpenHardwareOpusDecoderEx, "OpenHardwareOpusDecoderEx"}, {5, &HwOpus::GetWorkBufferSizeEx, "GetWorkBufferSizeEx"}, {6, nullptr, "OpenHardwareOpusDecoderForMultiStreamEx"}, - {7, nullptr, "GetWorkBufferSizeForMultiStreamEx"}, + {7, &HwOpus::GetWorkBufferSizeForMultiStreamEx, "GetWorkBufferSizeForMultiStreamEx"}, }; RegisterHandlers(functions); } diff --git a/src/core/hle/service/audio/hwopus.h b/src/core/hle/service/audio/hwopus.h index 265dd0cc6..e6092e290 100644 --- a/src/core/hle/service/audio/hwopus.h +++ b/src/core/hle/service/audio/hwopus.h @@ -11,6 +11,16 @@ class System; namespace Service::Audio { +struct OpusMultiStreamParametersEx { + u32 sample_rate; + u32 channel_count; + u32 number_streams; + u32 number_stereo_streams; + u32 use_large_frame_size; + u32 padding; + std::array<u32, 64> channel_mappings; +}; + class HwOpus final : public ServiceFramework<HwOpus> { public: explicit HwOpus(Core::System& system_); @@ -21,6 +31,7 @@ private: void OpenHardwareOpusDecoderEx(Kernel::HLERequestContext& ctx); void GetWorkBufferSize(Kernel::HLERequestContext& ctx); void GetWorkBufferSizeEx(Kernel::HLERequestContext& ctx); + void GetWorkBufferSizeForMultiStreamEx(Kernel::HLERequestContext& ctx); }; } // namespace Service::Audio diff --git a/src/core/hle/service/bcat/backend/backend.cpp b/src/core/hle/service/bcat/backend/backend.cpp index cd0b405ff..847f76987 100644 --- a/src/core/hle/service/bcat/backend/backend.cpp +++ b/src/core/hle/service/bcat/backend/backend.cpp @@ -82,7 +82,7 @@ void ProgressServiceBackend::FinishDownload(Result result) { } void ProgressServiceBackend::SignalUpdate() { - update_event->GetWritableEvent().Signal(); + update_event->Signal(); } Backend::Backend(DirectoryGetter getter) : dir_getter(std::move(getter)) {} diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp index e23eae36a..c08274ef9 100644 --- a/src/core/hle/service/filesystem/fsp_srv.cpp +++ b/src/core/hle/service/filesystem/fsp_srv.cpp @@ -707,7 +707,7 @@
FSP_SRV::FSP_SRV(Core::System& system_) {31, nullptr, "OpenGameCardFileSystem"}, {32, nullptr, "ExtendSaveDataFileSystem"}, {33, nullptr, "DeleteCacheStorage"}, - {34, nullptr, "GetCacheStorageSize"}, + {34, &FSP_SRV::GetCacheStorageSize, "GetCacheStorageSize"}, {35, nullptr, "CreateSaveDataFileSystemByHashSalt"}, {36, nullptr, "OpenHostFileSystemWithOption"}, {51, &FSP_SRV::OpenSaveDataFileSystem, "OpenSaveDataFileSystem"}, @@ -1107,6 +1107,18 @@ void FSP_SRV::GetProgramIndexForAccessLog(Kernel::HLERequestContext& ctx) { rb.Push(access_log_program_index); } +void FSP_SRV::GetCacheStorageSize(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto index{rp.Pop<s32>()}; + + LOG_WARNING(Service_FS, "(STUBBED) called with index={}", index); + + IPC::ResponseBuilder rb{ctx, 6}; + rb.Push(ResultSuccess); + rb.Push(s64{0}); + rb.Push(s64{0}); +} + class IMultiCommitManager final : public ServiceFramework<IMultiCommitManager> { public: explicit IMultiCommitManager(Core::System& system_) diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h index 36f552e05..3d88b97f9 100644 --- a/src/core/hle/service/filesystem/fsp_srv.h +++ b/src/core/hle/service/filesystem/fsp_srv.h @@ -54,6 +54,7 @@ private: void OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx); void GetProgramIndexForAccessLog(Kernel::HLERequestContext& ctx); void OpenMultiCommitManager(Kernel::HLERequestContext& ctx); + void GetCacheStorageSize(Kernel::HLERequestContext& ctx); FileSystemController& fsc; const FileSys::ContentProvider& content_provider; diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp index e0db787fc..fad532115 100644 --- a/src/core/hle/service/friend/friend.cpp +++ b/src/core/hle/service/friend/friend.cpp @@ -26,7 +26,7 @@ public: {10101, &IFriendService::GetFriendList, "GetFriendList"}, {10102, nullptr, "UpdateFriendInfo"}, {10110, nullptr, "GetFriendProfileImage"}, - {10120, nullptr, "IsFriendListCacheAvailable"}, + {10120, &IFriendService::CheckFriendListAvailability, "CheckFriendListAvailability"}, {10121, nullptr, "EnsureFriendListAvailable"}, {10200, nullptr, "SendFriendRequestForApplication"}, {10211, nullptr, "AddFacedFriendRequestForApplication"}, @@ -194,6 +194,17 @@ private: // TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId" } + void CheckFriendListAvailability(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto uuid{rp.PopRaw<Common::UUID>()}; + + LOG_WARNING(Service_Friend, "(STUBBED) called, uuid=0x{}", uuid.RawString()); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(true); + } + KernelHelpers::ServiceContext service_context; Kernel::KEvent* completion_event; diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index cb29004e8..3b26e96de 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp @@ -16,7 +16,6 @@ #include "core/hid/hid_core.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/service/hid/controllers/npad.h" #include "core/hle/service/hid/errors.h" #include "core/hle/service/kernel_helpers.h" @@ -167,7 +166,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) { const auto& battery_level = controller.device->GetBattery(); auto* shared_memory = controller.shared_memory; if 
(controller_type == Core::HID::NpadStyleIndex::None) { - controller.styleset_changed_event->GetWritableEvent().Signal(); + controller.styleset_changed_event->Signal(); return; } @@ -660,7 +659,6 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing ASSERT(false); break; case Core::HID::NpadStyleIndex::ProController: - case Core::HID::NpadStyleIndex::Pokeball: set_motion_state(sixaxis_fullkey_state, motion_state[0]); break; case Core::HID::NpadStyleIndex::Handheld: @@ -676,6 +674,11 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing case Core::HID::NpadStyleIndex::JoyconRight: set_motion_state(sixaxis_right_lifo_state, motion_state[1]); break; + case Core::HID::NpadStyleIndex::Pokeball: + using namespace std::literals::chrono_literals; + set_motion_state(sixaxis_fullkey_state, motion_state[0]); + sixaxis_fullkey_state.delta_time = std::chrono::nanoseconds(15ms).count(); + break; default: break; } @@ -742,8 +745,9 @@ void Controller_NPad::SetSupportedNpadIdTypes(u8* data, std::size_t length) { } void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) { - ASSERT(max_length < supported_npad_id_types.size()); - std::memcpy(data, supported_npad_id_types.data(), supported_npad_id_types.size()); + const auto copy_amount = supported_npad_id_types.size() * sizeof(u32); + ASSERT(max_length <= copy_amount); + std::memcpy(data, supported_npad_id_types.data(), copy_amount); } std::size_t Controller_NPad::GetSupportedNpadIdTypesSize() const { @@ -1029,7 +1033,7 @@ Kernel::KReadableEvent& Controller_NPad::GetStyleSetChangedEvent(Core::HID::Npad void Controller_NPad::SignalStyleSetChangedEvent(Core::HID::NpadIdType npad_id) const { const auto& controller = GetControllerFromNpadIdType(npad_id); - controller.styleset_changed_event->GetWritableEvent().Signal(); + controller.styleset_changed_event->Signal(); } void Controller_NPad::AddNewControllerAt(Core::HID::NpadStyleIndex controller, @@ -1498,25 +1502,25 @@ bool Controller_NPad::IsControllerSupported(Core::HID::NpadStyleIndex controller Core::HID::NpadStyleTag style = GetSupportedStyleSet(); switch (controller) { case Core::HID::NpadStyleIndex::ProController: - return style.fullkey; + return style.fullkey.As<bool>(); case Core::HID::NpadStyleIndex::JoyconDual: - return style.joycon_dual; + return style.joycon_dual.As<bool>(); case Core::HID::NpadStyleIndex::JoyconLeft: - return style.joycon_left; + return style.joycon_left.As<bool>(); case Core::HID::NpadStyleIndex::JoyconRight: - return style.joycon_right; + return style.joycon_right.As<bool>(); case Core::HID::NpadStyleIndex::GameCube: - return style.gamecube; + return style.gamecube.As<bool>(); case Core::HID::NpadStyleIndex::Pokeball: - return style.palma; + return style.palma.As<bool>(); case Core::HID::NpadStyleIndex::NES: - return style.lark; + return style.lark.As<bool>(); case Core::HID::NpadStyleIndex::SNES: - return style.lucia; + return style.lucia.As<bool>(); case Core::HID::NpadStyleIndex::N64: - return style.lagoon; + return style.lagoon.As<bool>(); case Core::HID::NpadStyleIndex::SegaGenesis: - return style.lager; + return style.lager.As<bool>(); default: return false; } diff --git a/src/core/hle/service/hid/controllers/palma.cpp b/src/core/hle/service/hid/controllers/palma.cpp new file mode 100644 index 000000000..4564ea1e2 --- /dev/null +++ b/src/core/hle/service/hid/controllers/palma.cpp @@ -0,0 +1,229 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: 
GPL-2.0-or-later + +#include "core/core_timing.h" +#include "core/hid/emulated_controller.h" +#include "core/hid/hid_core.h" +#include "core/hid/hid_types.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_readable_event.h" +#include "core/hle/service/hid/controllers/palma.h" +#include "core/hle/service/kernel_helpers.h" + +namespace Service::HID { + +Controller_Palma::Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_, + KernelHelpers::ServiceContext& service_context_) + : ControllerBase{hid_core_}, service_context{service_context_} { + controller = hid_core.GetEmulatedController(Core::HID::NpadIdType::Other); + operation_complete_event = service_context.CreateEvent("hid:PalmaOperationCompleteEvent"); +} + +Controller_Palma::~Controller_Palma() = default; + +void Controller_Palma::OnInit() {} + +void Controller_Palma::OnRelease() {} + +void Controller_Palma::OnUpdate(const Core::Timing::CoreTiming& core_timing) { + if (!IsControllerActivated()) { + return; + } +} + +Result Controller_Palma::GetPalmaConnectionHandle(Core::HID::NpadIdType npad_id, + PalmaConnectionHandle& handle) { + active_handle.npad_id = npad_id; + handle = active_handle; + return ResultSuccess; +} + +Result Controller_Palma::InitializePalma(const PalmaConnectionHandle& handle) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + ActivateController(); + return ResultSuccess; +} + +Kernel::KReadableEvent& Controller_Palma::AcquirePalmaOperationCompleteEvent( + const PalmaConnectionHandle& handle) const { + if (handle.npad_id != active_handle.npad_id) { + LOG_ERROR(Service_HID, "Invalid npad id {}", handle.npad_id); + } + return operation_complete_event->GetReadableEvent(); +} + +Result Controller_Palma::GetPalmaOperationInfo(const PalmaConnectionHandle& handle, + PalmaOperationType& operation_type, + PalmaOperationData& data) const { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation_type = operation.operation; + data = operation.data; + return ResultSuccess; +} + +Result Controller_Palma::PlayPalmaActivity(const PalmaConnectionHandle& handle, + u64 palma_activity) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation.operation = PalmaOperationType::PlayActivity; + operation.result = PalmaResultSuccess; + operation.data = {}; + operation_complete_event->Signal(); + return ResultSuccess; +} + +Result Controller_Palma::SetPalmaFrModeType(const PalmaConnectionHandle& handle, + PalmaFrModeType fr_mode_) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + fr_mode = fr_mode_; + return ResultSuccess; +} + +Result Controller_Palma::ReadPalmaStep(const PalmaConnectionHandle& handle) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation.operation = PalmaOperationType::ReadStep; + operation.result = PalmaResultSuccess; + operation.data = {}; + operation_complete_event->Signal(); + return ResultSuccess; +} + +Result Controller_Palma::EnablePalmaStep(const PalmaConnectionHandle& handle, bool is_enabled) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + return ResultSuccess; +} + +Result Controller_Palma::ResetPalmaStep(const PalmaConnectionHandle& handle) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + return ResultSuccess; +} + +void Controller_Palma::ReadPalmaApplicationSection() {} + +void 
Controller_Palma::WritePalmaApplicationSection() {} + +Result Controller_Palma::ReadPalmaUniqueCode(const PalmaConnectionHandle& handle) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation.operation = PalmaOperationType::ReadUniqueCode; + operation.result = PalmaResultSuccess; + operation.data = {}; + operation_complete_event->Signal(); + return ResultSuccess; +} + +Result Controller_Palma::SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation.operation = PalmaOperationType::SetUniqueCodeInvalid; + operation.result = PalmaResultSuccess; + operation.data = {}; + operation_complete_event->Signal(); + return ResultSuccess; +} + +void Controller_Palma::WritePalmaActivityEntry() {} + +Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle, + u64 unknown) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation.operation = PalmaOperationType::WriteRgbLedPatternEntry; + operation.result = PalmaResultSuccess; + operation.data = {}; + operation_complete_event->Signal(); + return ResultSuccess; +} + +Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave, + u8* t_mem, u64 size) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation.operation = PalmaOperationType::WriteWaveEntry; + operation.result = PalmaResultSuccess; + operation.data = {}; + operation_complete_event->Signal(); + return ResultSuccess; +} + +Result Controller_Palma::SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle, + s32 database_id_version_) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + database_id_version = database_id_version_; + operation.operation = PalmaOperationType::ReadDataBaseIdentificationVersion; + operation.result = PalmaResultSuccess; + operation.data[0] = {}; + operation_complete_event->Signal(); + return ResultSuccess; +} + +Result Controller_Palma::GetPalmaDataBaseIdentificationVersion( + const PalmaConnectionHandle& handle) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + operation.operation = PalmaOperationType::ReadDataBaseIdentificationVersion; + operation.result = PalmaResultSuccess; + operation.data = {}; + operation.data[0] = static_cast<u8>(database_id_version); + operation_complete_event->Signal(); + return ResultSuccess; +} + +void Controller_Palma::SuspendPalmaFeature() {} + +Result Controller_Palma::GetPalmaOperationResult(const PalmaConnectionHandle& handle) const { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + return operation.result; +} +void Controller_Palma::ReadPalmaPlayLog() {} + +void Controller_Palma::ResetPalmaPlayLog() {} + +void Controller_Palma::SetIsPalmaAllConnectable(bool is_all_connectable) { + // If true, controllers are able to be paired + is_connectable = is_all_connectable; +} + +void Controller_Palma::SetIsPalmaPairedConnectable() {} + +Result Controller_Palma::PairPalma(const PalmaConnectionHandle& handle) { + if (handle.npad_id != active_handle.npad_id) { + return InvalidPalmaHandle; + } + // TODO: Do something + return ResultSuccess; +} + +void Controller_Palma::SetPalmaBoostMode(bool boost_mode) {} + +void Controller_Palma::CancelWritePalmaWaveEntry() {} + +void Controller_Palma::EnablePalmaBoostMode() {} + +void 
Controller_Palma::GetPalmaBluetoothAddress() {} + +void Controller_Palma::SetDisallowedPalmaConnection() {} + +} // namespace Service::HID diff --git a/src/core/hle/service/hid/controllers/palma.h b/src/core/hle/service/hid/controllers/palma.h new file mode 100644 index 000000000..1d7fc94e1 --- /dev/null +++ b/src/core/hle/service/hid/controllers/palma.h @@ -0,0 +1,163 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <array> +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/service/hid/controllers/controller_base.h" +#include "core/hle/service/hid/errors.h" + +namespace Kernel { +class KEvent; +class KReadableEvent; +} // namespace Kernel + +namespace Service::KernelHelpers { +class ServiceContext; +} + +namespace Core::HID { +class EmulatedController; +} // namespace Core::HID + +namespace Service::HID { +class Controller_Palma final : public ControllerBase { +public: + using PalmaOperationData = std::array<u8, 0x140>; + + // This is nn::hid::PalmaOperationType + enum class PalmaOperationType { + PlayActivity, + SetFrModeType, + ReadStep, + EnableStep, + ResetStep, + ReadApplicationSection, + WriteApplicationSection, + ReadUniqueCode, + SetUniqueCodeInvalid, + WriteActivityEntry, + WriteRgbLedPatternEntry, + WriteWaveEntry, + ReadDataBaseIdentificationVersion, + WriteDataBaseIdentificationVersion, + SuspendFeature, + ReadPlayLog, + ResetPlayLog, + }; + + // This is nn::hid::PalmaWaveSet + enum class PalmaWaveSet : u64 { + Small, + Medium, + Large, + }; + + // This is nn::hid::PalmaFrModeType + enum class PalmaFrModeType : u64 { + Off, + B01, + B02, + B03, + Downloaded, + }; + + // This is nn::hid::PalmaFeature + enum class PalmaFeature : u64 { + FrMode, + RumbleFeedback, + Step, + MuteSwitch, + }; + + // This is nn::hid::PalmaOperationInfo + struct PalmaOperationInfo { + PalmaOperationType operation{}; + Result result{PalmaResultSuccess}; + PalmaOperationData data{}; + }; + static_assert(sizeof(PalmaOperationInfo) == 0x148, "PalmaOperationInfo is an invalid size"); + + // This is nn::hid::PalmaActivityEntry + struct PalmaActivityEntry { + u32 rgb_led_pattern_index; + INSERT_PADDING_BYTES(2); + PalmaWaveSet wave_set; + u32 wave_index; + INSERT_PADDING_BYTES(12); + }; + static_assert(sizeof(PalmaActivityEntry) == 0x20, "PalmaActivityEntry is an invalid size"); + + struct PalmaConnectionHandle { + Core::HID::NpadIdType npad_id; + INSERT_PADDING_BYTES(4); // Unknown + }; + static_assert(sizeof(PalmaConnectionHandle) == 0x8, + "PalmaConnectionHandle has incorrect size."); + + explicit Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_, + KernelHelpers::ServiceContext& service_context_); + ~Controller_Palma() override; + + // Called when the controller is initialized + void OnInit() override; + + // When the controller is released + void OnRelease() override; + + // When the controller is requesting an update for the shared memory + void OnUpdate(const Core::Timing::CoreTiming& core_timing) override; + + Result GetPalmaConnectionHandle(Core::HID::NpadIdType npad_id, PalmaConnectionHandle& handle); + Result InitializePalma(const PalmaConnectionHandle& handle); + Kernel::KReadableEvent& AcquirePalmaOperationCompleteEvent( + const PalmaConnectionHandle& handle) const; + Result GetPalmaOperationInfo(const PalmaConnectionHandle& handle, + PalmaOperationType& operation_type, + PalmaOperationData& data) const; + Result PlayPalmaActivity(const 
PalmaConnectionHandle& handle, u64 palma_activity); + Result SetPalmaFrModeType(const PalmaConnectionHandle& handle, PalmaFrModeType fr_mode_); + Result ReadPalmaStep(const PalmaConnectionHandle& handle); + Result EnablePalmaStep(const PalmaConnectionHandle& handle, bool is_enabled); + Result ResetPalmaStep(const PalmaConnectionHandle& handle); + Result ReadPalmaUniqueCode(const PalmaConnectionHandle& handle); + Result SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle); + Result WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle, u64 unknown); + Result WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave, u8* t_mem, + u64 size); + Result SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle, + s32 database_id_version_); + Result GetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle); + Result GetPalmaOperationResult(const PalmaConnectionHandle& handle) const; + void SetIsPalmaAllConnectable(bool is_all_connectable); + Result PairPalma(const PalmaConnectionHandle& handle); + void SetPalmaBoostMode(bool boost_mode); + +private: + void ReadPalmaApplicationSection(); + void WritePalmaApplicationSection(); + void WritePalmaActivityEntry(); + void SuspendPalmaFeature(); + void ReadPalmaPlayLog(); + void ResetPalmaPlayLog(); + void SetIsPalmaPairedConnectable(); + void CancelWritePalmaWaveEntry(); + void EnablePalmaBoostMode(); + void GetPalmaBluetoothAddress(); + void SetDisallowedPalmaConnection(); + + bool is_connectable{}; + s32 database_id_version{}; + PalmaOperationInfo operation{}; + PalmaFrModeType fr_mode{}; + PalmaConnectionHandle active_handle{}; + + Core::HID::EmulatedController* controller; + + Kernel::KEvent* operation_complete_event; + KernelHelpers::ServiceContext& service_context; +}; + +} // namespace Service::HID diff --git a/src/core/hle/service/hid/errors.h b/src/core/hle/service/hid/errors.h index 4613a4e60..76208e9a4 100644 --- a/src/core/hle/service/hid/errors.h +++ b/src/core/hle/service/hid/errors.h @@ -7,6 +7,7 @@ namespace Service::HID { +constexpr Result PalmaResultSuccess{ErrorModule::HID, 0}; constexpr Result NpadInvalidHandle{ErrorModule::HID, 100}; constexpr Result NpadDeviceIndexOutOfRange{ErrorModule::HID, 107}; constexpr Result VibrationInvalidStyleIndex{ErrorModule::HID, 122}; @@ -17,6 +18,7 @@ constexpr Result NpadIsDualJoycon{ErrorModule::HID, 601}; constexpr Result NpadIsSameType{ErrorModule::HID, 602}; constexpr Result InvalidNpadId{ErrorModule::HID, 709}; constexpr Result NpadNotConnected{ErrorModule::HID, 710}; +constexpr Result InvalidPalmaHandle{ErrorModule::HID, 3302}; } // namespace Service::HID diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index 3d3457160..79375bd2f 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -27,6 +27,7 @@ #include "core/hle/service/hid/controllers/keyboard.h" #include "core/hle/service/hid/controllers/mouse.h" #include "core/hle/service/hid/controllers/npad.h" +#include "core/hle/service/hid/controllers/palma.h" #include "core/hle/service/hid/controllers/stubbed.h" #include "core/hle/service/hid/controllers/touchscreen.h" #include "core/hle/service/hid/controllers/xpad.h" @@ -35,7 +36,8 @@ namespace Service::HID { // Updating period for each HID device. 
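// (For reference, rate_hz = 1'000'000'000 / period_ns, which is where the
// figures in the comments below come from: 4'000'000 ns -> 250 Hz,
// 1'000'000 ns -> 1000 Hz, 8'000'000 ns -> 125 Hz, 5'000'000 ns -> 200 Hz.)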
// Period time is obtained by measuring the number of samples in a second on HW using a homebrew -constexpr auto pad_update_ns = std::chrono::nanoseconds{4 * 1000 * 1000}; // (4ms, 250Hz) +// The correct pad_update_ns is 4ms; it is overclocked here to lower input lag +constexpr auto pad_update_ns = std::chrono::nanoseconds{1 * 1000 * 1000}; // (1ms, 1000Hz) constexpr auto mouse_keyboard_update_ns = std::chrono::nanoseconds{8 * 1000 * 1000}; // (8ms, 125Hz) constexpr auto motion_update_ns = std::chrono::nanoseconds{5 * 1000 * 1000}; // (5ms, 200Hz) @@ -60,6 +62,7 @@ IAppletResource::IAppletResource(Core::System& system_, MakeControllerWithServiceContext<Controller_NPad>(HidController::NPad, shared_memory); MakeController<Controller_Gesture>(HidController::Gesture, shared_memory); MakeController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor, shared_memory); + MakeControllerWithServiceContext<Controller_Palma>(HidController::Palma, shared_memory); // Homebrew doesn't try to activate some controllers, so we activate them by default GetController<Controller_NPad>(HidController::NPad).ActivateController(); @@ -310,36 +313,36 @@ Hid::Hid(Core::System& system_) {406, nullptr, "GetNpadLeftRightInterfaceType"}, {407, nullptr, "GetNpadOfHighestBatteryLevel"}, {408, nullptr, "GetNpadOfHighestBatteryLevelForJoyRight"}, - {500, nullptr, "GetPalmaConnectionHandle"}, - {501, nullptr, "InitializePalma"}, - {502, nullptr, "AcquirePalmaOperationCompleteEvent"}, - {503, nullptr, "GetPalmaOperationInfo"}, - {504, nullptr, "PlayPalmaActivity"}, - {505, nullptr, "SetPalmaFrModeType"}, - {506, nullptr, "ReadPalmaStep"}, - {507, nullptr, "EnablePalmaStep"}, - {508, nullptr, "ResetPalmaStep"}, - {509, nullptr, "ReadPalmaApplicationSection"}, - {510, nullptr, "WritePalmaApplicationSection"}, - {511, nullptr, "ReadPalmaUniqueCode"}, - {512, nullptr, "SetPalmaUniqueCodeInvalid"}, - {513, nullptr, "WritePalmaActivityEntry"}, - {514, nullptr, "WritePalmaRgbLedPatternEntry"}, - {515, nullptr, "WritePalmaWaveEntry"}, - {516, nullptr, "SetPalmaDataBaseIdentificationVersion"}, - {517, nullptr, "GetPalmaDataBaseIdentificationVersion"}, - {518, nullptr, "SuspendPalmaFeature"}, - {519, nullptr, "GetPalmaOperationResult"}, - {520, nullptr, "ReadPalmaPlayLog"}, - {521, nullptr, "ResetPalmaPlayLog"}, + {500, &Hid::GetPalmaConnectionHandle, "GetPalmaConnectionHandle"}, + {501, &Hid::InitializePalma, "InitializePalma"}, + {502, &Hid::AcquirePalmaOperationCompleteEvent, "AcquirePalmaOperationCompleteEvent"}, + {503, &Hid::GetPalmaOperationInfo, "GetPalmaOperationInfo"}, + {504, &Hid::PlayPalmaActivity, "PlayPalmaActivity"}, + {505, &Hid::SetPalmaFrModeType, "SetPalmaFrModeType"}, + {506, &Hid::ReadPalmaStep, "ReadPalmaStep"}, + {507, &Hid::EnablePalmaStep, "EnablePalmaStep"}, + {508, &Hid::ResetPalmaStep, "ResetPalmaStep"}, + {509, &Hid::ReadPalmaApplicationSection, "ReadPalmaApplicationSection"}, + {510, &Hid::WritePalmaApplicationSection, "WritePalmaApplicationSection"}, + {511, &Hid::ReadPalmaUniqueCode, "ReadPalmaUniqueCode"}, + {512, &Hid::SetPalmaUniqueCodeInvalid, "SetPalmaUniqueCodeInvalid"}, + {513, &Hid::WritePalmaActivityEntry, "WritePalmaActivityEntry"}, + {514, &Hid::WritePalmaRgbLedPatternEntry, "WritePalmaRgbLedPatternEntry"}, + {515, &Hid::WritePalmaWaveEntry, "WritePalmaWaveEntry"}, + {516, &Hid::SetPalmaDataBaseIdentificationVersion, "SetPalmaDataBaseIdentificationVersion"}, + {517, &Hid::GetPalmaDataBaseIdentificationVersion, "GetPalmaDataBaseIdentificationVersion"}, + {518, &Hid::SuspendPalmaFeature, 
"SuspendPalmaFeature"}, + {519, &Hid::GetPalmaOperationResult, "GetPalmaOperationResult"}, + {520, &Hid::ReadPalmaPlayLog, "ReadPalmaPlayLog"}, + {521, &Hid::ResetPalmaPlayLog, "ResetPalmaPlayLog"}, {522, &Hid::SetIsPalmaAllConnectable, "SetIsPalmaAllConnectable"}, - {523, nullptr, "SetIsPalmaPairedConnectable"}, - {524, nullptr, "PairPalma"}, + {523, &Hid::SetIsPalmaPairedConnectable, "SetIsPalmaPairedConnectable"}, + {524, &Hid::PairPalma, "PairPalma"}, {525, &Hid::SetPalmaBoostMode, "SetPalmaBoostMode"}, - {526, nullptr, "CancelWritePalmaWaveEntry"}, - {527, nullptr, "EnablePalmaBoostMode"}, - {528, nullptr, "GetPalmaBluetoothAddress"}, - {529, nullptr, "SetDisallowedPalmaConnection"}, + {526, &Hid::CancelWritePalmaWaveEntry, "CancelWritePalmaWaveEntry"}, + {527, &Hid::EnablePalmaBoostMode, "EnablePalmaBoostMode"}, + {528, &Hid::GetPalmaBluetoothAddress, "GetPalmaBluetoothAddress"}, + {529, &Hid::SetDisallowedPalmaConnection, "SetDisallowedPalmaConnection"}, {1000, &Hid::SetNpadCommunicationMode, "SetNpadCommunicationMode"}, {1001, &Hid::GetNpadCommunicationMode, "GetNpadCommunicationMode"}, {1002, &Hid::SetTouchScreenConfiguration, "SetTouchScreenConfiguration"}, @@ -1878,14 +1881,361 @@ void Hid::IsUsbFullKeyControllerEnabled(Kernel::HLERequestContext& ctx) { rb.Push(false); } +void Hid::GetPalmaConnectionHandle(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + struct Parameters { + Core::HID::NpadIdType npad_id; + INSERT_PADDING_WORDS_NOINIT(1); + u64 applet_resource_user_id; + }; + static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size."); + + const auto parameters{rp.PopRaw<Parameters>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}", + parameters.npad_id, parameters.applet_resource_user_id); + + Controller_Palma::PalmaConnectionHandle handle; + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = controller.GetPalmaConnectionHandle(parameters.npad_id, handle); + + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(result); + rb.PushRaw(handle); +} + +void Hid::InitializePalma(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id); + + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = controller.InitializePalma(connection_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void Hid::AcquirePalmaOperationCompleteEvent(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id); + + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(ResultSuccess); + rb.PushCopyObjects(controller.AcquirePalmaOperationCompleteEvent(connection_handle)); +} + +void Hid::GetPalmaOperationInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id); + + Controller_Palma::PalmaOperationType operation_type; + 
Controller_Palma::PalmaOperationData data; + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = controller.GetPalmaOperationInfo(connection_handle, operation_type, data); + + if (result.IsError()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); + return; + } + + ctx.WriteBuffer(data); + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(result); + rb.Push(static_cast<u64>(operation_type)); +} + +void Hid::PlayPalmaActivity(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + const auto palma_activity{rp.Pop<u64>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, palma_activity={}", + connection_handle.npad_id, palma_activity); + + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = controller.PlayPalmaActivity(connection_handle, palma_activity); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void Hid::SetPalmaFrModeType(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + const auto fr_mode{rp.PopEnum<Controller_Palma::PalmaFrModeType>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, fr_mode={}", + connection_handle.npad_id, fr_mode); + + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = controller.SetPalmaFrModeType(connection_handle, fr_mode); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void Hid::ReadPalmaStep(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id); + + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = controller.ReadPalmaStep(connection_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void Hid::EnablePalmaStep(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + struct Parameters { + bool is_enabled; + INSERT_PADDING_WORDS_NOINIT(1); + Controller_Palma::PalmaConnectionHandle connection_handle; + }; + static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size."); + + const auto parameters{rp.PopRaw<Parameters>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, is_enabled={}", + parameters.connection_handle.npad_id, parameters.is_enabled); + + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = + controller.EnablePalmaStep(parameters.connection_handle, parameters.is_enabled); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void Hid::ResetPalmaStep(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id); + + auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma); + const auto result = controller.ResetPalmaStep(connection_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void Hid::ReadPalmaApplicationSection(Kernel::HLERequestContext& ctx) { + 
LOG_WARNING(Service_HID, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::WritePalmaApplicationSection(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_HID, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::ReadPalmaUniqueCode(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id); + + applet_resource->GetController<Controller_Palma>(HidController::Palma) + .ReadPalmaUniqueCode(connection_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::SetPalmaUniqueCodeInvalid(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id); + + applet_resource->GetController<Controller_Palma>(HidController::Palma) + .SetPalmaUniqueCodeInvalid(connection_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::WritePalmaActivityEntry(Kernel::HLERequestContext& ctx) { + LOG_CRITICAL(Service_HID, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::WritePalmaRgbLedPatternEntry(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + const auto unknown{rp.Pop<u64>()}; + + const auto buffer = ctx.ReadBuffer(); + + LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, unknown={}", + connection_handle.npad_id, unknown); + + applet_resource->GetController<Controller_Palma>(HidController::Palma) + .WritePalmaRgbLedPatternEntry(connection_handle, unknown); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::WritePalmaWaveEntry(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()}; + const auto wave_set{rp.PopEnum<Controller_Palma::PalmaWaveSet>()}; + const auto unknown{rp.Pop<u64>()}; + const auto t_mem_size{rp.Pop<u64>()}; + const auto t_mem_handle{ctx.GetCopyHandle(0)}; + const auto size{rp.Pop<u64>()}; + + ASSERT_MSG(t_mem_size == 0x3000, "t_mem_size is not 0x3000 bytes"); + + auto t_mem = + system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(t_mem_handle); + + if (t_mem.IsNull()) { + LOG_ERROR(Service_HID, "t_mem is a nullptr for handle=0x{:08X}", t_mem_handle); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultUnknown); + return; + } + + ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size"); + + LOG_WARNING(Service_HID, + "(STUBBED) called, connection_handle={}, wave_set={}, unknown={}, " + "t_mem_handle=0x{:08X}, t_mem_size={}, size={}", + connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size); + + applet_resource->GetController<Controller_Palma>(HidController::Palma) + .WritePalmaWaveEntry(connection_handle, wave_set, + system.Memory().GetPointer(t_mem->GetSourceAddress()), t_mem_size); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::SetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + struct Parameters { + s32 database_id_version; + 
INSERT_PADDING_WORDS_NOINIT(1);
+        Controller_Palma::PalmaConnectionHandle connection_handle;
+    };
+    static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+    const auto parameters{rp.PopRaw<Parameters>()};
+
+    LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, database_id_version={}",
+                parameters.connection_handle.npad_id, parameters.database_id_version);
+
+    applet_resource->GetController<Controller_Palma>(HidController::Palma)
+        .SetPalmaDataBaseIdentificationVersion(parameters.connection_handle,
+                                               parameters.database_id_version);
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(ResultSuccess);
+}
+
+void Hid::GetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx) {
+    IPC::RequestParser rp{ctx};
+    const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
+
+    LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+    applet_resource->GetController<Controller_Palma>(HidController::Palma)
+        .GetPalmaDataBaseIdentificationVersion(connection_handle);
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(ResultSuccess);
+}
+
+void Hid::SuspendPalmaFeature(Kernel::HLERequestContext& ctx) {
+    LOG_WARNING(Service_HID, "(STUBBED) called");
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(ResultSuccess);
+}
+
+void Hid::GetPalmaOperationResult(Kernel::HLERequestContext& ctx) {
+    IPC::RequestParser rp{ctx};
+    const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
+
+    LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+    const auto result = applet_resource->GetController<Controller_Palma>(HidController::Palma)
+                            .GetPalmaOperationResult(connection_handle);
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(result);
+}
+
+void Hid::ReadPalmaPlayLog(Kernel::HLERequestContext& ctx) {
+    LOG_WARNING(Service_HID, "(STUBBED) called");
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(ResultSuccess);
+}
+
+void Hid::ResetPalmaPlayLog(Kernel::HLERequestContext& ctx) {
+    LOG_WARNING(Service_HID, "(STUBBED) called");
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(ResultSuccess);
+}
+
 void Hid::SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp{ctx};
-    const auto applet_resource_user_id{rp.Pop<u64>()};
-    const auto is_palma_all_connectable{rp.Pop<bool>()};
+    struct Parameters {
+        bool is_palma_all_connectable;
+        INSERT_PADDING_BYTES_NOINIT(7);
+        u64 applet_resource_user_id;
+    };
+    static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+    const auto parameters{rp.PopRaw<Parameters>()};
 
     LOG_WARNING(Service_HID,
-                "(STUBBED) called, applet_resource_user_id={}, is_palma_all_connectable={}",
-                applet_resource_user_id, is_palma_all_connectable);
+                "(STUBBED) called, is_palma_all_connectable={}, applet_resource_user_id={}",
+                parameters.is_palma_all_connectable, parameters.applet_resource_user_id);
+
+    applet_resource->GetController<Controller_Palma>(HidController::Palma)
+        .SetIsPalmaAllConnectable(parameters.is_palma_all_connectable);
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(ResultSuccess);
+}
+
+void Hid::SetIsPalmaPairedConnectable(Kernel::HLERequestContext& ctx) {
+    LOG_WARNING(Service_HID, "(STUBBED) called");
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(ResultSuccess);
+}
+
+void Hid::PairPalma(Kernel::HLERequestContext& ctx) {
+    IPC::RequestParser rp{ctx};
+    const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
+
+    LOG_WARNING(Service_HID, "(STUBBED) 
called, connection_handle={}", connection_handle.npad_id); + + applet_resource->GetController<Controller_Palma>(HidController::Palma) + .PairPalma(connection_handle); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); @@ -1897,6 +2247,37 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_HID, "(STUBBED) called, palma_boost_mode={}", palma_boost_mode); + applet_resource->GetController<Controller_Palma>(HidController::Palma) + .SetPalmaBoostMode(palma_boost_mode); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::CancelWritePalmaWaveEntry(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_HID, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::EnablePalmaBoostMode(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_HID, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::GetPalmaBluetoothAddress(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_HID, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void Hid::SetDisallowedPalmaConnection(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_HID, "(STUBBED) called"); + IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); } diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index ac4333022..340d26fdc 100644 --- a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h @@ -33,6 +33,7 @@ enum class HidController : std::size_t { NPad, Gesture, ConsoleSixAxisSensor, + Palma, MaxControllers, }; @@ -166,8 +167,36 @@ private: void FinalizeSevenSixAxisSensor(Kernel::HLERequestContext& ctx); void ResetSevenSixAxisSensorTimestamp(Kernel::HLERequestContext& ctx); void IsUsbFullKeyControllerEnabled(Kernel::HLERequestContext& ctx); + void GetPalmaConnectionHandle(Kernel::HLERequestContext& ctx); + void InitializePalma(Kernel::HLERequestContext& ctx); + void AcquirePalmaOperationCompleteEvent(Kernel::HLERequestContext& ctx); + void GetPalmaOperationInfo(Kernel::HLERequestContext& ctx); + void PlayPalmaActivity(Kernel::HLERequestContext& ctx); + void SetPalmaFrModeType(Kernel::HLERequestContext& ctx); + void ReadPalmaStep(Kernel::HLERequestContext& ctx); + void EnablePalmaStep(Kernel::HLERequestContext& ctx); + void ResetPalmaStep(Kernel::HLERequestContext& ctx); + void ReadPalmaApplicationSection(Kernel::HLERequestContext& ctx); + void WritePalmaApplicationSection(Kernel::HLERequestContext& ctx); + void ReadPalmaUniqueCode(Kernel::HLERequestContext& ctx); + void SetPalmaUniqueCodeInvalid(Kernel::HLERequestContext& ctx); + void WritePalmaActivityEntry(Kernel::HLERequestContext& ctx); + void WritePalmaRgbLedPatternEntry(Kernel::HLERequestContext& ctx); + void WritePalmaWaveEntry(Kernel::HLERequestContext& ctx); + void SetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx); + void GetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx); + void SuspendPalmaFeature(Kernel::HLERequestContext& ctx); + void GetPalmaOperationResult(Kernel::HLERequestContext& ctx); + void ReadPalmaPlayLog(Kernel::HLERequestContext& ctx); + void ResetPalmaPlayLog(Kernel::HLERequestContext& ctx); void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx); + void SetIsPalmaPairedConnectable(Kernel::HLERequestContext& ctx); + void PairPalma(Kernel::HLERequestContext& ctx); void SetPalmaBoostMode(Kernel::HLERequestContext& ctx); + void CancelWritePalmaWaveEntry(Kernel::HLERequestContext& ctx); + 
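The declarations above pair with the {id, &Hid::Handler, "Name"} rows registered earlier in this diff. A reduced sketch of that dispatch-table idiom, with made-up names and no assumptions about the real ServiceFramework internals:

#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <map>

class Service {
public:
    using Handler = void (Service::*)();

    struct FunctionInfo {
        std::uint32_t id;
        Handler handler;
        const char* name;
    };

    // Mirrors the {id, member-pointer, name} table rows; later requests route by id.
    void RegisterHandlers(std::initializer_list<FunctionInfo> functions) {
        for (const auto& info : functions) {
            handlers.emplace(info.id, info);
        }
    }

    void Dispatch(std::uint32_t id) {
        const auto it = handlers.find(id);
        if (it == handlers.end()) {
            // In the real table, a nullptr row means "known but unimplemented".
            std::cout << "unknown command " << id << '\n';
            return;
        }
        std::cout << "calling " << it->second.name << '\n';
        (this->*(it->second.handler))();
    }

    void PairPalma() { std::cout << "PairPalma body\n"; }

private:
    std::map<std::uint32_t, FunctionInfo> handlers;
};

int main() {
    Service service;
    service.RegisterHandlers({{524, &Service::PairPalma, "PairPalma"}});
    service.Dispatch(524); // calling PairPalma
    service.Dispatch(999); // unknown command
}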
void EnablePalmaBoostMode(Kernel::HLERequestContext& ctx); + void GetPalmaBluetoothAddress(Kernel::HLERequestContext& ctx); + void SetDisallowedPalmaConnection(Kernel::HLERequestContext& ctx); void SetNpadCommunicationMode(Kernel::HLERequestContext& ctx); void GetNpadCommunicationMode(Kernel::HLERequestContext& ctx); void SetTouchScreenConfiguration(Kernel::HLERequestContext& ctx); diff --git a/src/core/hle/service/hid/hidbus/ringcon.cpp b/src/core/hle/service/hid/hidbus/ringcon.cpp index ad223d649..57f1a2a26 100644 --- a/src/core/hle/service/hid/hidbus/ringcon.cpp +++ b/src/core/hle/service/hid/hidbus/ringcon.cpp @@ -131,12 +131,12 @@ bool RingController::SetCommand(const std::vector<u8>& data) { case RingConCommands::ReadRepCount: case RingConCommands::ReadTotalPushCount: ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes"); - send_command_async_event->GetWritableEvent().Signal(); + send_command_async_event->Signal(); return true; case RingConCommands::ResetRepCount: ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes"); total_rep_count = 0; - send_command_async_event->GetWritableEvent().Signal(); + send_command_async_event->Signal(); return true; case RingConCommands::SaveCalData: { ASSERT_MSG(data.size() == 0x14, "data.size is not 0x14 bytes"); @@ -144,14 +144,14 @@ bool RingController::SetCommand(const std::vector<u8>& data) { SaveCalData save_info{}; std::memcpy(&save_info, data.data(), sizeof(SaveCalData)); user_calibration = save_info.calibration; - send_command_async_event->GetWritableEvent().Signal(); + send_command_async_event->Signal(); return true; } default: LOG_ERROR(Service_HID, "Command not implemented {}", command); command = RingConCommands::Error; // Signal a reply to avoid softlocking the game - send_command_async_event->GetWritableEvent().Signal(); + send_command_async_event->Signal(); return false; } } diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp index c4b44cbf9..6a3453457 100644 --- a/src/core/hle/service/hid/irs.cpp +++ b/src/core/hle/service/hid/irs.cpp @@ -542,7 +542,8 @@ Result IRS::IsIrCameraHandleValid(const Core::IrSensor::IrCameraHandle& camera_h Core::IrSensor::DeviceFormat& IRS::GetIrCameraSharedMemoryDeviceEntry( const Core::IrSensor::IrCameraHandle& camera_handle) { - ASSERT_MSG(sizeof(StatusManager::device) > camera_handle.npad_id, "invalid npad_id"); + const auto npad_id_max_index = static_cast<u8>(sizeof(StatusManager::device)); + ASSERT_MSG(camera_handle.npad_id < npad_id_max_index, "invalid npad_id"); return shared_memory->device[camera_handle.npad_id]; } diff --git a/src/core/hle/service/hid/irsensor/pointing_processor.h b/src/core/hle/service/hid/irsensor/pointing_processor.h index cf4930794..d63423aff 100644 --- a/src/core/hle/service/hid/irsensor/pointing_processor.h +++ b/src/core/hle/service/hid/irsensor/pointing_processor.h @@ -37,10 +37,10 @@ private: u8 pointing_status; INSERT_PADDING_BYTES(3); u32 unknown; - float unkown_float1; + float unknown_float1; float position_x; float position_y; - float unkown_float2; + float unknown_float2; Core::IrSensor::IrsRect window_of_interest; }; static_assert(sizeof(PointingProcessorMarkerData) == 0x20, diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp index 3e317367b..af133af93 100644 --- a/src/core/hle/service/kernel_helpers.cpp +++ b/src/core/hle/service/kernel_helpers.cpp @@ -9,7 +9,6 @@ #include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_resource_limit.h" #include 
"core/hle/kernel/k_scoped_resource_reservation.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/service/kernel_helpers.h" namespace Service::KernelHelpers { @@ -46,7 +45,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) { } // Initialize the event. - event->Initialize(std::move(name), process); + event->Initialize(process); // Commit the thread reservation. event_reservation.Commit(); @@ -59,7 +58,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) { void ServiceContext::CloseEvent(Kernel::KEvent* event) { event->GetReadableEvent().Close(); - event->GetWritableEvent().Close(); + event->Close(); } } // namespace Service::KernelHelpers diff --git a/src/core/hle/service/ldn/lan_discovery.cpp b/src/core/hle/service/ldn/lan_discovery.cpp new file mode 100644 index 000000000..8f3c04550 --- /dev/null +++ b/src/core/hle/service/ldn/lan_discovery.cpp @@ -0,0 +1,633 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/hle/service/ldn/lan_discovery.h" +#include "core/internal_network/network.h" +#include "core/internal_network/network_interface.h" + +namespace Service::LDN { + +LanStation::LanStation(s8 node_id_, LANDiscovery* discovery_) + : node_info(nullptr), status(NodeStatus::Disconnected), node_id(node_id_), + discovery(discovery_) {} + +LanStation::~LanStation() = default; + +NodeStatus LanStation::GetStatus() const { + return status; +} + +void LanStation::OnClose() { + LOG_INFO(Service_LDN, "OnClose {}", node_id); + Reset(); + discovery->UpdateNodes(); +} + +void LanStation::Reset() { + status = NodeStatus::Disconnected; +}; + +void LanStation::OverrideInfo() { + bool connected = GetStatus() == NodeStatus::Connected; + node_info->node_id = node_id; + node_info->is_connected = connected ? 
1 : 0; +} + +LANDiscovery::LANDiscovery(Network::RoomNetwork& room_network_) + : stations({{{1, this}, {2, this}, {3, this}, {4, this}, {5, this}, {6, this}, {7, this}}}), + room_network{room_network_} {} + +LANDiscovery::~LANDiscovery() { + if (inited) { + Result rc = Finalize(); + LOG_INFO(Service_LDN, "Finalize: {}", rc.raw); + } +} + +void LANDiscovery::InitNetworkInfo() { + network_info.common.bssid = GetFakeMac(); + network_info.common.channel = WifiChannel::Wifi24_6; + network_info.common.link_level = LinkLevel::Good; + network_info.common.network_type = PackedNetworkType::Ldn; + network_info.common.ssid = fake_ssid; + + auto& nodes = network_info.ldn.nodes; + for (std::size_t i = 0; i < NodeCountMax; i++) { + nodes[i].node_id = static_cast<s8>(i); + nodes[i].is_connected = 0; + } +} + +void LANDiscovery::InitNodeStateChange() { + for (auto& node_update : node_changes) { + node_update.state_change = NodeStateChange::None; + } + for (auto& node_state : node_last_states) { + node_state = 0; + } +} + +State LANDiscovery::GetState() const { + return state; +} + +void LANDiscovery::SetState(State new_state) { + state = new_state; +} + +Result LANDiscovery::GetNetworkInfo(NetworkInfo& out_network) const { + if (state == State::AccessPointCreated || state == State::StationConnected) { + std::memcpy(&out_network, &network_info, sizeof(network_info)); + return ResultSuccess; + } + + return ResultBadState; +} + +Result LANDiscovery::GetNetworkInfo(NetworkInfo& out_network, + std::vector<NodeLatestUpdate>& out_updates, + std::size_t buffer_count) { + if (buffer_count > NodeCountMax) { + return ResultInvalidBufferCount; + } + + if (state == State::AccessPointCreated || state == State::StationConnected) { + std::memcpy(&out_network, &network_info, sizeof(network_info)); + for (std::size_t i = 0; i < buffer_count; i++) { + out_updates[i].state_change = node_changes[i].state_change; + node_changes[i].state_change = NodeStateChange::None; + } + return ResultSuccess; + } + + return ResultBadState; +} + +DisconnectReason LANDiscovery::GetDisconnectReason() const { + return disconnect_reason; +} + +Result LANDiscovery::Scan(std::vector<NetworkInfo>& networks, u16& count, + const ScanFilter& filter) { + if (!IsFlagSet(filter.flag, ScanFilterFlag::NetworkType) || + filter.network_type <= NetworkType::All) { + if (!IsFlagSet(filter.flag, ScanFilterFlag::Ssid) && filter.ssid.length >= SsidLengthMax) { + return ResultBadInput; + } + } + + { + std::scoped_lock lock{packet_mutex}; + scan_results.clear(); + + SendBroadcast(Network::LDNPacketType::Scan); + } + + LOG_INFO(Service_LDN, "Waiting for scan replies"); + std::this_thread::sleep_for(std::chrono::seconds(1)); + + std::scoped_lock lock{packet_mutex}; + for (const auto& [key, info] : scan_results) { + if (count >= networks.size()) { + break; + } + + if (IsFlagSet(filter.flag, ScanFilterFlag::LocalCommunicationId)) { + if (filter.network_id.intent_id.local_communication_id != + info.network_id.intent_id.local_communication_id) { + continue; + } + } + if (IsFlagSet(filter.flag, ScanFilterFlag::SessionId)) { + if (filter.network_id.session_id != info.network_id.session_id) { + continue; + } + } + if (IsFlagSet(filter.flag, ScanFilterFlag::NetworkType)) { + if (filter.network_type != static_cast<NetworkType>(info.common.network_type)) { + continue; + } + } + if (IsFlagSet(filter.flag, ScanFilterFlag::Ssid)) { + if (filter.ssid != info.common.ssid) { + continue; + } + } + if (IsFlagSet(filter.flag, ScanFilterFlag::SceneId)) { + if 
(filter.network_id.intent_id.scene_id != info.network_id.intent_id.scene_id) { + continue; + } + } + + networks[count++] = info; + } + + return ResultSuccess; +} + +Result LANDiscovery::SetAdvertiseData(std::span<const u8> data) { + std::scoped_lock lock{packet_mutex}; + const std::size_t size = data.size(); + if (size > AdvertiseDataSizeMax) { + return ResultAdvertiseDataTooLarge; + } + + std::memcpy(network_info.ldn.advertise_data.data(), data.data(), size); + network_info.ldn.advertise_data_size = static_cast<u16>(size); + + UpdateNodes(); + + return ResultSuccess; +} + +Result LANDiscovery::OpenAccessPoint() { + std::scoped_lock lock{packet_mutex}; + disconnect_reason = DisconnectReason::None; + if (state == State::None) { + return ResultBadState; + } + + ResetStations(); + SetState(State::AccessPointOpened); + + return ResultSuccess; +} + +Result LANDiscovery::CloseAccessPoint() { + std::scoped_lock lock{packet_mutex}; + if (state == State::None) { + return ResultBadState; + } + + if (state == State::AccessPointCreated) { + DestroyNetwork(); + } + + ResetStations(); + SetState(State::Initialized); + + return ResultSuccess; +} + +Result LANDiscovery::OpenStation() { + std::scoped_lock lock{packet_mutex}; + disconnect_reason = DisconnectReason::None; + if (state == State::None) { + return ResultBadState; + } + + ResetStations(); + SetState(State::StationOpened); + + return ResultSuccess; +} + +Result LANDiscovery::CloseStation() { + std::scoped_lock lock{packet_mutex}; + if (state == State::None) { + return ResultBadState; + } + + if (state == State::StationConnected) { + Disconnect(); + } + + ResetStations(); + SetState(State::Initialized); + + return ResultSuccess; +} + +Result LANDiscovery::CreateNetwork(const SecurityConfig& security_config, + const UserConfig& user_config, + const NetworkConfig& network_config) { + std::scoped_lock lock{packet_mutex}; + + if (state != State::AccessPointOpened) { + return ResultBadState; + } + + InitNetworkInfo(); + network_info.ldn.node_count_max = network_config.node_count_max; + network_info.ldn.security_mode = security_config.security_mode; + + if (network_config.channel == WifiChannel::Default) { + network_info.common.channel = WifiChannel::Wifi24_6; + } else { + network_info.common.channel = network_config.channel; + } + + std::independent_bits_engine<std::mt19937, 64, u64> bits_engine; + network_info.network_id.session_id.high = bits_engine(); + network_info.network_id.session_id.low = bits_engine(); + network_info.network_id.intent_id = network_config.intent_id; + + NodeInfo& node0 = network_info.ldn.nodes[0]; + const Result rc2 = GetNodeInfo(node0, user_config, network_config.local_communication_version); + if (rc2.IsError()) { + return ResultAccessPointConnectionFailed; + } + + SetState(State::AccessPointCreated); + + InitNodeStateChange(); + node0.is_connected = 1; + UpdateNodes(); + + return rc2; +} + +Result LANDiscovery::DestroyNetwork() { + for (auto local_ip : connected_clients) { + SendPacket(Network::LDNPacketType::DestroyNetwork, local_ip); + } + + ResetStations(); + + SetState(State::AccessPointOpened); + lan_event(); + + return ResultSuccess; +} + +Result LANDiscovery::Connect(const NetworkInfo& network_info_, const UserConfig& user_config, + u16 local_communication_version) { + std::scoped_lock lock{packet_mutex}; + if (network_info_.ldn.node_count == 0) { + return ResultInvalidNodeCount; + } + + Result rc = GetNodeInfo(node_info, user_config, local_communication_version); + if (rc.IsError()) { + return ResultConnectionFailed; 
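The OpenAccessPoint/CloseAccessPoint and OpenStation/CloseStation pairs above are thin state-machine transitions: each rejects State::None, tears down any active session, resets the stations, and moves to the target state. A compact stand-alone sketch of that gating, reusing the state names from this file but otherwise simplified:

#include <iostream>

enum class State {
    None,
    Initialized,
    AccessPointOpened,
    AccessPointCreated,
    StationOpened,
    StationConnected,
    Error,
};

struct Discovery {
    State state{State::Initialized};

    bool OpenAccessPoint() {
        if (state == State::None) {
            return false; // mirrors the ResultBadState early-out
        }
        state = State::AccessPointOpened;
        return true;
    }

    bool CloseAccessPoint() {
        if (state == State::None) {
            return false;
        }
        // A created network would be destroyed here before reverting.
        state = State::Initialized;
        return true;
    }
};

int main() {
    Discovery discovery;
    std::cout << discovery.OpenAccessPoint() << '\n';  // 1
    std::cout << discovery.CloseAccessPoint() << '\n'; // 1
}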
+ } + + Ipv4Address node_host = network_info_.ldn.nodes[0].ipv4_address; + std::reverse(std::begin(node_host), std::end(node_host)); // htonl + host_ip = node_host; + SendPacket(Network::LDNPacketType::Connect, node_info, *host_ip); + + InitNodeStateChange(); + + std::this_thread::sleep_for(std::chrono::seconds(1)); + + return ResultSuccess; +} + +Result LANDiscovery::Disconnect() { + if (host_ip) { + SendPacket(Network::LDNPacketType::Disconnect, node_info, *host_ip); + } + + SetState(State::StationOpened); + lan_event(); + + return ResultSuccess; +} + +Result LANDiscovery::Initialize(LanEventFunc lan_event_, bool listening) { + std::scoped_lock lock{packet_mutex}; + if (inited) { + return ResultSuccess; + } + + for (auto& station : stations) { + station.discovery = this; + station.node_info = &network_info.ldn.nodes[station.node_id]; + station.Reset(); + } + + connected_clients.clear(); + lan_event = lan_event_; + + SetState(State::Initialized); + + inited = true; + return ResultSuccess; +} + +Result LANDiscovery::Finalize() { + std::scoped_lock lock{packet_mutex}; + Result rc = ResultSuccess; + + if (inited) { + if (state == State::AccessPointCreated) { + DestroyNetwork(); + } + if (state == State::StationConnected) { + Disconnect(); + } + + ResetStations(); + inited = false; + } + + SetState(State::None); + + return rc; +} + +void LANDiscovery::ResetStations() { + for (auto& station : stations) { + station.Reset(); + } + connected_clients.clear(); +} + +void LANDiscovery::UpdateNodes() { + u8 count = 0; + for (auto& station : stations) { + bool connected = station.GetStatus() == NodeStatus::Connected; + if (connected) { + count++; + } + station.OverrideInfo(); + } + network_info.ldn.node_count = count + 1; + + for (auto local_ip : connected_clients) { + SendPacket(Network::LDNPacketType::SyncNetwork, network_info, local_ip); + } + + OnNetworkInfoChanged(); +} + +void LANDiscovery::OnSyncNetwork(const NetworkInfo& info) { + network_info = info; + if (state == State::StationOpened) { + SetState(State::StationConnected); + } + OnNetworkInfoChanged(); +} + +void LANDiscovery::OnDisconnectFromHost() { + LOG_INFO(Service_LDN, "OnDisconnectFromHost state: {}", static_cast<int>(state)); + host_ip = std::nullopt; + if (state == State::StationConnected) { + SetState(State::StationOpened); + lan_event(); + } +} + +void LANDiscovery::OnNetworkInfoChanged() { + if (IsNodeStateChanged()) { + lan_event(); + } + return; +} + +Network::IPv4Address LANDiscovery::GetLocalIp() const { + Network::IPv4Address local_ip{0xFF, 0xFF, 0xFF, 0xFF}; + if (auto room_member = room_network.GetRoomMember().lock()) { + if (room_member->IsConnected()) { + local_ip = room_member->GetFakeIpAddress(); + } + } + return local_ip; +} + +template <typename Data> +void LANDiscovery::SendPacket(Network::LDNPacketType type, const Data& data, + Ipv4Address remote_ip) { + Network::LDNPacket packet; + packet.type = type; + + packet.broadcast = false; + packet.local_ip = GetLocalIp(); + packet.remote_ip = remote_ip; + + packet.data.resize(sizeof(data)); + std::memcpy(packet.data.data(), &data, sizeof(data)); + SendPacket(packet); +} + +void LANDiscovery::SendPacket(Network::LDNPacketType type, Ipv4Address remote_ip) { + Network::LDNPacket packet; + packet.type = type; + + packet.broadcast = false; + packet.local_ip = GetLocalIp(); + packet.remote_ip = remote_ip; + + SendPacket(packet); +} + +template <typename Data> +void LANDiscovery::SendBroadcast(Network::LDNPacketType type, const Data& data) { + Network::LDNPacket packet; + 
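The SendPacket/SendBroadcast templates above serialize their payload by memcpy into the packet's byte vector, which is only sound for trivially copyable types. A minimal sketch of that idiom with the requirement made explicit via static_assert; Packet is a stand-in for Network::LDNPacket:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <type_traits>
#include <vector>

struct Packet {
    std::uint8_t type{};
    std::vector<std::uint8_t> data;
};

template <typename Data>
Packet MakePacket(std::uint8_t type, const Data& payload) {
    static_assert(std::is_trivially_copyable_v<Data>,
                  "memcpy serialization requires a trivially copyable payload");
    Packet packet;
    packet.type = type;
    packet.data.resize(sizeof(payload));
    std::memcpy(packet.data.data(), &payload, sizeof(payload));
    return packet;
}

int main() {
    struct NodeInfo {
        std::uint32_t node_id;
        std::uint32_t version;
    } info{1, 2};

    const Packet packet = MakePacket(0x10, info);
    std::cout << "payload bytes: " << packet.data.size() << '\n'; // 8
}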
packet.type = type; + + packet.broadcast = true; + packet.local_ip = GetLocalIp(); + + packet.data.resize(sizeof(data)); + std::memcpy(packet.data.data(), &data, sizeof(data)); + SendPacket(packet); +} + +void LANDiscovery::SendBroadcast(Network::LDNPacketType type) { + Network::LDNPacket packet; + packet.type = type; + + packet.broadcast = true; + packet.local_ip = GetLocalIp(); + + SendPacket(packet); +} + +void LANDiscovery::SendPacket(const Network::LDNPacket& packet) { + if (auto room_member = room_network.GetRoomMember().lock()) { + if (room_member->IsConnected()) { + room_member->SendLdnPacket(packet); + } + } +} + +void LANDiscovery::ReceivePacket(const Network::LDNPacket& packet) { + std::scoped_lock lock{packet_mutex}; + switch (packet.type) { + case Network::LDNPacketType::Scan: { + LOG_INFO(Frontend, "Scan packet received!"); + if (state == State::AccessPointCreated) { + // Reply to the sender + SendPacket(Network::LDNPacketType::ScanResp, network_info, packet.local_ip); + } + break; + } + case Network::LDNPacketType::ScanResp: { + LOG_INFO(Frontend, "ScanResp packet received!"); + + NetworkInfo info{}; + std::memcpy(&info, packet.data.data(), sizeof(NetworkInfo)); + scan_results.insert({info.common.bssid, info}); + + break; + } + case Network::LDNPacketType::Connect: { + LOG_INFO(Frontend, "Connect packet received!"); + + NodeInfo info{}; + std::memcpy(&info, packet.data.data(), sizeof(NodeInfo)); + + connected_clients.push_back(packet.local_ip); + + for (LanStation& station : stations) { + if (station.status != NodeStatus::Connected) { + *station.node_info = info; + station.status = NodeStatus::Connected; + break; + } + } + + UpdateNodes(); + + break; + } + case Network::LDNPacketType::Disconnect: { + LOG_INFO(Frontend, "Disconnect packet received!"); + + connected_clients.erase( + std::remove(connected_clients.begin(), connected_clients.end(), packet.local_ip), + connected_clients.end()); + + NodeInfo info{}; + std::memcpy(&info, packet.data.data(), sizeof(NodeInfo)); + + for (LanStation& station : stations) { + if (station.status == NodeStatus::Connected && + station.node_info->mac_address == info.mac_address) { + station.OnClose(); + break; + } + } + + break; + } + case Network::LDNPacketType::DestroyNetwork: { + ResetStations(); + OnDisconnectFromHost(); + break; + } + case Network::LDNPacketType::SyncNetwork: { + if (state == State::StationOpened || state == State::StationConnected) { + LOG_INFO(Frontend, "SyncNetwork packet received!"); + NetworkInfo info{}; + std::memcpy(&info, packet.data.data(), sizeof(NetworkInfo)); + + OnSyncNetwork(info); + } else { + LOG_INFO(Frontend, "SyncNetwork packet received but in wrong State!"); + } + + break; + } + default: { + LOG_INFO(Frontend, "ReceivePacket unhandled type {}", static_cast<int>(packet.type)); + break; + } + } +} + +bool LANDiscovery::IsNodeStateChanged() { + bool changed = false; + const auto& nodes = network_info.ldn.nodes; + for (int i = 0; i < NodeCountMax; i++) { + if (nodes[i].is_connected != node_last_states[i]) { + if (nodes[i].is_connected) { + node_changes[i].state_change |= NodeStateChange::Connect; + } else { + node_changes[i].state_change |= NodeStateChange::Disconnect; + } + node_last_states[i] = nodes[i].is_connected; + changed = true; + } + } + return changed; +} + +bool LANDiscovery::IsFlagSet(ScanFilterFlag flag, ScanFilterFlag search_flag) const { + const auto flag_value = static_cast<u32>(flag); + const auto search_flag_value = static_cast<u32>(search_flag); + return (flag_value & search_flag_value) 
== search_flag_value; +} + +int LANDiscovery::GetStationCount() const { + return static_cast<int>( + std::count_if(stations.begin(), stations.end(), [](const auto& station) { + return station.GetStatus() != NodeStatus::Disconnected; + })); +} + +MacAddress LANDiscovery::GetFakeMac() const { + MacAddress mac{}; + mac.raw[0] = 0x02; + mac.raw[1] = 0x00; + + const auto ip = GetLocalIp(); + memcpy(mac.raw.data() + 2, &ip, sizeof(ip)); + + return mac; +} + +Result LANDiscovery::GetNodeInfo(NodeInfo& node, const UserConfig& userConfig, + u16 localCommunicationVersion) { + const auto network_interface = Network::GetSelectedNetworkInterface(); + + if (!network_interface) { + LOG_ERROR(Service_LDN, "No network interface available"); + return ResultNoIpAddress; + } + + node.mac_address = GetFakeMac(); + node.is_connected = 1; + std::memcpy(node.user_name.data(), userConfig.user_name.data(), UserNameBytesMax + 1); + node.local_communication_version = localCommunicationVersion; + + Ipv4Address current_address = GetLocalIp(); + std::reverse(std::begin(current_address), std::end(current_address)); // ntohl + node.ipv4_address = current_address; + + return ResultSuccess; +} + +} // namespace Service::LDN diff --git a/src/core/hle/service/ldn/lan_discovery.h b/src/core/hle/service/ldn/lan_discovery.h new file mode 100644 index 000000000..3833cd764 --- /dev/null +++ b/src/core/hle/service/ldn/lan_discovery.h @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <array> +#include <cstring> +#include <functional> +#include <memory> +#include <mutex> +#include <optional> +#include <random> +#include <span> +#include <thread> +#include <unordered_map> + +#include "common/logging/log.h" +#include "common/socket_types.h" +#include "core/hle/result.h" +#include "core/hle/service/ldn/ldn_results.h" +#include "core/hle/service/ldn/ldn_types.h" +#include "network/network.h" + +namespace Service::LDN { + +class LANDiscovery; + +class LanStation { +public: + LanStation(s8 node_id_, LANDiscovery* discovery_); + ~LanStation(); + + void OnClose(); + NodeStatus GetStatus() const; + void Reset(); + void OverrideInfo(); + +protected: + friend class LANDiscovery; + NodeInfo* node_info; + NodeStatus status; + s8 node_id; + LANDiscovery* discovery; +}; + +class LANDiscovery { +public: + using LanEventFunc = std::function<void()>; + + LANDiscovery(Network::RoomNetwork& room_network_); + ~LANDiscovery(); + + State GetState() const; + void SetState(State new_state); + + Result GetNetworkInfo(NetworkInfo& out_network) const; + Result GetNetworkInfo(NetworkInfo& out_network, std::vector<NodeLatestUpdate>& out_updates, + std::size_t buffer_count); + + DisconnectReason GetDisconnectReason() const; + Result Scan(std::vector<NetworkInfo>& networks, u16& count, const ScanFilter& filter); + Result SetAdvertiseData(std::span<const u8> data); + + Result OpenAccessPoint(); + Result CloseAccessPoint(); + + Result OpenStation(); + Result CloseStation(); + + Result CreateNetwork(const SecurityConfig& security_config, const UserConfig& user_config, + const NetworkConfig& network_config); + Result DestroyNetwork(); + + Result Connect(const NetworkInfo& network_info_, const UserConfig& user_config, + u16 local_communication_version); + Result Disconnect(); + + Result Initialize(LanEventFunc lan_event_ = empty_func, bool listening = true); + Result Finalize(); + + void ReceivePacket(const Network::LDNPacket& packet); + +protected: + friend class 
LanStation; + + void InitNetworkInfo(); + void InitNodeStateChange(); + + void ResetStations(); + void UpdateNodes(); + + void OnSyncNetwork(const NetworkInfo& info); + void OnDisconnectFromHost(); + void OnNetworkInfoChanged(); + + bool IsNodeStateChanged(); + bool IsFlagSet(ScanFilterFlag flag, ScanFilterFlag search_flag) const; + int GetStationCount() const; + MacAddress GetFakeMac() const; + Result GetNodeInfo(NodeInfo& node, const UserConfig& user_config, + u16 local_communication_version); + + Network::IPv4Address GetLocalIp() const; + template <typename Data> + void SendPacket(Network::LDNPacketType type, const Data& data, Ipv4Address remote_ip); + void SendPacket(Network::LDNPacketType type, Ipv4Address remote_ip); + template <typename Data> + void SendBroadcast(Network::LDNPacketType type, const Data& data); + void SendBroadcast(Network::LDNPacketType type); + void SendPacket(const Network::LDNPacket& packet); + + static const LanEventFunc empty_func; + static constexpr Ssid fake_ssid{"YuzuFakeSsidForLdn"}; + + bool inited{}; + std::mutex packet_mutex; + std::array<LanStation, StationCountMax> stations; + std::array<NodeLatestUpdate, NodeCountMax> node_changes{}; + std::array<u8, NodeCountMax> node_last_states{}; + std::unordered_map<MacAddress, NetworkInfo, MACAddressHash> scan_results{}; + NodeInfo node_info{}; + NetworkInfo network_info{}; + State state{State::None}; + DisconnectReason disconnect_reason{DisconnectReason::None}; + + // TODO (flTobi): Should this be an std::set? + std::vector<Ipv4Address> connected_clients; + std::optional<Ipv4Address> host_ip; + + LanEventFunc lan_event; + + Network::RoomNetwork& room_network; +}; +} // namespace Service::LDN diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp index c11daff54..6df563136 100644 --- a/src/core/hle/service/ldn/ldn.cpp +++ b/src/core/hle/service/ldn/ldn.cpp @@ -4,11 +4,13 @@ #include <memory> #include "core/core.h" +#include "core/hle/service/ldn/lan_discovery.h" #include "core/hle/service/ldn/ldn.h" #include "core/hle/service/ldn/ldn_results.h" #include "core/hle/service/ldn/ldn_types.h" #include "core/internal_network/network.h" #include "core/internal_network/network_interface.h" +#include "network/network.h" // This is defined by synchapi.h and conflicts with ServiceContext::CreateEvent #undef CreateEvent @@ -105,13 +107,13 @@ class IUserLocalCommunicationService final public: explicit IUserLocalCommunicationService(Core::System& system_) : ServiceFramework{system_, "IUserLocalCommunicationService", ServiceThreadType::CreateNew}, - service_context{system, "IUserLocalCommunicationService"}, room_network{ - system_.GetRoomNetwork()} { + service_context{system, "IUserLocalCommunicationService"}, + room_network{system_.GetRoomNetwork()}, lan_discovery{room_network} { // clang-format off static const FunctionInfo functions[] = { {0, &IUserLocalCommunicationService::GetState, "GetState"}, {1, &IUserLocalCommunicationService::GetNetworkInfo, "GetNetworkInfo"}, - {2, nullptr, "GetIpv4Address"}, + {2, &IUserLocalCommunicationService::GetIpv4Address, "GetIpv4Address"}, {3, &IUserLocalCommunicationService::GetDisconnectReason, "GetDisconnectReason"}, {4, &IUserLocalCommunicationService::GetSecurityParameter, "GetSecurityParameter"}, {5, &IUserLocalCommunicationService::GetNetworkConfig, "GetNetworkConfig"}, @@ -119,7 +121,7 @@ public: {101, &IUserLocalCommunicationService::GetNetworkInfoLatestUpdate, "GetNetworkInfoLatestUpdate"}, {102, &IUserLocalCommunicationService::Scan, "Scan"}, {103, 
&IUserLocalCommunicationService::ScanPrivate, "ScanPrivate"}, - {104, nullptr, "SetWirelessControllerRestriction"}, + {104, &IUserLocalCommunicationService::SetWirelessControllerRestriction, "SetWirelessControllerRestriction"}, {200, &IUserLocalCommunicationService::OpenAccessPoint, "OpenAccessPoint"}, {201, &IUserLocalCommunicationService::CloseAccessPoint, "CloseAccessPoint"}, {202, &IUserLocalCommunicationService::CreateNetwork, "CreateNetwork"}, @@ -148,16 +150,30 @@ public: } ~IUserLocalCommunicationService() { + if (is_initialized) { + if (auto room_member = room_network.GetRoomMember().lock()) { + room_member->Unbind(ldn_packet_received); + } + } + service_context.CloseEvent(state_change_event); } + /// Callback to parse and handle a received LDN packet. + void OnLDNPacketReceived(const Network::LDNPacket& packet) { + lan_discovery.ReceivePacket(packet); + } + void OnEventFired() { - state_change_event->GetWritableEvent().Signal(); + state_change_event->Signal(); } void GetState(Kernel::HLERequestContext& ctx) { State state = State::Error; - LOG_WARNING(Service_LDN, "(STUBBED) called, state = {}", state); + + if (is_initialized) { + state = lan_discovery.GetState(); + } IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); @@ -175,7 +191,7 @@ public: } NetworkInfo network_info{}; - const auto rc = ResultSuccess; + const auto rc = lan_discovery.GetNetworkInfo(network_info); if (rc.IsError()) { LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw); IPC::ResponseBuilder rb{ctx, 2}; @@ -183,28 +199,50 @@ public: return; } - LOG_WARNING(Service_LDN, "(STUBBED) called, ssid='{}', nodes={}", - network_info.common.ssid.GetStringValue(), network_info.ldn.node_count); - ctx.WriteBuffer<NetworkInfo>(network_info); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(rc); + rb.Push(ResultSuccess); } - void GetDisconnectReason(Kernel::HLERequestContext& ctx) { - const auto disconnect_reason = DisconnectReason::None; + void GetIpv4Address(Kernel::HLERequestContext& ctx) { + const auto network_interface = Network::GetSelectedNetworkInterface(); + + if (!network_interface) { + LOG_ERROR(Service_LDN, "No network interface available"); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultNoIpAddress); + return; + } - LOG_WARNING(Service_LDN, "(STUBBED) called, disconnect_reason={}", disconnect_reason); + Ipv4Address current_address{Network::TranslateIPv4(network_interface->ip_address)}; + Ipv4Address subnet_mask{Network::TranslateIPv4(network_interface->subnet_mask)}; + + // When we're connected to a room, spoof the hosts IP address + if (auto room_member = room_network.GetRoomMember().lock()) { + if (room_member->IsConnected()) { + current_address = room_member->GetFakeIpAddress(); + } + } + + std::reverse(std::begin(current_address), std::end(current_address)); // ntohl + std::reverse(std::begin(subnet_mask), std::end(subnet_mask)); // ntohl + + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(ResultSuccess); + rb.PushRaw(current_address); + rb.PushRaw(subnet_mask); + } + void GetDisconnectReason(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); - rb.PushEnum(disconnect_reason); + rb.PushEnum(lan_discovery.GetDisconnectReason()); } void GetSecurityParameter(Kernel::HLERequestContext& ctx) { SecurityParameter security_parameter{}; NetworkInfo info{}; - const Result rc = ResultSuccess; + const Result rc = lan_discovery.GetNetworkInfo(info); if (rc.IsError()) { LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw); @@ -217,8 +255,6 @@ public: 
std::memcpy(security_parameter.data.data(), info.ldn.security_parameter.data(), sizeof(SecurityParameter::data)); - LOG_WARNING(Service_LDN, "(STUBBED) called"); - IPC::ResponseBuilder rb{ctx, 10}; rb.Push(rc); rb.PushRaw<SecurityParameter>(security_parameter); @@ -227,7 +263,7 @@ public: void GetNetworkConfig(Kernel::HLERequestContext& ctx) { NetworkConfig config{}; NetworkInfo info{}; - const Result rc = ResultSuccess; + const Result rc = lan_discovery.GetNetworkInfo(info); if (rc.IsError()) { LOG_ERROR(Service_LDN, "NetworkConfig is not valid {}", rc.raw); @@ -241,12 +277,6 @@ public: config.node_count_max = info.ldn.node_count_max; config.local_communication_version = info.ldn.nodes[0].local_communication_version; - LOG_WARNING(Service_LDN, - "(STUBBED) called, intent_id={}/{}, channel={}, node_count_max={}, " - "local_communication_version={}", - config.intent_id.local_communication_id, config.intent_id.scene_id, - config.channel, config.node_count_max, config.local_communication_version); - IPC::ResponseBuilder rb{ctx, 10}; rb.Push(rc); rb.PushRaw<NetworkConfig>(config); @@ -265,17 +295,17 @@ public: const std::size_t node_buffer_count = ctx.GetWriteBufferSize(1) / sizeof(NodeLatestUpdate); if (node_buffer_count == 0 || network_buffer_size != sizeof(NetworkInfo)) { - LOG_ERROR(Service_LDN, "Invalid buffer size {}, {}", network_buffer_size, + LOG_ERROR(Service_LDN, "Invalid buffer, size = {}, count = {}", network_buffer_size, node_buffer_count); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultBadInput); return; } - NetworkInfo info; + NetworkInfo info{}; std::vector<NodeLatestUpdate> latest_update(node_buffer_count); - const auto rc = ResultSuccess; + const auto rc = lan_discovery.GetNetworkInfo(info, latest_update, latest_update.size()); if (rc.IsError()) { LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw); IPC::ResponseBuilder rb{ctx, 2}; @@ -283,9 +313,6 @@ public: return; } - LOG_WARNING(Service_LDN, "(STUBBED) called, ssid='{}', nodes={}", - info.common.ssid.GetStringValue(), info.ldn.node_count); - ctx.WriteBuffer(info, 0); ctx.WriteBuffer(latest_update, 1); @@ -317,92 +344,78 @@ public: u16 count = 0; std::vector<NetworkInfo> network_infos(network_info_size); + Result rc = lan_discovery.Scan(network_infos, count, scan_filter); - LOG_WARNING(Service_LDN, - "(STUBBED) called, channel={}, filter_scan_flag={}, filter_network_type={}", - channel, scan_filter.flag, scan_filter.network_type); + LOG_INFO(Service_LDN, + "called, channel={}, filter_scan_flag={}, filter_network_type={}, is_private={}", + channel, scan_filter.flag, scan_filter.network_type, is_private); ctx.WriteBuffer(network_infos); IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); + rb.Push(rc); rb.Push<u32>(count); } - void OpenAccessPoint(Kernel::HLERequestContext& ctx) { + void SetWirelessControllerRestriction(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_LDN, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); } + void OpenAccessPoint(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_LDN, "called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(lan_discovery.OpenAccessPoint()); + } + void CloseAccessPoint(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); + LOG_INFO(Service_LDN, "called"); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.CloseAccessPoint()); } void CreateNetwork(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - struct Parameters { - SecurityConfig 
security_config; - UserConfig user_config; - INSERT_PADDING_WORDS_NOINIT(1); - NetworkConfig network_config; - }; - static_assert(sizeof(Parameters) == 0x98, "Parameters has incorrect size."); + LOG_INFO(Service_LDN, "called"); - const auto parameters{rp.PopRaw<Parameters>()}; + CreateNetworkImpl(ctx); + } - LOG_WARNING(Service_LDN, - "(STUBBED) called, passphrase_size={}, security_mode={}, " - "local_communication_version={}", - parameters.security_config.passphrase_size, - parameters.security_config.security_mode, - parameters.network_config.local_communication_version); + void CreateNetworkPrivate(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_LDN, "called"); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + CreateNetworkImpl(ctx, true); } - void CreateNetworkPrivate(Kernel::HLERequestContext& ctx) { + void CreateNetworkImpl(Kernel::HLERequestContext& ctx, bool is_private = false) { IPC::RequestParser rp{ctx}; - struct Parameters { - SecurityConfig security_config; - SecurityParameter security_parameter; - UserConfig user_config; - NetworkConfig network_config; - }; - static_assert(sizeof(Parameters) == 0xB8, "Parameters has incorrect size."); - - const auto parameters{rp.PopRaw<Parameters>()}; - LOG_WARNING(Service_LDN, - "(STUBBED) called, passphrase_size={}, security_mode={}, " - "local_communication_version={}", - parameters.security_config.passphrase_size, - parameters.security_config.security_mode, - parameters.network_config.local_communication_version); + const auto security_config{rp.PopRaw<SecurityConfig>()}; + [[maybe_unused]] const auto security_parameter{is_private ? rp.PopRaw<SecurityParameter>() + : SecurityParameter{}}; + const auto user_config{rp.PopRaw<UserConfig>()}; + rp.Pop<u32>(); // Padding + const auto network_config{rp.PopRaw<NetworkConfig>()}; IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.CreateNetwork(security_config, user_config, network_config)); } void DestroyNetwork(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); + LOG_INFO(Service_LDN, "called"); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.DestroyNetwork()); } void SetAdvertiseData(Kernel::HLERequestContext& ctx) { std::vector<u8> read_buffer = ctx.ReadBuffer(); - LOG_WARNING(Service_LDN, "(STUBBED) called, size {}", read_buffer.size()); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.SetAdvertiseData(read_buffer)); } void SetStationAcceptPolicy(Kernel::HLERequestContext& ctx) { @@ -420,17 +433,17 @@ public: } void OpenStation(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); + LOG_INFO(Service_LDN, "called"); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.OpenStation()); } void CloseStation(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); + LOG_INFO(Service_LDN, "called"); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.CloseStation()); } void Connect(Kernel::HLERequestContext& ctx) { @@ -445,16 +458,13 @@ public: const auto parameters{rp.PopRaw<Parameters>()}; - LOG_WARNING(Service_LDN, - "(STUBBED) called, passphrase_size={}, security_mode={}, " - "local_communication_version={}", - parameters.security_config.passphrase_size, - parameters.security_config.security_mode, - parameters.local_communication_version); + LOG_INFO(Service_LDN, + "called, passphrase_size={}, security_mode={}, " + 
"local_communication_version={}", + parameters.security_config.passphrase_size, + parameters.security_config.security_mode, parameters.local_communication_version); const std::vector<u8> read_buffer = ctx.ReadBuffer(); - NetworkInfo network_info{}; - if (read_buffer.size() != sizeof(NetworkInfo)) { LOG_ERROR(Frontend, "NetworkInfo doesn't match read_buffer size!"); IPC::ResponseBuilder rb{ctx, 2}; @@ -462,40 +472,47 @@ public: return; } + NetworkInfo network_info{}; std::memcpy(&network_info, read_buffer.data(), read_buffer.size()); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.Connect(network_info, parameters.user_config, + static_cast<u16>(parameters.local_communication_version))); } void Disconnect(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); + LOG_INFO(Service_LDN, "called"); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.Disconnect()); } - void Initialize(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); + void Initialize(Kernel::HLERequestContext& ctx) { const auto rc = InitializeImpl(ctx); + if (rc.IsError()) { + LOG_ERROR(Service_LDN, "Network isn't initialized, rc={}", rc.raw); + } IPC::ResponseBuilder rb{ctx, 2}; rb.Push(rc); } void Finalize(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); + if (auto room_member = room_network.GetRoomMember().lock()) { + room_member->Unbind(ldn_packet_received); + } is_initialized = false; IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(lan_discovery.Finalize()); } void Initialize2(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_LDN, "(STUBBED) called"); - const auto rc = InitializeImpl(ctx); + if (rc.IsError()) { + LOG_ERROR(Service_LDN, "Network isn't initialized, rc={}", rc.raw); + } IPC::ResponseBuilder rb{ctx, 2}; rb.Push(rc); @@ -508,14 +525,26 @@ public: return ResultAirplaneModeEnabled; } + if (auto room_member = room_network.GetRoomMember().lock()) { + ldn_packet_received = room_member->BindOnLdnPacketReceived( + [this](const Network::LDNPacket& packet) { OnLDNPacketReceived(packet); }); + } else { + LOG_ERROR(Service_LDN, "Couldn't bind callback!"); + return ResultAirplaneModeEnabled; + } + + lan_discovery.Initialize([&]() { OnEventFired(); }); is_initialized = true; - // TODO (flTobi): Change this to ResultSuccess when LDN is fully implemented - return ResultAirplaneModeEnabled; + return ResultSuccess; } KernelHelpers::ServiceContext service_context; Kernel::KEvent* state_change_event; Network::RoomNetwork& room_network; + LANDiscovery lan_discovery; + + // Callback identifier for the OnLDNPacketReceived event. 
+ Network::RoomMember::CallbackHandle<Network::LDNPacket> ldn_packet_received; bool is_initialized{}; }; diff --git a/src/core/hle/service/ldn/ldn_types.h b/src/core/hle/service/ldn/ldn_types.h index 6231e936d..44c2c773b 100644 --- a/src/core/hle/service/ldn/ldn_types.h +++ b/src/core/hle/service/ldn/ldn_types.h @@ -31,6 +31,8 @@ enum class NodeStateChange : u8 { DisconnectAndConnect, }; +DECLARE_ENUM_FLAG_OPERATORS(NodeStateChange) + enum class ScanFilterFlag : u32 { None = 0, LocalCommunicationId = 1 << 0, @@ -100,13 +102,13 @@ enum class AcceptPolicy : u8 { enum class WifiChannel : s16 { Default = 0, - wifi24_1 = 1, - wifi24_6 = 6, - wifi24_11 = 11, - wifi50_36 = 36, - wifi50_40 = 40, - wifi50_44 = 44, - wifi50_48 = 48, + Wifi24_1 = 1, + Wifi24_6 = 6, + Wifi24_11 = 11, + Wifi50_36 = 36, + Wifi50_40 = 40, + Wifi50_44 = 44, + Wifi50_48 = 48, }; enum class LinkLevel : s8 { @@ -116,6 +118,11 @@ enum class LinkLevel : s8 { Excellent, }; +enum class NodeStatus : u8 { + Disconnected, + Connected, +}; + struct NodeLatestUpdate { NodeStateChange state_change; INSERT_PADDING_BYTES(0x7); // Unknown @@ -150,7 +157,7 @@ struct Ssid { Ssid() = default; - explicit Ssid(std::string_view data) { + constexpr explicit Ssid(std::string_view data) { length = static_cast<u8>(std::min(data.size(), SsidLengthMax)); data.copy(raw.data(), length); raw[length] = 0; @@ -159,19 +166,18 @@ struct Ssid { std::string GetStringValue() const { return std::string(raw.data()); } -}; -static_assert(sizeof(Ssid) == 0x22, "Ssid is an invalid size"); -struct Ipv4Address { - union { - u32 raw{}; - std::array<u8, 4> bytes; - }; + bool operator==(const Ssid& b) const { + return (length == b.length) && (std::memcmp(raw.data(), b.raw.data(), length) == 0); + } - std::string GetStringValue() const { - return fmt::format("{}.{}.{}.{}", bytes[3], bytes[2], bytes[1], bytes[0]); + bool operator!=(const Ssid& b) const { + return !operator==(b); } }; +static_assert(sizeof(Ssid) == 0x22, "Ssid is an invalid size"); + +using Ipv4Address = std::array<u8, 4>; static_assert(sizeof(Ipv4Address) == 0x4, "Ipv4Address is an invalid size"); struct MacAddress { @@ -181,6 +187,14 @@ struct MacAddress { }; static_assert(sizeof(MacAddress) == 0x6, "MacAddress is an invalid size"); +struct MACAddressHash { + size_t operator()(const MacAddress& address) const { + u64 value{}; + std::memcpy(&value, address.raw.data(), sizeof(address.raw)); + return value; + } +}; + struct ScanFilter { NetworkId network_id; NetworkType network_type; diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index becd6d1b9..652441bc2 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp @@ -290,7 +290,7 @@ public: const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; const auto start_info{page_table.QueryInfo(start - 1)}; - if (start_info.state != Kernel::KMemoryState::Free) { + if (start_info.GetState() != Kernel::KMemoryState::Free) { return {}; } @@ -300,7 +300,7 @@ public: const auto end_info{page_table.QueryInfo(start + size)}; - if (end_info.state != Kernel::KMemoryState::Free) { + if (end_info.GetState() != Kernel::KMemoryState::Free) { return {}; } diff --git a/src/core/hle/service/mii/mii.cpp b/src/core/hle/service/mii/mii.cpp index efb569993..390514fdc 100644 --- a/src/core/hle/service/mii/mii.cpp +++ b/src/core/hle/service/mii/mii.cpp @@ -43,7 +43,7 @@ public: {20, nullptr, "IsBrokenDatabaseWithClearFlag"}, {21, &IDatabaseService::GetIndex, "GetIndex"}, {22, 
&IDatabaseService::SetInterfaceVersion, "SetInterfaceVersion"}, - {23, nullptr, "Convert"}, + {23, &IDatabaseService::Convert, "Convert"}, {24, nullptr, "ConvertCoreDataToCharInfo"}, {25, nullptr, "ConvertCharInfoToCoreData"}, {26, nullptr, "Append"}, @@ -130,7 +130,7 @@ private: return; } - std::vector<MiiInfo> values; + std::vector<CharInfo> values; for (const auto& element : *result) { values.emplace_back(element.info); } @@ -144,7 +144,7 @@ private: void UpdateLatest(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; - const auto info{rp.PopRaw<MiiInfo>()}; + const auto info{rp.PopRaw<CharInfo>()}; const auto source_flag{rp.PopRaw<SourceFlag>()}; LOG_DEBUG(Service_Mii, "called with source_flag={}", source_flag); @@ -156,9 +156,9 @@ private: return; } - IPC::ResponseBuilder rb{ctx, 2 + sizeof(MiiInfo) / sizeof(u32)}; + IPC::ResponseBuilder rb{ctx, 2 + sizeof(CharInfo) / sizeof(u32)}; rb.Push(ResultSuccess); - rb.PushRaw<MiiInfo>(*result); + rb.PushRaw<CharInfo>(*result); } void BuildRandom(Kernel::HLERequestContext& ctx) { @@ -191,9 +191,9 @@ private: return; } - IPC::ResponseBuilder rb{ctx, 2 + sizeof(MiiInfo) / sizeof(u32)}; + IPC::ResponseBuilder rb{ctx, 2 + sizeof(CharInfo) / sizeof(u32)}; rb.Push(ResultSuccess); - rb.PushRaw<MiiInfo>(manager.BuildRandom(age, gender, race)); + rb.PushRaw<CharInfo>(manager.BuildRandom(age, gender, race)); } void BuildDefault(Kernel::HLERequestContext& ctx) { @@ -210,14 +210,14 @@ private: return; } - IPC::ResponseBuilder rb{ctx, 2 + sizeof(MiiInfo) / sizeof(u32)}; + IPC::ResponseBuilder rb{ctx, 2 + sizeof(CharInfo) / sizeof(u32)}; rb.Push(ResultSuccess); - rb.PushRaw<MiiInfo>(manager.BuildDefault(index)); + rb.PushRaw<CharInfo>(manager.BuildDefault(index)); } void GetIndex(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; - const auto info{rp.PopRaw<MiiInfo>()}; + const auto info{rp.PopRaw<CharInfo>()}; LOG_DEBUG(Service_Mii, "called"); @@ -239,6 +239,18 @@ private: rb.Push(ResultSuccess); } + void Convert(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + + const auto mii_v3{rp.PopRaw<Ver3StoreData>()}; + + LOG_INFO(Service_Mii, "called"); + + IPC::ResponseBuilder rb{ctx, 2 + sizeof(CharInfo) / sizeof(u32)}; + rb.Push(ResultSuccess); + rb.PushRaw<CharInfo>(manager.ConvertV3ToCharInfo(mii_v3)); + } + constexpr bool IsInterfaceVersionSupported(u32 interface_version) const { return current_interface_version >= interface_version; } diff --git a/src/core/hle/service/mii/mii_manager.cpp b/src/core/hle/service/mii/mii_manager.cpp index 544c92a00..3a2fe938f 100644 --- a/src/core/hle/service/mii/mii_manager.cpp +++ b/src/core/hle/service/mii/mii_manager.cpp @@ -42,7 +42,7 @@ std::array<T, DestArraySize> ResizeArray(const std::array<T, SourceArraySize>& i return out; } -MiiInfo ConvertStoreDataToInfo(const MiiStoreData& data) { +CharInfo ConvertStoreDataToInfo(const MiiStoreData& data) { MiiStoreBitFields bf; std::memcpy(&bf, data.data.data.data(), sizeof(MiiStoreBitFields)); @@ -409,8 +409,8 @@ u32 MiiManager::GetCount(SourceFlag source_flag) const { return static_cast<u32>(count); } -ResultVal<MiiInfo> MiiManager::UpdateLatest([[maybe_unused]] const MiiInfo& info, - SourceFlag source_flag) { +ResultVal<CharInfo> MiiManager::UpdateLatest([[maybe_unused]] const CharInfo& info, + SourceFlag source_flag) { if ((source_flag & SourceFlag::Database) == SourceFlag::None) { return ERROR_CANNOT_FIND_ENTRY; } @@ -419,14 +419,242 @@ ResultVal<MiiInfo> MiiManager::UpdateLatest([[maybe_unused]] const MiiInfo& info return 
ERROR_CANNOT_FIND_ENTRY; } -MiiInfo MiiManager::BuildRandom(Age age, Gender gender, Race race) { +CharInfo MiiManager::BuildRandom(Age age, Gender gender, Race race) { return ConvertStoreDataToInfo(BuildRandomStoreData(age, gender, race, user_id)); } -MiiInfo MiiManager::BuildDefault(std::size_t index) { +CharInfo MiiManager::BuildDefault(std::size_t index) { return ConvertStoreDataToInfo(BuildDefaultStoreData(RawData::DefaultMii.at(index), user_id)); } +CharInfo MiiManager::ConvertV3ToCharInfo(const Ver3StoreData& mii_v3) const { + Service::Mii::MiiManager manager; + auto mii = manager.BuildDefault(0); + + if (!ValidateV3Info(mii_v3)) { + return mii; + } + + // TODO: We are ignoring a bunch of data from the mii_v3 + + mii.gender = static_cast<u8>(mii_v3.mii_information.gender); + mii.favorite_color = static_cast<u8>(mii_v3.mii_information.favorite_color); + mii.height = mii_v3.height; + mii.build = mii_v3.build; + + // Copy name until string terminator + mii.name = {}; + for (std::size_t index = 0; index < mii.name.size() - 1; index++) { + mii.name[index] = mii_v3.mii_name[index]; + if (mii.name[index] == 0) { + break; + } + } + + mii.font_region = mii_v3.region_information.character_set; + + mii.faceline_type = mii_v3.appearance_bits1.face_shape; + mii.faceline_color = mii_v3.appearance_bits1.skin_color; + mii.faceline_wrinkle = mii_v3.appearance_bits2.wrinkles; + mii.faceline_make = mii_v3.appearance_bits2.makeup; + + mii.hair_type = mii_v3.hair_style; + mii.hair_color = mii_v3.appearance_bits3.hair_color; + mii.hair_flip = mii_v3.appearance_bits3.flip_hair; + + mii.eye_type = static_cast<u8>(mii_v3.appearance_bits4.eye_type); + mii.eye_color = static_cast<u8>(mii_v3.appearance_bits4.eye_color); + mii.eye_scale = static_cast<u8>(mii_v3.appearance_bits4.eye_scale); + mii.eye_aspect = static_cast<u8>(mii_v3.appearance_bits4.eye_vertical_stretch); + mii.eye_rotate = static_cast<u8>(mii_v3.appearance_bits4.eye_rotation); + mii.eye_x = static_cast<u8>(mii_v3.appearance_bits4.eye_spacing); + mii.eye_y = static_cast<u8>(mii_v3.appearance_bits4.eye_y_position); + + mii.eyebrow_type = static_cast<u8>(mii_v3.appearance_bits5.eyebrow_style); + mii.eyebrow_color = static_cast<u8>(mii_v3.appearance_bits5.eyebrow_color); + mii.eyebrow_scale = static_cast<u8>(mii_v3.appearance_bits5.eyebrow_scale); + mii.eyebrow_aspect = static_cast<u8>(mii_v3.appearance_bits5.eyebrow_yscale); + mii.eyebrow_rotate = static_cast<u8>(mii_v3.appearance_bits5.eyebrow_rotation); + mii.eyebrow_x = static_cast<u8>(mii_v3.appearance_bits5.eyebrow_spacing); + mii.eyebrow_y = static_cast<u8>(mii_v3.appearance_bits5.eyebrow_y_position); + + mii.nose_type = static_cast<u8>(mii_v3.appearance_bits6.nose_type); + mii.nose_scale = static_cast<u8>(mii_v3.appearance_bits6.nose_scale); + mii.nose_y = static_cast<u8>(mii_v3.appearance_bits6.nose_y_position); + + mii.mouth_type = static_cast<u8>(mii_v3.appearance_bits7.mouth_type); + mii.mouth_color = static_cast<u8>(mii_v3.appearance_bits7.mouth_color); + mii.mouth_scale = static_cast<u8>(mii_v3.appearance_bits7.mouth_scale); + mii.mouth_aspect = static_cast<u8>(mii_v3.appearance_bits7.mouth_horizontal_stretch); + mii.mouth_y = static_cast<u8>(mii_v3.appearance_bits8.mouth_y_position); + + mii.mustache_type = static_cast<u8>(mii_v3.appearance_bits8.mustache_type); + mii.mustache_scale = static_cast<u8>(mii_v3.appearance_bits9.mustache_scale); + mii.mustache_y = static_cast<u8>(mii_v3.appearance_bits9.mustache_y_position); + + mii.beard_type = 
static_cast<u8>(mii_v3.appearance_bits9.bear_type); + mii.beard_color = static_cast<u8>(mii_v3.appearance_bits9.facial_hair_color); + + mii.glasses_type = static_cast<u8>(mii_v3.appearance_bits10.glasses_type); + mii.glasses_color = static_cast<u8>(mii_v3.appearance_bits10.glasses_color); + mii.glasses_scale = static_cast<u8>(mii_v3.appearance_bits10.glasses_scale); + mii.glasses_y = static_cast<u8>(mii_v3.appearance_bits10.glasses_y_position); + + mii.mole_type = static_cast<u8>(mii_v3.appearance_bits11.mole_enabled); + mii.mole_scale = static_cast<u8>(mii_v3.appearance_bits11.mole_scale); + mii.mole_x = static_cast<u8>(mii_v3.appearance_bits11.mole_x_position); + mii.mole_y = static_cast<u8>(mii_v3.appearance_bits11.mole_y_position); + + // TODO: Validate mii data + + return mii; +} + +Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const { + Service::Mii::MiiManager manager; + Ver3StoreData mii_v3{}; + + // TODO: We are ignoring a bunch of data from the mii_v3 + + mii_v3.version = 1; + mii_v3.mii_information.gender.Assign(mii.gender); + mii_v3.mii_information.favorite_color.Assign(mii.favorite_color); + mii_v3.height = mii.height; + mii_v3.build = mii.build; + + // Copy name until string terminator + mii_v3.mii_name = {}; + for (std::size_t index = 0; index < mii.name.size() - 1; index++) { + mii_v3.mii_name[index] = mii.name[index]; + if (mii_v3.mii_name[index] == 0) { + break; + } + } + + mii_v3.region_information.character_set.Assign(mii.font_region); + + mii_v3.appearance_bits1.face_shape.Assign(mii.faceline_type); + mii_v3.appearance_bits1.skin_color.Assign(mii.faceline_color); + mii_v3.appearance_bits2.wrinkles.Assign(mii.faceline_wrinkle); + mii_v3.appearance_bits2.makeup.Assign(mii.faceline_make); + + mii_v3.hair_style = mii.hair_type; + mii_v3.appearance_bits3.hair_color.Assign(mii.hair_color); + mii_v3.appearance_bits3.flip_hair.Assign(mii.hair_flip); + + mii_v3.appearance_bits4.eye_type.Assign(mii.eye_type); + mii_v3.appearance_bits4.eye_color.Assign(mii.eye_color); + mii_v3.appearance_bits4.eye_scale.Assign(mii.eye_scale); + mii_v3.appearance_bits4.eye_vertical_stretch.Assign(mii.eye_aspect); + mii_v3.appearance_bits4.eye_rotation.Assign(mii.eye_rotate); + mii_v3.appearance_bits4.eye_spacing.Assign(mii.eye_x); + mii_v3.appearance_bits4.eye_y_position.Assign(mii.eye_y); + + mii_v3.appearance_bits5.eyebrow_style.Assign(mii.eyebrow_type); + mii_v3.appearance_bits5.eyebrow_color.Assign(mii.eyebrow_color); + mii_v3.appearance_bits5.eyebrow_scale.Assign(mii.eyebrow_scale); + mii_v3.appearance_bits5.eyebrow_yscale.Assign(mii.eyebrow_aspect); + mii_v3.appearance_bits5.eyebrow_rotation.Assign(mii.eyebrow_rotate); + mii_v3.appearance_bits5.eyebrow_spacing.Assign(mii.eyebrow_x); + mii_v3.appearance_bits5.eyebrow_y_position.Assign(mii.eyebrow_y); + + mii_v3.appearance_bits6.nose_type.Assign(mii.nose_type); + mii_v3.appearance_bits6.nose_scale.Assign(mii.nose_scale); + mii_v3.appearance_bits6.nose_y_position.Assign(mii.nose_y); + + mii_v3.appearance_bits7.mouth_type.Assign(mii.mouth_type); + mii_v3.appearance_bits7.mouth_color.Assign(mii.mouth_color); + mii_v3.appearance_bits7.mouth_scale.Assign(mii.mouth_scale); + mii_v3.appearance_bits7.mouth_horizontal_stretch.Assign(mii.mouth_aspect); + mii_v3.appearance_bits8.mouth_y_position.Assign(mii.mouth_y); + + mii_v3.appearance_bits8.mustache_type.Assign(mii.mustache_type); + mii_v3.appearance_bits9.mustache_scale.Assign(mii.mustache_scale); + mii_v3.appearance_bits9.mustache_y_position.Assign(mii.mustache_y); + + 
mii_v3.appearance_bits9.bear_type.Assign(mii.beard_type); + mii_v3.appearance_bits9.facial_hair_color.Assign(mii.beard_color); + + mii_v3.appearance_bits10.glasses_type.Assign(mii.glasses_type); + mii_v3.appearance_bits10.glasses_color.Assign(mii.glasses_color); + mii_v3.appearance_bits10.glasses_scale.Assign(mii.glasses_scale); + mii_v3.appearance_bits10.glasses_y_position.Assign(mii.glasses_y); + + mii_v3.appearance_bits11.mole_enabled.Assign(mii.mole_type); + mii_v3.appearance_bits11.mole_scale.Assign(mii.mole_scale); + mii_v3.appearance_bits11.mole_x_position.Assign(mii.mole_x); + mii_v3.appearance_bits11.mole_y_position.Assign(mii.mole_y); + + // TODO: Validate mii_v3 data + + return mii_v3; +} + +bool MiiManager::ValidateV3Info(const Ver3StoreData& mii_v3) const { + bool is_valid = mii_v3.version == 0 || mii_v3.version == 3; + + is_valid = is_valid && (mii_v3.mii_name[0] != 0); + + is_valid = is_valid && (mii_v3.mii_information.birth_month < 13); + is_valid = is_valid && (mii_v3.mii_information.birth_day < 32); + is_valid = is_valid && (mii_v3.mii_information.favorite_color < 12); + is_valid = is_valid && (mii_v3.height < 128); + is_valid = is_valid && (mii_v3.build < 128); + + is_valid = is_valid && (mii_v3.appearance_bits1.face_shape < 12); + is_valid = is_valid && (mii_v3.appearance_bits1.skin_color < 7); + is_valid = is_valid && (mii_v3.appearance_bits2.wrinkles < 12); + is_valid = is_valid && (mii_v3.appearance_bits2.makeup < 12); + + is_valid = is_valid && (mii_v3.hair_style < 132); + is_valid = is_valid && (mii_v3.appearance_bits3.hair_color < 8); + + is_valid = is_valid && (mii_v3.appearance_bits4.eye_type < 60); + is_valid = is_valid && (mii_v3.appearance_bits4.eye_color < 6); + is_valid = is_valid && (mii_v3.appearance_bits4.eye_scale < 8); + is_valid = is_valid && (mii_v3.appearance_bits4.eye_vertical_stretch < 7); + is_valid = is_valid && (mii_v3.appearance_bits4.eye_rotation < 8); + is_valid = is_valid && (mii_v3.appearance_bits4.eye_spacing < 13); + is_valid = is_valid && (mii_v3.appearance_bits4.eye_y_position < 19); + + is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_style < 25); + is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_color < 8); + is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_scale < 9); + is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_yscale < 7); + is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_rotation < 12); + is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_spacing < 12); + is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_y_position < 19); + + is_valid = is_valid && (mii_v3.appearance_bits6.nose_type < 18); + is_valid = is_valid && (mii_v3.appearance_bits6.nose_scale < 9); + is_valid = is_valid && (mii_v3.appearance_bits6.nose_y_position < 19); + + is_valid = is_valid && (mii_v3.appearance_bits7.mouth_type < 36); + is_valid = is_valid && (mii_v3.appearance_bits7.mouth_color < 5); + is_valid = is_valid && (mii_v3.appearance_bits7.mouth_scale < 9); + is_valid = is_valid && (mii_v3.appearance_bits7.mouth_horizontal_stretch < 7); + is_valid = is_valid && (mii_v3.appearance_bits8.mouth_y_position < 19); + + is_valid = is_valid && (mii_v3.appearance_bits8.mustache_type < 6); + is_valid = is_valid && (mii_v3.appearance_bits9.mustache_scale < 7); + is_valid = is_valid && (mii_v3.appearance_bits9.mustache_y_position < 17); + + is_valid = is_valid && (mii_v3.appearance_bits9.bear_type < 6); + is_valid = is_valid && (mii_v3.appearance_bits9.facial_hair_color < 8); + + is_valid = is_valid && 
(mii_v3.appearance_bits10.glasses_type < 9); + is_valid = is_valid && (mii_v3.appearance_bits10.glasses_color < 6); + is_valid = is_valid && (mii_v3.appearance_bits10.glasses_scale < 8); + is_valid = is_valid && (mii_v3.appearance_bits10.glasses_y_position < 21); + + is_valid = is_valid && (mii_v3.appearance_bits11.mole_enabled < 2); + is_valid = is_valid && (mii_v3.appearance_bits11.mole_scale < 9); + is_valid = is_valid && (mii_v3.appearance_bits11.mole_x_position < 17); + is_valid = is_valid && (mii_v3.appearance_bits11.mole_y_position < 31); + + return is_valid; +} + ResultVal<std::vector<MiiInfoElement>> MiiManager::GetDefault(SourceFlag source_flag) { std::vector<MiiInfoElement> result; @@ -441,7 +669,7 @@ ResultVal<std::vector<MiiInfoElement>> MiiManager::GetDefault(SourceFlag source_ return result; } -Result MiiManager::GetIndex([[maybe_unused]] const MiiInfo& info, u32& index) { +Result MiiManager::GetIndex([[maybe_unused]] const CharInfo& info, u32& index) { constexpr u32 INVALID_INDEX{0xFFFFFFFF}; index = INVALID_INDEX; diff --git a/src/core/hle/service/mii/mii_manager.h b/src/core/hle/service/mii/mii_manager.h index 6a286bd96..83ad3d343 100644 --- a/src/core/hle/service/mii/mii_manager.h +++ b/src/core/hle/service/mii/mii_manager.h @@ -19,11 +19,14 @@ public: bool CheckAndResetUpdateCounter(SourceFlag source_flag, u64& current_update_counter); bool IsFullDatabase() const; u32 GetCount(SourceFlag source_flag) const; - ResultVal<MiiInfo> UpdateLatest(const MiiInfo& info, SourceFlag source_flag); - MiiInfo BuildRandom(Age age, Gender gender, Race race); - MiiInfo BuildDefault(std::size_t index); + ResultVal<CharInfo> UpdateLatest(const CharInfo& info, SourceFlag source_flag); + CharInfo BuildRandom(Age age, Gender gender, Race race); + CharInfo BuildDefault(std::size_t index); + CharInfo ConvertV3ToCharInfo(const Ver3StoreData& mii_v3) const; + Ver3StoreData ConvertCharInfoToV3(const CharInfo& mii) const; + bool ValidateV3Info(const Ver3StoreData& mii_v3) const; ResultVal<std::vector<MiiInfoElement>> GetDefault(SourceFlag source_flag); - Result GetIndex(const MiiInfo& info, u32& index); + Result GetIndex(const CharInfo& info, u32& index); private: const Common::UUID user_id{}; diff --git a/src/core/hle/service/mii/types.h b/src/core/hle/service/mii/types.h index 45edbfeae..9e3247397 100644 --- a/src/core/hle/service/mii/types.h +++ b/src/core/hle/service/mii/types.h @@ -86,7 +86,8 @@ enum class SourceFlag : u32 { }; DECLARE_ENUM_FLAG_OPERATORS(SourceFlag); -struct MiiInfo { +// nn::mii::CharInfo +struct CharInfo { Common::UUID uuid; std::array<char16_t, 11> name; u8 font_region; @@ -140,16 +141,16 @@ struct MiiInfo { u8 mole_y; u8 padding; }; -static_assert(sizeof(MiiInfo) == 0x58, "MiiInfo has incorrect size."); -static_assert(std::has_unique_object_representations_v<MiiInfo>, - "All bits of MiiInfo must contribute to its value."); +static_assert(sizeof(CharInfo) == 0x58, "CharInfo has incorrect size."); +static_assert(std::has_unique_object_representations_v<CharInfo>, + "All bits of CharInfo must contribute to its value."); #pragma pack(push, 4) struct MiiInfoElement { - MiiInfoElement(const MiiInfo& info_, Source source_) : info{info_}, source{source_} {} + MiiInfoElement(const CharInfo& info_, Source source_) : info{info_}, source{source_} {} - MiiInfo info{}; + CharInfo info{}; Source source{}; }; static_assert(sizeof(MiiInfoElement) == 0x5c, "MiiInfoElement has incorrect size."); @@ -243,6 +244,131 @@ static_assert(sizeof(MiiStoreBitFields) == 0x1c, "MiiStoreBitFields has 
incorrec static_assert(std::is_trivially_copyable_v<MiiStoreBitFields>, "MiiStoreBitFields is not trivially copyable."); +// This is nn::mii::Ver3StoreData +// Based on citra HLE::Applets::MiiData and PretendoNetwork. +// https://github.com/citra-emu/citra/blob/master/src/core/hle/applets/mii_selector.h#L48 +// https://github.com/PretendoNetwork/mii-js/blob/master/mii.js#L299 +struct Ver3StoreData { + u8 version; + union { + u8 raw; + + BitField<0, 1, u8> allow_copying; + BitField<1, 1, u8> profanity_flag; + BitField<2, 2, u8> region_lock; + BitField<4, 2, u8> character_set; + } region_information; + u16_be mii_id; + u64_be system_id; + u32_be specialness_and_creation_date; + std::array<u8, 0x6> creator_mac; + u16_be padding; + union { + u16 raw; + + BitField<0, 1, u16> gender; + BitField<1, 4, u16> birth_month; + BitField<5, 5, u16> birth_day; + BitField<10, 4, u16> favorite_color; + BitField<14, 1, u16> favorite; + } mii_information; + std::array<char16_t, 0xA> mii_name; + u8 height; + u8 build; + union { + u8 raw; + + BitField<0, 1, u8> disable_sharing; + BitField<1, 4, u8> face_shape; + BitField<5, 3, u8> skin_color; + } appearance_bits1; + union { + u8 raw; + + BitField<0, 4, u8> wrinkles; + BitField<4, 4, u8> makeup; + } appearance_bits2; + u8 hair_style; + union { + u8 raw; + + BitField<0, 3, u8> hair_color; + BitField<3, 1, u8> flip_hair; + } appearance_bits3; + union { + u32 raw; + + BitField<0, 6, u32> eye_type; + BitField<6, 3, u32> eye_color; + BitField<9, 4, u32> eye_scale; + BitField<13, 3, u32> eye_vertical_stretch; + BitField<16, 5, u32> eye_rotation; + BitField<21, 4, u32> eye_spacing; + BitField<25, 5, u32> eye_y_position; + } appearance_bits4; + union { + u32 raw; + + BitField<0, 5, u32> eyebrow_style; + BitField<5, 3, u32> eyebrow_color; + BitField<8, 4, u32> eyebrow_scale; + BitField<12, 3, u32> eyebrow_yscale; + BitField<16, 4, u32> eyebrow_rotation; + BitField<21, 4, u32> eyebrow_spacing; + BitField<25, 5, u32> eyebrow_y_position; + } appearance_bits5; + union { + u16 raw; + + BitField<0, 5, u16> nose_type; + BitField<5, 4, u16> nose_scale; + BitField<9, 5, u16> nose_y_position; + } appearance_bits6; + union { + u16 raw; + + BitField<0, 6, u16> mouth_type; + BitField<6, 3, u16> mouth_color; + BitField<9, 4, u16> mouth_scale; + BitField<13, 3, u16> mouth_horizontal_stretch; + } appearance_bits7; + union { + u8 raw; + + BitField<0, 5, u8> mouth_y_position; + BitField<5, 3, u8> mustache_type; + } appearance_bits8; + u8 allow_copying; + union { + u16 raw; + + BitField<0, 3, u16> bear_type; + BitField<3, 3, u16> facial_hair_color; + BitField<6, 4, u16> mustache_scale; + BitField<10, 5, u16> mustache_y_position; + } appearance_bits9; + union { + u16 raw; + + BitField<0, 4, u16> glasses_type; + BitField<4, 3, u16> glasses_color; + BitField<7, 4, u16> glasses_scale; + BitField<11, 5, u16> glasses_y_position; + } appearance_bits10; + union { + u16 raw; + + BitField<0, 1, u16> mole_enabled; + BitField<1, 4, u16> mole_scale; + BitField<5, 5, u16> mole_x_position; + BitField<10, 5, u16> mole_y_position; + } appearance_bits11; + + std::array<u16_le, 0xA> author_name; + INSERT_PADDING_BYTES(0x4); +}; +static_assert(sizeof(Ver3StoreData) == 0x60, "Ver3StoreData is an invalid size"); + struct MiiStoreData { using Name = std::array<char16_t, 10>; diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp index 13a843a28..046c5f18f 100644 --- a/src/core/hle/service/nfc/nfc.cpp +++ b/src/core/hle/service/nfc/nfc.cpp @@ -106,10 +106,10 @@ public: {1, 
&IUser::FinalizeOld, "FinalizeOld"}, {2, &IUser::GetStateOld, "GetStateOld"}, {3, &IUser::IsNfcEnabledOld, "IsNfcEnabledOld"}, - {400, nullptr, "Initialize"}, - {401, nullptr, "Finalize"}, - {402, nullptr, "GetState"}, - {403, nullptr, "IsNfcEnabled"}, + {400, &IUser::InitializeOld, "Initialize"}, + {401, &IUser::FinalizeOld, "Finalize"}, + {402, &IUser::GetStateOld, "GetState"}, + {403, &IUser::IsNfcEnabledOld, "IsNfcEnabled"}, {404, nullptr, "ListDevices"}, {405, nullptr, "GetDeviceState"}, {406, nullptr, "GetNpadId"}, diff --git a/src/core/hle/service/nfp/amiibo_crypto.cpp b/src/core/hle/service/nfp/amiibo_crypto.cpp new file mode 100644 index 000000000..c32a6816b --- /dev/null +++ b/src/core/hle/service/nfp/amiibo_crypto.cpp @@ -0,0 +1,388 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +// SPDX-FileCopyrightText: Copyright 2017 socram8888/amiitool +// SPDX-License-Identifier: MIT + +#include <array> +#include <mbedtls/aes.h> +#include <mbedtls/hmac_drbg.h> + +#include "common/fs/file.h" +#include "common/fs/path_util.h" +#include "common/logging/log.h" +#include "core/hle/service/mii/mii_manager.h" +#include "core/hle/service/nfp/amiibo_crypto.h" + +namespace Service::NFP::AmiiboCrypto { + +bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) { + const auto& amiibo_data = ntag_file.user_memory; + LOG_DEBUG(Service_NFP, "uuid_lock=0x{0:x}", ntag_file.static_lock); + LOG_DEBUG(Service_NFP, "compability_container=0x{0:x}", ntag_file.compability_container); + LOG_DEBUG(Service_NFP, "write_count={}", static_cast<u16>(amiibo_data.write_counter)); + + LOG_DEBUG(Service_NFP, "character_id=0x{0:x}", amiibo_data.model_info.character_id); + LOG_DEBUG(Service_NFP, "character_variant={}", amiibo_data.model_info.character_variant); + LOG_DEBUG(Service_NFP, "amiibo_type={}", amiibo_data.model_info.amiibo_type); + LOG_DEBUG(Service_NFP, "model_number=0x{0:x}", + static_cast<u16>(amiibo_data.model_info.model_number)); + LOG_DEBUG(Service_NFP, "series={}", amiibo_data.model_info.series); + LOG_DEBUG(Service_NFP, "tag_type=0x{0:x}", amiibo_data.model_info.tag_type); + + LOG_DEBUG(Service_NFP, "tag_dynamic_lock=0x{0:x}", ntag_file.dynamic_lock); + LOG_DEBUG(Service_NFP, "tag_CFG0=0x{0:x}", ntag_file.CFG0); + LOG_DEBUG(Service_NFP, "tag_CFG1=0x{0:x}", ntag_file.CFG1); + + // Validate UUID + constexpr u8 CT = 0x88; // As defined in ISO/IEC 14443-3 + if ((CT ^ ntag_file.uuid.uid[0] ^ ntag_file.uuid.uid[1] ^ ntag_file.uuid.uid[2]) != + ntag_file.uuid.uid[3]) { + return false; + } + if ((ntag_file.uuid.uid[4] ^ ntag_file.uuid.uid[5] ^ ntag_file.uuid.uid[6] ^ + ntag_file.uuid.nintendo_id) != ntag_file.uuid.lock_bytes[0]) { + return false; + } + + // Check against all known constants on an amiibo binary + if (ntag_file.static_lock != 0xE00F) { + return false; + } + if (ntag_file.compability_container != 0xEEFF10F1U) { + return false; + } + if (amiibo_data.constant_value != 0xA5) { + return false; + } + if (amiibo_data.model_info.tag_type != PackedTagType::Type2) { + return false; + } + if ((ntag_file.dynamic_lock & 0xFFFFFF) != 0x0F0001U) { + return false; + } + if (ntag_file.CFG0 != 0x04000000U) { + return false; + } + if (ntag_file.CFG1 != 0x5F) { + return false; + } + return true; +} + +NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) { + NTAG215File encoded_data{}; + + encoded_data.uid = nfc_data.uuid.uid; + encoded_data.nintendo_id = nfc_data.uuid.nintendo_id; + encoded_data.static_lock = 
nfc_data.static_lock; + encoded_data.compability_container = nfc_data.compability_container; + encoded_data.hmac_data = nfc_data.user_memory.hmac_data; + encoded_data.constant_value = nfc_data.user_memory.constant_value; + encoded_data.write_counter = nfc_data.user_memory.write_counter; + encoded_data.settings = nfc_data.user_memory.settings; + encoded_data.owner_mii = nfc_data.user_memory.owner_mii; + encoded_data.title_id = nfc_data.user_memory.title_id; + encoded_data.applicaton_write_counter = nfc_data.user_memory.applicaton_write_counter; + encoded_data.application_area_id = nfc_data.user_memory.application_area_id; + encoded_data.unknown = nfc_data.user_memory.unknown; + encoded_data.unknown2 = nfc_data.user_memory.unknown2; + encoded_data.application_area = nfc_data.user_memory.application_area; + encoded_data.hmac_tag = nfc_data.user_memory.hmac_tag; + encoded_data.lock_bytes = nfc_data.uuid.lock_bytes; + encoded_data.model_info = nfc_data.user_memory.model_info; + encoded_data.keygen_salt = nfc_data.user_memory.keygen_salt; + encoded_data.dynamic_lock = nfc_data.dynamic_lock; + encoded_data.CFG0 = nfc_data.CFG0; + encoded_data.CFG1 = nfc_data.CFG1; + encoded_data.password = nfc_data.password; + + return encoded_data; +} + +EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) { + EncryptedNTAG215File nfc_data{}; + + nfc_data.uuid.uid = encoded_data.uid; + nfc_data.uuid.nintendo_id = encoded_data.nintendo_id; + nfc_data.uuid.lock_bytes = encoded_data.lock_bytes; + nfc_data.static_lock = encoded_data.static_lock; + nfc_data.compability_container = encoded_data.compability_container; + nfc_data.user_memory.hmac_data = encoded_data.hmac_data; + nfc_data.user_memory.constant_value = encoded_data.constant_value; + nfc_data.user_memory.write_counter = encoded_data.write_counter; + nfc_data.user_memory.settings = encoded_data.settings; + nfc_data.user_memory.owner_mii = encoded_data.owner_mii; + nfc_data.user_memory.title_id = encoded_data.title_id; + nfc_data.user_memory.applicaton_write_counter = encoded_data.applicaton_write_counter; + nfc_data.user_memory.application_area_id = encoded_data.application_area_id; + nfc_data.user_memory.unknown = encoded_data.unknown; + nfc_data.user_memory.unknown2 = encoded_data.unknown2; + nfc_data.user_memory.application_area = encoded_data.application_area; + nfc_data.user_memory.hmac_tag = encoded_data.hmac_tag; + nfc_data.user_memory.model_info = encoded_data.model_info; + nfc_data.user_memory.keygen_salt = encoded_data.keygen_salt; + nfc_data.dynamic_lock = encoded_data.dynamic_lock; + nfc_data.CFG0 = encoded_data.CFG0; + nfc_data.CFG1 = encoded_data.CFG1; + nfc_data.password = encoded_data.password; + + return nfc_data; +} + +u32 GetTagPassword(const TagUuid& uuid) { + // Derive the tag password from the UID bytes (per-byte XOR scheme from amiitool) + u32 password = 0xAA ^ (uuid.uid[1] ^ uuid.uid[3]); + password |= static_cast<u32>(0x55 ^ (uuid.uid[2] ^ uuid.uid[4])) << 8; + password |= static_cast<u32>(0xAA ^ (uuid.uid[3] ^ uuid.uid[5])) << 16; + password |= static_cast<u32>(0x55 ^ (uuid.uid[4] ^ uuid.uid[6])) << 24; + return password; +}
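A worked example helps sanity-check the derivation above; the UID bytes are illustrative only, not taken from any real tag:

// Hypothetical UID: uid = {0x04, 0xC1, 0x5E, 0x1A, 0x22, 0x33, 0x44}
// byte 0: 0xAA ^ (uid[1] ^ uid[3]) = 0xAA ^ (0xC1 ^ 0x1A) = 0xAA ^ 0xDB = 0x71
// byte 1: 0x55 ^ (uid[2] ^ uid[4]) = 0x55 ^ (0x5E ^ 0x22) = 0x55 ^ 0x7C = 0x29
// byte 2: 0xAA ^ (uid[3] ^ uid[5]) = 0xAA ^ (0x1A ^ 0x33) = 0xAA ^ 0x29 = 0x83
// byte 3: 0x55 ^ (uid[4] ^ uid[6]) = 0x55 ^ (0x22 ^ 0x44) = 0x55 ^ 0x66 = 0x33
// GetTagPassword combines the bytes as b0 | (b1 << 8) | (b2 << 16) | (b3 << 24),
// so it would return 0x33832971 for this UID.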
+ +HashSeed GetSeed(const NTAG215File& data) { + HashSeed seed{ + .magic = data.write_counter, + .padding = {}, + .uid_1 = data.uid, + .nintendo_id_1 = data.nintendo_id, + .uid_2 = data.uid, + .nintendo_id_2 = data.nintendo_id, + .keygen_salt = data.keygen_salt, + }; + + return seed; +} + +std::vector<u8> GenerateInternalKey(const InternalKey& key, const HashSeed& seed) { + const std::size_t seedPart1Len = sizeof(key.magic_bytes) - key.magic_length; + const std::size_t string_size = key.type_string.size(); + std::vector<u8> output(string_size + seedPart1Len); + + // Copy whole type string + memccpy(output.data(), key.type_string.data(), '\0', string_size); + + // Append (16 - magic_length) bytes from the input seed + memcpy(output.data() + string_size, &seed, seedPart1Len); + + // Append all bytes from magic_bytes + output.insert(output.end(), key.magic_bytes.begin(), + key.magic_bytes.begin() + key.magic_length); + + output.insert(output.end(), seed.uid_1.begin(), seed.uid_1.end()); + output.emplace_back(seed.nintendo_id_1); + output.insert(output.end(), seed.uid_2.begin(), seed.uid_2.end()); + output.emplace_back(seed.nintendo_id_2); + + for (std::size_t i = 0; i < sizeof(seed.keygen_salt); i++) { + output.emplace_back(static_cast<u8>(seed.keygen_salt[i] ^ key.xor_pad[i])); + } + + return output; +} + +void CryptoInit(CryptoCtx& ctx, mbedtls_md_context_t& hmac_ctx, const HmacKey& hmac_key, + const std::vector<u8>& seed) { + // Initialize context + ctx.used = false; + ctx.counter = 0; + ctx.buffer_size = sizeof(ctx.counter) + seed.size(); + memcpy(ctx.buffer.data() + sizeof(u16), seed.data(), seed.size()); + + // Initialize HMAC context + mbedtls_md_init(&hmac_ctx); + mbedtls_md_setup(&hmac_ctx, mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), 1); + mbedtls_md_hmac_starts(&hmac_ctx, hmac_key.data(), hmac_key.size()); +} + +void CryptoStep(CryptoCtx& ctx, mbedtls_md_context_t& hmac_ctx, DrgbOutput& output) { + // If used at least once, reinitialize the HMAC + if (ctx.used) { + mbedtls_md_hmac_reset(&hmac_ctx); + } + + ctx.used = true; + + // Store counter in big endian, and increment it + ctx.buffer[0] = static_cast<u8>(ctx.counter >> 8); + ctx.buffer[1] = static_cast<u8>(ctx.counter >> 0); + ctx.counter++; + + // Run HMAC-SHA256 over the counter-prefixed seed to produce the next output block + mbedtls_md_hmac_update(&hmac_ctx, reinterpret_cast<const unsigned char*>(ctx.buffer.data()), + ctx.buffer_size); + mbedtls_md_hmac_finish(&hmac_ctx, output.data()); +} + +DerivedKeys GenerateKey(const InternalKey& key, const NTAG215File& data) { + const auto seed = GetSeed(data); + + // Generate the internal key material from the seed + const std::vector<u8> internal_key = GenerateInternalKey(key, seed); + + // Initialize context + CryptoCtx ctx{}; + mbedtls_md_context_t hmac_ctx; + CryptoInit(ctx, hmac_ctx, key.hmac_key, internal_key); + + // Generate derived keys + DerivedKeys derived_keys{}; + std::array<DrgbOutput, 2> temp{}; + CryptoStep(ctx, hmac_ctx, temp[0]); + CryptoStep(ctx, hmac_ctx, temp[1]); + memcpy(&derived_keys, temp.data(), sizeof(DerivedKeys)); + + // Cleanup context + mbedtls_md_free(&hmac_ctx); + + return derived_keys; +} + +void Cipher(const DerivedKeys& keys, const NTAG215File& in_data, NTAG215File& out_data) { + mbedtls_aes_context aes; + std::size_t nc_off = 0; + std::array<u8, sizeof(keys.aes_iv)> nonce_counter{}; + std::array<u8, sizeof(keys.aes_iv)> stream_block{}; + + const auto aes_key_size = static_cast<u32>(keys.aes_key.size() * 8); + mbedtls_aes_setkey_enc(&aes, keys.aes_key.data(), aes_key_size); + memcpy(nonce_counter.data(), keys.aes_iv.data(), sizeof(keys.aes_iv)); + + constexpr std::size_t encrypted_data_size = HMAC_TAG_START - SETTINGS_START; + mbedtls_aes_crypt_ctr(&aes, encrypted_data_size, &nc_off, nonce_counter.data(), + stream_block.data(), + reinterpret_cast<const unsigned char*>(&in_data.settings), + reinterpret_cast<unsigned char*>(&out_data.settings)); + + // Copy the rest of the data directly + out_data.uid = in_data.uid; + out_data.nintendo_id = in_data.nintendo_id; + out_data.lock_bytes = in_data.lock_bytes; + 
out_data.static_lock = in_data.static_lock; + out_data.compability_container = in_data.compability_container; + + out_data.constant_value = in_data.constant_value; + out_data.write_counter = in_data.write_counter; + + out_data.model_info = in_data.model_info; + out_data.keygen_salt = in_data.keygen_salt; + out_data.dynamic_lock = in_data.dynamic_lock; + out_data.CFG0 = in_data.CFG0; + out_data.CFG1 = in_data.CFG1; + out_data.password = in_data.password; +} + +bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info) { + const auto yuzu_keys_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::KeysDir); + + const Common::FS::IOFile keys_file{yuzu_keys_dir / "key_retail.bin", + Common::FS::FileAccessMode::Read, + Common::FS::FileType::BinaryFile}; + + if (!keys_file.IsOpen()) { + LOG_ERROR(Service_NFP, "No keys detected"); + return false; + } + + if (keys_file.Read(unfixed_info) != 1) { + LOG_ERROR(Service_NFP, "Failed to read unfixed_info"); + return false; + } + if (keys_file.Read(locked_secret) != 1) { + LOG_ERROR(Service_NFP, "Failed to read locked-secret"); + return false; + } + + return true; +} + +bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data) { + InternalKey locked_secret{}; + InternalKey unfixed_info{}; + + if (!LoadKeys(locked_secret, unfixed_info)) { + return false; + } + + // Generate keys + NTAG215File encoded_data = NfcDataToEncodedData(encrypted_tag_data); + const auto data_keys = GenerateKey(unfixed_info, encoded_data); + const auto tag_keys = GenerateKey(locked_secret, encoded_data); + + // Decrypt + Cipher(data_keys, encoded_data, tag_data); + + // Regenerate tag HMAC. Note: order matters, data HMAC depends on tag HMAC! + constexpr std::size_t input_length = DYNAMIC_LOCK_START - UUID_START; + mbedtls_md_hmac(mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), tag_keys.hmac_key.data(), + sizeof(HmacKey), reinterpret_cast<const unsigned char*>(&tag_data.uid), + input_length, reinterpret_cast<unsigned char*>(&tag_data.hmac_tag)); + + // Regenerate data HMAC + constexpr std::size_t input_length2 = DYNAMIC_LOCK_START - WRITE_COUNTER_START; + mbedtls_md_hmac(mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), data_keys.hmac_key.data(), + sizeof(HmacKey), + reinterpret_cast<const unsigned char*>(&tag_data.write_counter), input_length2, + reinterpret_cast<unsigned char*>(&tag_data.hmac_data)); + + if (tag_data.hmac_data != encrypted_tag_data.user_memory.hmac_data) { + LOG_ERROR(Service_NFP, "hmac_data doesn't match"); + return false; + } + + if (tag_data.hmac_tag != encrypted_tag_data.user_memory.hmac_tag) { + LOG_ERROR(Service_NFP, "hmac_tag doesn't match"); + return false; + } + + return true; +} + +bool EncodeAmiibo(const NTAG215File& tag_data, EncryptedNTAG215File& encrypted_tag_data) { + InternalKey locked_secret{}; + InternalKey unfixed_info{}; + + if (!LoadKeys(locked_secret, unfixed_info)) { + return false; + } + + // Generate keys + const auto data_keys = GenerateKey(unfixed_info, tag_data); + const auto tag_keys = GenerateKey(locked_secret, tag_data); + + NTAG215File encoded_tag_data{}; + + // Generate tag HMAC + constexpr std::size_t input_length = DYNAMIC_LOCK_START - UUID_START; + constexpr std::size_t input_length2 = HMAC_TAG_START - WRITE_COUNTER_START; + mbedtls_md_hmac(mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), tag_keys.hmac_key.data(), + sizeof(HmacKey), reinterpret_cast<const unsigned char*>(&tag_data.uid), + input_length, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_tag)); + + // Init mbedtls HMAC context + 
mbedtls_md_context_t ctx; + mbedtls_md_init(&ctx); + mbedtls_md_setup(&ctx, mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), 1); + + // Generate data HMAC + mbedtls_md_hmac_starts(&ctx, data_keys.hmac_key.data(), sizeof(HmacKey)); + mbedtls_md_hmac_update(&ctx, reinterpret_cast<const unsigned char*>(&tag_data.write_counter), + input_length2); // Data + mbedtls_md_hmac_update(&ctx, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_tag), + sizeof(HashData)); // Tag HMAC + mbedtls_md_hmac_update(&ctx, reinterpret_cast<const unsigned char*>(&tag_data.uid), + input_length); + mbedtls_md_hmac_finish(&ctx, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_data)); + + // HMAC cleanup + mbedtls_md_free(&ctx); + + // Encrypt + Cipher(data_keys, tag_data, encoded_tag_data); + + // Convert back to the hardware (encrypted) layout + encrypted_tag_data = EncodedDataToNfcData(encoded_tag_data); + + return true; +} + +} // namespace Service::NFP::AmiiboCrypto diff --git a/src/core/hle/service/nfp/amiibo_crypto.h b/src/core/hle/service/nfp/amiibo_crypto.h new file mode 100644 index 000000000..0175ced91 --- /dev/null +++ b/src/core/hle/service/nfp/amiibo_crypto.h @@ -0,0 +1,100 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <array> +#include <vector> + +#include "core/hle/service/nfp/nfp_types.h" + +struct mbedtls_md_context_t; + +namespace Service::NFP::AmiiboCrypto { +// Byte locations in Service::NFP::NTAG215File +constexpr std::size_t HMAC_DATA_START = 0x8; +constexpr std::size_t SETTINGS_START = 0x2c; +constexpr std::size_t WRITE_COUNTER_START = 0x29; +constexpr std::size_t HMAC_TAG_START = 0x1B4; +constexpr std::size_t UUID_START = 0x1D4; +constexpr std::size_t DYNAMIC_LOCK_START = 0x208; + +using HmacKey = std::array<u8, 0x10>; +using DrgbOutput = std::array<u8, 0x20>; + +struct HashSeed { + u16_be magic; + std::array<u8, 0xE> padding; + UniqueSerialNumber uid_1; + u8 nintendo_id_1; + UniqueSerialNumber uid_2; + u8 nintendo_id_2; + std::array<u8, 0x20> keygen_salt; +}; +static_assert(sizeof(HashSeed) == 0x40, "HashSeed is an invalid size"); + +struct InternalKey { + HmacKey hmac_key; + std::array<char, 0xE> type_string; + u8 reserved; + u8 magic_length; + std::array<u8, 0x10> magic_bytes; + std::array<u8, 0x20> xor_pad; +}; +static_assert(sizeof(InternalKey) == 0x50, "InternalKey is an invalid size"); +static_assert(std::is_trivially_copyable_v<InternalKey>, "InternalKey must be trivially copyable."); + +struct CryptoCtx { + std::array<char, 480> buffer; + bool used; + std::size_t buffer_size; + s16 counter; +}; + +struct DerivedKeys { + std::array<u8, 0x10> aes_key; + std::array<u8, 0x10> aes_iv; + std::array<u8, 0x10> hmac_key; +}; +static_assert(sizeof(DerivedKeys) == 0x30, "DerivedKeys is an invalid size"); + +/// Validates that the amiibo file is not corrupted +bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file); + +/// Converts from encrypted file format to encoded file format +NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data); + +/// Converts from encoded file format to encrypted file format +EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data); + +/// Returns the password needed to allow write access to protected memory +u32 GetTagPassword(const TagUuid& uuid); + +/// Generates the seed needed for key derivation +HashSeed GetSeed(const NTAG215File& data); + +/// Intermediate step in the generation of the derived keys +std::vector<u8> GenerateInternalKey(const InternalKey& key, const HashSeed& seed); + 
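Taken together, these helpers form the key-derivation pipeline behind DecodeAmiibo and EncodeAmiibo below. A minimal decryption sketch, mirroring the first half of DecodeAmiibo (it assumes unfixed_info was already populated by LoadKeys and that encrypted holds a dumped tag):

//   NTAG215File encoded = NfcDataToEncodedData(encrypted);
//   const DerivedKeys data_keys = GenerateKey(unfixed_info, encoded);
//   NTAG215File plain{};
//   Cipher(data_keys, encoded, plain); // AES-128-CTR with data_keys.aes_key/aes_iv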
+/// Initializes the mbedtls HMAC context +void CryptoInit(CryptoCtx& ctx, mbedtls_md_context_t& hmac_ctx, const HmacKey& hmac_key, + const std::vector<u8>& seed); + +/// Feeds data to the mbedtls context to generate the derived key +void CryptoStep(CryptoCtx& ctx, mbedtls_md_context_t& hmac_ctx, DrgbOutput& output); + +/// Generates the derived keys from amiibo data +DerivedKeys GenerateKey(const InternalKey& key, const NTAG215File& data); + +/// Encrypts or decrypts the amiibo data (AES-CTR is symmetric) +void Cipher(const DerivedKeys& keys, const NTAG215File& in_data, NTAG215File& out_data); + +/// Loads both amiibo keys from key_retail.bin +bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info); + +/// Decodes encrypted amiibo data; returns true if the output is valid +bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data); + +/// Encodes plain amiibo data; returns true if the output is valid +bool EncodeAmiibo(const NTAG215File& tag_data, EncryptedNTAG215File& encrypted_tag_data); + +} // namespace Service::NFP::AmiiboCrypto diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp index 6c5b41dd1..0cb55ca49 100644 --- a/src/core/hle/service/nfp/nfp.cpp +++ b/src/core/hle/service/nfp/nfp.cpp @@ -1,811 +1,43 @@ // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later -#include <array> -#include <atomic> - #include "common/logging/log.h" -#include "core/core.h" -#include "core/hid/emulated_controller.h" -#include "core/hid/hid_core.h" -#include "core/hid/hid_types.h" #include "core/hle/ipc_helpers.h" -#include "core/hle/kernel/k_event.h" -#include "core/hle/service/mii/mii_manager.h" #include "core/hle/service/nfp/nfp.h" #include "core/hle/service/nfp/nfp_user.h" namespace Service::NFP { -namespace ErrCodes { -constexpr Result DeviceNotFound(ErrorModule::NFP, 64); -constexpr Result WrongDeviceState(ErrorModule::NFP, 73); -constexpr Result ApplicationAreaIsNotInitialized(ErrorModule::NFP, 128); -constexpr Result ApplicationAreaExist(ErrorModule::NFP, 168); -} // namespace ErrCodes - -constexpr u32 ApplicationAreaSize = 0xD8; - -IUser::IUser(Module::Interface& nfp_interface_, Core::System& system_) - : ServiceFramework{system_, "NFP::IUser"}, service_context{system_, service_name}, - nfp_interface{nfp_interface_} { - static const FunctionInfo functions[] = { - {0, &IUser::Initialize, "Initialize"}, - {1, &IUser::Finalize, "Finalize"}, - {2, &IUser::ListDevices, "ListDevices"}, - {3, &IUser::StartDetection, "StartDetection"}, - {4, &IUser::StopDetection, "StopDetection"}, - {5, &IUser::Mount, "Mount"}, - {6, &IUser::Unmount, "Unmount"}, - {7, &IUser::OpenApplicationArea, "OpenApplicationArea"}, - {8, &IUser::GetApplicationArea, "GetApplicationArea"}, - {9, &IUser::SetApplicationArea, "SetApplicationArea"}, - {10, nullptr, "Flush"}, - {11, nullptr, "Restore"}, - {12, &IUser::CreateApplicationArea, "CreateApplicationArea"}, - {13, &IUser::GetTagInfo, "GetTagInfo"}, - {14, &IUser::GetRegisterInfo, "GetRegisterInfo"}, - {15, &IUser::GetCommonInfo, "GetCommonInfo"}, - {16, &IUser::GetModelInfo, "GetModelInfo"}, - {17, &IUser::AttachActivateEvent, "AttachActivateEvent"}, - {18, &IUser::AttachDeactivateEvent, "AttachDeactivateEvent"}, - {19, &IUser::GetState, "GetState"}, - {20, &IUser::GetDeviceState, "GetDeviceState"}, - {21, &IUser::GetNpadId, "GetNpadId"}, - {22, &IUser::GetApplicationAreaSize, "GetApplicationAreaSize"}, - {23, &IUser::AttachAvailabilityChangeEvent, "AttachAvailabilityChangeEvent"}, - {24, nullptr, 
"RecreateApplicationArea"}, - }; - RegisterHandlers(functions); - - availability_change_event = service_context.CreateEvent("IUser:AvailabilityChangeEvent"); -} - -void IUser::Initialize(Kernel::HLERequestContext& ctx) { - LOG_INFO(Service_NFC, "called"); - - state = State::Initialized; - - // TODO(german77): Loop through all interfaces - nfp_interface.Initialize(); - - IPC::ResponseBuilder rb{ctx, 2, 0}; - rb.Push(ResultSuccess); -} - -void IUser::Finalize(Kernel::HLERequestContext& ctx) { - LOG_INFO(Service_NFP, "called"); - - state = State::NonInitialized; - - // TODO(german77): Loop through all interfaces - nfp_interface.Finalize(); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); -} - -void IUser::ListDevices(Kernel::HLERequestContext& ctx) { - LOG_INFO(Service_NFP, "called"); - - std::vector<u64> devices; - - // TODO(german77): Loop through all interfaces - devices.push_back(nfp_interface.GetHandle()); - - ctx.WriteBuffer(devices); - - IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); - rb.Push(static_cast<s32>(devices.size())); -} - -void IUser::StartDetection(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - const auto nfp_protocol{rp.Pop<s32>()}; - LOG_INFO(Service_NFP, "called, device_handle={}, nfp_protocol={}", device_handle, nfp_protocol); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - const auto result = nfp_interface.StartDetection(nfp_protocol); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::StopDetection(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - const auto result = nfp_interface.StopDetection(); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::Mount(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - const auto model_type{rp.PopEnum<ModelType>()}; - const auto mount_target{rp.PopEnum<MountTarget>()}; - LOG_INFO(Service_NFP, "called, device_handle={}, model_type={}, mount_target={}", device_handle, - model_type, mount_target); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - const auto result = nfp_interface.Mount(); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::Unmount(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - const auto result = nfp_interface.Unmount(); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", 
device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::OpenApplicationArea(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - const auto access_id{rp.Pop<u32>()}; - LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, access_id={}", device_handle, - access_id); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - const auto result = nfp_interface.OpenApplicationArea(access_id); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::GetApplicationArea(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - std::vector<u8> data{}; - const auto result = nfp_interface.GetApplicationArea(data); - ctx.WriteBuffer(data); - IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(result); - rb.Push(static_cast<u32>(data.size())); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::SetApplicationArea(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - const auto data{ctx.ReadBuffer()}; - LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, data_size={}", device_handle, - data.size()); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - const auto result = nfp_interface.SetApplicationArea(data); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::CreateApplicationArea(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - const auto access_id{rp.Pop<u32>()}; - const auto data{ctx.ReadBuffer()}; - LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, data_size={}, access_id={}", - device_handle, access_id, data.size()); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - const auto result = nfp_interface.CreateApplicationArea(access_id, data); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::GetTagInfo(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - TagInfo tag_info{}; - const auto result = nfp_interface.GetTagInfo(tag_info); - ctx.WriteBuffer(tag_info); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - 
-void IUser::GetRegisterInfo(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - RegisterInfo register_info{}; - const auto result = nfp_interface.GetRegisterInfo(register_info); - ctx.WriteBuffer(register_info); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::GetCommonInfo(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - CommonInfo common_info{}; - const auto result = nfp_interface.GetCommonInfo(common_info); - ctx.WriteBuffer(common_info); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::GetModelInfo(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - ModelInfo model_info{}; - const auto result = nfp_interface.GetModelInfo(model_info); - ctx.WriteBuffer(model_info); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(result); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::AttachActivateEvent(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - IPC::ResponseBuilder rb{ctx, 2, 1}; - rb.Push(ResultSuccess); - rb.PushCopyObjects(nfp_interface.GetActivateEvent()); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::AttachDeactivateEvent(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - IPC::ResponseBuilder rb{ctx, 2, 1}; - rb.Push(ResultSuccess); - rb.PushCopyObjects(nfp_interface.GetDeactivateEvent()); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::GetState(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFC, "called"); - - IPC::ResponseBuilder rb{ctx, 3, 0}; - rb.Push(ResultSuccess); - rb.PushEnum(state); -} - -void IUser::GetDeviceState(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_DEBUG(Service_NFP, "called, 
device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); - rb.PushEnum(nfp_interface.GetCurrentState()); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::GetNpadId(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); - rb.PushEnum(nfp_interface.GetNpadId()); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::GetApplicationAreaSize(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_handle{rp.Pop<u64>()}; - LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); - - // TODO(german77): Loop through all interfaces - if (device_handle == nfp_interface.GetHandle()) { - IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); - rb.Push(ApplicationAreaSize); - return; - } - - LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::DeviceNotFound); -} - -void IUser::AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "(STUBBED) called"); - - IPC::ResponseBuilder rb{ctx, 2, 1}; - rb.Push(ResultSuccess); - rb.PushCopyObjects(availability_change_event->GetReadableEvent()); -} - -Module::Interface::Interface(std::shared_ptr<Module> module_, Core::System& system_, - const char* name) - : ServiceFramework{system_, name}, module{std::move(module_)}, - npad_id{Core::HID::NpadIdType::Player1}, service_context{system_, service_name} { - activate_event = service_context.CreateEvent("IUser:NFPActivateEvent"); - deactivate_event = service_context.CreateEvent("IUser:NFPDeactivateEvent"); -} - -Module::Interface::~Interface() = default; - -void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); - - IPC::ResponseBuilder rb{ctx, 2, 0, 1}; - rb.Push(ResultSuccess); - rb.PushIpcInterface<IUser>(*this, system); -} - -bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { - if (device_state != DeviceState::SearchingForTag) { - LOG_ERROR(Service_NFP, "Game is not looking for amiibos, current state {}", device_state); - return false; - } - - constexpr auto tag_size = sizeof(NTAG215File); - constexpr auto tag_size_without_password = sizeof(NTAG215File) - sizeof(NTAG215Password); - - std::vector<u8> amiibo_buffer = buffer; - - if (amiibo_buffer.size() < tag_size_without_password) { - LOG_ERROR(Service_NFP, "Wrong file size {}", buffer.size()); - return false; - } - - // Ensure it has the correct size - if (amiibo_buffer.size() != tag_size) { - amiibo_buffer.resize(tag_size, 0); - } - - LOG_INFO(Service_NFP, "Amiibo detected"); - std::memcpy(&tag_data, buffer.data(), tag_size); - - if (!IsAmiiboValid()) { - return false; - } - - // This value can't be dumped from a tag. 
Generate it - tag_data.password.PWD = GetTagPassword(tag_data.uuid); - - device_state = DeviceState::TagFound; - activate_event->GetWritableEvent().Signal(); - return true; -} - -void Module::Interface::CloseAmiibo() { - LOG_INFO(Service_NFP, "Remove amiibo"); - device_state = DeviceState::TagRemoved; - is_application_area_initialized = false; - application_area_id = 0; - application_area_data.clear(); - deactivate_event->GetWritableEvent().Signal(); -} - -bool Module::Interface::IsAmiiboValid() const { - const auto& amiibo_data = tag_data.user_memory; - LOG_DEBUG(Service_NFP, "uuid_lock=0x{0:x}", tag_data.lock_bytes); - LOG_DEBUG(Service_NFP, "compability_container=0x{0:x}", tag_data.compability_container); - LOG_DEBUG(Service_NFP, "crypto_init=0x{0:x}", amiibo_data.crypto_init); - LOG_DEBUG(Service_NFP, "write_count={}", amiibo_data.write_count); - - LOG_DEBUG(Service_NFP, "character_id=0x{0:x}", amiibo_data.model_info.character_id); - LOG_DEBUG(Service_NFP, "character_variant={}", amiibo_data.model_info.character_variant); - LOG_DEBUG(Service_NFP, "amiibo_type={}", amiibo_data.model_info.amiibo_type); - LOG_DEBUG(Service_NFP, "model_number=0x{0:x}", amiibo_data.model_info.model_number); - LOG_DEBUG(Service_NFP, "series={}", amiibo_data.model_info.series); - LOG_DEBUG(Service_NFP, "fixed_value=0x{0:x}", amiibo_data.model_info.fixed); - - LOG_DEBUG(Service_NFP, "tag_dynamic_lock=0x{0:x}", tag_data.dynamic_lock); - LOG_DEBUG(Service_NFP, "tag_CFG0=0x{0:x}", tag_data.CFG0); - LOG_DEBUG(Service_NFP, "tag_CFG1=0x{0:x}", tag_data.CFG1); - - // Check against all know constants on an amiibo binary - if (tag_data.lock_bytes != 0xE00F) { - return false; - } - if (tag_data.compability_container != 0xEEFF10F1U) { - return false; - } - if ((amiibo_data.crypto_init & 0xFF) != 0xA5) { - return false; - } - if (amiibo_data.model_info.fixed != 0x02) { - return false; - } - if ((tag_data.dynamic_lock & 0xFFFFFF) != 0x0F0001) { - return false; - } - if (tag_data.CFG0 != 0x04000000U) { - return false; - } - if (tag_data.CFG1 != 0x5F) { - return false; - } - return true; -} - -Kernel::KReadableEvent& Module::Interface::GetActivateEvent() const { - return activate_event->GetReadableEvent(); -} - -Kernel::KReadableEvent& Module::Interface::GetDeactivateEvent() const { - return deactivate_event->GetReadableEvent(); -} - -void Module::Interface::Initialize() { - device_state = DeviceState::Initialized; -} - -void Module::Interface::Finalize() { - device_state = DeviceState::Unaviable; - is_application_area_initialized = false; - application_area_id = 0; - application_area_data.clear(); -} - -Result Module::Interface::StartDetection(s32 protocol_) { - auto npad_device = system.HIDCore().GetEmulatedController(npad_id); - - // TODO(german77): Add callback for when nfc data is available - - if (device_state == DeviceState::Initialized || device_state == DeviceState::TagRemoved) { - npad_device->SetPollingMode(Common::Input::PollingMode::NFC); - device_state = DeviceState::SearchingForTag; - protocol = protocol_; - return ResultSuccess; - } - - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; -} - -Result Module::Interface::StopDetection() { - auto npad_device = system.HIDCore().GetEmulatedController(npad_id); - npad_device->SetPollingMode(Common::Input::PollingMode::Active); - - if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) { - CloseAmiibo(); - return ResultSuccess; - } - if (device_state == DeviceState::SearchingForTag || 
device_state == DeviceState::TagRemoved) { - device_state = DeviceState::Initialized; - return ResultSuccess; - } - - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; -} - -Result Module::Interface::Mount() { - if (device_state == DeviceState::TagFound) { - device_state = DeviceState::TagMounted; - return ResultSuccess; - } - - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; -} - -Result Module::Interface::Unmount() { - if (device_state == DeviceState::TagMounted) { - is_application_area_initialized = false; - application_area_id = 0; - application_area_data.clear(); - device_state = DeviceState::TagFound; - return ResultSuccess; - } - - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; -} - -Result Module::Interface::GetTagInfo(TagInfo& tag_info) const { - if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) { - tag_info = { - .uuid = tag_data.uuid, - .uuid_length = static_cast<u8>(tag_data.uuid.size()), - .protocol = protocol, - .tag_type = static_cast<u32>(tag_data.user_memory.model_info.amiibo_type), +class IUserManager final : public ServiceFramework<IUserManager> { +public: + explicit IUserManager(Core::System& system_) : ServiceFramework{system_, "nfp:user"} { + // clang-format off + static const FunctionInfo functions[] = { + {0, &IUserManager::CreateUserInterface, "CreateUserInterface"}, }; - return ResultSuccess; - } - - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; -} + // clang-format on -Result Module::Interface::GetCommonInfo(CommonInfo& common_info) const { - if (device_state != DeviceState::TagMounted) { - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; + RegisterHandlers(functions); } - // Read this data from the amiibo save file - common_info = { - .last_write_year = 2022, - .last_write_month = 2, - .last_write_day = 7, - .write_counter = tag_data.user_memory.write_count, - .version = 1, - .application_area_size = ApplicationAreaSize, - }; - return ResultSuccess; -} - -Result Module::Interface::GetModelInfo(ModelInfo& model_info) const { - if (device_state != DeviceState::TagMounted) { - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; - } +private: + void CreateUserInterface(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_NFP, "called"); - model_info = tag_data.user_memory.model_info; - return ResultSuccess; -} + if (user_interface == nullptr) { + user_interface = std::make_shared<IUser>(system); + } -Result Module::Interface::GetRegisterInfo(RegisterInfo& register_info) const { - if (device_state != DeviceState::TagMounted) { - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; - } - - Service::Mii::MiiManager manager; - - // Read this data from the amiibo save file - register_info = { - .mii_char_info = manager.BuildDefault(0), - .first_write_year = 2022, - .first_write_month = 2, - .first_write_day = 7, - .amiibo_name = {'Y', 'u', 'z', 'u', 'A', 'm', 'i', 'i', 'b', 'o', 0}, - .unknown = {}, - }; - return ResultSuccess; -} - -Result Module::Interface::OpenApplicationArea(u32 access_id) { - if (device_state != DeviceState::TagMounted) { - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; - } - if 
(AmiiboApplicationDataExist(access_id)) { - application_area_data = LoadAmiiboApplicationData(access_id); - application_area_id = access_id; - is_application_area_initialized = true; - } - if (!is_application_area_initialized) { - LOG_WARNING(Service_NFP, "Application area is not initialized"); - return ErrCodes::ApplicationAreaIsNotInitialized; - } - return ResultSuccess; -} - -Result Module::Interface::GetApplicationArea(std::vector<u8>& data) const { - if (device_state != DeviceState::TagMounted) { - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; - } - if (!is_application_area_initialized) { - LOG_ERROR(Service_NFP, "Application area is not initialized"); - return ErrCodes::ApplicationAreaIsNotInitialized; - } - - data = application_area_data; - - return ResultSuccess; -} - -Result Module::Interface::SetApplicationArea(const std::vector<u8>& data) { - if (device_state != DeviceState::TagMounted) { - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; - } - if (!is_application_area_initialized) { - LOG_ERROR(Service_NFP, "Application area is not initialized"); - return ErrCodes::ApplicationAreaIsNotInitialized; + IPC::ResponseBuilder rb{ctx, 2, 0, 1}; + rb.Push(ResultSuccess); + rb.PushIpcInterface<IUser>(user_interface); } - application_area_data = data; - SaveAmiiboApplicationData(application_area_id, application_area_data); - return ResultSuccess; -} -Result Module::Interface::CreateApplicationArea(u32 access_id, const std::vector<u8>& data) { - if (device_state != DeviceState::TagMounted) { - LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); - return ErrCodes::WrongDeviceState; - } - if (AmiiboApplicationDataExist(access_id)) { - LOG_ERROR(Service_NFP, "Application area already exist"); - return ErrCodes::ApplicationAreaExist; - } - application_area_data = data; - application_area_id = access_id; - SaveAmiiboApplicationData(application_area_id, application_area_data); - return ResultSuccess; -} - -bool Module::Interface::AmiiboApplicationDataExist(u32 access_id) const { - // TODO(german77): Check if file exist - return false; -} - -std::vector<u8> Module::Interface::LoadAmiiboApplicationData(u32 access_id) const { - // TODO(german77): Read file - std::vector<u8> data(ApplicationAreaSize); - return data; -} - -void Module::Interface::SaveAmiiboApplicationData(u32 access_id, - const std::vector<u8>& data) const { - // TODO(german77): Save file -} - -u64 Module::Interface::GetHandle() const { - // Generate a handle based of the npad id - return static_cast<u64>(npad_id); -} - -DeviceState Module::Interface::GetCurrentState() const { - return device_state; -} - -Core::HID::NpadIdType Module::Interface::GetNpadId() const { - return npad_id; -} - -u32 Module::Interface::GetTagPassword(const TagUuid& uuid) const { - // Verifiy that the generated password is correct - u32 password = 0xAA ^ (uuid[1] ^ uuid[3]); - password &= (0x55 ^ (uuid[2] ^ uuid[4])) << 8; - password &= (0xAA ^ (uuid[3] ^ uuid[5])) << 16; - password &= (0x55 ^ (uuid[4] ^ uuid[6])) << 24; - return password; -} + std::shared_ptr<IUser> user_interface; +}; void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) { - auto module = std::make_shared<Module>(); - std::make_shared<NFP_User>(module, system)->InstallAsService(service_manager); + std::make_shared<IUserManager>(system)->InstallAsService(service_manager); } } // namespace Service::NFP diff --git a/src/core/hle/service/nfp/nfp.h 
b/src/core/hle/service/nfp/nfp.h index 0fc808781..a25c362b8 100644 --- a/src/core/hle/service/nfp/nfp.h +++ b/src/core/hle/service/nfp/nfp.h @@ -3,268 +3,10 @@ #pragma once -#include <array> -#include <vector> - -#include "common/common_funcs.h" -#include "core/hle/service/kernel_helpers.h" -#include "core/hle/service/mii/types.h" #include "core/hle/service/service.h" -namespace Kernel { -class KEvent; -class KReadableEvent; -} // namespace Kernel - -namespace Core::HID { -enum class NpadIdType : u32; -} // namespace Core::HID - namespace Service::NFP { -enum class ServiceType : u32 { - User, - Debug, - System, -}; - -enum class State : u32 { - NonInitialized, - Initialized, -}; - -enum class DeviceState : u32 { - Initialized, - SearchingForTag, - TagFound, - TagRemoved, - TagMounted, - Unaviable, - Finalized, -}; - -enum class ModelType : u32 { - Amiibo, -}; - -enum class MountTarget : u32 { - Rom, - Ram, - All, -}; - -enum class AmiiboType : u8 { - Figure, - Card, - Yarn, -}; - -enum class AmiiboSeries : u8 { - SuperSmashBros, - SuperMario, - ChibiRobo, - YoshiWoollyWorld, - Splatoon, - AnimalCrossing, - EightBitMario, - Skylanders, - Unknown8, - TheLegendOfZelda, - ShovelKnight, - Unknown11, - Kiby, - Pokemon, - MarioSportsSuperstars, - MonsterHunter, - BoxBoy, - Pikmin, - FireEmblem, - Metroid, - Others, - MegaMan, - Diablo -}; - -using TagUuid = std::array<u8, 10>; - -struct TagInfo { - TagUuid uuid; - u8 uuid_length; - INSERT_PADDING_BYTES(0x15); - s32 protocol; - u32 tag_type; - INSERT_PADDING_BYTES(0x30); -}; -static_assert(sizeof(TagInfo) == 0x58, "TagInfo is an invalid size"); - -struct CommonInfo { - u16 last_write_year; - u8 last_write_month; - u8 last_write_day; - u16 write_counter; - u16 version; - u32 application_area_size; - INSERT_PADDING_BYTES(0x34); -}; -static_assert(sizeof(CommonInfo) == 0x40, "CommonInfo is an invalid size"); - -struct ModelInfo { - u16 character_id; - u8 character_variant; - AmiiboType amiibo_type; - u16 model_number; - AmiiboSeries series; - u8 fixed; // Must be 02 - INSERT_PADDING_BYTES(0x4); // Unknown - INSERT_PADDING_BYTES(0x20); // Probably a SHA256-(HMAC?) hash - INSERT_PADDING_BYTES(0x14); // SHA256-HMAC -}; -static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size"); - -struct RegisterInfo { - Service::Mii::MiiInfo mii_char_info; - u16 first_write_year; - u8 first_write_month; - u8 first_write_day; - std::array<u8, 11> amiibo_name; - u8 unknown; - INSERT_PADDING_BYTES(0x98); -}; -static_assert(sizeof(RegisterInfo) == 0x100, "RegisterInfo is an invalid size"); - -class Module final { -public: - class Interface : public ServiceFramework<Interface> { - public: - explicit Interface(std::shared_ptr<Module> module_, Core::System& system_, - const char* name); - ~Interface() override; - - struct EncryptedAmiiboFile { - u16 crypto_init; // Must be A5 XX - u16 write_count; // Number of times the amiibo has been written? - INSERT_PADDING_BYTES(0x20); // System crypts - INSERT_PADDING_BYTES(0x20); // SHA256-(HMAC?) 
hash - ModelInfo model_info; // This struct is bigger than documentation - INSERT_PADDING_BYTES(0xC); // SHA256-HMAC - INSERT_PADDING_BYTES(0x114); // section 1 encrypted buffer - INSERT_PADDING_BYTES(0x54); // section 2 encrypted buffer - }; - static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "AmiiboFile is an invalid size"); - - struct NTAG215Password { - u32 PWD; // Password to allow write access - u16 PACK; // Password acknowledge reply - u16 RFUI; // Reserved for future use - }; - static_assert(sizeof(NTAG215Password) == 0x8, "NTAG215Password is an invalid size"); - - struct NTAG215File { - TagUuid uuid; // Unique serial number - u16 lock_bytes; // Set defined pages as read only - u32 compability_container; // Defines available memory - EncryptedAmiiboFile user_memory; // Writable data - u32 dynamic_lock; // Dynamic lock - u32 CFG0; // Defines memory protected by password - u32 CFG1; // Defines number of verification attempts - NTAG215Password password; // Password data - }; - static_assert(sizeof(NTAG215File) == 0x21C, "NTAG215File is an invalid size"); - - void CreateUserInterface(Kernel::HLERequestContext& ctx); - bool LoadAmiibo(const std::vector<u8>& buffer); - void CloseAmiibo(); - - void Initialize(); - void Finalize(); - - Result StartDetection(s32 protocol_); - Result StopDetection(); - Result Mount(); - Result Unmount(); - - Result GetTagInfo(TagInfo& tag_info) const; - Result GetCommonInfo(CommonInfo& common_info) const; - Result GetModelInfo(ModelInfo& model_info) const; - Result GetRegisterInfo(RegisterInfo& register_info) const; - - Result OpenApplicationArea(u32 access_id); - Result GetApplicationArea(std::vector<u8>& data) const; - Result SetApplicationArea(const std::vector<u8>& data); - Result CreateApplicationArea(u32 access_id, const std::vector<u8>& data); - - u64 GetHandle() const; - DeviceState GetCurrentState() const; - Core::HID::NpadIdType GetNpadId() const; - - Kernel::KReadableEvent& GetActivateEvent() const; - Kernel::KReadableEvent& GetDeactivateEvent() const; - - protected: - std::shared_ptr<Module> module; - - private: - /// Validates that the amiibo file is not corrupted - bool IsAmiiboValid() const; - - bool AmiiboApplicationDataExist(u32 access_id) const; - std::vector<u8> LoadAmiiboApplicationData(u32 access_id) const; - void SaveAmiiboApplicationData(u32 access_id, const std::vector<u8>& data) const; - - /// return password needed to allow write access to protected memory - u32 GetTagPassword(const TagUuid& uuid) const; - - const Core::HID::NpadIdType npad_id; - - DeviceState device_state{DeviceState::Unaviable}; - KernelHelpers::ServiceContext service_context; - Kernel::KEvent* activate_event; - Kernel::KEvent* deactivate_event; - NTAG215File tag_data{}; - s32 protocol; - bool is_application_area_initialized{}; - u32 application_area_id; - std::vector<u8> application_area_data; - }; -}; - -class IUser final : public ServiceFramework<IUser> { -public: - explicit IUser(Module::Interface& nfp_interface_, Core::System& system_); - -private: - void Initialize(Kernel::HLERequestContext& ctx); - void Finalize(Kernel::HLERequestContext& ctx); - void ListDevices(Kernel::HLERequestContext& ctx); - void StartDetection(Kernel::HLERequestContext& ctx); - void StopDetection(Kernel::HLERequestContext& ctx); - void Mount(Kernel::HLERequestContext& ctx); - void Unmount(Kernel::HLERequestContext& ctx); - void OpenApplicationArea(Kernel::HLERequestContext& ctx); - void GetApplicationArea(Kernel::HLERequestContext& ctx); - void 
SetApplicationArea(Kernel::HLERequestContext& ctx); - void CreateApplicationArea(Kernel::HLERequestContext& ctx); - void GetTagInfo(Kernel::HLERequestContext& ctx); - void GetRegisterInfo(Kernel::HLERequestContext& ctx); - void GetCommonInfo(Kernel::HLERequestContext& ctx); - void GetModelInfo(Kernel::HLERequestContext& ctx); - void AttachActivateEvent(Kernel::HLERequestContext& ctx); - void AttachDeactivateEvent(Kernel::HLERequestContext& ctx); - void GetState(Kernel::HLERequestContext& ctx); - void GetDeviceState(Kernel::HLERequestContext& ctx); - void GetNpadId(Kernel::HLERequestContext& ctx); - void GetApplicationAreaSize(Kernel::HLERequestContext& ctx); - void AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx); - - KernelHelpers::ServiceContext service_context; - - // TODO(german77): We should have a vector of interfaces - Module::Interface& nfp_interface; - - State state{State::NonInitialized}; - Kernel::KEvent* availability_change_event; -}; - void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system); } // namespace Service::NFP diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp new file mode 100644 index 000000000..76f8a267a --- /dev/null +++ b/src/core/hle/service/nfp/nfp_device.cpp @@ -0,0 +1,681 @@ +// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <array> +#include <atomic> + +#include "common/fs/file.h" +#include "common/fs/path_util.h" +#include "common/input.h" +#include "common/logging/log.h" +#include "common/string_util.h" +#include "common/tiny_mt.h" +#include "core/core.h" +#include "core/hid/emulated_controller.h" +#include "core/hid/hid_core.h" +#include "core/hid/hid_types.h" +#include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/service/mii/mii_manager.h" +#include "core/hle/service/nfp/amiibo_crypto.h" +#include "core/hle/service/nfp/nfp.h" +#include "core/hle/service/nfp/nfp_device.h" +#include "core/hle/service/nfp/nfp_result.h" +#include "core/hle/service/nfp/nfp_user.h" +#include "core/hle/service/time/time_manager.h" +#include "core/hle/service/time/time_zone_content_manager.h" +#include "core/hle/service/time/time_zone_types.h" + +namespace Service::NFP { +NfpDevice::NfpDevice(Core::HID::NpadIdType npad_id_, Core::System& system_, + KernelHelpers::ServiceContext& service_context_, + Kernel::KEvent* availability_change_event_) + : npad_id{npad_id_}, system{system_}, service_context{service_context_}, + availability_change_event{availability_change_event_} { + activate_event = service_context.CreateEvent("IUser:NFPActivateEvent"); + deactivate_event = service_context.CreateEvent("IUser:NFPDeactivateEvent"); + npad_device = system.HIDCore().GetEmulatedController(npad_id); + + Core::HID::ControllerUpdateCallback engine_callback{ + .on_change = [this](Core::HID::ControllerTriggerType type) { NpadUpdate(type); }, + .is_npad_service = false, + }; + is_controller_set = true; + callback_key = npad_device->SetCallback(engine_callback); + + auto& standard_steady_clock{system.GetTimeManager().GetStandardSteadyClockCore()}; + current_posix_time = standard_steady_clock.GetCurrentTimePoint(system).time_point; +} + +NfpDevice::~NfpDevice() { + if (!is_controller_set) { + return; + } + npad_device->DeleteCallback(callback_key); + is_controller_set = false; +}; + +void NfpDevice::NpadUpdate(Core::HID::ControllerTriggerType type) { + if (type == Core::HID::ControllerTriggerType::Connected || 
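// Both arms of this condition funnel controller hot-plug into
// availability_change_event, which is signalled just below: a connect or
// disconnect means the set of NFC-capable controllers may have changed, and
// that event is exactly what the IUser::AttachAvailabilityChangeEvent command
// later in this patch hands back to the game to wait on.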
+ type == Core::HID::ControllerTriggerType::Disconnected) { + availability_change_event->Signal(); + return; + } + + if (type != Core::HID::ControllerTriggerType::Nfc) { + return; + } + + if (!npad_device->IsConnected()) { + return; + } + + const auto nfc_status = npad_device->GetNfc(); + switch (nfc_status.state) { + case Common::Input::NfcState::NewAmiibo: + LoadAmiibo(nfc_status.data); + break; + case Common::Input::NfcState::AmiiboRemoved: + if (device_state != DeviceState::SearchingForTag) { + CloseAmiibo(); + } + break; + default: + break; + } +} + +bool NfpDevice::LoadAmiibo(std::span<const u8> data) { + if (device_state != DeviceState::SearchingForTag) { + LOG_ERROR(Service_NFP, "Game is not looking for amiibos, current state {}", device_state); + return false; + } + + if (data.size() != sizeof(EncryptedNTAG215File)) { + LOG_ERROR(Service_NFP, "Not an amiibo, size={}", data.size()); + return false; + } + + memcpy(&encrypted_tag_data, data.data(), sizeof(EncryptedNTAG215File)); + + device_state = DeviceState::TagFound; + deactivate_event->GetReadableEvent().Clear(); + activate_event->Signal(); + return true; +} + +void NfpDevice::CloseAmiibo() { + LOG_INFO(Service_NFP, "Remove amiibo"); + + if (device_state == DeviceState::TagMounted) { + Unmount(); + } + + device_state = DeviceState::TagRemoved; + encrypted_tag_data = {}; + tag_data = {}; + activate_event->GetReadableEvent().Clear(); + deactivate_event->Signal(); +} + +Kernel::KReadableEvent& NfpDevice::GetActivateEvent() const { + return activate_event->GetReadableEvent(); +} + +Kernel::KReadableEvent& NfpDevice::GetDeactivateEvent() const { + return deactivate_event->GetReadableEvent(); +} + +void NfpDevice::Initialize() { + device_state = npad_device->HasNfc() ? DeviceState::Initialized : DeviceState::Unavailable; + encrypted_tag_data = {}; + tag_data = {}; +} + +void NfpDevice::Finalize() { + if (device_state == DeviceState::TagMounted) { + Unmount(); + } + if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) { + StopDetection(); + } + device_state = DeviceState::Unavailable; +} + +Result NfpDevice::StartDetection(s32 protocol_) { + if (device_state != DeviceState::Initialized && device_state != DeviceState::TagRemoved) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return WrongDeviceState; + } + + if (!npad_device->SetPollingMode(Common::Input::PollingMode::NFC)) { + LOG_ERROR(Service_NFP, "Nfc not supported"); + return NfcDisabled; + } + + device_state = DeviceState::SearchingForTag; + protocol = protocol_; + return ResultSuccess; +} + +Result NfpDevice::StopDetection() { + npad_device->SetPollingMode(Common::Input::PollingMode::Active); + + if (device_state == DeviceState::Initialized) { + return ResultSuccess; + } + + if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) { + CloseAmiibo(); + return ResultSuccess; + } + if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) { + device_state = DeviceState::Initialized; + return ResultSuccess; + } + + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return WrongDeviceState; +} + +Result NfpDevice::Flush() { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, 
"Amiibo is read only", device_state); + return WrongDeviceState; + } + + auto& settings = tag_data.settings; + + const auto& current_date = GetAmiiboDate(current_posix_time); + if (settings.write_date.raw_date != current_date.raw_date) { + settings.write_date = current_date; + settings.crc_counter++; + // TODO: Find how to calculate the crc check + // settings.crc = CalculateCRC(settings); + } + + tag_data.write_counter++; + + if (!AmiiboCrypto::EncodeAmiibo(tag_data, encrypted_tag_data)) { + LOG_ERROR(Service_NFP, "Failed to encode data"); + return WriteAmiiboFailed; + } + + std::vector<u8> data(sizeof(encrypted_tag_data)); + memcpy(data.data(), &encrypted_tag_data, sizeof(encrypted_tag_data)); + + if (!npad_device->WriteNfc(data)) { + LOG_ERROR(Service_NFP, "Error writing to file"); + return WriteAmiiboFailed; + } + + is_data_moddified = false; + + return ResultSuccess; +} + +Result NfpDevice::Mount(MountTarget mount_target_) { + if (device_state != DeviceState::TagFound) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return WrongDeviceState; + } + + if (!AmiiboCrypto::IsAmiiboValid(encrypted_tag_data)) { + LOG_ERROR(Service_NFP, "Not an amiibo"); + return NotAnAmiibo; + } + + if (!AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) { + LOG_ERROR(Service_NFP, "Can't decode amiibo {}", device_state); + return CorruptedData; + } + + device_state = DeviceState::TagMounted; + mount_target = mount_target_; + return ResultSuccess; +} + +Result NfpDevice::Unmount() { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + // Save data before unloading the amiibo + if (is_data_moddified) { + Flush(); + } + + device_state = DeviceState::TagFound; + mount_target = MountTarget::None; + is_app_area_open = false; + + return ResultSuccess; +} + +Result NfpDevice::GetTagInfo(TagInfo& tag_info) const { + if (device_state != DeviceState::TagFound && device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + tag_info = { + .uuid = encrypted_tag_data.uuid.uid, + .uuid_length = static_cast<u8>(encrypted_tag_data.uuid.uid.size()), + .protocol = TagProtocol::TypeA, + .tag_type = TagType::Type2, + }; + + return ResultSuccess; +} + +Result NfpDevice::GetCommonInfo(CommonInfo& common_info) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + const auto& settings = tag_data.settings; + + // TODO: Validate this data + common_info = { + .last_write_date = settings.write_date.GetWriteDate(), + .write_counter = tag_data.write_counter, + .version = 0, + .application_area_size = sizeof(ApplicationArea), + }; + return ResultSuccess; +} + +Result NfpDevice::GetModelInfo(ModelInfo& model_info) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + const 
auto& model_info_data = encrypted_tag_data.user_memory.model_info; + model_info = { + .character_id = model_info_data.character_id, + .character_variant = model_info_data.character_variant, + .amiibo_type = model_info_data.amiibo_type, + .model_number = model_info_data.model_number, + .series = model_info_data.series, + }; + return ResultSuccess; +} + +Result NfpDevice::GetRegisterInfo(RegisterInfo& register_info) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + if (tag_data.settings.settings.amiibo_initialized == 0) { + return RegistrationIsNotInitialized; + } + + Service::Mii::MiiManager manager; + const auto& settings = tag_data.settings; + + // TODO: Validate this data + register_info = { + .mii_char_info = manager.ConvertV3ToCharInfo(tag_data.owner_mii), + .creation_date = settings.init_date.GetWriteDate(), + .amiibo_name = GetAmiiboName(settings), + .font_region = {}, + }; + + return ResultSuccess; +} + +Result NfpDevice::SetNicknameAndOwner(const AmiiboName& amiibo_name) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + Service::Mii::MiiManager manager; + auto& settings = tag_data.settings; + + settings.init_date = GetAmiiboDate(current_posix_time); + settings.write_date = GetAmiiboDate(current_posix_time); + settings.crc_counter++; + // TODO: Find how to calculate the crc check + // settings.crc = CalculateCRC(settings); + + SetAmiiboName(settings, amiibo_name); + tag_data.owner_mii = manager.ConvertCharInfoToV3(manager.BuildDefault(0)); + settings.settings.amiibo_initialized.Assign(1); + + return Flush(); +} + +Result NfpDevice::RestoreAmiibo() { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + // TODO: Load amiibo from backup on system + LOG_ERROR(Service_NFP, "Not Implemented"); + return ResultSuccess; +} + +Result NfpDevice::DeleteAllData() { + const auto result = DeleteApplicationArea(); + if (result.IsError()) { + return result; + } + + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + Common::TinyMT rng{}; + rng.GenerateRandomBytes(&tag_data.owner_mii, sizeof(tag_data.owner_mii)); + tag_data.settings.settings.amiibo_initialized.Assign(0); + + return Flush(); +} + +Result NfpDevice::OpenApplicationArea(u32 access_id) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == 
DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + if (tag_data.settings.settings.appdata_initialized.Value() == 0) { + LOG_WARNING(Service_NFP, "Application area is not initialized"); + return ApplicationAreaIsNotInitialized; + } + + if (tag_data.application_area_id != access_id) { + LOG_WARNING(Service_NFP, "Wrong application area id"); + return WrongApplicationAreaId; + } + + is_app_area_open = true; + + return ResultSuccess; +} + +Result NfpDevice::GetApplicationArea(std::vector<u8>& data) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + if (!is_app_area_open) { + LOG_ERROR(Service_NFP, "Application area is not open"); + return WrongDeviceState; + } + + if (tag_data.settings.settings.appdata_initialized.Value() == 0) { + LOG_ERROR(Service_NFP, "Application area is not initialized"); + return ApplicationAreaIsNotInitialized; + } + + if (data.size() > sizeof(ApplicationArea)) { + data.resize(sizeof(ApplicationArea)); + } + + memcpy(data.data(), tag_data.application_area.data(), data.size()); + + return ResultSuccess; +} + +Result NfpDevice::SetApplicationArea(std::span<const u8> data) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + if (!is_app_area_open) { + LOG_ERROR(Service_NFP, "Application area is not open"); + return WrongDeviceState; + } + + if (tag_data.settings.settings.appdata_initialized.Value() == 0) { + LOG_ERROR(Service_NFP, "Application area is not initialized"); + return ApplicationAreaIsNotInitialized; + } + + if (data.size() > sizeof(ApplicationArea)) { + LOG_ERROR(Service_NFP, "Wrong data size {}", data.size()); + return ResultUnknown; + } + + Common::TinyMT rng{}; + std::memcpy(tag_data.application_area.data(), data.data(), data.size()); + // Fill remaining data with random numbers + rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(), + sizeof(ApplicationArea) - data.size()); + + tag_data.applicaton_write_counter++; + is_data_moddified = true; + + return ResultSuccess; +} + +Result NfpDevice::CreateApplicationArea(u32 access_id, std::span<const u8> data) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (tag_data.settings.settings.appdata_initialized.Value() != 0) { + LOG_ERROR(Service_NFP, "Application area already exist"); + return ApplicationAreaExist; + } + + return RecreateApplicationArea(access_id, data); +} + +Result NfpDevice::RecreateApplicationArea(u32 access_id, std::span<const u8> data) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, 
"Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + if (data.size() > sizeof(ApplicationArea)) { + LOG_ERROR(Service_NFP, "Wrong data size {}", data.size()); + return WrongApplicationAreaSize; + } + + Common::TinyMT rng{}; + std::memcpy(tag_data.application_area.data(), data.data(), data.size()); + // Fill remaining data with random numbers + rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(), + sizeof(ApplicationArea) - data.size()); + + // TODO: Investigate why the title id needs to be moddified + tag_data.title_id = system.GetCurrentProcessProgramID(); + tag_data.title_id = tag_data.title_id | 0x30000000ULL; + tag_data.settings.settings.appdata_initialized.Assign(1); + tag_data.application_area_id = access_id; + tag_data.applicaton_write_counter++; + tag_data.unknown = {}; + + return Flush(); +} + +Result NfpDevice::DeleteApplicationArea() { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + if (device_state == DeviceState::TagRemoved) { + return TagRemoved; + } + return WrongDeviceState; + } + + if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) { + LOG_ERROR(Service_NFP, "Amiibo is read only", device_state); + return WrongDeviceState; + } + + Common::TinyMT rng{}; + rng.GenerateRandomBytes(tag_data.application_area.data(), sizeof(ApplicationArea)); + rng.GenerateRandomBytes(&tag_data.title_id, sizeof(u64)); + rng.GenerateRandomBytes(&tag_data.application_area_id, sizeof(u32)); + tag_data.settings.settings.appdata_initialized.Assign(0); + tag_data.applicaton_write_counter++; + tag_data.unknown = {}; + + return Flush(); +} + +u64 NfpDevice::GetHandle() const { + // Generate a handle based of the npad id + return static_cast<u64>(npad_id); +} + +u32 NfpDevice::GetApplicationAreaSize() const { + return sizeof(ApplicationArea); +} + +DeviceState NfpDevice::GetCurrentState() const { + return device_state; +} + +Core::HID::NpadIdType NfpDevice::GetNpadId() const { + return npad_id; +} + +AmiiboName NfpDevice::GetAmiiboName(const AmiiboSettings& settings) const { + std::array<char16_t, amiibo_name_length> settings_amiibo_name{}; + AmiiboName amiibo_name{}; + + // Convert from big endian to little endian + for (std::size_t i = 0; i < amiibo_name_length; i++) { + settings_amiibo_name[i] = static_cast<u16>(settings.amiibo_name[i]); + } + + // Convert from utf16 to utf8 + const auto amiibo_name_utf8 = Common::UTF16ToUTF8(settings_amiibo_name.data()); + memcpy(amiibo_name.data(), amiibo_name_utf8.data(), amiibo_name_utf8.size()); + + return amiibo_name; +} + +void NfpDevice::SetAmiiboName(AmiiboSettings& settings, const AmiiboName& amiibo_name) { + std::array<char16_t, amiibo_name_length> settings_amiibo_name{}; + + // Convert from utf8 to utf16 + const auto amiibo_name_utf16 = Common::UTF8ToUTF16(amiibo_name.data()); + memcpy(settings_amiibo_name.data(), amiibo_name_utf16.data(), + amiibo_name_utf16.size() * sizeof(char16_t)); + + // Convert from little endian to big endian + for (std::size_t i = 0; i < amiibo_name_length; i++) { + settings.amiibo_name[i] = static_cast<u16_be>(settings_amiibo_name[i]); + } +} + +AmiiboDate NfpDevice::GetAmiiboDate(s64 posix_time) const { + const auto& time_zone_manager = + 
system.GetTimeManager().GetTimeZoneContentManager().GetTimeZoneManager(); + Time::TimeZone::CalendarInfo calendar_info{}; + AmiiboDate amiibo_date{}; + + amiibo_date.SetYear(2000); + amiibo_date.SetMonth(1); + amiibo_date.SetDay(1); + + if (time_zone_manager.ToCalendarTime({}, posix_time, calendar_info) == ResultSuccess) { + amiibo_date.SetYear(calendar_info.time.year); + amiibo_date.SetMonth(calendar_info.time.month); + amiibo_date.SetDay(calendar_info.time.day); + } + + return amiibo_date; +} + +} // namespace Service::NFP diff --git a/src/core/hle/service/nfp/nfp_device.h b/src/core/hle/service/nfp/nfp_device.h new file mode 100644 index 000000000..a5b72cf19 --- /dev/null +++ b/src/core/hle/service/nfp/nfp_device.h @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <array> +#include <vector> + +#include "common/common_funcs.h" +#include "core/hle/service/kernel_helpers.h" +#include "core/hle/service/mii/types.h" +#include "core/hle/service/nfp/nfp_types.h" +#include "core/hle/service/service.h" + +namespace Kernel { +class KEvent; +class KReadableEvent; +} // namespace Kernel + +namespace Core { +class System; +} // namespace Core + +namespace Core::HID { +class EmulatedController; +enum class ControllerTriggerType; +enum class NpadIdType : u32; +} // namespace Core::HID + +namespace Service::NFP { +class NfpDevice { +public: + NfpDevice(Core::HID::NpadIdType npad_id_, Core::System& system_, + KernelHelpers::ServiceContext& service_context_, + Kernel::KEvent* availability_change_event_); + ~NfpDevice(); + + void Initialize(); + void Finalize(); + + Result StartDetection(s32 protocol_); + Result StopDetection(); + Result Mount(MountTarget mount_target); + Result Unmount(); + Result Flush(); + + Result GetTagInfo(TagInfo& tag_info) const; + Result GetCommonInfo(CommonInfo& common_info) const; + Result GetModelInfo(ModelInfo& model_info) const; + Result GetRegisterInfo(RegisterInfo& register_info) const; + + Result SetNicknameAndOwner(const AmiiboName& amiibo_name); + Result RestoreAmiibo(); + Result DeleteAllData(); + + Result OpenApplicationArea(u32 access_id); + Result GetApplicationArea(std::vector<u8>& data) const; + Result SetApplicationArea(std::span<const u8> data); + Result CreateApplicationArea(u32 access_id, std::span<const u8> data); + Result RecreateApplicationArea(u32 access_id, std::span<const u8> data); + Result DeleteApplicationArea(); + + u64 GetHandle() const; + u32 GetApplicationAreaSize() const; + DeviceState GetCurrentState() const; + Core::HID::NpadIdType GetNpadId() const; + + Kernel::KReadableEvent& GetActivateEvent() const; + Kernel::KReadableEvent& GetDeactivateEvent() const; + +private: + void NpadUpdate(Core::HID::ControllerTriggerType type); + bool LoadAmiibo(std::span<const u8> data); + void CloseAmiibo(); + + AmiiboName GetAmiiboName(const AmiiboSettings& settings) const; + void SetAmiiboName(AmiiboSettings& settings, const AmiiboName& amiibo_name); + AmiiboDate GetAmiiboDate(s64 posix_time) const; + + bool is_controller_set{}; + int callback_key; + const Core::HID::NpadIdType npad_id; + Core::System& system; + Core::HID::EmulatedController* npad_device = nullptr; + KernelHelpers::ServiceContext& service_context; + Kernel::KEvent* activate_event = nullptr; + Kernel::KEvent* deactivate_event = nullptr; + Kernel::KEvent* availability_change_event = nullptr; + + bool is_data_moddified{}; + bool is_app_area_open{}; + s32 protocol{}; + s64 
current_posix_time{}; + MountTarget mount_target{MountTarget::None}; + DeviceState device_state{DeviceState::Unavailable}; + + NTAG215File tag_data{}; + EncryptedNTAG215File encrypted_tag_data{}; +}; + +} // namespace Service::NFP diff --git a/src/core/hle/service/nfp/nfp_result.h b/src/core/hle/service/nfp/nfp_result.h new file mode 100644 index 000000000..d8e4cf094 --- /dev/null +++ b/src/core/hle/service/nfp/nfp_result.h @@ -0,0 +1,24 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include "core/hle/result.h" + +namespace Service::NFP { + +constexpr Result DeviceNotFound(ErrorModule::NFP, 64); +constexpr Result InvalidArgument(ErrorModule::NFP, 65); +constexpr Result WrongApplicationAreaSize(ErrorModule::NFP, 68); +constexpr Result WrongDeviceState(ErrorModule::NFP, 73); +constexpr Result NfcDisabled(ErrorModule::NFP, 80); +constexpr Result WriteAmiiboFailed(ErrorModule::NFP, 88); +constexpr Result TagRemoved(ErrorModule::NFP, 97); +constexpr Result RegistrationIsNotInitialized(ErrorModule::NFP, 120); +constexpr Result ApplicationAreaIsNotInitialized(ErrorModule::NFP, 128); +constexpr Result CorruptedData(ErrorModule::NFP, 144); +constexpr Result WrongApplicationAreaId(ErrorModule::NFP, 152); +constexpr Result ApplicationAreaExist(ErrorModule::NFP, 168); +constexpr Result NotAnAmiibo(ErrorModule::NFP, 178); + +} // namespace Service::NFP diff --git a/src/core/hle/service/nfp/nfp_types.h b/src/core/hle/service/nfp/nfp_types.h new file mode 100644 index 000000000..c09f9ddb6 --- /dev/null +++ b/src/core/hle/service/nfp/nfp_types.h @@ -0,0 +1,320 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <array> + +#include "common/swap.h" +#include "core/hle/service/mii/types.h" + +namespace Service::NFP { +static constexpr std::size_t amiibo_name_length = 0xA; + +enum class ServiceType : u32 { + User, + Debug, + System, +}; + +enum class State : u32 { + NonInitialized, + Initialized, +}; + +enum class DeviceState : u32 { + Initialized, + SearchingForTag, + TagFound, + TagRemoved, + TagMounted, + Unavailable, + Finalized, +}; + +enum class ModelType : u32 { + Amiibo, +}; + +enum class MountTarget : u32 { + None, + Rom, + Ram, + All, +}; + +enum class AmiiboType : u8 { + Figure, + Card, + Yarn, +}; + +enum class AmiiboSeries : u8 { + SuperSmashBros, + SuperMario, + ChibiRobo, + YoshiWoollyWorld, + Splatoon, + AnimalCrossing, + EightBitMario, + Skylanders, + Unknown8, + TheLegendOfZelda, + ShovelKnight, + Unknown11, + Kiby, + Pokemon, + MarioSportsSuperstars, + MonsterHunter, + BoxBoy, + Pikmin, + FireEmblem, + Metroid, + Others, + MegaMan, + Diablo, +}; + +enum class TagType : u32 { + None, + Type1, // ISO14443A RW 96-2k bytes 106kbit/s + Type2, // ISO14443A RW/RO 540 bytes 106kbit/s + Type3, // Sony Felica RW/RO 2k bytes 212kbit/s + Type4, // ISO14443A RW/RO 4k-32k bytes 424kbit/s + Type5, // ISO15693 RW/RO 540 bytes 106kbit/s +}; + +enum class PackedTagType : u8 { + None, + Type1, // ISO14443A RW 96-2k bytes 106kbit/s + Type2, // ISO14443A RW/RO 540 bytes 106kbit/s + Type3, // Sony Felica RW/RO 2k bytes 212kbit/s + Type4, // ISO14443A RW/RO 4k-32k bytes 424kbit/s + Type5, // ISO15693 RW/RO 540 bytes 106kbit/s +}; + +enum class TagProtocol : u32 { + None, + TypeA, // ISO14443A + TypeB, // ISO14443B + TypeF, // Sony Felica +}; + +using UniqueSerialNumber = std::array<u8, 7>; +using LockBytes = std::array<u8, 2>; +using 
HashData = std::array<u8, 0x20>;
+using ApplicationArea = std::array<u8, 0xD8>;
+using AmiiboName = std::array<char, (amiibo_name_length * 4) + 1>;
+
+struct TagUuid {
+    UniqueSerialNumber uid;
+    u8 nintendo_id;
+    LockBytes lock_bytes;
+};
+static_assert(sizeof(TagUuid) == 10, "TagUuid is an invalid size");
+
+struct WriteDate {
+    u16 year;
+    u8 month;
+    u8 day;
+};
+static_assert(sizeof(WriteDate) == 0x4, "WriteDate is an invalid size");
+
+struct AmiiboDate {
+    u16 raw_date{};
+
+    u16 GetValue() const {
+        return Common::swap16(raw_date);
+    }
+
+    u16 GetYear() const {
+        return static_cast<u16>(((GetValue() & 0xFE00) >> 9) + 2000);
+    }
+    u8 GetMonth() const {
+        return static_cast<u8>((GetValue() & 0x01E0) >> 5);
+    }
+    u8 GetDay() const {
+        return static_cast<u8>(GetValue() & 0x001F);
+    }
+
+    WriteDate GetWriteDate() const {
+        if (!IsValidDate()) {
+            return {
+                .year = 2000,
+                .month = 1,
+                .day = 1,
+            };
+        }
+        return {
+            .year = GetYear(),
+            .month = GetMonth(),
+            .day = GetDay(),
+        };
+    }
+
+    void SetYear(u16 year) {
+        const u16 year_converted = static_cast<u16>((year - 2000) << 9);
+        raw_date = Common::swap16((GetValue() & ~0xFE00) | year_converted);
+    }
+    void SetMonth(u8 month) {
+        const u16 month_converted = static_cast<u16>(month << 5);
+        raw_date = Common::swap16((GetValue() & ~0x01E0) | month_converted);
+    }
+    void SetDay(u8 day) {
+        const u16 day_converted = static_cast<u16>(day);
+        raw_date = Common::swap16((GetValue() & ~0x001F) | day_converted);
+    }
+
+    bool IsValidDate() const {
+        const bool is_day_valid = GetDay() > 0 && GetDay() < 32;
+        const bool is_month_valid = GetMonth() > 0 && GetMonth() < 13;
+        const bool is_year_valid = GetYear() >= 2000;
+        return is_year_valid && is_month_valid && is_day_valid;
+    }
+};
+static_assert(sizeof(AmiiboDate) == 2, "AmiiboDate is an invalid size");
+
+struct Settings {
+    union {
+        u8 raw{};
+
+        BitField<4, 1, u8> amiibo_initialized;
+        BitField<5, 1, u8> appdata_initialized;
+    };
+};
+static_assert(sizeof(Settings) == 1, "Settings is an invalid size");
+
+struct AmiiboSettings {
+    Settings settings;
+    u8 country_code_id;
+    u16_be crc_counter; // Incremented each time crc is changed
+    AmiiboDate init_date;
+    AmiiboDate write_date;
+    u32_be crc;
+    std::array<u16_be, amiibo_name_length> amiibo_name; // UTF-16 text
+};
+static_assert(sizeof(AmiiboSettings) == 0x20, "AmiiboSettings is an invalid size");
+
+struct AmiiboModelInfo {
+    u16 character_id;
+    u8 character_variant;
+    AmiiboType amiibo_type;
+    u16_be model_number;
+    AmiiboSeries series;
+    PackedTagType tag_type;
+    INSERT_PADDING_BYTES(0x4); // Unknown
+};
+static_assert(sizeof(AmiiboModelInfo) == 0xC, "AmiiboModelInfo is an invalid size");
+
+struct NTAG215Password {
+    u32 PWD;  // Password to allow write access
+    u16 PACK; // Password acknowledge reply
+    u16 RFUI; // Reserved for future use
+};
+static_assert(sizeof(NTAG215Password) == 0x8, "NTAG215Password is an invalid size");
+
+#pragma pack(1)
+struct EncryptedAmiiboFile {
+    u8 constant_value;    // Must be A5
+    u16_be write_counter; // Number of times the amiibo has been written?
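// A worked example of the AmiiboDate packing defined above, assuming a
// little-endian host (so Common::swap16 converts to the tag's big-endian
// layout). The packed u16 is yyyyyyym mmmddddd, with the year stored as an
// offset from 2000:
//
//     AmiiboDate date{};
//     date.SetYear(2022); // (2022 - 2000) << 9 == 0x2C00
//     date.SetMonth(7);   //             7 << 5 == 0x00E0
//     date.SetDay(9);     //                       0x0009
//
// date.GetValue() is then 0x2CE9, raw_date holds the bytes {0x2C, 0xE9}, and
// date.GetWriteDate() decodes back to {.year = 2022, .month = 7, .day = 9}.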
+    INSERT_PADDING_BYTES(0x1);             // Unknown 1
+    AmiiboSettings settings;               // Encrypted amiibo settings
+    HashData hmac_tag;                     // Hash
+    AmiiboModelInfo model_info;            // Encrypted amiibo model info
+    HashData keygen_salt;                  // Salt
+    HashData hmac_data;                    // Hash
+    Service::Mii::Ver3StoreData owner_mii; // Encrypted Mii data
+    u64_be title_id;                       // Encrypted Game id
+    u16_be applicaton_write_counter;       // Encrypted Counter
+    u32_be application_area_id;            // Encrypted Game id
+    std::array<u8, 0x2> unknown;
+    std::array<u32, 0x8> unknown2;
+    ApplicationArea application_area;      // Encrypted Game data
+};
+static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "EncryptedAmiiboFile is an invalid size");
+
+struct NTAG215File {
+    LockBytes lock_bytes;                  // Tag UUID
+    u16 static_lock;                       // Set defined pages as read only
+    u32 compability_container;             // Defines available memory
+    HashData hmac_data;                    // Hash
+    u8 constant_value;                     // Must be A5
+    u16_be write_counter;                  // Number of times the amiibo has been written?
+    INSERT_PADDING_BYTES(0x1);             // Unknown 1
+    AmiiboSettings settings;
+    Service::Mii::Ver3StoreData owner_mii; // Encrypted Mii data
+    u64_be title_id;
+    u16_be applicaton_write_counter;       // Encrypted Counter
+    u32_be application_area_id;
+    std::array<u8, 0x2> unknown;
+    std::array<u32, 0x8> unknown2;
+    ApplicationArea application_area;      // Encrypted Game data
+    HashData hmac_tag;                     // Hash
+    UniqueSerialNumber uid;                // Unique serial number
+    u8 nintendo_id;                        // Tag UUID
+    AmiiboModelInfo model_info;
+    HashData keygen_salt;                  // Salt
+    u32 dynamic_lock;                      // Dynamic lock
+    u32 CFG0;                              // Defines memory protected by password
+    u32 CFG1;                              // Defines number of verification attempts
+    NTAG215Password password;              // Password data
+};
+static_assert(sizeof(NTAG215File) == 0x21C, "NTAG215File is an invalid size");
+static_assert(std::is_trivially_copyable_v<NTAG215File>, "NTAG215File must be trivially copyable.");
+#pragma pack()
+
+struct EncryptedNTAG215File {
+    TagUuid uuid;                    // Unique serial number
+    u16 static_lock;                 // Set defined pages as read only
+    u32 compability_container;       // Defines available memory
+    EncryptedAmiiboFile user_memory; // Writable data
+    u32 dynamic_lock;                // Dynamic lock
+    u32 CFG0;                        // Defines memory protected by password
+    u32 CFG1;                        // Defines number of verification attempts
+    NTAG215Password password;        // Password data
+};
+static_assert(sizeof(EncryptedNTAG215File) == 0x21C, "EncryptedNTAG215File is an invalid size");
+static_assert(std::is_trivially_copyable_v<EncryptedNTAG215File>,
+              "EncryptedNTAG215File must be trivially copyable.");
+
+struct TagInfo {
+    UniqueSerialNumber uuid;
+    INSERT_PADDING_BYTES(0x3);
+    u8 uuid_length;
+    INSERT_PADDING_BYTES(0x15);
+    TagProtocol protocol;
+    TagType tag_type;
+    INSERT_PADDING_BYTES(0x30);
+};
+static_assert(sizeof(TagInfo) == 0x58, "TagInfo is an invalid size");
+
+struct CommonInfo {
+    WriteDate last_write_date;
+    u16 write_counter;
+    u8 version;
+    INSERT_PADDING_BYTES(0x1);
+    u32 application_area_size;
+    INSERT_PADDING_BYTES(0x34);
+};
+static_assert(sizeof(CommonInfo) == 0x40, "CommonInfo is an invalid size");
+
+struct ModelInfo {
+    u16 character_id;
+    u8 character_variant;
+    AmiiboType amiibo_type;
+    u16 model_number;
+    AmiiboSeries series;
+    INSERT_PADDING_BYTES(0x39); // Unknown
+};
+static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size");
+
+struct RegisterInfo {
+    Service::Mii::CharInfo mii_char_info;
+    WriteDate creation_date;
+    AmiiboName amiibo_name;
+    u8 font_region;
+    INSERT_PADDING_BYTES(0x7A);
+};
+static_assert(sizeof(RegisterInfo) == 0x100,
"RegisterInfo is an invalid size"); + +} // namespace Service::NFP diff --git a/src/core/hle/service/nfp/nfp_user.cpp b/src/core/hle/service/nfp/nfp_user.cpp index 2d7b156cf..4ed53b534 100644 --- a/src/core/hle/service/nfp/nfp_user.cpp +++ b/src/core/hle/service/nfp/nfp_user.cpp @@ -1,18 +1,674 @@ // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include <array> +#include <atomic> + +#include "common/logging/log.h" +#include "core/core.h" +#include "core/hid/emulated_controller.h" +#include "core/hid/hid_core.h" +#include "core/hid/hid_types.h" +#include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/service/mii/mii_manager.h" +#include "core/hle/service/nfp/nfp_device.h" +#include "core/hle/service/nfp/nfp_result.h" #include "core/hle/service/nfp/nfp_user.h" namespace Service::NFP { -NFP_User::NFP_User(std::shared_ptr<Module> module_, Core::System& system_) - : Interface(std::move(module_), system_, "nfp:user") { +IUser::IUser(Core::System& system_) + : ServiceFramework{system_, "NFP::IUser"}, service_context{system_, service_name} { static const FunctionInfo functions[] = { - {0, &NFP_User::CreateUserInterface, "CreateUserInterface"}, + {0, &IUser::Initialize, "Initialize"}, + {1, &IUser::Finalize, "Finalize"}, + {2, &IUser::ListDevices, "ListDevices"}, + {3, &IUser::StartDetection, "StartDetection"}, + {4, &IUser::StopDetection, "StopDetection"}, + {5, &IUser::Mount, "Mount"}, + {6, &IUser::Unmount, "Unmount"}, + {7, &IUser::OpenApplicationArea, "OpenApplicationArea"}, + {8, &IUser::GetApplicationArea, "GetApplicationArea"}, + {9, &IUser::SetApplicationArea, "SetApplicationArea"}, + {10, &IUser::Flush, "Flush"}, + {11, &IUser::Restore, "Restore"}, + {12, &IUser::CreateApplicationArea, "CreateApplicationArea"}, + {13, &IUser::GetTagInfo, "GetTagInfo"}, + {14, &IUser::GetRegisterInfo, "GetRegisterInfo"}, + {15, &IUser::GetCommonInfo, "GetCommonInfo"}, + {16, &IUser::GetModelInfo, "GetModelInfo"}, + {17, &IUser::AttachActivateEvent, "AttachActivateEvent"}, + {18, &IUser::AttachDeactivateEvent, "AttachDeactivateEvent"}, + {19, &IUser::GetState, "GetState"}, + {20, &IUser::GetDeviceState, "GetDeviceState"}, + {21, &IUser::GetNpadId, "GetNpadId"}, + {22, &IUser::GetApplicationAreaSize, "GetApplicationAreaSize"}, + {23, &IUser::AttachAvailabilityChangeEvent, "AttachAvailabilityChangeEvent"}, + {24, &IUser::RecreateApplicationArea, "RecreateApplicationArea"}, }; RegisterHandlers(functions); + + availability_change_event = service_context.CreateEvent("IUser:AvailabilityChangeEvent"); + + for (u32 device_index = 0; device_index < 10; device_index++) { + devices[device_index] = + std::make_shared<NfpDevice>(Core::HID::IndexToNpadIdType(device_index), system, + service_context, availability_change_event); + } +} + +void IUser::Initialize(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_NFC, "called"); + + state = State::Initialized; + + for (auto& device : devices) { + device->Initialize(); + } + + IPC::ResponseBuilder rb{ctx, 2, 0}; + rb.Push(ResultSuccess); +} + +void IUser::Finalize(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_NFP, "called"); + + state = State::NonInitialized; + + for (auto& device : devices) { + device->Finalize(); + } + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +void IUser::ListDevices(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_NFP, "called"); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + 
rb.Push(NfcDisabled); + return; + } + + if (!ctx.CanWriteBuffer()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(InvalidArgument); + return; + } + + if (ctx.GetWriteBufferSize() == 0) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(InvalidArgument); + return; + } + + std::vector<u64> nfp_devices; + const std::size_t max_allowed_devices = ctx.GetWriteBufferSize() / sizeof(u64); + + for (auto& device : devices) { + if (nfp_devices.size() >= max_allowed_devices) { + continue; + } + if (device->GetCurrentState() != DeviceState::Unavailable) { + nfp_devices.push_back(device->GetHandle()); + } + } + + if (nfp_devices.size() == 0) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + ctx.WriteBuffer(nfp_devices); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(static_cast<s32>(nfp_devices.size())); +} + +void IUser::StartDetection(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto nfp_protocol{rp.Pop<s32>()}; + LOG_INFO(Service_NFP, "called, device_handle={}, nfp_protocol={}", device_handle, nfp_protocol); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->StartDetection(nfp_protocol); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::StopDetection(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->StopDetection(); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::Mount(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto model_type{rp.PopEnum<ModelType>()}; + const auto mount_target{rp.PopEnum<MountTarget>()}; + LOG_INFO(Service_NFP, "called, device_handle={}, model_type={}, mount_target={}", device_handle, + model_type, mount_target); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->Mount(mount_target); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::Unmount(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->Unmount(); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::OpenApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto 
device_handle{rp.Pop<u64>()}; + const auto access_id{rp.Pop<u32>()}; + LOG_INFO(Service_NFP, "called, device_handle={}, access_id={}", device_handle, access_id); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->OpenApplicationArea(access_id); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::GetApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto data_size = ctx.GetWriteBufferSize(); + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + if (!ctx.CanWriteBuffer()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(InvalidArgument); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + std::vector<u8> data(data_size); + const auto result = device.value()->GetApplicationArea(data); + ctx.WriteBuffer(data); + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(result); + rb.Push(static_cast<u32>(data_size)); +} + +void IUser::SetApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto data{ctx.ReadBuffer()}; + LOG_INFO(Service_NFP, "called, device_handle={}, data_size={}", device_handle, data.size()); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + if (!ctx.CanReadBuffer()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(InvalidArgument); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->SetApplicationArea(data); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::Flush(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->Flush(); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::Restore(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->RestoreAmiibo(); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::CreateApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto access_id{rp.Pop<u32>()}; + const 
auto data{ctx.ReadBuffer()}; + LOG_INFO(Service_NFP, "called, device_handle={}, data_size={}, access_id={}", device_handle, + access_id, data.size()); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + if (!ctx.CanReadBuffer()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(InvalidArgument); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->CreateApplicationArea(access_id, data); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::GetTagInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + TagInfo tag_info{}; + const auto result = device.value()->GetTagInfo(tag_info); + ctx.WriteBuffer(tag_info); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::GetRegisterInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + RegisterInfo register_info{}; + const auto result = device.value()->GetRegisterInfo(register_info); + ctx.WriteBuffer(register_info); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::GetCommonInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + CommonInfo common_info{}; + const auto result = device.value()->GetCommonInfo(common_info); + ctx.WriteBuffer(common_info); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::GetModelInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + ModelInfo model_info{}; + const auto result = device.value()->GetModelInfo(model_info); + ctx.WriteBuffer(model_info); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +void IUser::AttachActivateEvent(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + 
IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(ResultSuccess); + rb.PushCopyObjects(device.value()->GetActivateEvent()); +} + +void IUser::AttachDeactivateEvent(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(ResultSuccess); + rb.PushCopyObjects(device.value()->GetDeactivateEvent()); +} + +void IUser::GetState(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_NFP, "called"); + + IPC::ResponseBuilder rb{ctx, 3, 0}; + rb.Push(ResultSuccess); + rb.PushEnum(state); +} + +void IUser::GetDeviceState(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.PushEnum(device.value()->GetCurrentState()); +} + +void IUser::GetNpadId(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.PushEnum(device.value()->GetNpadId()); +} + +void IUser::GetApplicationAreaSize(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(device.value()->GetApplicationAreaSize()); } -NFP_User::~NFP_User() = default; +void IUser::AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_NFP, "called"); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(ResultSuccess); + rb.PushCopyObjects(availability_change_event->GetReadableEvent()); +} + +void IUser::RecreateApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto access_id{rp.Pop<u32>()}; + const auto data{ctx.ReadBuffer()}; + LOG_INFO(Service_NFP, "called, device_handle={}, data_size={}, access_id={}", device_handle, + access_id, data.size()); + + if (state == State::NonInitialized) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(NfcDisabled); + return; + } + + 
auto device = GetNfpDevice(device_handle); + + if (!device.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(DeviceNotFound); + return; + } + + const auto result = device.value()->RecreateApplicationArea(access_id, data); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + +std::optional<std::shared_ptr<NfpDevice>> IUser::GetNfpDevice(u64 handle) { + for (auto& device : devices) { + if (device->GetHandle() == handle) { + return device; + } + } + return std::nullopt; +} } // namespace Service::NFP diff --git a/src/core/hle/service/nfp/nfp_user.h b/src/core/hle/service/nfp/nfp_user.h index 519ff56ee..68c60ae82 100644 --- a/src/core/hle/service/nfp/nfp_user.h +++ b/src/core/hle/service/nfp/nfp_user.h @@ -3,14 +3,52 @@ #pragma once +#include "core/hle/service/kernel_helpers.h" #include "core/hle/service/nfp/nfp.h" +#include "core/hle/service/nfp/nfp_types.h" namespace Service::NFP { +class NfpDevice; -class NFP_User final : public Module::Interface { +class IUser final : public ServiceFramework<IUser> { public: - explicit NFP_User(std::shared_ptr<Module> module_, Core::System& system_); - ~NFP_User() override; + explicit IUser(Core::System& system_); + +private: + void Initialize(Kernel::HLERequestContext& ctx); + void Finalize(Kernel::HLERequestContext& ctx); + void ListDevices(Kernel::HLERequestContext& ctx); + void StartDetection(Kernel::HLERequestContext& ctx); + void StopDetection(Kernel::HLERequestContext& ctx); + void Mount(Kernel::HLERequestContext& ctx); + void Unmount(Kernel::HLERequestContext& ctx); + void OpenApplicationArea(Kernel::HLERequestContext& ctx); + void GetApplicationArea(Kernel::HLERequestContext& ctx); + void SetApplicationArea(Kernel::HLERequestContext& ctx); + void Flush(Kernel::HLERequestContext& ctx); + void Restore(Kernel::HLERequestContext& ctx); + void CreateApplicationArea(Kernel::HLERequestContext& ctx); + void GetTagInfo(Kernel::HLERequestContext& ctx); + void GetRegisterInfo(Kernel::HLERequestContext& ctx); + void GetCommonInfo(Kernel::HLERequestContext& ctx); + void GetModelInfo(Kernel::HLERequestContext& ctx); + void AttachActivateEvent(Kernel::HLERequestContext& ctx); + void AttachDeactivateEvent(Kernel::HLERequestContext& ctx); + void GetState(Kernel::HLERequestContext& ctx); + void GetDeviceState(Kernel::HLERequestContext& ctx); + void GetNpadId(Kernel::HLERequestContext& ctx); + void GetApplicationAreaSize(Kernel::HLERequestContext& ctx); + void AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx); + void RecreateApplicationArea(Kernel::HLERequestContext& ctx); + + std::optional<std::shared_ptr<NfpDevice>> GetNfpDevice(u64 handle); + + KernelHelpers::ServiceContext service_context; + + std::array<std::shared_ptr<NfpDevice>, 10> devices{}; + + State state{State::NonInitialized}; + Kernel::KEvent* availability_change_event; }; } // namespace Service::NFP diff --git a/src/core/hle/service/nim/nim.cpp b/src/core/hle/service/nim/nim.cpp index b2bb7426d..5a8a91e0b 100644 --- a/src/core/hle/service/nim/nim.cpp +++ b/src/core/hle/service/nim/nim.cpp @@ -328,7 +328,7 @@ private: void StartTask(Kernel::HLERequestContext& ctx) { // No need to connect to the internet, just finish the task straight away. 
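// Signalling the KEvent directly (the change below) wakes any guest thread waiting on the
// task's readable event, so the task is reported complete without a network round-trip.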
LOG_DEBUG(Service_NIM, "called"); - finished_event->GetWritableEvent().Signal(); + finished_event->Signal(); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); } @@ -350,7 +350,7 @@ private: void Cancel(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_NIM, "called"); - finished_event->GetWritableEvent().Clear(); + finished_event->Clear(); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); } diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp index f7318c3cb..f59a1a63d 100644 --- a/src/core/hle/service/ns/ns.cpp +++ b/src/core/hle/service/ns/ns.cpp @@ -8,6 +8,7 @@ #include "core/file_sys/patch_manager.h" #include "core/file_sys/vfs.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/service/glue/glue_manager.h" #include "core/hle/service/ns/errors.h" #include "core/hle/service/ns/iplatform_service_manager.h" #include "core/hle/service/ns/language.h" @@ -581,7 +582,7 @@ IReadOnlyApplicationControlDataInterface::IReadOnlyApplicationControlDataInterfa : ServiceFramework{system_, "IReadOnlyApplicationControlDataInterface"} { // clang-format off static const FunctionInfo functions[] = { - {0, nullptr, "GetApplicationControlData"}, + {0, &IReadOnlyApplicationControlDataInterface::GetApplicationControlData, "GetApplicationControlData"}, {1, nullptr, "GetApplicationDesiredLanguage"}, {2, nullptr, "ConvertApplicationLanguageToLanguageCode"}, {3, nullptr, "ConvertLanguageCodeToApplicationLanguage"}, @@ -594,6 +595,33 @@ IReadOnlyApplicationControlDataInterface::IReadOnlyApplicationControlDataInterfa IReadOnlyApplicationControlDataInterface::~IReadOnlyApplicationControlDataInterface() = default; +void IReadOnlyApplicationControlDataInterface::GetApplicationControlData( + Kernel::HLERequestContext& ctx) { + enum class ApplicationControlSource : u8 { + CacheOnly, + Storage, + StorageOnly, + }; + + struct RequestParameters { + ApplicationControlSource source; + u64 application_id; + }; + static_assert(sizeof(RequestParameters) == 0x10, "RequestParameters has incorrect size."); + + IPC::RequestParser rp{ctx}; + const auto parameters{rp.PopRaw<RequestParameters>()}; + const auto nacp_data{system.GetARPManager().GetControlProperty(parameters.application_id)}; + const auto result = nacp_data ? 
ResultSuccess : ResultUnknown; + + if (nacp_data) { + ctx.WriteBuffer(nacp_data->data(), nacp_data->size()); + } + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); +} + NS::NS(const char* name, Core::System& system_) : ServiceFramework{system_, name} { // clang-format off static const FunctionInfo functions[] = { diff --git a/src/core/hle/service/ns/ns.h b/src/core/hle/service/ns/ns.h index 4dc191518..9c18e935c 100644 --- a/src/core/hle/service/ns/ns.h +++ b/src/core/hle/service/ns/ns.h @@ -78,6 +78,9 @@ class IReadOnlyApplicationControlDataInterface final public: explicit IReadOnlyApplicationControlDataInterface(Core::System& system_); ~IReadOnlyApplicationControlDataInterface() override; + +private: + void GetApplicationControlData(Kernel::HLERequestContext& ctx); }; class NS final : public ServiceFramework<NS> { diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp new file mode 100644 index 000000000..37ca24f5d --- /dev/null +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -0,0 +1,50 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" +#include "video_core/host1x/host1x.h" + +namespace Service::Nvidia::NvCore { + +struct ContainerImpl { + explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) + : file{host1x_}, manager{host1x_}, device_file_data{} {} + NvMap file; + SyncpointManager manager; + Container::Host1xDeviceFileData device_file_data; +}; + +Container::Container(Tegra::Host1x::Host1x& host1x_) { + impl = std::make_unique<ContainerImpl>(host1x_); +} + +Container::~Container() = default; + +NvMap& Container::GetNvMapFile() { + return impl->file; +} + +const NvMap& Container::GetNvMapFile() const { + return impl->file; +} + +Container::Host1xDeviceFileData& Container::Host1xDeviceFile() { + return impl->device_file_data; +} + +const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const { + return impl->device_file_data; +} + +SyncpointManager& Container::GetSyncpointManager() { + return impl->manager; +} + +const SyncpointManager& Container::GetSyncpointManager() const { + return impl->manager; +} + +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h new file mode 100644 index 000000000..b4b63ac90 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/container.h @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <deque> +#include <memory> +#include <unordered_map> + +#include "core/hle/service/nvdrv/nvdata.h" + +namespace Tegra::Host1x { +class Host1x; +} // namespace Tegra::Host1x + +namespace Service::Nvidia::NvCore { + +class NvMap; +class SyncpointManager; + +struct ContainerImpl; + +class Container { +public: + explicit Container(Tegra::Host1x::Host1x& host1x); + ~Container(); + + NvMap& GetNvMapFile(); + + const NvMap& GetNvMapFile() const; + + SyncpointManager& GetSyncpointManager(); + + const SyncpointManager& GetSyncpointManager() const; + + struct Host1xDeviceFileData { + std::unordered_map<DeviceFD, u32> fd_to_id{}; + std::deque<u32> syncpts_accumulated{}; + u32 nvdec_next_id{}; 
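+ // Next instance id to assign to a VIC device file; nvdec_next_id above serves the same purpose for NVDEC fds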
+ u32 vic_next_id{}; + }; + + Host1xDeviceFileData& Host1xDeviceFile(); + + const Host1xDeviceFileData& Host1xDeviceFile() const; + +private: + std::unique_ptr<ContainerImpl> impl; +}; + +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp new file mode 100644 index 000000000..fbd8a74a5 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -0,0 +1,272 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/logging/log.h" +#include "core/hle/service/nvdrv/core/nvmap.h" +#include "core/memory.h" +#include "video_core/host1x/host1x.h" + +using Core::Memory::YUZU_PAGESIZE; + +namespace Service::Nvidia::NvCore { +NvMap::Handle::Handle(u64 size_, Id id_) + : size(size_), aligned_size(size), orig_size(size), id(id_) { + flags.raw = 0; +} + +NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { + std::scoped_lock lock(mutex); + + // Handles cannot be allocated twice + if (allocated) { + return NvResult::AccessDenied; + } + + flags = pFlags; + kind = pKind; + align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign; + + // This flag is only applicable for handles with an address passed + if (pAddress) { + flags.keep_uncached_after_free.Assign(0); + } else { + LOG_CRITICAL(Service_NVDRV, + "Mapping nvmap handles without a CPU side address is unimplemented!"); + } + + size = Common::AlignUp(size, YUZU_PAGESIZE); + aligned_size = Common::AlignUp(size, align); + address = pAddress; + allocated = true; + + return NvResult::Success; +} + +NvResult NvMap::Handle::Duplicate(bool internal_session) { + std::scoped_lock lock(mutex); + // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS) + if (!allocated) [[unlikely]] { + return NvResult::BadValue; + } + + // If we internally use FromId the duplication tracking of handles won't work accurately due to + // us not implementing per-process handle refs. 
+ if (internal_session) { + internal_dupes++; + } else { + dupes++; + } + + return NvResult::Success; +} + +NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {} + +void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) { + std::scoped_lock lock(handles_lock); + + handles.emplace(handle_description->id, std::move(handle_description)); +} + +void NvMap::UnmapHandle(Handle& handle_description) { + // Remove pending unmap queue entry if needed + if (handle_description.unmap_queue_entry) { + unmap_queue.erase(*handle_description.unmap_queue_entry); + handle_description.unmap_queue_entry.reset(); + } + + // Free and unmap the handle from the SMMU + host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address), + handle_description.aligned_size); + host1x.Allocator().Free(handle_description.pin_virt_address, + static_cast<u32>(handle_description.aligned_size)); + handle_description.pin_virt_address = 0; +} + +bool NvMap::TryRemoveHandle(const Handle& handle_description) { + // No dupes left, we can remove from handle map + if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) { + std::scoped_lock lock(handles_lock); + + auto it{handles.find(handle_description.id)}; + if (it != handles.end()) { + handles.erase(it); + } + + return true; + } else { + return false; + } +} + +NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) { + if (!size) [[unlikely]] { + return NvResult::BadValue; + } + + u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)}; + auto handle_description{std::make_shared<Handle>(size, id)}; + AddHandle(handle_description); + + result_out = handle_description; + return NvResult::Success; +} + +std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) { + std::scoped_lock lock(handles_lock); + try { + return handles.at(handle); + } catch (std::out_of_range&) { + return nullptr; + } +} + +VAddr NvMap::GetHandleAddress(Handle::Id handle) { + std::scoped_lock lock(handles_lock); + try { + return handles.at(handle)->address; + } catch (std::out_of_range&) { + return 0; + } +} + +u32 NvMap::PinHandle(NvMap::Handle::Id handle) { + auto handle_description{GetHandle(handle)}; + if (!handle_description) [[unlikely]] { + return 0; + } + + std::scoped_lock lock(handle_description->mutex); + if (!handle_description->pins) { + // If we're in the unmap queue we can just remove ourselves and return since we're already + // mapped + { + // Lock now to prevent our queue entry from being removed for allocation in-between the + // following check and erase + std::scoped_lock queueLock(unmap_queue_lock); + if (handle_description->unmap_queue_entry) { + unmap_queue.erase(*handle_description->unmap_queue_entry); + handle_description->unmap_queue_entry.reset(); + + handle_description->pins++; + return handle_description->pin_virt_address; + } + } + + // If not then allocate some space and map it + u32 address{}; + auto& smmu_allocator = host1x.Allocator(); + auto& smmu_memory_manager = host1x.MemoryManager(); + while (!(address = + smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) { + // Free handles until the allocation succeeds + std::scoped_lock queueLock(unmap_queue_lock); + if (auto freeHandleDesc{unmap_queue.front()}) { + // Handles in the unmap queue are guaranteed not to be pinned so don't bother + // checking if they are before unmapping + std::scoped_lock freeLock(freeHandleDesc->mutex); + if (freeHandleDesc->pin_virt_address) + 
UnmapHandle(*freeHandleDesc); + } else { + LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); + } + } + + smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address, + handle_description->aligned_size); + handle_description->pin_virt_address = address; + } + + handle_description->pins++; + return handle_description->pin_virt_address; +} + +void NvMap::UnpinHandle(Handle::Id handle) { + auto handle_description{GetHandle(handle)}; + if (!handle_description) { + return; + } + + std::scoped_lock lock(handle_description->mutex); + if (--handle_description->pins < 0) { + LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!"); + } else if (!handle_description->pins) { + std::scoped_lock queueLock(unmap_queue_lock); + + // Add to the unmap queue allowing this handle's memory to be freed if needed + unmap_queue.push_back(handle_description); + handle_description->unmap_queue_entry = std::prev(unmap_queue.end()); + } +} + +void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) { + auto handle_description{GetHandle(handle)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); + return; + } + + auto result = handle_description->Duplicate(internal_session); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!"); + } +} + +std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) { + std::weak_ptr<Handle> hWeak{GetHandle(handle)}; + FreeInfo freeInfo; + + // We use a weak ptr here so we can tell when the handle has been freed and report that back to + // guest + if (auto handle_description = hWeak.lock()) { + std::scoped_lock lock(handle_description->mutex); + + if (internal_session) { + if (--handle_description->internal_dupes < 0) + LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!"); + } else { + if (--handle_description->dupes < 0) { + LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); + } else if (handle_description->dupes == 0) { + // Force unmap the handle + if (handle_description->pin_virt_address) { + std::scoped_lock queueLock(unmap_queue_lock); + UnmapHandle(*handle_description); + } + + handle_description->pins = 0; + } + } + + // Try to remove the shared ptr to the handle from the map, if nothing else is using the + // handle then it will now be freed when `handle_description` goes out of scope + if (TryRemoveHandle(*handle_description)) { + LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle); + } else { + LOG_DEBUG(Service_NVDRV, + "Tried to free nvmap handle: {} but didn't as it still has duplicates", + handle); + } + + freeInfo = { + .address = handle_description->address, + .size = handle_description->size, + .was_uncached = handle_description->flags.map_uncached.Value() != 0, + }; + } else { + return std::nullopt; + } + + // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed + if (!hWeak.expired()) { + LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle); + freeInfo.address = 0; + } + + return freeInfo; +} + +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h new file mode 100644 index 000000000..b9dd3801f --- /dev/null +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -0,0 +1,175 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// 
SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <atomic> +#include <list> +#include <memory> +#include <mutex> +#include <optional> +#include <unordered_map> +#include <assert.h> + +#include "common/bit_field.h" +#include "common/common_types.h" +#include "core/hle/service/nvdrv/nvdata.h" + +namespace Tegra { + +namespace Host1x { +class Host1x; +} // namespace Host1x + +} // namespace Tegra + +namespace Service::Nvidia::NvCore { +/** + * @brief The nvmap core class holds the global state for nvmap and provides methods to manage + * handles + */ +class NvMap { +public: + /** + * @brief A handle to a contiguous block of memory in an application's address space + */ + struct Handle { + std::mutex mutex; + + u64 align{}; //!< The alignment to use when pinning the handle onto the SMMU + u64 size; //!< Page-aligned size of the memory the handle refers to + u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to + u64 orig_size; //!< Original unaligned size of the memory this handle refers to + + s32 dupes{1}; //!< How many guest references there are to this handle + s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle + + using Id = u32; + Id id; //!< A globally unique identifier for this handle + + s32 pins{}; + u32 pin_virt_address{}; + std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{}; + + union Flags { + u32 raw; + BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached + BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was + //!< allocated with a fixed address + BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins + } flags{}; + static_assert(sizeof(Flags) == sizeof(u32)); + + u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to, + //!< this can also be in the nvdrv tmem + bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC + //!< call + + u8 kind{}; //!< Used for memory compression + bool allocated{}; //!< If the handle has been allocated with `Alloc` + + u64 dma_map_addr{}; //! remove me after implementing pinning. 
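+ /// Creates a handle of the given size with a unique id; the handle starts unallocated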
+ + Handle(u64 size, Id id); + + /** + * @brief Sets up the handle with the given memory config and can allocate memory from the + * tmem if a 0 address is passed + */ + [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress); + + /** + * @brief Increases the dupe counter of the handle for the given session + */ + [[nodiscard]] NvResult Duplicate(bool internal_session); + + /** + * @brief Obtains a pointer to the handle's memory and marks it as having been + * mapped + */ + u8* GetPointer() { + if (!address) { + return nullptr; + } + + is_shared_mem_mapped = true; + return reinterpret_cast<u8*>(address); + } + }; + + /** + * @brief Encapsulates the result of a FreeHandle operation + */ + struct FreeInfo { + u64 address; //!< Address the handle referred to before deletion + u64 size; //!< Page-aligned handle size + bool was_uncached; //!< If the handle was allocated as uncached + }; + + explicit NvMap(Tegra::Host1x::Host1x& host1x); + + /** + * @brief Creates an unallocated handle of the given size + */ + [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out); + + std::shared_ptr<Handle> GetHandle(Handle::Id handle); + + VAddr GetHandleAddress(Handle::Id handle); + + /** + * @brief Maps a handle into the SMMU address space + * @note This operation is refcounted; the number of calls to this must eventually match the + * number of calls to `UnpinHandle` + * @return The SMMU virtual address that the handle has been mapped to + */ + u32 PinHandle(Handle::Id handle); + + /** + * @brief Once this has been called as many times as `PinHandle` for the supplied handle, it + * will be added to a list of handles to be freed when necessary + */ + void UnpinHandle(Handle::Id handle); + + /** + * @brief Tries to duplicate a handle + */ + void DuplicateHandle(Handle::Id handle, bool internal_session = false); + + /** + * @brief Tries to free a handle and remove a single dupe + * @note If a handle has no dupes left and has no other users a FreeInfo struct will be returned + * describing the prior state of the handle + */ + std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session); + +private: + std::list<std::shared_ptr<Handle>> unmap_queue{}; + std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue` + + std::unordered_map<Handle::Id, std::shared_ptr<Handle>> + handles{}; //!< Main owning map of handles + std::mutex handles_lock; //!< Protects access to `handles` + + static constexpr u32 HandleIdIncrement{ + 4}; //!< Each new handle ID is an increment of 4 from the previous + std::atomic<u32> next_handle_id{HandleIdIncrement}; + Tegra::Host1x::Host1x& host1x; + + void AddHandle(std::shared_ptr<Handle> handle); + + /** + * @brief Unmaps and frees the SMMU memory region a handle is mapped to + * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this + */ + void UnmapHandle(Handle& handle_description); + + /** + * @brief Removes a handle from the map taking its dupes into account + * @note handle_description.mutex MUST be locked when calling this + * @return If the handle was removed from the map + */ + bool TryRemoveHandle(const Handle& handle_description); +}; +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp new file mode 100644 index 000000000..eda2041a0 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -0,0 +1,121 @@ +// 
SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common/assert.h" +#include "core/hle/service/nvdrv/core/syncpoint_manager.h" +#include "video_core/host1x/host1x.h" + +namespace Service::Nvidia::NvCore { + +SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} { + constexpr u32 VBlank0SyncpointId{26}; + constexpr u32 VBlank1SyncpointId{27}; + + // Reserve both vblank syncpoints as client managed as they use Continuous Mode + // Refer to section 14.3.5.3 of the TRM for more information on Continuous Mode + // https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/drm/dc.c#L660 + ReserveSyncpoint(VBlank0SyncpointId, true); + ReserveSyncpoint(VBlank1SyncpointId, true); + + for (u32 syncpoint_id : channel_syncpoints) { + if (syncpoint_id) { + ReserveSyncpoint(syncpoint_id, false); + } + } +} + +SyncpointManager::~SyncpointManager() = default; + +u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) { + if (syncpoints.at(id).reserved) { + ASSERT_MSG(false, "Requested syncpoint is in use"); + return 0; + } + + syncpoints.at(id).reserved = true; + syncpoints.at(id).interface_managed = client_managed; + + return id; +} + +u32 SyncpointManager::FindFreeSyncpoint() { + for (u32 i{1}; i < syncpoints.size(); i++) { + if (!syncpoints[i].reserved) { + return i; + } + } + ASSERT_MSG(false, "Failed to find a free syncpoint!"); + return 0; +} + +u32 SyncpointManager::AllocateSyncpoint(bool client_managed) { + std::lock_guard lock(reservation_lock); + return ReserveSyncpoint(FindFreeSyncpoint(), client_managed); +} + +void SyncpointManager::FreeSyncpoint(u32 id) { + std::lock_guard lock(reservation_lock); + ASSERT(syncpoints.at(id).reserved); + syncpoints.at(id).reserved = false; +} + +bool SyncpointManager::IsSyncpointAllocated(u32 id) { + return (id < SyncpointCount) && syncpoints[id].reserved; +} + +bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const { + const SyncpointInfo& syncpoint{syncpoints.at(id)}; + + if (!syncpoint.reserved) { + ASSERT(false); + return false; + } + + // If the interface manages counters then we don't keep track of the maximum value, as the + // interface sanity-checks the values itself. Differences are compared instead of raw + // counters so the check stays correct across u32 wraparound + if (syncpoint.interface_managed) { + return static_cast<s32>(syncpoint.counter_min - threshold) >= 0; + } else { + return (syncpoint.counter_max - threshold) >= (syncpoint.counter_min - threshold); + } +} + +u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) { + if (!syncpoints.at(id).reserved) { + ASSERT(false); + return 0; + } + + return syncpoints.at(id).counter_max += amount; +} + +u32 SyncpointManager::ReadSyncpointMinValue(u32 id) { + if (!syncpoints.at(id).reserved) { + ASSERT(false); + return 0; + } + + return syncpoints.at(id).counter_min; +} + +u32 SyncpointManager::UpdateMin(u32 id) { + if (!syncpoints.at(id).reserved) { + ASSERT(false); + return 0; + } + + syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id); + return syncpoints.at(id).counter_min; +} + +NvFence SyncpointManager::GetSyncpointFence(u32 id) { + if (!syncpoints.at(id).reserved) { + ASSERT(false); + return NvFence{}; + } + + return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counter_max}; +} + +} // namespace Service::Nvidia::NvCore diff --git 
a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h new file mode 100644 index 000000000..b76ef9032 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <array> +#include <atomic> +#include <mutex> + +#include "common/common_types.h" +#include "core/hle/service/nvdrv/nvdata.h" + +namespace Tegra::Host1x { +class Host1x; +} // namespace Tegra::Host1x + +namespace Service::Nvidia::NvCore { + +enum class ChannelType : u32 { + MsEnc = 0, + VIC = 1, + GPU = 2, + NvDec = 3, + Display = 4, + NvJpg = 5, + TSec = 6, + Max = 7 +}; + +/** + * @brief SyncpointManager handles allocating and accessing host1x syncpoints; these are cached + * versions of the HW syncpoints which are intermittently synced + * @note Refer to Chapter 14 of the Tegra X1 TRM for an exhaustive overview of them + * @url https://http.download.nvidia.com/tegra-public-appnotes/host1x.html + * @url + * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/jetson-tx1/drivers/video/tegra/host/nvhost_syncpt.c + */ +class SyncpointManager final { +public: + explicit SyncpointManager(Tegra::Host1x::Host1x& host1x); + ~SyncpointManager(); + + /** + * @brief Checks if the given syncpoint is both allocated and below the number of HW syncpoints + */ + bool IsSyncpointAllocated(u32 id); + + /** + * @brief Finds a free syncpoint and reserves it + * @return The ID of the reserved syncpoint + */ + u32 AllocateSyncpoint(bool client_managed); + + /** + * @url + * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259 + */ + bool HasSyncpointExpired(u32 id, u32 threshold) const; + + bool IsFenceSignalled(NvFence fence) const { + return HasSyncpointExpired(fence.id, fence.value); + } + + /** + * @brief Atomically increments the maximum value of a syncpoint by the given amount + * @return The new max value of the syncpoint + */ + u32 IncrementSyncpointMaxExt(u32 id, u32 amount); + + /** + * @return The minimum value of the syncpoint + */ + u32 ReadSyncpointMinValue(u32 id); + + /** + * @brief Synchronises the minimum value of the syncpoint with the GPU + * @return The new minimum value of the syncpoint + */ + u32 UpdateMin(u32 id); + + /** + * @brief Frees the usage of a syncpoint. 
+ */ + void FreeSyncpoint(u32 id); + + /** + * @return A fence that will be signalled once this syncpoint hits its maximum value + */ + NvFence GetSyncpointFence(u32 id); + + static constexpr std::array<u32, static_cast<u32>(ChannelType::Max)> channel_syncpoints{ + 0x0, // `MsEnc` is unimplemented + 0xC, // `VIC` + 0x0, // `GPU` syncpoints are allocated per-channel instead + 0x36, // `NvDec` + 0x0, // `Display` is unimplemented + 0x37, // `NvJpg` + 0x0, // `TSec` is unimplemented + }; //!< Maps each channel ID to a constant syncpoint + +private: + /** + * @note reservation_lock should be locked when calling this + */ + u32 ReserveSyncpoint(u32 id, bool client_managed); + + /** + * @return The ID of the first free syncpoint + */ + u32 FindFreeSyncpoint(); + + struct SyncpointInfo { + std::atomic<u32> counter_min; //!< The least value the syncpoint can be (The value it was + //!< when it was last synchronized with host1x) + std::atomic<u32> counter_max; //!< The maximum value the syncpoint can reach according to + //!< the current usage + bool interface_managed; //!< If the syncpoint is managed by a host1x client interface, a + //!< client interface is a HW block that can handle host1x + //!< transactions on behalf of a host1x client (Which would + //!< otherwise need to be manually synced using PIO which is + //!< synchronous and requires direct cooperation of the CPU) + bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved + //!< value + }; + + constexpr static std::size_t SyncpointCount{192}; + std::array<SyncpointInfo, SyncpointCount> syncpoints{}; + std::mutex reservation_lock; + + Tegra::Host1x::Host1x& host1x; +}; + +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index 696e8121e..204b0e757 100644 --- a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h @@ -11,6 +11,10 @@ namespace Core { class System; } +namespace Kernel { +class KEvent; +} + namespace Service::Nvidia::Devices { /// Represents an abstract nvidia device node. 
It is to be subclassed by concrete device nodes to @@ -64,6 +68,10 @@ public: */ virtual void OnClose(DeviceFD fd) = 0; + virtual Kernel::KEvent* QueryEvent(u32 event_id) { + return nullptr; + } + protected: Core::System& system; }; diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 604711914..4122fc98d 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -5,15 +5,16 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/core_timing.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" -#include "core/hle/service/nvdrv/devices/nvmap.h" #include "core/perf_stats.h" #include "video_core/gpu.h" namespace Service::Nvidia::Devices { -nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} +nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core) + : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {} nvdisp_disp0::~nvdisp_disp0() = default; NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, @@ -39,8 +40,9 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {} void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, u32 stride, android::BufferTransformFlags transform, - const Common::Rectangle<int>& crop_rect) { - const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); + const Common::Rectangle<int>& crop_rect, + std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) { + const VAddr addr = nvmap.GetHandleAddress(buffer_handle); LOG_TRACE(Service, "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", addr, offset, width, height, stride, format); @@ -48,10 +50,15 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form const Tegra::FramebufferConfig framebuffer{addr, offset, width, height, stride, format, transform, crop_rect}; + system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences); system.GetPerfStats().EndSystemFrame(); - system.GPU().SwapBuffers(&framebuffer); system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs()); system.GetPerfStats().BeginSystemFrame(); } +Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) { + LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id); + return nullptr; +} + } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index 67b105e02..04217ab12 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h @@ -11,13 +11,18 @@ #include "core/hle/service/nvflinger/buffer_transform_flags.h" #include "core/hle/service/nvflinger/pixel_format.h" +namespace Service::Nvidia::NvCore { +class Container; +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::Nvidia::Devices { class nvmap; class nvdisp_disp0 final : public nvdevice { public: - explicit nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_); + explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core); ~nvdisp_disp0() override; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, @@ -33,10 +38,14 @@ public: /// 
Performs a screen flip, drawing the buffer pointed to by the handle. void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, u32 stride, android::BufferTransformFlags transform, - const Common::Rectangle<int>& crop_rect); + const Common::Rectangle<int>& crop_rect, + std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences); + + Kernel::KEvent* QueryEvent(u32 event_id) override; private: - std::shared_ptr<nvmap> nvmap_dev; + NvCore::Container& container; + NvCore::NvMap& nvmap; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 9867a648d..b635e6ed1 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -1,21 +1,30 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include <cstring> #include <utility> +#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" -#include "core/hle/service/nvdrv/devices/nvmap.h" +#include "core/hle/service/nvdrv/devices/nvhost_gpu.h" +#include "core/hle/service/nvdrv/nvdrv.h" +#include "video_core/control/channel_state.h" +#include "video_core/gpu.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" namespace Service::Nvidia::Devices { -nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) - : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} +nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core) + : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{}, + gmmu{} {} + nvhost_as_gpu::~nvhost_as_gpu() = default; NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, @@ -82,12 +91,52 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>& IoctlAllocAsEx params{}; std::memcpy(¶ms, input.data(), input.size()); - LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size); - if (params.big_page_size == 0) { - params.big_page_size = DEFAULT_BIG_PAGE_SIZE; + LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size); + + std::scoped_lock lock(mutex); + + if (vm.initialised) { + ASSERT_MSG(false, "Cannot initialise an address space twice!"); + return NvResult::InvalidState; + } + + if (params.big_page_size) { + if (!std::has_single_bit(params.big_page_size)) { + LOG_ERROR(Service_NVDRV, "Non power-of-2 big page size: 0x{:X}!", params.big_page_size); + return NvResult::BadValue; + } + + if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) { + LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size); + return NvResult::BadValue; + } + + vm.big_page_size = params.big_page_size; + vm.big_page_size_bits = static_cast<u32>(std::countr_zero(params.big_page_size)); + + vm.va_range_start = params.big_page_size << VM::VA_START_SHIFT; + } + + // If this is unspecified then default values should be used + if 
(params.va_range_start) { + vm.va_range_start = params.va_range_start; + vm.va_range_split = params.va_range_split; + vm.va_range_end = params.va_range_end; } - big_page_size = params.big_page_size; + const auto start_pages{static_cast<u32>(vm.va_range_start >> VM::PAGE_SIZE_BITS)}; + const auto end_pages{static_cast<u32>(vm.va_range_split >> VM::PAGE_SIZE_BITS)}; + vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages); + + const auto start_big_pages{static_cast<u32>(vm.va_range_split >> vm.big_page_size_bits)}; + const auto end_big_pages{ + static_cast<u32>((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)}; + vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages); + + gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits, + VM::PAGE_SIZE_BITS); + system.GPU().InitAddressSpace(*gmmu); + vm.initialised = true; return NvResult::Success; } @@ -99,21 +148,76 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector< LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, params.page_size, params.flags); - const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)}; - if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) { - params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size); + std::scoped_lock lock(mutex); + + if (!vm.initialised) { + return NvResult::BadValue; + } + + if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) { + return NvResult::BadValue; + } + + if (params.page_size != vm.big_page_size && + ((params.flags & MappingFlags::Sparse) != MappingFlags::None)) { + UNIMPLEMENTED_MSG("Sparse small pages are not implemented!"); + return NvResult::NotImplemented; + } + + const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS + : vm.big_page_size_bits}; + + auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator + : *vm.big_page_allocator}; + + if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) { + allocator.AllocateFixed(static_cast<u32>(params.offset >> page_size_bits), params.pages); } else { - params.offset = system.GPU().MemoryManager().Allocate(size, params.align); + params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits; + if (!params.offset) { + ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!"); + return NvResult::InsufficientMemory; + } } - auto result = NvResult::Success; - if (!params.offset) { - LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size); - result = NvResult::InsufficientMemory; + u64 size{static_cast<u64>(params.pages) * params.page_size}; + + if ((params.flags & MappingFlags::Sparse) != MappingFlags::None) { + gmmu->MapSparse(params.offset, size); } + allocation_map[params.offset] = { + .size = size, + .mappings{}, + .page_size = params.page_size, + .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None, + .big_pages = params.page_size != VM::YUZU_PAGESIZE, + }; + std::memcpy(output.data(), ¶ms, output.size()); - return result; + return NvResult::Success; +} + +void nvhost_as_gpu::FreeMappingLocked(u64 offset) { + auto mapping{mapping_map.at(offset)}; + + if (!mapping->fixed) { + auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; + u32 page_size_bits{mapping->big_page ? 
vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; + + allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits), + static_cast<u32>(mapping->size >> page_size_bits)); + } + + // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state + // Only FreeSpace can unmap them fully + if (mapping->sparse_alloc) { + gmmu->MapSparse(offset, mapping->size, mapping->big_page); + } else { + gmmu->Unmap(offset, mapping->size); + } + + mapping_map.erase(offset); } NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) { @@ -123,8 +227,40 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, params.pages, params.page_size); - system.GPU().MemoryManager().Unmap(params.offset, - static_cast<std::size_t>(params.pages) * params.page_size); + std::scoped_lock lock(mutex); + + if (!vm.initialised) { + return NvResult::BadValue; + } + + try { + auto allocation{allocation_map[params.offset]}; + + if (allocation.page_size != params.page_size || + allocation.size != (static_cast<u64>(params.pages) * params.page_size)) { + return NvResult::BadValue; + } + + for (const auto& mapping : allocation.mappings) { + FreeMappingLocked(mapping->offset); + } + + // Unset sparse flag if required + if (allocation.sparse) { + gmmu->Unmap(params.offset, allocation.size); + } + + auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator + : *vm.big_page_allocator}; + u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS + : vm.big_page_size_bits}; + + allocator.Free(static_cast<u32>(params.offset >> page_size_bits), + static_cast<u32>(allocation.size >> page_size_bits)); + allocation_map.erase(params.offset); + } catch (const std::out_of_range&) { + return NvResult::BadValue; + } std::memcpy(output.data(), ¶ms, output.size()); return NvResult::Success; @@ -135,35 +271,53 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries); - auto result = NvResult::Success; std::vector<IoctlRemapEntry> entries(num_entries); std::memcpy(entries.data(), input.data(), input.size()); + std::scoped_lock lock(mutex); + + if (!vm.initialised) { + return NvResult::BadValue; + } + for (const auto& entry : entries) { - LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", - entry.offset, entry.nvmap_handle, entry.pages); + GPUVAddr virtual_address{static_cast<u64>(entry.as_offset_big_pages) + << vm.big_page_size_bits}; + u64 size{static_cast<u64>(entry.big_pages) << vm.big_page_size_bits}; - const auto object{nvmap_dev->GetObject(entry.nvmap_handle)}; - if (!object) { - LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle); - result = NvResult::InvalidState; - break; + auto alloc{allocation_map.upper_bound(virtual_address)}; + + if (alloc-- == allocation_map.begin() || + (virtual_address - alloc->first) + size > alloc->second.size) { + LOG_WARNING(Service_NVDRV, "Cannot remap into an unallocated region!"); + return NvResult::BadValue; } - const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10}; - const auto size{static_cast<u64>(entry.pages) << 0x10}; - const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10}; - const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)}; + if (!alloc->second.sparse) { + LOG_WARNING(Service_NVDRV, "Cannot 
remap a non-sparse mapping!"); + return NvResult::BadValue; + } - if (!addr) { - LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); - result = NvResult::InvalidState; - break; + const bool use_big_pages = alloc->second.big_pages; + if (!entry.handle) { + gmmu->MapSparse(virtual_address, size, use_big_pages); + } else { + auto handle{nvmap.GetHandle(entry.handle)}; + if (!handle) { + return NvResult::BadValue; + } + + VAddr cpu_address{static_cast<VAddr>( + handle->address + + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; + + gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind), + use_big_pages); } } std::memcpy(output.data(), entries.data(), output.size()); - return result; + return NvResult::Success; } NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) { @@ -173,79 +327,101 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8 LOG_DEBUG(Service_NVDRV, "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" ", offset={}", - params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size, + params.flags, params.handle, params.buffer_offset, params.mapping_size, params.offset); - const auto object{nvmap_dev->GetObject(params.nvmap_handle)}; - if (!object) { - LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle); - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::InvalidState; - } - - // The real nvservices doesn't make a distinction between handles and ids, and - // object can only have one handle and it will be the same as its id. Assert that this is the - // case to prevent unexpected behavior. - ASSERT(object->id == params.nvmap_handle); - auto& gpu = system.GPU(); + std::scoped_lock lock(mutex); - u64 page_size{params.page_size}; - if (!page_size) { - page_size = object->align; + if (!vm.initialised) { + return NvResult::BadValue; } - if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) { - if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) { - const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)}; - const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)}; + // Remaps a subregion of an existing mapping to a different PA + if ((params.flags & MappingFlags::Remap) != MappingFlags::None) { + try { + auto mapping{mapping_map.at(params.offset)}; - if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) { - LOG_CRITICAL(Service_NVDRV, - "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, " - "mapping_size = {}, offset={}", - params.flags, params.nvmap_handle, params.buffer_offset, - params.mapping_size, params.offset); - - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::InvalidState; + if (mapping->size < params.mapping_size) { + LOG_WARNING(Service_NVDRV, + "Cannot remap a partially mapped GPU address space region: 0x{:X}", + params.offset); + return NvResult::BadValue; } - std::memcpy(output.data(), ¶ms, output.size()); - return NvResult::Success; - } else { - LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset); + u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; + VAddr cpu_address{mapping->ptr + params.buffer_offset}; + + gmmu->Map(gpu_address, cpu_address, params.mapping_size, + static_cast<Tegra::PTEKind>(params.kind), mapping->big_page); - std::memcpy(output.data(), ¶ms, 
output.size());
-        return NvResult::InvalidState;
+        return NvResult::Success;
+    } catch (const std::out_of_range&) {
+        LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
+                    params.offset);
+        return NvResult::BadValue;
     }
 }
 
-    // We can only map objects that have already been assigned a CPU address.
-    ASSERT(object->status == nvmap::Object::Status::Allocated);
-
-    const auto physical_address{object->addr + params.buffer_offset};
-    u64 size{params.mapping_size};
-    if (!size) {
-        size = object->size;
+    auto handle{nvmap.GetHandle(params.handle)};
+    if (!handle) {
+        return NvResult::BadValue;
     }
 
-    const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
-    if (is_alloc) {
-        params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
-    } else {
-        params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
-    }
+    VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
+    u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
+
+    bool big_page{[&]() {
+        if (Common::IsAligned(handle->align, vm.big_page_size)) {
+            return true;
+        } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
+            return false;
+        } else {
+            ASSERT(false);
+            return false;
+        }
+    }()};
+
+    if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
+        auto alloc{allocation_map.upper_bound(params.offset)};
 
-    auto result = NvResult::Success;
-    if (!params.offset) {
-        LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
-        result = NvResult::InvalidState;
+        if (alloc-- == allocation_map.begin() ||
+            (params.offset - alloc->first) + size > alloc->second.size) {
+            ASSERT_MSG(false, "Cannot perform a fixed mapping into an unallocated region!");
+            return NvResult::BadValue;
+        }
+
+        const bool use_big_pages = alloc->second.big_pages && big_page;
+        gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
+                  use_big_pages);
+
+        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
+                                               use_big_pages, alloc->second.sparse)};
+        alloc->second.mappings.push_back(mapping);
+        mapping_map[params.offset] = mapping;
     } else {
-        AddBufferMap(params.offset, size, physical_address, is_alloc);
+
+        auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+        u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
+        u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+        params.offset = static_cast<u64>(allocator.Allocate(
+                            static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits)))
+                        << page_size_bits;
+        if (!params.offset) {
+            ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
+            return NvResult::InsufficientMemory;
+        }
+
+        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
+                  static_cast<Tegra::PTEKind>(params.kind), big_page);
+
+        auto mapping{
+            std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
+        mapping_map[params.offset] = mapping;
     }
 
     std::memcpy(output.data(), &params, output.size());
-    return result;
+    return NvResult::Success;
 }
 
 NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -254,47 +430,82 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
     LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
 
-    if (const auto size{RemoveBufferMap(params.offset)}; size) {
-        system.GPU().MemoryManager().Unmap(params.offset, *size);
-    } else {
-        LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
+    std::scoped_lock lock(mutex);
+
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
+
+    try {
+        auto mapping{mapping_map.at(params.offset)};
+
+        if (!mapping->fixed) {
+            auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+            u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+            allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
+                           static_cast<u32>(mapping->size >> page_size_bits));
+        }
+
+        // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
+        // Only FreeSpace can unmap them fully
+        if (mapping->sparse_alloc) {
+            gmmu->MapSparse(params.offset, mapping->size, mapping->big_page);
+        } else {
+            gmmu->Unmap(params.offset, mapping->size);
+        }
+
+        mapping_map.erase(params.offset);
+    } catch (const std::out_of_range&) {
+        LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
     }
 
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }
 
 NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
     IoctlBindChannel params{};
     std::memcpy(&params, input.data(), input.size());
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd);
+    LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
 
-    channel = params.fd;
+    auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
+    gpu_channel_device->channel_state->memory_manager = gmmu;
     return NvResult::Success;
 }
 
+void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
+    params.buf_size = 2 * sizeof(VaRegion);
+
+    params.regions = std::array<VaRegion, 2>{
+        VaRegion{
+            .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS,
+            .page_size = VM::YUZU_PAGESIZE,
+            ._pad0_{},
+            .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(),
+        },
+        VaRegion{
+            .offset = vm.big_page_allocator->GetVAStart() << vm.big_page_size_bits,
+            .page_size = vm.big_page_size,
+            ._pad0_{},
+            .pages = vm.big_page_allocator->GetVALimit() - vm.big_page_allocator->GetVAStart(),
+        },
+    };
+}
+
 NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
     IoctlGetVaRegions params{};
     std::memcpy(&params, input.data(), input.size());
 
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
-                params.buf_size);
-
-    params.buf_size = 0x30;
+    LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
+              params.buf_size);
 
-    params.small = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = DEFAULT_SMALL_PAGE_SIZE,
-        .pages = 0x3fbfff,
-    };
+    std::scoped_lock lock(mutex);
 
-    params.big = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = big_page_size,
-        .pages = 0x1bffff,
-    };
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
 
-    // TODO(ogniK): This probably can stay stubbed but should add support way way later
+    GetVARegionsImpl(params);
 
     std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
@@ -305,62 +516,27 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u
     IoctlGetVaRegions params{};
     std::memcpy(&params, input.data(), input.size());
 
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
-                params.buf_size);
-
-    params.buf_size = 0x30;
+    LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
+              params.buf_size);
 
-    params.small = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = 0x1000,
-        .pages = 0x3fbfff,
-    };
+    std::scoped_lock lock(mutex);
 
-    params.big = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = big_page_size,
-        .pages = 0x1bffff,
-    };
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
 
-    // TODO(ogniK): This probably can stay stubbed but should add support way way later
+    GetVARegionsImpl(params);
 
     std::memcpy(output.data(), &params, output.size());
-    std::memcpy(inline_output.data(), &params.small, sizeof(IoctlVaRegion));
-    std::memcpy(inline_output.data() + sizeof(IoctlVaRegion), &params.big, sizeof(IoctlVaRegion));
+    std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
+    std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));
 
     return NvResult::Success;
 }
 
-std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const {
-    const auto end{buffer_mappings.upper_bound(gpu_addr)};
-    for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) {
-        if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
-            return iter->second;
-        }
-    }
-
-    return std::nullopt;
-}
-
-void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
-                                 bool is_allocated) {
-    buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
-}
-
-std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
-    if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
-        std::size_t size{};
-
-        if (iter->second.IsAllocated()) {
-            size = iter->second.Size();
-        }
-
-        buffer_mappings.erase(iter);
-
-        return size;
-    }
-
-    return std::nullopt;
+Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) {
    LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id);
    return nullptr;
 }
 
 } // namespace Service::Nvidia::Devices
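As an aid to reading the MapBufferEx rewrite above: the fixed-offset path locates the allocation containing params.offset with an upper_bound followed by a decrement, since allocation_map is keyed by base offset and only the predecessor region can contain a given address. A minimal standalone sketch of that containing-interval lookup (illustration only, not part of the patch; Region and Contains are made-up names):

    #include <cstdint>
    #include <cstdio>
    #include <map>

    struct Region {
        std::uint64_t size;
    };

    bool Contains(const std::map<std::uint64_t, Region>& regions, std::uint64_t offset,
                  std::uint64_t size) {
        auto it = regions.upper_bound(offset); // first region whose base is > offset
        if (it == regions.begin()) {
            return false; // no region starts at or before offset
        }
        --it; // the only candidate: greatest base <= offset
        return (offset - it->first) + size <= it->second.size;
    }

    int main() {
        const std::map<std::uint64_t, Region> regions{{0x10000, {0x20000}}};
        std::printf("%d %d\n", Contains(regions, 0x18000, 0x1000),  // 1: fully inside
                    Contains(regions, 0x28000, 0x10000));           // 0: overruns the region
    }

The patch expresses the same predecessor step with the post-decrement idiom `alloc-- == allocation_map.begin()`, which tests for the begin() edge case and steps back in one expression.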
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 555843a6f..86fe71c75 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -1,35 +1,50 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
+#include <bit>
+#include <list>
 #include <map>
 #include <memory>
+#include <mutex>
 #include <optional>
 #include <vector>
 
+#include "common/address_space.h"
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/swap.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 
-namespace Service::Nvidia::Devices {
+namespace Tegra {
+class MemoryManager;
+} // namespace Tegra
+
+namespace Service::Nvidia {
+class Module;
+}
 
-constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16;
-constexpr u32 DEFAULT_SMALL_PAGE_SIZE = 1 << 12;
+namespace Service::Nvidia::NvCore {
+class Container;
+class NvMap;
+} // namespace Service::Nvidia::NvCore
 
-class nvmap;
+namespace Service::Nvidia::Devices {
 
-enum class AddressSpaceFlags : u32 {
-    None = 0x0,
-    FixedOffset = 0x1,
-    Remap = 0x100,
+enum class MappingFlags : u32 {
+    None = 0,
+    Fixed = 1 << 0,
+    Sparse = 1 << 1,
+    Remap = 1 << 8,
 };
-DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);
+DECLARE_ENUM_FLAG_OPERATORS(MappingFlags);
 
 class nvhost_as_gpu final : public nvdevice {
 public:
-    explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_);
+    explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core);
     ~nvhost_as_gpu() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -42,46 +57,17 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
-private:
-    class BufferMap final {
-    public:
-        constexpr BufferMap() = default;
-
-        constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_)
-            : start_addr{start_addr_}, end_addr{start_addr_ + size_} {}
-
-        constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_, VAddr cpu_addr_,
-                            bool is_allocated_)
-            : start_addr{start_addr_}, end_addr{start_addr_ + size_}, cpu_addr{cpu_addr_},
-              is_allocated{is_allocated_} {}
-
-        constexpr VAddr StartAddr() const {
-            return start_addr;
-        }
-
-        constexpr VAddr EndAddr() const {
-            return end_addr;
-        }
-
-        constexpr std::size_t Size() const {
-            return end_addr - start_addr;
-        }
-
-        constexpr VAddr CpuAddr() const {
-            return cpu_addr;
-        }
-
-        constexpr bool IsAllocated() const {
-            return is_allocated;
-        }
-
-    private:
-        GPUVAddr start_addr{};
-        GPUVAddr end_addr{};
-        VAddr cpu_addr{};
-        bool is_allocated{};
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+    struct VaRegion {
+        u64 offset;
+        u32 page_size;
+        u32 _pad0_;
+        u64 pages;
     };
+    static_assert(sizeof(VaRegion) == 0x18);
 
+private:
     struct IoctlAllocAsEx {
         u32_le flags{}; // usually passes 1
         s32_le as_fd{}; // ignored; passes 0
@@ -96,7 +82,7 @@ private:
     struct IoctlAllocSpace {
         u32_le pages{};
         u32_le page_size{};
-        AddressSpaceFlags flags{};
+        MappingFlags flags{};
         INSERT_PADDING_WORDS(1);
         union {
             u64_le offset;
@@ -113,19 +99,19 @@ private:
     static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size");
 
     struct IoctlRemapEntry {
-        u16_le flags{};
-        u16_le kind{};
-        u32_le nvmap_handle{};
-        u32_le map_offset{};
-        u32_le offset{};
-        u32_le pages{};
+        u16 flags;
+        u16 kind;
+        NvCore::NvMap::Handle::Id handle;
+        u32 handle_offset_big_pages;
+        u32 as_offset_big_pages;
+        u32 big_pages;
     };
     static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");
 
     struct IoctlMapBufferEx {
-        AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable
-        u32_le kind{};             // -1 is default
-        u32_le nvmap_handle{};
+        MappingFlags flags{}; // bit0: fixed_offset, bit2: cacheable
+        u32_le kind{};        // -1 is default
+        NvCore::NvMap::Handle::Id handle;
         u32_le page_size{};   // 0 means don't care
         s64_le buffer_offset{};
         u64_le mapping_size{};
@@ -143,27 +129,15 @@ private:
     };
     static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size");
 
-    struct IoctlVaRegion {
-        u64_le offset{};
-        u32_le page_size{};
-        INSERT_PADDING_WORDS(1);
-        u64_le pages{};
-    };
-    static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size");
-
     struct IoctlGetVaRegions {
         u64_le buf_addr{}; // (contained output user ptr on linux, ignored)
         u32_le buf_size{}; // forced to 2*sizeof(struct va_region)
         u32_le reserved{};
-        IoctlVaRegion small{};
-        IoctlVaRegion big{};
+        std::array<VaRegion, 2> regions{};
     };
-    static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2,
+    static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
                   "IoctlGetVaRegions is incorrect size");
 
-    s32 channel{};
-    u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
-
     NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);
@@ -172,18 +146,75 @@ private:
     NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);
 
+    void GetVARegionsImpl(IoctlGetVaRegions& params);
     NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
                           std::vector<u8>& inline_output);
 
-    std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
-    void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
-    std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
+    void FreeMappingLocked(u64 offset);
+
+    Module& module;
+
+    NvCore::Container& container;
+    NvCore::NvMap& nvmap;
 
-    std::shared_ptr<nvmap> nvmap_dev;
+    struct Mapping {
+        VAddr ptr;
+        u64 offset;
+        u64 size;
+        bool fixed;
+        bool big_page; // Only valid if fixed == false
+        bool sparse_alloc;
+
+        Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
+            : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
+              sparse_alloc(sparse_alloc_) {}
+    };
+
+    struct Allocation {
+        u64 size;
+        std::list<std::shared_ptr<Mapping>> mappings;
+        u32 page_size;
+        bool sparse;
+        bool big_pages;
+    };
 
-    // This is expected to be ordered, therefore we must use a map, not unordered_map
-    std::map<GPUVAddr, BufferMap> buffer_mappings;
+    std::map<u64, std::shared_ptr<Mapping>>
+        mapping_map; //!< This maps the base addresses of mapped buffers to their total sizes and
+                     //!< mapping type, this is needed as what was originally a single buffer may
+                     //!< have been split into multiple GPU side buffers with the remap flag.
+    std::map<u64, Allocation> allocation_map; //!< Holds allocations created by AllocSpace from
+                                              //!< which fixed buffers can be mapped into
+    std::mutex mutex; //!< Locks all AS operations
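The three members above form a two-level bookkeeping scheme: each Allocation owns the list of mappings placed inside it, while the flat mapping_map keyed by GPU virtual address lets UnmapBuffer find any mapping directly. A toy sketch of the idea (types and values here are invented for illustration, not the class declared above):

    #include <cstdint>
    #include <list>
    #include <map>
    #include <memory>

    struct Mapping {
        std::uint64_t offset;
        std::uint64_t size;
    };

    struct Allocation {
        std::uint64_t size;
        std::list<std::shared_ptr<Mapping>> mappings;
    };

    int main() {
        std::map<std::uint64_t, Allocation> allocations;
        std::map<std::uint64_t, std::shared_ptr<Mapping>> mappings;

        Allocation& alloc = allocations[0x100000] = Allocation{0x40000, {}};
        auto m = std::make_shared<Mapping>(Mapping{0x110000, 0x2000});
        alloc.mappings.push_back(m); // owning side
        mappings[m->offset] = m;     // lookup side, keyed by GPU VA

        // Unmap needs only the GPU VA; the shared_ptr keeps the record alive
        // until both indices have dropped it.
        mappings.erase(0x110000);
    }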
+
+    struct VM {
+        static constexpr u32 YUZU_PAGESIZE{0x1000};
+        static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)};
+
+        static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
+        static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};
+        u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
+        u32 big_page_size_bits{std::countr_zero(DEFAULT_BIG_PAGE_SIZE)};
+
+        static constexpr u32 VA_START_SHIFT{10};
+        static constexpr u64 DEFAULT_VA_SPLIT{1ULL << 34};
+        static constexpr u64 DEFAULT_VA_RANGE{1ULL << 37};
+        u64 va_range_start{DEFAULT_BIG_PAGE_SIZE << VA_START_SHIFT};
+        u64 va_range_split{DEFAULT_VA_SPLIT};
+        u64 va_range_end{DEFAULT_VA_RANGE};
+
+        using Allocator = Common::FlatAllocator<u32, 0, 32>;
+
+        std::unique_ptr<Allocator> big_page_allocator;
+        std::shared_ptr<Allocator>
+            small_page_allocator; //! Shared as this is also used by nvhost::GpuChannel
+
+        bool initialised{};
+    } vm;
+    std::shared_ptr<Tegra::MemoryManager> gmmu;
+
+    // s32 channel{};
+    // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 527531f29..eee11fab8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -1,24 +1,38 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
+#include <bit>
 #include <cstdlib>
 #include <cstring>
 
+#include <fmt/format.h>
+
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::Devices {
 
 nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
-                         SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{
-                                                                  syncpoint_manager_} {}
-nvhost_ctrl::~nvhost_ctrl() = default;
+                         NvCore::Container& core_)
+    : nvdevice{system_}, events_interface{events_interface_}, core{core_},
+      syncpoint_manager{core_.GetSyncpointManager()} {}
+
+nvhost_ctrl::~nvhost_ctrl() {
+    for (auto& event : events) {
+        if (!event.registered) {
+            continue;
+        }
+        events_interface.FreeEvent(event.kevent);
+    }
+}
 
 NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                              std::vector<u8>& output) {
@@ -30,13 +44,15 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
         case 0x1c:
             return IocCtrlClearEventWait(input, output);
         case 0x1d:
-            return IocCtrlEventWait(input, output, false);
-        case 0x1e:
             return IocCtrlEventWait(input, output, true);
+        case 0x1e:
+            return IocCtrlEventWait(input, output, false);
         case 0x1f:
             return IocCtrlEventRegister(input, output);
         case 0x20:
             return IocCtrlEventUnregister(input, output);
+        case 0x21:
+            return IocCtrlEventUnregisterBatch(input, output);
         }
         break;
     default:
@@ -60,6 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
 }
 
 void nvhost_ctrl::OnOpen(DeviceFD fd) {}
+
 void nvhost_ctrl::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -71,116 +88,167 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector
 }
 
 NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
-                                       bool is_async) {
+                                       bool is_allocation) {
     IocCtrlEventWaitParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
-    LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}",
-              params.syncpt_id, params.threshold, params.timeout, is_async);
+    LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
+              params.fence.id, params.fence.value, params.timeout, is_allocation);
 
-    if (params.syncpt_id >= MaxSyncPoints) {
-        return NvResult::BadParameter;
-    }
+    bool must_unmark_fail = !is_allocation;
+    const u32 event_id = params.value.raw;
+    SCOPE_EXIT({
+        std::memcpy(output.data(), &params, sizeof(params));
+        if (must_unmark_fail) {
+            events[event_id].fails = 0;
+        }
+    });
 
-    u32 event_id = params.value & 0x00FF;
+    const u32 fence_id = static_cast<u32>(params.fence.id);
 
-    if (event_id >= MaxNvEvents) {
-        std::memcpy(output.data(), &params, sizeof(params));
+    if (fence_id >= MaxSyncPoints) {
         return NvResult::BadParameter;
     }
 
-    if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
-        params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id);
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
+    if (params.fence.value == 0) {
+        if (!syncpoint_manager.IsSyncpointAllocated(params.fence.id)) {
+            LOG_WARNING(Service_NVDRV,
+                        "Unallocated syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
+                        params.fence.id, params.fence.value, params.timeout, is_allocation);
+        } else {
+            params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
+        }
         return NvResult::Success;
     }
 
-    if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id);
-        syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
-        params.value = new_value;
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
+    if (syncpoint_manager.IsFenceSignalled(params.fence)) {
+        params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
         return NvResult::Success;
     }
 
-    auto& event = events_interface.events[event_id];
-    auto& gpu = system.GPU();
-
-    // This is mostly to take into account unimplemented features. As synced
-    // gpu is always synced.
-    if (!gpu.IsAsync()) {
-        event.event->GetWritableEvent().Signal();
-        return NvResult::Success;
-    }
-    const u32 current_syncpoint_value = event.fence.value;
-    const s32 diff = current_syncpoint_value - params.threshold;
-    if (diff >= 0) {
-        event.event->GetWritableEvent().Signal();
-        params.value = current_syncpoint_value;
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
+    if (const auto new_value = syncpoint_manager.UpdateMin(fence_id);
+        syncpoint_manager.IsFenceSignalled(params.fence)) {
+        params.value.raw = new_value;
         return NvResult::Success;
     }
-    const u32 target_value = current_syncpoint_value - diff;
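A note on the removed fallback above: it computes `const s32 diff = current_syncpoint_value - params.threshold;` and treats a non-negative result as expired. Subtracting first and checking the sign keeps the test correct when the 32-bit syncpoint counter wraps, which a plain `value >= threshold` would not. A self-contained sketch of that comparison (illustrative; the narrowing cast is implementation-defined before C++20 and modular from C++20 on):

    #include <cstdint>
    #include <cstdio>

    bool HasSyncpointExpired(std::uint32_t value, std::uint32_t threshold) {
        // Correct as long as the two values are within 2^31 of each other.
        return static_cast<std::int32_t>(value - threshold) >= 0;
    }

    int main() {
        std::printf("%d\n", HasSyncpointExpired(5, 3));           // 1
        std::printf("%d\n", HasSyncpointExpired(2, 0xFFFFFFF0u)); // 1: counter wrapped past
        std::printf("%d\n", HasSyncpointExpired(0xFFFFFFF0u, 2)); // 0: not reached yet
    }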
 
-    if (!is_async) {
-        params.value = 0;
+    auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
+    const u32 target_value = params.fence.value;
+
+    auto lock = NvEventsLock();
+
+    u32 slot = [&]() {
+        if (is_allocation) {
+            params.value.raw = 0;
+            return FindFreeNvEvent(fence_id);
+        } else {
+            return params.value.raw;
+        }
+    }();
+
+    must_unmark_fail = false;
+
+    const auto check_failing = [&]() {
+        if (events[slot].fails > 2) {
+            {
+                auto lk = system.StallProcesses();
+                host1x_syncpoint_manager.WaitHost(fence_id, target_value);
+                system.UnstallProcesses();
+            }
+            params.value.raw = target_value;
+            return true;
+        }
+        return false;
+    };
+
+    if (slot >= MaxNvEvents) {
+        return NvResult::BadParameter;
     }
 
     if (params.timeout == 0) {
-        std::memcpy(output.data(), &params, sizeof(params));
+        if (check_failing()) {
+            events[slot].fails = 0;
+            return NvResult::Success;
+        }
         return NvResult::Timeout;
     }
 
-    EventState status = events_interface.status[event_id];
-    const bool bad_parameter = status == EventState::Busy;
-    if (bad_parameter) {
-        std::memcpy(output.data(), &params, sizeof(params));
+    auto& event = events[slot];
+
+    if (!event.registered) {
         return NvResult::BadParameter;
     }
-    events_interface.SetEventStatus(event_id, EventState::Waiting);
-    events_interface.assigned_syncpt[event_id] = params.syncpt_id;
-    events_interface.assigned_value[event_id] = target_value;
-    if (is_async) {
-        params.value = params.syncpt_id << 4;
-    } else {
-        params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000;
-    }
-    params.value |= event_id;
-    event.event->GetWritableEvent().Clear();
-    if (events_interface.failed[event_id]) {
-        {
-            auto lk = system.StallProcesses();
-            gpu.WaitFence(params.syncpt_id, target_value);
-            system.UnstallProcesses();
-        }
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
+
+    if (event.IsBeingUsed()) {
+        return NvResult::BadParameter;
+    }
+
+    if (check_failing()) {
+        event.fails = 0;
         return NvResult::Success;
     }
-    gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value);
-    std::memcpy(output.data(), &params, sizeof(params));
+
+    params.value.raw = 0;
+
+    event.status.store(EventState::Waiting, std::memory_order_release);
+    event.assigned_syncpt = fence_id;
+    event.assigned_value = target_value;
+    if (is_allocation) {
+        params.value.syncpoint_id_for_allocation.Assign(static_cast<u16>(fence_id));
+        params.value.event_allocated.Assign(1);
+    } else {
+        params.value.syncpoint_id.Assign(fence_id);
+    }
+    params.value.raw |= slot;
+
+    event.wait_handle =
+        host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
+            auto& event_ = events[slot];
+            if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
+                EventState::Waiting) {
+                event_.kevent->Signal();
+            }
+            event_.status.store(EventState::Signalled, std::memory_order_release);
+        });
     return NvResult::Timeout;
 }
 
+NvResult nvhost_ctrl::FreeEvent(u32 slot) {
+    if (slot >= MaxNvEvents) {
+        return NvResult::BadParameter;
+    }
+
+    auto& event = events[slot];
+
+    if (!event.registered) {
+        return NvResult::Success;
+    }
+
+    if (event.IsBeingUsed()) {
+        return NvResult::Busy;
+    }
+
+    FreeNvEvent(slot);
+    return NvResult::Success;
+}
+
 NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
     IocCtrlEventRegisterParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
-    const u32 event_id = params.user_event_id & 0x00FF;
+    const u32 event_id = params.user_event_id;
     LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
     if (event_id >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
-    if (events_interface.registered[event_id]) {
-        const auto event_state = events_interface.status[event_id];
-        if (event_state != EventState::Free) {
-            LOG_WARNING(Service_NVDRV, "Event already registered! Unregistering previous event");
-            events_interface.UnregisterEvent(event_id);
-        } else {
-            return NvResult::BadParameter;
+
+    auto lock = NvEventsLock();
+
+    if (events[event_id].registered) {
+        const auto result = FreeEvent(event_id);
+        if (result != NvResult::Success) {
+            return result;
         }
     }
-    events_interface.RegisterEvent(event_id);
+    CreateNvEvent(event_id);
     return NvResult::Success;
 }
 
@@ -190,34 +258,142 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
     std::memcpy(&params, input.data(), sizeof(params));
     const u32 event_id = params.user_event_id & 0x00FF;
     LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
-    if (event_id >= MaxNvEvents) {
-        return NvResult::BadParameter;
-    }
-    if (!events_interface.registered[event_id]) {
-        return NvResult::BadParameter;
+
+    auto lock = NvEventsLock();
+    return FreeEvent(event_id);
+}
+
+NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input,
+                                                  std::vector<u8>& output) {
+    IocCtrlEventUnregisterBatchParams params{};
+    std::memcpy(&params, input.data(), sizeof(params));
+    u64 event_mask = params.user_events;
+    LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);
+
+    auto lock = NvEventsLock();
+    while (event_mask != 0) {
+        const u64 event_id = std::countr_zero(event_mask);
+        event_mask &= ~(1ULL << event_id);
+        const auto result = FreeEvent(static_cast<u32>(event_id));
+        if (result != NvResult::Success) {
+            return result;
+        }
     }
-    events_interface.UnregisterEvent(event_id);
     return NvResult::Success;
 }
 
 NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input,
                                             std::vector<u8>& output) {
-    IocCtrlEventSignalParams params{};
+    IocCtrlEventClearParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
 
-    u32 event_id = params.event_id & 0x00FF;
-    LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id);
+    u32 event_id = params.event_id.slot;
+    LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);
 
     if (event_id >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
-    if (events_interface.status[event_id] == EventState::Waiting) {
-        events_interface.LiberateEvent(event_id);
-    }
-    events_interface.failed[event_id] = true;
 
-    syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id);
+    auto lock = NvEventsLock();
+
+    auto& event = events[event_id];
+    if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
+        EventState::Waiting) {
+        auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
+        host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle);
+        syncpoint_manager.UpdateMin(event.assigned_syncpt);
+        event.wait_handle = {};
+    }
+    event.fails++;
+    event.status.store(EventState::Cancelled, std::memory_order_release);
+    event.kevent->Clear();
 
     return NvResult::Success;
 }
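Both the cancel path just above and the signal callback registered in IocCtrlEventWait claim a Waiting event with an atomic exchange, so only the side that observes Waiting acts on the kernel event and a late host1x callback cannot double-signal a cancelled slot. A reduced sketch of that handshake (illustration with stand-in actions, not the emulator's types):

    #include <atomic>
    #include <cstdio>

    enum class EventState { Available, Waiting, Cancelling, Signalling, Signalled, Cancelled };

    std::atomic<EventState> status{EventState::Waiting};

    void SignalCallback() {
        if (status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
            EventState::Waiting) {
            std::puts("signal kernel event"); // stand-in for kevent->Signal()
        }
        status.store(EventState::Signalled, std::memory_order_release);
    }

    void Cancel() {
        if (status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
            EventState::Waiting) {
            std::puts("deregister host action"); // stand-in for DeregisterHostAction()
        }
        status.store(EventState::Cancelled, std::memory_order_release);
    }

    int main() {
        Cancel();         // claims the Waiting state
        SignalCallback(); // observes a non-Waiting state and skips the signal
    }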
 
+Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) {
+    const auto desired_event = SyncpointEventValue{.raw = event_id};
+
+    const bool allocated = desired_event.event_allocated.Value() != 0;
+    const u32 slot{allocated ? desired_event.partial_slot.Value()
+                             : static_cast<u32>(desired_event.slot)};
+    if (slot >= MaxNvEvents) {
+        ASSERT(false);
+        return nullptr;
+    }
+
+    const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value()
+                                     : desired_event.syncpoint_id.Value()};
+
+    auto lock = NvEventsLock();
+
+    auto& event = events[slot];
+    if (event.registered && event.assigned_syncpt == syncpoint_id) {
+        ASSERT(event.kevent);
+        return event.kevent;
+    }
+    // Is this possible in hardware?
+    ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id);
+    return nullptr;
+}
+
+std::unique_lock<std::mutex> nvhost_ctrl::NvEventsLock() {
+    return std::unique_lock<std::mutex>(events_mutex);
+}
+
+void nvhost_ctrl::CreateNvEvent(u32 event_id) {
+    auto& event = events[event_id];
+    ASSERT(!event.kevent);
+    ASSERT(!event.registered);
+    ASSERT(!event.IsBeingUsed());
+    event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id));
+    event.status = EventState::Available;
+    event.registered = true;
+    const u64 mask = 1ULL << event_id;
+    event.fails = 0;
+    events_mask |= mask;
+    event.assigned_syncpt = 0;
+}
+
+void nvhost_ctrl::FreeNvEvent(u32 event_id) {
+    auto& event = events[event_id];
+    ASSERT(event.kevent);
+    ASSERT(event.registered);
+    ASSERT(!event.IsBeingUsed());
+    events_interface.FreeEvent(event.kevent);
+    event.kevent = nullptr;
+    event.status = EventState::Available;
+    event.registered = false;
+    const u64 mask = ~(1ULL << event_id);
+    events_mask &= mask;
+}
+
+u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
+    u32 slot{MaxNvEvents};
+    u32 free_slot{MaxNvEvents};
+    for (u32 i = 0; i < MaxNvEvents; i++) {
+        auto& event = events[i];
+        if (event.registered) {
+            if (!event.IsBeingUsed()) {
+                slot = i;
+                if (event.assigned_syncpt == syncpoint_id) {
+                    return slot;
+                }
+            }
+        } else if (free_slot == MaxNvEvents) {
+            free_slot = i;
+        }
+    }
+    if (free_slot < MaxNvEvents) {
+        CreateNvEvent(free_slot);
+        return free_slot;
+    }
+
+    if (slot < MaxNvEvents) {
+        return slot;
+    }
+
+    LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event");
+    return 0;
+}
+
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 4fbb89b15..0b56d7070 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -1,20 +1,28 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
 #include <array>
 #include <vector>
 
+#include "common/bit_field.h"
 #include "common/common_types.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
+#include "video_core/host1x/syncpoint_manager.h"
+
+namespace Service::Nvidia::NvCore {
+class Container;
+class SyncpointManager;
+} // namespace Service::Nvidia::NvCore
 
 namespace Service::Nvidia::Devices {
 
 class nvhost_ctrl final : public nvdevice {
 public:
     explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
-                         SyncpointManager& syncpoint_manager_);
+                         NvCore::Container& core);
     ~nvhost_ctrl() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,7 +35,70 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+    union SyncpointEventValue {
+        u32 raw;
+
+        union {
+            BitField<0, 4, u32> partial_slot;
+            BitField<4, 28, u32> syncpoint_id;
+        };
+
+        struct {
+            u16 slot;
+            union {
+                BitField<0, 12, u16> syncpoint_id_for_allocation;
+                BitField<12, 1, u16> event_allocated;
+            };
+        };
+    };
+    static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
+
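The SyncpointEventValue union above packs two encodings into one u32, matching the values IocCtrlEventWait hands back to the guest. A sketch of the same packing written with plain shifts and masks (hand-derived from the BitField layout; illustration only):

    #include <cstdint>

    constexpr std::uint32_t PackWait(std::uint32_t syncpoint_id, std::uint32_t slot) {
        return (syncpoint_id << 4) | (slot & 0xF); // BitField<4,28> id, BitField<0,4> slot
    }

    constexpr std::uint32_t PackAllocated(std::uint32_t syncpoint_id, std::uint32_t slot) {
        // low u16 = slot; high u16 = 12-bit syncpoint id plus the 'allocated' flag (bit 28)
        return (slot & 0xFFFF) | ((syncpoint_id & 0xFFF) << 16) | (1u << 28);
    }

    static_assert(PackWait(7, 3) == 0x73);
    static_assert((PackAllocated(7, 3) >> 28) == 1);

    int main() {}

The pre-rework code produced the second form directly as `((syncpt_id & 0xfff) << 16) | 0x10000000`, which is the same bit 28 flag spelled as a constant.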
 private:
+    struct InternalEvent {
+        // Mask representing registered events
+
+        // Each kernel event associated to an NV event
+        Kernel::KEvent* kevent{};
+        // The status of the current NVEvent
+        std::atomic<EventState> status{};
+
+        // Tells the NVEvent that it has failed.
+        u32 fails{};
+        // When an NVEvent is waiting on GPU interrupt, this is the sync_point
+        // associated with it.
+        u32 assigned_syncpt{};
+        // This is the value of the GPU interrupt for which the NVEvent is waiting
+        // for.
+        u32 assigned_value{};
+
+        // Tells if an NVEvent is registered or not
+        bool registered{};
+
+        // Used for waiting on a syncpoint & canceling it.
+        Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{};
+
+        bool IsBeingUsed() const {
+            const auto current_status = status.load(std::memory_order_acquire);
+            return current_status == EventState::Waiting ||
+                   current_status == EventState::Cancelling ||
+                   current_status == EventState::Signalling;
+        }
+    };
+
+    std::unique_lock<std::mutex> NvEventsLock();
+
+    void CreateNvEvent(u32 event_id);
+
+    void FreeNvEvent(u32 event_id);
+
+    u32 FindFreeNvEvent(u32 syncpoint_id);
+
+    std::array<InternalEvent, MaxNvEvents> events{};
+    std::mutex events_mutex;
+    u64 events_mask{};
+
     struct IocSyncptReadParams {
         u32_le id{};
         u32_le value{};
@@ -83,27 +154,18 @@ private:
     };
     static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size");
 
-    struct IocCtrlEventSignalParams {
-        u32_le event_id{};
+    struct IocCtrlEventClearParams {
+        SyncpointEventValue event_id{};
     };
-    static_assert(sizeof(IocCtrlEventSignalParams) == 4,
-                  "IocCtrlEventSignalParams is incorrect size");
+    static_assert(sizeof(IocCtrlEventClearParams) == 4,
+                  "IocCtrlEventClearParams is incorrect size");
 
     struct IocCtrlEventWaitParams {
-        u32_le syncpt_id{};
-        u32_le threshold{};
-        s32_le timeout{};
-        u32_le value{};
-    };
-    static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size");
-
-    struct IocCtrlEventWaitAsyncParams {
-        u32_le syncpt_id{};
-        u32_le threshold{};
+        NvFence fence{};
         u32_le timeout{};
-        u32_le value{};
+        SyncpointEventValue value{};
     };
-    static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16,
+    static_assert(sizeof(IocCtrlEventWaitParams) == 16,
                   "IocCtrlEventWaitAsyncParams is incorrect size");
 
     struct IocCtrlEventRegisterParams {
@@ -118,19 +180,25 @@ private:
     static_assert(sizeof(IocCtrlEventUnregisterParams) == 4,
                   "IocCtrlEventUnregisterParams is incorrect size");
 
-    struct IocCtrlEventKill {
+    struct IocCtrlEventUnregisterBatchParams {
         u64_le user_events{};
     };
-    static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size");
+    static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
+                  "IocCtrlEventKill is incorrect size");
 
     NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
-    NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async);
+    NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
+                              bool is_allocation);
    NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
+    NvResult IocCtrlEventUnregisterBatch(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);
 
+    NvResult FreeEvent(u32 slot);
+
     EventInterface& events_interface;
-    SyncpointManager& syncpoint_manager;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 2b3b7efea..ced57dfe6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -7,11 +7,19 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
 
 namespace Service::Nvidia::Devices {
 
-nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_) : nvdevice{system_} {}
-nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default;
+nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_)
+    : nvdevice{system_}, events_interface{events_interface_} {
+    error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier");
+    unknown_event = events_interface.CreateEvent("CtrlGpuUknownEvent");
+}
+nvhost_ctrl_gpu::~nvhost_ctrl_gpu() {
+    events_interface.FreeEvent(error_notifier_event);
+    events_interface.FreeEvent(unknown_event);
+}
 
 NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                                  std::vector<u8>& output) {
@@ -286,4 +294,17 @@ NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u
     return NvResult::Success;
 }
 
+Kernel::KEvent* nvhost_ctrl_gpu::QueryEvent(u32 event_id) {
+    switch (event_id) {
+    case 1:
+        return error_notifier_event;
+    case 2:
+        return unknown_event;
+    default: {
+        LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
+    }
+    }
+    return nullptr;
+}
+
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 97e9a90cb..1e8f254e2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -10,11 +10,15 @@
 #include "common/swap.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 
+namespace Service::Nvidia {
+class EventInterface;
+}
+
 namespace Service::Nvidia::Devices {
 
 class nvhost_ctrl_gpu final : public nvdevice {
 public:
-    explicit nvhost_ctrl_gpu(Core::System& system_);
+    explicit nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_);
     ~nvhost_ctrl_gpu() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,6 +31,8 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
 private:
     struct IoctlGpuCharacteristics {
         u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200)
@@ -160,6 +166,12 @@ private:
     NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);
+
+    EventInterface& events_interface;
+
+    // Events
+    Kernel::KEvent* error_notifier_event;
+    Kernel::KEvent* unknown_event;
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index b98e63011..45a759fa8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -5,29 +5,46 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/memory.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::Devices {
 namespace {
-Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) {
-    Tegra::GPU::FenceAction result{};
+Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) {
+    Tegra::Engines::Puller::FenceAction result{};
     result.op.Assign(op);
     result.syncpoint_id.Assign(syncpoint_id);
     return {result.raw};
 }
 } // namespace
 
-nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                       SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {
-    channel_fence.id = syncpoint_manager_.AllocateSyncpoint();
-    channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
+nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
+                       NvCore::Container& core_)
+    : nvdevice{system_}, events_interface{events_interface_}, core{core_},
+      syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
+      channel_state{system.GPU().AllocateChannel()} {
+    channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
+    sm_exception_breakpoint_int_report_event =
+        events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
+    sm_exception_breakpoint_pause_report_event =
+        events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause");
+    error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier");
 }
 
-nvhost_gpu::~nvhost_gpu() = default;
+nvhost_gpu::~nvhost_gpu() {
+    events_interface.FreeEvent(sm_exception_breakpoint_int_report_event);
+    events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event);
+    events_interface.FreeEvent(error_notifier_event);
+    syncpoint_manager.FreeSyncpoint(channel_syncpoint);
+}
 
 NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                             std::vector<u8>& output) {
@@ -167,9 +184,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
               params.num_entries, params.flags, params.unk0, params.unk1, params.unk2, params.unk3);
 
-    channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
+    if (channel_state->initialized) {
+        LOG_CRITICAL(Service_NVDRV, "Already allocated!");
+        return NvResult::AlreadyAllocated;
+    }
+
+    system.GPU().InitChannel(*channel_state);
 
-    params.fence_out = channel_fence;
+    params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
 
     std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
@@ -188,39 +210,37 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::ve
 
 static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) {
     return {
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
                                   Tegra::SubmissionMode::Increasing),
         {fence.value},
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
                                   Tegra::SubmissionMode::Increasing),
-        BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id),
+        BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id),
     };
 }
 
-static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence,
-                                                                   u32 add_increment) {
+static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence) {
     std::vector<Tegra::CommandHeader> result{
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
                                   Tegra::SubmissionMode::Increasing),
        {}};
 
-    for (u32 count = 0; count < add_increment; ++count) {
-        result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
+    for (u32 count = 0; count < 2; ++count) {
+        result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
                                                       Tegra::SubmissionMode::Increasing));
-        result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id));
+        result.emplace_back(
+            BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id));
     }
 
     return result;
 }
 
-static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence,
-                                                                          u32 add_increment) {
+static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence) {
     std::vector<Tegra::CommandHeader> result{
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1,
                                   Tegra::SubmissionMode::Increasing),
        {}};
-    const std::vector<Tegra::CommandHeader> increment{
-        BuildIncrementCommandList(fence, add_increment)};
+    const std::vector<Tegra::CommandHeader> increment{BuildIncrementCommandList(fence)};
 
     result.insert(result.end(), increment.begin(), increment.end());
 
@@ -234,33 +254,41 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>
 
     auto& gpu = system.GPU();
 
-    params.fence_out.id = channel_fence.id;
+    std::scoped_lock lock(channel_mutex);
 
-    if (params.flags.add_wait.Value() &&
-        !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
-        gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
-    }
+    const auto bind_id = channel_state->bind_id;
 
-    if (params.flags.add_increment.Value() || params.flags.increment.Value()) {
-        const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0;
-        params.fence_out.value = syncpoint_manager.IncreaseSyncpoint(
-            params.fence_out.id, params.AddIncrementValue() + increment_value);
-    } else {
-        params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id);
+    auto& flags = params.flags;
+
+    if (flags.fence_wait.Value()) {
+        if (flags.increment_value.Value()) {
+            return NvResult::BadParameter;
+        }
+
+        if (!syncpoint_manager.IsFenceSignalled(params.fence)) {
+            gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence)});
+        }
     }
 
-    gpu.PushGPUEntries(std::move(entries));
+    params.fence.id = channel_syncpoint;
+
+    u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) +
+                  (flags.increment_value.Value() != 0 ? params.fence.value : 0)};
+    params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment);
+    gpu.PushGPUEntries(bind_id, std::move(entries));
 
-    if (params.flags.add_increment.Value()) {
-        if (params.flags.suppress_wfi) {
-            gpu.PushGPUEntries(Tegra::CommandList{
-                BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())});
+    if (flags.fence_increment.Value()) {
+        if (flags.suppress_wfi.Value()) {
+            gpu.PushGPUEntries(bind_id,
+                               Tegra::CommandList{BuildIncrementCommandList(params.fence)});
         } else {
-            gpu.PushGPUEntries(Tegra::CommandList{
-                BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())});
+            gpu.PushGPUEntries(bind_id,
+                               Tegra::CommandList{BuildIncrementWithWfiCommandList(params.fence)});
        }
    }
 
+    flags.raw = 0;
+
     std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
     return NvResult::Success;
 }
@@ -328,4 +356,19 @@ NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vect
     return NvResult::Success;
 }
 
+Kernel::KEvent* nvhost_gpu::QueryEvent(u32 event_id) {
+    switch (event_id) {
+    case 1:
+        return sm_exception_breakpoint_int_report_event;
+    case 2:
+        return sm_exception_breakpoint_pause_report_event;
+    case 3:
+        return error_notifier_event;
+    default: {
+        LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
+    }
+    }
+    return nullptr;
+}
+
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 8a9f7775a..1e4ecd55b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -13,17 +13,31 @@
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "video_core/dma_pusher.h"
 
+namespace Tegra {
+namespace Control {
+struct ChannelState;
+}
+} // namespace Tegra
+
 namespace Service::Nvidia {
+
+namespace NvCore {
+class Container;
+class NvMap;
 class SyncpointManager;
-}
+} // namespace NvCore
+
+class EventInterface;
+} // namespace Service::Nvidia
 
 namespace Service::Nvidia::Devices {
 
+class nvhost_as_gpu;
 class nvmap;
 class nvhost_gpu final : public nvdevice {
 public:
-    explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                        SyncpointManager& syncpoint_manager_);
+    explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
+                        NvCore::Container& core);
     ~nvhost_gpu() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -36,7 +50,10 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
 private:
+    friend class nvhost_as_gpu;
     enum class CtxObjects : u32_le {
         Ctx2D = 0x902D,
         Ctx3D = 0xB197,
@@ -146,17 +163,13 @@ private:
         u32_le num_entries{}; // number of fence objects being submitted
         union {
             u32_le raw;
-            BitField<0, 1, u32_le> add_wait;      // append a wait sync_point to the list
-            BitField<1, 1, u32_le> add_increment; // append an increment to the list
-            BitField<2, 1, u32_le> new_hw_format; // mostly ignored
-            BitField<4, 1, u32_le> suppress_wfi;  // suppress wait for interrupt
-            BitField<8, 1, u32_le> increment;     // increment the returned fence
+            BitField<0, 1, u32_le> fence_wait;      // append a wait sync_point to the list
+            BitField<1, 1, u32_le> fence_increment; // append an increment to the list
+            BitField<2, 1, u32_le> new_hw_format;   // mostly ignored
+            BitField<4, 1, u32_le> suppress_wfi;    // suppress wait for interrupt
+            BitField<8, 1, u32_le> increment_value; // increment the returned fence
         } flags;
-        NvFence fence_out{}; // returned new fence object for others to wait on
-
-        u32 AddIncrementValue() const {
-            return flags.add_increment.Value() << 1;
-        }
+        NvFence fence{}; // returned new fence object for others to wait on
     };
     static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence),
                   "IoctlSubmitGpfifo is incorrect size");
@@ -191,9 +204,18 @@ private:
     NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
 
-    std::shared_ptr<nvmap> nvmap_dev;
-    SyncpointManager& syncpoint_manager;
-    NvFence channel_fence;
+    EventInterface& events_interface;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
+    NvCore::NvMap& nvmap;
+    std::shared_ptr<Tegra::Control::ChannelState> channel_state;
+    u32 channel_syncpoint;
+    std::mutex channel_mutex;
+
+    // Events
+    Kernel::KEvent* sm_exception_breakpoint_int_report_event;
+    Kernel::KEvent* sm_exception_breakpoint_pause_report_event;
+    Kernel::KEvent* error_notifier_event;
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index 2a5128c60..1703f9cc3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -1,17 +1,18 @@
 // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include "audio_core/audio_core.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
 #include "video_core/renderer_base.h"
 
 namespace Service::Nvidia::Devices {
 
-nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                           SyncpointManager& syncpoint_manager_)
-    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
+nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
 nvhost_nvdec::~nvhost_nvdec() = default;
 
 NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -20,8 +21,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
     case 0x0:
         switch (command.cmd) {
         case 0x1: {
-            if (!fd_to_id.contains(fd)) {
-                fd_to_id[fd] = next_id++;
+            auto& host1x_file = core.Host1xDeviceFile();
+            if (!host1x_file.fd_to_id.contains(fd)) {
+                host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
             }
             return Submit(fd, input, output);
         }
@@ -65,14 +67,19 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
     return NvResult::NotImplemented;
 }
 
-void nvhost_nvdec::OnOpen(DeviceFD fd) {}
+void nvhost_nvdec::OnOpen(DeviceFD fd) {
+    LOG_INFO(Service_NVDRV, "NVDEC video stream started");
+    system.AudioCore().SetNVDECActive(true);
+}
 
 void nvhost_nvdec::OnClose(DeviceFD fd) {
     LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
-    const auto iter = fd_to_id.find(fd);
-    if (iter != fd_to_id.end()) {
+    auto& host1x_file = core.Host1xDeviceFile();
+    const auto iter = host1x_file.fd_to_id.find(fd);
+    if (iter != host1x_file.fd_to_id.end()) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
+    system.AudioCore().SetNVDECActive(false);
 }
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index 29b3e6a36..c1b4e53e8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices {
 
 class nvhost_nvdec final : public nvhost_nvdec_common {
 public:
-    explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                          SyncpointManager& syncpoint_manager_);
+    explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core);
     ~nvhost_nvdec() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -23,9 +22,6 @@ public:
 
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
-
-private:
-    u32 next_id{};
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 8b2cd9bf1..99eede702 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -8,10 +8,12 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
-#include "core/hle/service/nvdrv/devices/nvmap.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/memory.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_base.h"
 
@@ -44,10 +46,22 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
     }
 } // Anonymous namespace
 
-nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                                         SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {}
-nvhost_nvdec_common::~nvhost_nvdec_common() = default;
+nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
+                                         NvCore::ChannelType channel_type_)
+    : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
+      nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
+    auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated;
+    if (syncpts_accumulated.empty()) {
+        channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
+    } else {
+        channel_syncpoint = syncpts_accumulated.front();
+        syncpts_accumulated.pop_front();
+    }
+}
+
+nvhost_nvdec_common::~nvhost_nvdec_common() {
+    core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
+}
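The constructor/destructor pair above recycles channel syncpoints through the Host1xDeviceFile's syncpts_accumulated deque instead of allocating a fresh id for every channel. A toy version of that pool (invented names, illustration only):

    #include <cstdint>
    #include <deque>

    struct SyncpointPool {
        std::deque<std::uint32_t> recycled;
        std::uint32_t next{1};

        std::uint32_t Acquire() {
            if (!recycled.empty()) {
                const auto id = recycled.front();
                recycled.pop_front();
                return id;
            }
            return next++; // stand-in for allocating a fresh syncpoint
        }

        void Release(std::uint32_t id) {
            recycled.push_back(id); // what the destructor above does
        }
    };

    int main() {
        SyncpointPool pool;
        const auto a = pool.Acquire(); // fresh id
        pool.Release(a);
        const auto b = pool.Acquire(); // recycled: b == a
        return a == b ? 0 : 1;
    }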
 
 NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
     IoctlSetNvmapFD params{};
@@ -84,16 +98,16 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
         for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
             const SyncptIncr& syncpt_incr = syncpt_increments[i];
             fence_thresholds[i] =
-                syncpoint_manager.IncreaseSyncpoint(syncpt_incr.id, syncpt_incr.increments);
+                syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments);
         }
     }
     for (const auto& cmd_buffer : command_buffers) {
-        const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
+        const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
         ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
         Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
-        system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(),
+        system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
                                   cmdlist.size() * sizeof(u32));
-        gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
+        gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
     }
     std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
     // Some games expect command_buffers to be written back
@@ -112,10 +126,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
     std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
     LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
 
-    if (device_syncpoints[params.param] == 0 && system.GPU().UseNvdec()) {
-        device_syncpoints[params.param] = syncpoint_manager.AllocateSyncpoint();
-    }
-    params.value = device_syncpoints[params.param];
+    // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
+    params.value = channel_syncpoint;
     std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));
 
     return NvResult::Success;
@@ -123,6 +135,7 @@ NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vec
     IoctlGetWaitbase params{};
+    LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
     std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
     params.value = 0; // Seems to be hard coded at 0
     std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
@@ -136,28 +149,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
 
     SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
 
-    auto& gpu = system.GPU();
-
     for (auto& cmd_buffer : cmd_buffer_handles) {
-        auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)};
-        if (!object) {
-            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
-            std::memcpy(output.data(), &params, output.size());
-            return NvResult::InvalidState;
-        }
-        if (object->dma_map_addr == 0) {
-            // NVDEC and VIC memory is in the 32-bit address space
-            // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
-            const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size);
-            object->dma_map_addr = static_cast<u32>(low_addr);
-            // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
-            ASSERT(object->dma_map_addr == low_addr);
-        }
-        if (!object->dma_map_addr) {
-            LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
-        } else {
-            cmd_buffer.map_address = object->dma_map_addr;
-        }
+        cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
     }
     std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
     std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
@@ -167,11 +160,16 @@
 }
 
 NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
-    // This is intntionally stubbed.
-    // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame
-    // addresses, and risk invalidating data before the async GPU thread is done with it
+    IoctlMapBuffer params{};
+    std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
+    std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
+
+    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
+    for (auto& cmd_buffer : cmd_buffer_handles) {
+        nvmap.UnpinHandle(cmd_buffer.map_handle);
+    }
+
     std::memset(output.data(), 0, output.size());
-    LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
     return NvResult::Success;
 }
 
@@ -182,4 +180,9 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
     return NvResult::Success;
 }
 
+Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
+    LOG_CRITICAL(Service_NVDRV, "Unknown HOSTX1 Event {}", event_id);
+    return nullptr;
+}
+
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 12d39946d..fe76100c8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -3,21 +3,26 @@
 
 #pragma once
 
+#include <deque>
 #include <vector>
 #include "common/common_types.h"
 #include "common/swap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 
 namespace Service::Nvidia {
-class SyncpointManager;
+
+namespace NvCore {
+class Container;
+class NvMap;
+} // namespace NvCore
 
 namespace Devices {
-class nvmap;
 
 class nvhost_nvdec_common : public nvdevice {
 public:
-    explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                                 SyncpointManager& syncpoint_manager_);
+    explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core,
+                                 NvCore::ChannelType channel_type);
     ~nvhost_nvdec_common() override;
 
 protected:
@@ -110,11 +115,15 @@ protected:
     NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
 
-    std::unordered_map<DeviceFD, u32> fd_to_id{};
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+    u32 channel_syncpoint;
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
-    std::shared_ptr<nvmap> nvmap_dev;
-    SyncpointManager& syncpoint_manager;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
+    NvCore::NvMap& nvmap;
+    NvCore::ChannelType channel_type;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
 };
 }; // namespace Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index f58e8bada..73f97136e 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -4,13 +4,14 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/devices/nvhost_vic.h" #include "video_core/renderer_base.h" namespace Service::Nvidia::Devices { -nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, - SyncpointManager& syncpoint_manager_) - : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {} + +nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_) + : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {} nvhost_vic::~nvhost_vic() = default; @@ -19,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i switch (command.group) { case 0x0: switch (command.cmd) { - case 0x1: - if (!fd_to_id.contains(fd)) { - fd_to_id[fd] = next_id++; + case 0x1: { + auto& host1x_file = core.Host1xDeviceFile(); + if (!host1x_file.fd_to_id.contains(fd)) { + host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++; } return Submit(fd, input, output); + } case 0x2: return GetSyncpoint(input, output); case 0x3: @@ -67,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i void nvhost_vic::OnOpen(DeviceFD fd) {} void nvhost_vic::OnClose(DeviceFD fd) { - const auto iter = fd_to_id.find(fd); - if (iter != fd_to_id.end()) { + auto& host1x_file = core.Host1xDeviceFile(); + const auto iter = host1x_file.fd_to_id.find(fd); + if (iter != host1x_file.fd_to_id.end()) { system.GPU().ClearCdmaInstance(iter->second); } } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index b41b195ae..f164caafb 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices { class nvhost_vic final : public nvhost_nvdec_common { public: - explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, - SyncpointManager& syncpoint_manager_); + explicit nvhost_vic(Core::System& system_, NvCore::Container& core); ~nvhost_vic(); NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, @@ -22,8 +21,5 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; - -private: - u32 next_id{}; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index d8518149d..b60679021 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -2,19 +2,26 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include <algorithm> +#include <bit> #include <cstring> +#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" +#include "core/core.h" +#include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvmap.h" +#include "core/memory.h" + +using Core::Memory::YUZU_PAGESIZE; namespace Service::Nvidia::Devices { -nvmap::nvmap(Core::System& system_) : nvdevice{system_} { - // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to - // represent this. 
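// A reduced sketch of the reserved-handle convention described in the removed
// comment above: handle 0 is pre-created as an empty placeholder so that lookups
// made while remapping still resolve. All types and names below are illustrative
// stand-ins, not the emulator's real ones.

#include <cstdint>
#include <memory>
#include <unordered_map>

struct PlaceholderObject {
    std::uint32_t size{};
};

class HandleTable {
public:
    HandleTable() {
        // Mirrors CreateObject(0): slot 0 exists from construction onward.
        handles.emplace(0, std::make_shared<PlaceholderObject>());
    }

    std::shared_ptr<PlaceholderObject> Get(std::uint32_t handle) const {
        const auto it = handles.find(handle);
        return it != handles.end() ? it->second : nullptr;
    }

private:
    std::unordered_map<std::uint32_t, std::shared_ptr<PlaceholderObject>> handles;
};

// The placeholder creation being removed: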
- CreateObject(0); -} +nvmap::nvmap(Core::System& system_, NvCore::Container& container_) + : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {} nvmap::~nvmap() = default; @@ -62,39 +69,21 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input, void nvmap::OnOpen(DeviceFD fd) {} void nvmap::OnClose(DeviceFD fd) {} -VAddr nvmap::GetObjectAddress(u32 handle) const { - auto object = GetObject(handle); - ASSERT(object); - ASSERT(object->status == Object::Status::Allocated); - return object->addr; -} - -u32 nvmap::CreateObject(u32 size) { - // Create a new nvmap object and obtain a handle to it. - auto object = std::make_shared<Object>(); - object->id = next_id++; - object->size = size; - object->status = Object::Status::Created; - object->refcount = 1; - - const u32 handle = next_handle++; - - handles.insert_or_assign(handle, std::move(object)); - - return handle; -} - NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) { IocCreateParams params; std::memcpy(&params, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size); - - if (!params.size) { - LOG_ERROR(Service_NVDRV, "Size is 0"); - return NvResult::BadValue; + LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); + + std::shared_ptr<NvCore::NvMap::Handle> handle_description{}; + auto result = + file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Failed to create Object"); + return result; } - - params.handle = CreateObject(params.size); + handle_description->orig_size = params.size; // Orig size is the unaligned size + params.handle = handle_description->id; + LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size); std::memcpy(output.data(), &params, sizeof(params)); return NvResult::Success; @@ -103,63 +92,69 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) { IocAllocParams params; std::memcpy(&params, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr); + LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); if (!params.handle) { - LOG_ERROR(Service_NVDRV, "Handle is 0"); + LOG_CRITICAL(Service_NVDRV, "Handle is 0"); return NvResult::BadValue; } if ((params.align - 1) & params.align) { - LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align); + LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align); return NvResult::BadValue; } - const u32 min_alignment = 0x1000; - if (params.align < min_alignment) { - params.align = min_alignment; + // Force page size alignment at a minimum + if (params.align < YUZU_PAGESIZE) { + params.align = YUZU_PAGESIZE; } - auto object = GetObject(params.handle); - if (!object) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); + auto handle_description{file.GetHandle(params.handle)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); return NvResult::BadValue; } - if (object->status == Object::Status::Allocated) { - LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle); + if (handle_description->allocated) { + LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle); return 
NvResult::InsufficientMemory; } - object->flags = params.flags; - object->align = params.align; - object->kind = params.kind; - object->addr = params.addr; - object->status = Object::Status::Allocated; - + const auto result = + handle_description->Alloc(params.flags, params.align, params.kind, params.address); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); + return result; + } + ASSERT(system.CurrentProcess() + ->PageTable() + .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, + Kernel::KMemoryPermission::None, true) + .IsSuccess()); std::memcpy(output.data(), &params, sizeof(params)); - return NvResult::Success; + return result; } NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) { IocGetIdParams params; std::memcpy(&params, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "called"); + LOG_DEBUG(Service_NVDRV, "called"); + // See the comment in FromId for extra info on this function if (!params.handle) { - LOG_ERROR(Service_NVDRV, "Handle is zero"); + LOG_CRITICAL(Service_NVDRV, "Handle is zero!"); return NvResult::BadValue; } - auto object = GetObject(params.handle); - if (!object) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); - return NvResult::BadValue; + auto handle_description{file.GetHandle(params.handle)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); + return NvResult::AccessDenied; // This will always return EPERM irrespective of whether the + // handle exists or not } - params.id = object->id; - + params.id = handle_description->id; std::memcpy(output.data(), &params, sizeof(params)); return NvResult::Success; } @@ -168,26 +163,29 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output) IocFromIdParams params; std::memcpy(&params, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, "(STUBBED) called"); + LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id); - auto itr = std::find_if(handles.begin(), handles.end(), - [&](const auto& entry) { return entry.second->id == params.id; }); - if (itr == handles.end()) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); + // Handles and IDs are always the same value in nvmap; however, IDs can be used globally given the + // right permissions. + // Since we don't plan on ever supporting multiprocess, we can skip implementing handle refs and + // so this function just does simple validation and passes through the handle id. + if (!params.id) { + LOG_CRITICAL(Service_NVDRV, "Zero Id is invalid!"); return NvResult::BadValue; } - auto& object = itr->second; - if (object->status != Object::Status::Allocated) { - LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); + auto handle_description{file.GetHandle(params.id)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); return NvResult::BadValue; } - itr->second->refcount++; - - // Return the existing handle instead of creating a new one. 
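// Since ids and handles are interchangeable here, the rewritten IocFromId only
// validates the id and takes an extra reference via Duplicate. A rough sketch of
// the assumed reference-counting contract (simplified types, hypothetical names):

#include <atomic>
#include <cstdint>

struct HandleEntry {
    std::uint32_t id{};
    std::atomic<std::uint32_t> refcount{1};

    // Every successful duplicate must later be balanced by one free.
    void Duplicate() {
        refcount.fetch_add(1, std::memory_order_relaxed);
    }

    // Returns true when the last reference is dropped and the backing
    // memory can actually be released.
    bool Free() {
        return refcount.fetch_sub(1, std::memory_order_acq_rel) == 1;
    }
};

// The pre-rewrite code resumed here, returning the handle it had found: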
- params.handle = itr->first; - + auto result = handle_description->Duplicate(false); + if (result != NvResult::Success) { + LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!"); + return result; + } + params.handle = handle_description->id; std::memcpy(output.data(), &params, sizeof(params)); return NvResult::Success; } @@ -198,35 +196,43 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) IocParamParams params; std::memcpy(&params, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "(STUBBED) called type={}", params.param); + LOG_DEBUG(Service_NVDRV, "called type={}", params.param); - auto object = GetObject(params.handle); - if (!object) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); + if (!params.handle) { + LOG_CRITICAL(Service_NVDRV, "Invalid handle!"); return NvResult::BadValue; } - if (object->status != Object::Status::Allocated) { - LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); + auto handle_description{file.GetHandle(params.handle)}; + if (!handle_description) { + LOG_CRITICAL(Service_NVDRV, "Handle not registered!"); return NvResult::BadValue; } - switch (static_cast<ParamTypes>(params.param)) { - case ParamTypes::Size: - params.result = object->size; + switch (params.param) { + case HandleParameterType::Size: + params.result = static_cast<u32_le>(handle_description->orig_size); + break; + case HandleParameterType::Alignment: + params.result = static_cast<u32_le>(handle_description->align); break; - case ParamTypes::Alignment: - params.result = object->align; + case HandleParameterType::Base: + params.result = static_cast<u32_le>(-22); // posix EINVAL break; - case ParamTypes::Heap: - // TODO(Subv): Seems to be a hardcoded value? - params.result = 0x40000000; + case HandleParameterType::Heap: + if (handle_description->allocated) + params.result = 0x40000000; + else + params.result = 0; break; - case ParamTypes::Kind: - params.result = object->kind; + case HandleParameterType::Kind: + params.result = handle_description->kind; + break; + case HandleParameterType::IsSharedMemMapped: + params.result = handle_description->is_shared_mem_mapped; break; default: - UNIMPLEMENTED(); + return NvResult::BadValue; } std::memcpy(output.data(), &params, sizeof(params)); @@ -234,46 +240,29 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) } NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) { - // TODO(Subv): These flags are unconfirmed. - enum FreeFlags { - Freed = 0, - NotFreedYet = 1, - }; - IocFreeParams params; std::memcpy(&params, input.data(), sizeof(params)); - LOG_DEBUG(Service_NVDRV, "(STUBBED) called"); + LOG_DEBUG(Service_NVDRV, "called"); - auto itr = handles.find(params.handle); - if (itr == handles.end()) { - LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); - return NvResult::BadValue; - } - if (!itr->second->refcount) { - LOG_ERROR( - Service_NVDRV, - "There are no references to this object. The object is already freed. handle={:08X}", - params.handle); - return NvResult::BadValue; + if (!params.handle) { + LOG_CRITICAL(Service_NVDRV, "Null handle freed"); + return NvResult::Success; } - itr->second->refcount--; - - params.size = itr->second->size; - - if (itr->second->refcount == 0) { - params.flags = Freed; - // The address of the nvmap is written to the output if we're finally freeing it, otherwise - // 0 is written. 
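// The replacement IocFree below keeps that contract but delegates it: the assumed
// NvMap::FreeHandle returns a populated result only once the final reference is
// gone, and an empty result otherwise. A toy model of that behaviour (all names
// hypothetical):

#include <cstdint>
#include <optional>

struct FreeInfo {
    std::uint64_t address{};
    std::uint64_t size{};
    bool was_uncached{};
};

// Drops one reference; reports the backing range only at refcount zero.
std::optional<FreeInfo> FreeOnce(std::uint32_t& refcount, const FreeInfo& info) {
    if (--refcount == 0) {
        return info;
    }
    return std::nullopt; // other duplicates still hold the handle
}

// In the removed version, the write-back was done inline: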
- params.address = itr->second->addr; + if (auto freeInfo{file.FreeHandle(params.handle, false)}) { + ASSERT(system.CurrentProcess() + ->PageTable() + .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size) + .IsSuccess()); + params.address = freeInfo->address; + params.size = static_cast<u32>(freeInfo->size); + params.flags.raw = 0; + params.flags.map_uncached.Assign(freeInfo->was_uncached); } else { - params.flags = NotFreedYet; - params.address = 0; + // This is possible when there are internal dups or other duplicates. } - handles.erase(params.handle); - std::memcpy(output.data(), &params, sizeof(params)); return NvResult::Success; } diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index d5360d6e5..e9bfd0358 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h @@ -9,15 +9,23 @@ #include "common/common_funcs.h" #include "common/common_types.h" #include "common/swap.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" +namespace Service::Nvidia::NvCore { +class Container; +} // namespace Service::Nvidia::NvCore + namespace Service::Nvidia::Devices { class nvmap final : public nvdevice { public: - explicit nvmap(Core::System& system_); + explicit nvmap(Core::System& system_, NvCore::Container& container); ~nvmap() override; + nvmap(const nvmap&) = delete; + nvmap& operator=(const nvmap&) = delete; + NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input, @@ -28,31 +36,15 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; - /// Returns the allocated address of an nvmap object given its handle. - VAddr GetObjectAddress(u32 handle) const; - - /// Represents an nvmap object. - struct Object { - enum class Status { Created, Allocated }; - u32 id; - u32 size; - u32 flags; - u32 align; - u8 kind; - VAddr addr; - Status status; - u32 refcount; - u32 dma_map_addr; + enum class HandleParameterType : u32_le { + Size = 1, + Alignment = 2, + Base = 3, + Heap = 4, + Kind = 5, + IsSharedMemMapped = 6 }; - std::shared_ptr<Object> GetObject(u32 handle) const { - auto itr = handles.find(handle); - if (itr != handles.end()) { - return itr->second; - } - return {}; - } - private: /// Id to use for the next handle that is created. u32 next_handle = 0; @@ -60,9 +52,6 @@ private: /// Id to use for the next object that is created. u32 next_id = 0; - /// Mapping of currently allocated handles to the objects they represent. 
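// The ioctl parameter structs that follow are copied byte-for-byte to and from
// guest memory, so their layout is part of the guest ABI and each one is pinned
// with a static_assert. A standalone illustration of the pattern (example struct
// only; the real definitions are in the surrounding header):

#include <cstdint>

struct ExampleAllocParams {
    std::uint32_t handle;
    std::uint32_t heap_mask;
    std::uint32_t flags;
    std::uint32_t align;
    std::uint8_t kind;
    std::uint8_t padding[7];
    std::uint64_t address;
};
static_assert(sizeof(ExampleAllocParams) == 32, "guest ABI layout must not drift");

// Below, the per-device handle map is removed (that state now lives in NvCore),
// and the ABI-pinned parameter structs follow: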
- std::unordered_map<u32, std::shared_ptr<Object>> handles; - struct IocCreateParams { // Input u32_le size{}; @@ -83,11 +72,11 @@ private: // Input u32_le handle{}; u32_le heap_mask{}; - u32_le flags{}; + NvCore::NvMap::Handle::Flags flags{}; u32_le align{}; u8 kind{}; INSERT_PADDING_BYTES(7); - u64_le addr{}; + u64_le address{}; }; static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size"); @@ -96,14 +85,14 @@ private: INSERT_PADDING_BYTES(4); u64_le address{}; u32_le size{}; - u32_le flags{}; + NvCore::NvMap::Handle::Flags flags{}; }; static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size"); struct IocParamParams { // Input u32_le handle{}; - u32_le param{}; + HandleParameterType param{}; // Output u32_le result{}; }; @@ -117,14 +106,15 @@ private: }; static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); - u32 CreateObject(u32 size); - NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output); NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output); NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output); NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output); NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output); NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output); + + NvCore::Container& container; + NvCore::NvMap& file; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/nvdata.h b/src/core/hle/service/nvdrv/nvdata.h index 1d00394c8..0e2f47075 100644 --- a/src/core/hle/service/nvdrv/nvdata.h +++ b/src/core/hle/service/nvdrv/nvdata.h @@ -1,5 +1,6 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once @@ -78,11 +79,15 @@ enum class NvResult : u32 { ModuleNotPresent = 0xA000E, }; +// obtained from +// https://github.com/skyline-emu/skyline/blob/nvdec-dev/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost/ctrl.h#L47 enum class EventState { - Free = 0, - Registered = 1, - Waiting = 2, - Busy = 3, + Available = 0, + Waiting = 1, + Cancelling = 2, + Signalling = 3, + Signalled = 4, + Cancelled = 5, }; union Ioctl { diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 756eb7453..9f4c7c99a 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -1,5 +1,6 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include <utility> @@ -7,7 +8,7 @@ #include "core/core.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/k_event.h" -#include "core/hle/kernel/k_writable_event.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" @@ -15,17 +16,31 @@ #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" +#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" #include 
"core/hle/service/nvdrv/devices/nvhost_nvjpg.h" #include "core/hle/service/nvdrv/devices/nvhost_vic.h" #include "core/hle/service/nvdrv/devices/nvmap.h" #include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/nvdrv/nvdrv_interface.h" #include "core/hle/service/nvdrv/nvmemp.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" #include "core/hle/service/nvflinger/nvflinger.h" +#include "video_core/gpu.h" namespace Service::Nvidia { +EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_signal{} {} + +EventInterface::~EventInterface() = default; + +Kernel::KEvent* EventInterface::CreateEvent(std::string name) { + Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name)); + return new_event; +} + +void EventInterface::FreeEvent(Kernel::KEvent* event) { + module.service_context.CloseEvent(event); +} + void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, Core::System& system) { auto module_ = std::make_shared<Module>(system); @@ -38,34 +53,54 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger } Module::Module(Core::System& system) - : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} { - for (u32 i = 0; i < MaxNvEvents; i++) { - events_interface.events[i].event = - service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i)); - events_interface.status[i] = EventState::Free; - events_interface.registered[i] = false; - } - auto nvmap_dev = std::make_shared<Devices::nvmap>(system); - devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev); - devices["/dev/nvhost-gpu"] = - std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, syncpoint_manager); - devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system); - devices["/dev/nvmap"] = nvmap_dev; - devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev); - devices["/dev/nvhost-ctrl"] = - std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager); - devices["/dev/nvhost-nvdec"] = - std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, syncpoint_manager); - devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system); - devices["/dev/nvhost-vic"] = - std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, syncpoint_manager); + : container{system.Host1x()}, service_context{system, "nvdrv"}, events_interface{*this} { + builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + std::make_shared<Devices::nvhost_as_gpu>(system, *this, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + std::make_shared<Devices::nvhost_gpu>(system, events_interface, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvmap"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + std::make_shared<Devices::nvmap>(system, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + 
std::make_shared<Devices::nvdisp_disp0>(system, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + std::make_shared<Devices::nvhost_nvdec>(system, container); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = std::make_shared<Devices::nvhost_nvjpg>(system); + return open_files.emplace(fd, device).first; + }; + builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) { + std::shared_ptr<Devices::nvdevice> device = + std::make_shared<Devices::nvhost_vic>(system, container); + return open_files.emplace(fd, device).first; + }; } -Module::~Module() { - for (u32 i = 0; i < MaxNvEvents; i++) { - service_context.CloseEvent(events_interface.events[i].event); - } -} +Module::~Module() {} NvResult Module::VerifyFD(DeviceFD fd) const { if (fd < 0) { @@ -82,18 +117,18 @@ NvResult Module::VerifyFD(DeviceFD fd) const { } DeviceFD Module::Open(const std::string& device_name) { - if (devices.find(device_name) == devices.end()) { + auto it = builders.find(device_name); + if (it == builders.end()) { LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name); return INVALID_NVDRV_FD; } - auto device = devices[device_name]; const DeviceFD fd = next_fd++; + auto& builder = it->second; + auto device = builder(fd)->second; device->OnOpen(fd); - open_files[fd] = std::move(device); - return fd; } @@ -168,22 +203,24 @@ NvResult Module::Close(DeviceFD fd) { return NvResult::Success; } -void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { - for (u32 i = 0; i < MaxNvEvents; i++) { - if (events_interface.assigned_syncpt[i] == syncpoint_id && - events_interface.assigned_value[i] == value) { - events_interface.LiberateEvent(i); - events_interface.events[i].event->GetWritableEvent().Signal(); - } +NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) { + if (fd < 0) { + LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd); + return NvResult::InvalidState; } -} -Kernel::KReadableEvent& Module::GetEvent(const u32 event_id) { - return events_interface.events[event_id].event->GetReadableEvent(); -} + const auto itr = open_files.find(fd); -Kernel::KWritableEvent& Module::GetEventWriteable(const u32 event_id) { - return events_interface.events[event_id].event->GetWritableEvent(); + if (itr == open_files.end()) { + LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd); + return NvResult::NotImplemented; + } + + event = itr->second->QueryEvent(event_id); + if (!event) { + return NvResult::BadParameter; + } + return NvResult::Success; } } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index c929e5106..f3c81bd88 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -1,16 +1,20 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once +#include <functional> +#include 
<list> #include <memory> +#include <string> #include <unordered_map> #include <vector> #include "common/common_types.h" #include "core/hle/service/kernel_helpers.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/nvdata.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" #include "core/hle/service/nvflinger/ui/fence.h" #include "core/hle/service/service.h" @@ -28,81 +32,31 @@ class NVFlinger; namespace Service::Nvidia { +namespace NvCore { +class Container; class SyncpointManager; +} // namespace NvCore namespace Devices { class nvdevice; -} +class nvhost_ctrl; +} // namespace Devices -/// Represents an Nvidia event -struct NvEvent { - Kernel::KEvent* event{}; - NvFence fence{}; -}; +class Module; -struct EventInterface { - // Mask representing currently busy events - u64 events_mask{}; - // Each kernel event associated to an NV event - std::array<NvEvent, MaxNvEvents> events; - // The status of the current NVEvent - std::array<EventState, MaxNvEvents> status{}; - // Tells if an NVEvent is registered or not - std::array<bool, MaxNvEvents> registered{}; - // Tells the NVEvent that it has failed. - std::array<bool, MaxNvEvents> failed{}; - // When an NVEvent is waiting on GPU interrupt, this is the sync_point - // associated with it. - std::array<u32, MaxNvEvents> assigned_syncpt{}; - // This is the value of the GPU interrupt for which the NVEvent is waiting - // for. - std::array<u32, MaxNvEvents> assigned_value{}; - // Constant to denote an unassigned syncpoint. - static constexpr u32 unassigned_syncpt = 0xFFFFFFFF; - std::optional<u32> GetFreeEvent() const { - u64 mask = events_mask; - for (u32 i = 0; i < MaxNvEvents; i++) { - const bool is_free = (mask & 0x1) == 0; - if (is_free) { - if (status[i] == EventState::Registered || status[i] == EventState::Free) { - return {i}; - } - } - mask = mask >> 1; - } - return std::nullopt; - } - void SetEventStatus(const u32 event_id, EventState new_status) { - EventState old_status = status[event_id]; - if (old_status == new_status) { - return; - } - status[event_id] = new_status; - if (new_status == EventState::Registered) { - registered[event_id] = true; - } - if (new_status == EventState::Waiting || new_status == EventState::Busy) { - events_mask |= (1ULL << event_id); - } - } - void RegisterEvent(const u32 event_id) { - registered[event_id] = true; - if (status[event_id] == EventState::Free) { - status[event_id] = EventState::Registered; - } - } - void UnregisterEvent(const u32 event_id) { - registered[event_id] = false; - if (status[event_id] == EventState::Registered) { - status[event_id] = EventState::Free; - } - } - void LiberateEvent(const u32 event_id) { - status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free; - events_mask &= ~(1ULL << event_id); - assigned_syncpt[event_id] = unassigned_syncpt; - assigned_value[event_id] = 0; - } +class EventInterface { +public: + explicit EventInterface(Module& module_); + ~EventInterface(); + + Kernel::KEvent* CreateEvent(std::string name); + + void FreeEvent(Kernel::KEvent* event); + +private: + Module& module; + std::mutex guard; + std::list<Devices::nvhost_ctrl*> on_signal; }; class Module final { public: @@ -112,9 +66,9 @@ public: /// Returns a pointer to one of the available devices, identified by its name. 
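// Module now resolves devices in two steps: Open() runs a per-name factory
// ("builder") that registers the new device under a file descriptor, and
// GetDevice() below looks the device up by that fd. A reduced sketch of the
// registry shape (simplified stand-in types, not the emulator's own):

#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

using DeviceFD = std::int32_t;

struct DeviceBase {
    virtual ~DeviceBase() = default;
};

class DeviceRegistry {
public:
    template <typename T> // T must derive from DeviceBase
    void AddBuilder(std::string name) {
        builders[std::move(name)] = [this](DeviceFD fd) {
            open_files.emplace(fd, std::make_shared<T>());
        };
    }

    DeviceFD Open(const std::string& name) {
        const auto it = builders.find(name);
        if (it == builders.end()) {
            return -1; // unknown device node
        }
        const DeviceFD fd = next_fd++;
        it->second(fd);
        return fd;
    }

private:
    std::unordered_map<std::string, std::function<void(DeviceFD)>> builders;
    std::unordered_map<DeviceFD, std::shared_ptr<DeviceBase>> open_files;
    DeviceFD next_fd = 1;
};

// Usage would be e.g. registry.AddBuilder<SomeDevice>("/dev/example") followed by
// registry.Open("/dev/example"). The real fd-keyed lookup: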
template <typename T> - std::shared_ptr<T> GetDevice(const std::string& name) { - auto itr = devices.find(name); - if (itr == devices.end()) + std::shared_ptr<T> GetDevice(DeviceFD fd) { + auto itr = open_files.find(fd); + if (itr == open_files.end()) return nullptr; return std::static_pointer_cast<T>(itr->second); } @@ -137,28 +91,27 @@ public: /// Closes a device file descriptor and returns operation success. NvResult Close(DeviceFD fd); - void SignalSyncpt(const u32 syncpoint_id, const u32 value); - - Kernel::KReadableEvent& GetEvent(u32 event_id); - - Kernel::KWritableEvent& GetEventWriteable(u32 event_id); + NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event); private: + friend class EventInterface; + friend class Service::NVFlinger::NVFlinger; + /// Manages syncpoints on the host - SyncpointManager syncpoint_manager; + NvCore::Container container; /// Id to use for the next open file descriptor. DeviceFD next_fd = 1; + using FilesContainerType = std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>>; /// Mapping of file descriptors to the devices they reference. - std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files; + FilesContainerType open_files; - /// Mapping of device node names to their implementation. - std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices; + KernelHelpers::ServiceContext service_context; EventInterface events_interface; - KernelHelpers::ServiceContext service_context; + std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders; }; /// Registers all NVDRV services with the specified service manager. diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index b5a980384..edbdfee43 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp @@ -1,10 +1,12 @@ -// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #include <cinttypes> #include "common/logging/log.h" #include "core/core.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_readable_event.h" #include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/nvdrv/nvdrv.h" @@ -12,10 +14,6 @@ namespace Service::Nvidia { -void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) { - nvdrv->SignalSyncpt(syncpoint_id, value); -} - void NVDRV::Open(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_NVDRV, "called"); IPC::ResponseBuilder rb{ctx, 4}; @@ -164,8 +162,7 @@ void NVDRV::Initialize(Kernel::HLERequestContext& ctx) { void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const auto fd = rp.Pop<DeviceFD>(); - const auto event_id = rp.Pop<u32>() & 0x00FF; - LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id); + const auto event_id = rp.Pop<u32>(); if (!is_initialized) { ServiceError(ctx, NvResult::NotInitialized); @@ -173,24 +170,20 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { return; } - const auto nv_result = nvdrv->VerifyFD(fd); - if (nv_result != NvResult::Success) { - LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd); - ServiceError(ctx, nv_result); - return; - } + Kernel::KEvent* event = nullptr; + 
NvResult result = nvdrv->QueryEvent(fd, event_id, event); - if (event_id < MaxNvEvents) { + if (result == NvResult::Success) { IPC::ResponseBuilder rb{ctx, 3, 1}; rb.Push(ResultSuccess); - auto& event = nvdrv->GetEvent(event_id); - event.Clear(); - rb.PushCopyObjects(event); + auto& readable_event = event->GetReadableEvent(); + rb.PushCopyObjects(readable_event); rb.PushEnum(NvResult::Success); } else { + LOG_ERROR(Service_NVDRV, "Invalid event request!"); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); - rb.PushEnum(NvResult::BadParameter); + rb.PushEnum(result); } } diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h index cbd37b52b..5ac06ee30 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.h +++ b/src/core/hle/service/nvdrv/nvdrv_interface.h @@ -7,10 +7,6 @@ #include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/service.h" -namespace Kernel { -class KWritableEvent; -} - namespace Service::Nvidia { class NVDRV final : public ServiceFramework<NVDRV> { @@ -18,8 +14,6 @@ public: explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name); ~NVDRV() override; - void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value); - private: void Open(Kernel::HLERequestContext& ctx); void Ioctl1(Kernel::HLERequestContext& ctx); diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/syncpoint_manager.cpp deleted file mode 100644 index a6fa943e8..000000000 --- a/src/core/hle/service/nvdrv/syncpoint_manager.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#include "common/assert.h" -#include "core/hle/service/nvdrv/syncpoint_manager.h" -#include "video_core/gpu.h" - -namespace Service::Nvidia { - -SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {} - -SyncpointManager::~SyncpointManager() = default; - -u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) { - syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id); - return GetSyncpointMin(syncpoint_id); -} - -u32 SyncpointManager::AllocateSyncpoint() { - for (u32 syncpoint_id = 1; syncpoint_id < MaxSyncPoints; syncpoint_id++) { - if (!syncpoints[syncpoint_id].is_allocated) { - syncpoints[syncpoint_id].is_allocated = true; - return syncpoint_id; - } - } - ASSERT_MSG(false, "No more available syncpoints!"); - return {}; -} - -u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) { - for (u32 index = 0; index < value; ++index) { - syncpoints[syncpoint_id].max.fetch_add(1, std::memory_order_relaxed); - } - - return GetSyncpointMax(syncpoint_id); -} - -} // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.h b/src/core/hle/service/nvdrv/syncpoint_manager.h deleted file mode 100644 index 7f080f76e..000000000 --- a/src/core/hle/service/nvdrv/syncpoint_manager.h +++ /dev/null @@ -1,84 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#pragma once - -#include <array> -#include <atomic> - -#include "common/common_types.h" -#include "core/hle/service/nvdrv/nvdata.h" - -namespace Tegra { -class GPU; -} - -namespace Service::Nvidia { - -class SyncpointManager final { -public: - explicit SyncpointManager(Tegra::GPU& gpu_); - ~SyncpointManager(); - - /** - * Returns true if the specified syncpoint is expired for the given value. - * @param syncpoint_id Syncpoint ID to check. 
- * @param value Value to check against the specified syncpoint. - * @returns True if the specified syncpoint is expired for the given value, otherwise False. - */ - bool IsSyncpointExpired(u32 syncpoint_id, u32 value) const { - return (GetSyncpointMax(syncpoint_id) - value) >= (GetSyncpointMin(syncpoint_id) - value); - } - - /** - * Gets the lower bound for the specified syncpoint. - * @param syncpoint_id Syncpoint ID to get the lower bound for. - * @returns The lower bound for the specified syncpoint. - */ - u32 GetSyncpointMin(u32 syncpoint_id) const { - return syncpoints.at(syncpoint_id).min.load(std::memory_order_relaxed); - } - - /** - * Gets the upper bound for the specified syncpoint. - * @param syncpoint_id Syncpoint ID to get the upper bound for. - * @returns The upper bound for the specified syncpoint. - */ - u32 GetSyncpointMax(u32 syncpoint_id) const { - return syncpoints.at(syncpoint_id).max.load(std::memory_order_relaxed); - } - - /** - * Refreshes the minimum value for the specified syncpoint. - * @param syncpoint_id Syncpoint ID to be refreshed. - * @returns The new syncpoint minimum value. - */ - u32 RefreshSyncpoint(u32 syncpoint_id); - - /** - * Allocates a new syncpoint. - * @returns The syncpoint ID for the newly allocated syncpoint. - */ - u32 AllocateSyncpoint(); - - /** - * Increases the maximum value for the specified syncpoint. - * @param syncpoint_id Syncpoint ID to be increased. - * @param value Value to increase the specified syncpoint by. - * @returns The new syncpoint maximum value. - */ - u32 IncreaseSyncpoint(u32 syncpoint_id, u32 value); - -private: - struct Syncpoint { - std::atomic<u32> min; - std::atomic<u32> max; - std::atomic<bool> is_allocated; - }; - - std::array<Syncpoint, MaxSyncPoints> syncpoints{}; - - Tegra::GPU& gpu; -}; - -} // namespace Service::Nvidia diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp index 4b3d5efd6..1ce67c771 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp @@ -5,15 +5,18 @@ // https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp #include "common/logging/log.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvflinger/buffer_item.h" #include "core/hle/service/nvflinger/buffer_queue_consumer.h" #include "core/hle/service/nvflinger/buffer_queue_core.h" #include "core/hle/service/nvflinger/producer_listener.h" +#include "core/hle/service/nvflinger/ui/graphic_buffer.h" namespace Service::android { -BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_) - : core{std::move(core_)}, slots{core->slots} {} +BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_, + Service::Nvidia::NvCore::NvMap& nvmap_) + : core{std::move(core_)}, slots{core->slots}, nvmap(nvmap_) {} BufferQueueConsumer::~BufferQueueConsumer() = default; @@ -133,6 +136,8 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc slots[slot].buffer_state = BufferState::Free; + nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), true); + listener = core->connected_producer_listener; LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot); diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.h b/src/core/hle/service/nvflinger/buffer_queue_consumer.h index b598c314f..4ec06ca13 100--- 
a/src/core/hle/service/nvflinger/buffer_queue_consumer.h +++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.h @@ -13,6 +13,10 @@ #include "core/hle/service/nvflinger/buffer_queue_defs.h" #include "core/hle/service/nvflinger/status.h" +namespace Service::Nvidia::NvCore { +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::android { class BufferItem; @@ -21,7 +25,8 @@ class IConsumerListener; class BufferQueueConsumer final { public: - explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_); + explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_, + Service::Nvidia::NvCore::NvMap& nvmap_); ~BufferQueueConsumer(); Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present); @@ -32,6 +37,7 @@ public: private: std::shared_ptr<BufferQueueCore> core; BufferQueueDefs::SlotsType& slots; + Service::Nvidia::NvCore::NvMap& nvmap; }; } // namespace Service::android diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp index 337431488..77ddbb6ef 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp @@ -11,10 +11,9 @@ #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/kernel.h" #include "core/hle/service/kernel_helpers.h" -#include "core/hle/service/nvdrv/nvdrv.h" +#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvflinger/buffer_queue_core.h" #include "core/hle/service/nvflinger/buffer_queue_producer.h" #include "core/hle/service/nvflinger/consumer_listener.h" @@ -26,8 +25,10 @@ namespace Service::android { BufferQueueProducer::BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, - std::shared_ptr<BufferQueueCore> buffer_queue_core_) - : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots) { + std::shared_ptr<BufferQueueCore> buffer_queue_core_, + Service::Nvidia::NvCore::NvMap& nvmap_) + : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots), + nvmap(nvmap_) { buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent"); } @@ -108,7 +109,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) { core->override_max_buffer_count = buffer_count; core->SignalDequeueCondition(); - buffer_wait_event->GetWritableEvent().Signal(); + buffer_wait_event->Signal(); listener = core->consumer_listener; } @@ -530,6 +531,8 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, item.is_droppable = core->dequeue_buffer_cannot_block || async; item.swap_interval = swap_interval; + nvmap.DuplicateHandle(item.graphic_buffer->BufferId(), true); + sticky_transform = sticky_transform_; if (core->queue.empty()) { @@ -619,7 +622,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) { slots[slot].fence = fence; core->SignalDequeueCondition(); - buffer_wait_event->GetWritableEvent().Signal(); + buffer_wait_event->Signal(); } Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) { @@ -749,7 +752,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) { core->connected_producer_listener = nullptr; core->connected_api = NativeWindowApi::NoConnectedApi; core->SignalDequeueCondition(); - buffer_wait_event->GetWritableEvent().Signal(); + 
buffer_wait_event->Signal(); listener = core->consumer_listener; } else { LOG_ERROR(Service_NVFlinger, "still connected to another api (cur = {} req = {})", @@ -798,7 +801,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot, } core->SignalDequeueCondition(); - buffer_wait_event->GetWritableEvent().Signal(); + buffer_wait_event->Signal(); return Status::NoError; } diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h index 42d4722dc..7526bf8ec 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.h +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h @@ -24,13 +24,16 @@ namespace Kernel { class KernelCore; class KEvent; class KReadableEvent; -class KWritableEvent; } // namespace Kernel namespace Service::KernelHelpers { class ServiceContext; } // namespace Service::KernelHelpers +namespace Service::Nvidia::NvCore { +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::android { class BufferQueueCore; @@ -39,7 +42,8 @@ class IProducerListener; class BufferQueueProducer final : public IBinder { public: explicit BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, - std::shared_ptr<BufferQueueCore> buffer_queue_core_); + std::shared_ptr<BufferQueueCore> buffer_queue_core_, + Service::Nvidia::NvCore::NvMap& nvmap_); ~BufferQueueProducer(); void Transact(Kernel::HLERequestContext& ctx, android::TransactionId code, u32 flags) override; @@ -78,6 +82,8 @@ private: s32 next_callback_ticket{}; s32 current_callback_ticket{}; std::condition_variable_any callback_condition; + + Service::Nvidia::NvCore::NvMap& nvmap; }; } // namespace Service::android diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 5574269eb..aa14d2cbc 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -22,7 +22,10 @@ #include "core/hle/service/nvflinger/ui/graphic_buffer.h" #include "core/hle/service/vi/display/vi_display.h" #include "core/hle/service/vi/layer/vi_layer.h" +#include "core/hle/service/vi/vi_results.h" #include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" namespace Service::NVFlinger { @@ -30,7 +33,7 @@ constexpr auto frame_ns = std::chrono::nanoseconds{1000000000 / 60}; void NVFlinger::SplitVSync(std::stop_token stop_token) { system.RegisterHostThread(); - std::string name = "yuzu:VSyncThread"; + std::string name = "VSyncThread"; MicroProfileOnThreadCreate(name.c_str()); // Cleanup @@ -38,20 +41,16 @@ void NVFlinger::SplitVSync(std::stop_token stop_token) { Common::SetCurrentThreadName(name.c_str()); Common::SetCurrentThreadPriority(Common::ThreadPriority::High); - s64 delay = 0; + while (!stop_token.stop_requested()) { + vsync_signal.wait(false); + vsync_signal.store(false); + guard->lock(); - const s64 time_start = system.CoreTiming().GetGlobalTimeNs().count(); + Compose(); - const auto ticks = GetNextTicks(); - const s64 time_end = system.CoreTiming().GetGlobalTimeNs().count(); - const s64 time_passed = time_end - time_start; - const s64 next_time = std::max<s64>(0, ticks - time_passed - delay); + guard->unlock(); - if (next_time > 0) { - std::this_thread::sleep_for(std::chrono::nanoseconds{next_time}); - } - delay = (system.CoreTiming().GetGlobalTimeNs().count() - time_end) - next_time; } } @@ -66,27 +65,41 @@ NVFlinger::NVFlinger(Core::System& system_, HosBinderDriverServer& 
hos_binder_dr guard = std::make_shared<std::mutex>(); // Schedule the screen composition events - composition_event = Core::Timing::CreateEvent( + multi_composition_event = Core::Timing::CreateEvent( + "ScreenComposition", + [this](std::uintptr_t, s64 time, + std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> { + vsync_signal.store(true); + vsync_signal.notify_all(); + return std::chrono::nanoseconds(GetNextTicks()); + }); + + single_composition_event = Core::Timing::CreateEvent( "ScreenComposition", [this](std::uintptr_t, s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> { const auto lock_guard = Lock(); Compose(); - return std::max(std::chrono::nanoseconds::zero(), - std::chrono::nanoseconds(GetNextTicks()) - ns_late); + return std::chrono::nanoseconds(GetNextTicks()); }); if (system.IsMulticore()) { + system.CoreTiming().ScheduleLoopingEvent(frame_ns, frame_ns, multi_composition_event); vsync_thread = std::jthread([this](std::stop_token token) { SplitVSync(token); }); } else { - system.CoreTiming().ScheduleLoopingEvent(frame_ns, frame_ns, composition_event); + system.CoreTiming().ScheduleLoopingEvent(frame_ns, frame_ns, single_composition_event); } } NVFlinger::~NVFlinger() { - if (!system.IsMulticore()) { - system.CoreTiming().UnscheduleEvent(composition_event, 0); + if (system.IsMulticore()) { + system.CoreTiming().UnscheduleEvent(multi_composition_event, {}); + vsync_thread.request_stop(); + vsync_signal.store(true); + vsync_signal.notify_all(); + } else { + system.CoreTiming().UnscheduleEvent(single_composition_event, {}); } for (auto& display : displays) { @@ -94,10 +107,15 @@ NVFlinger::~NVFlinger() { display.GetLayer(layer).Core().NotifyShutdown(); } } + + if (nvdrv) { + nvdrv->Close(disp_fd); + } } void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { nvdrv = std::move(instance); + disp_fd = nvdrv->Open("/dev/nvdisp_disp0"); } std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { @@ -131,7 +149,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { const auto buffer_id = next_buffer_queue_id++; - display.CreateLayer(layer_id, buffer_id); + display.CreateLayer(layer_id, buffer_id, nvdrv->container); } void NVFlinger::CloseLayer(u64 layer_id) { @@ -153,15 +171,15 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) { return layer->GetBinderId(); } -Kernel::KReadableEvent* NVFlinger::FindVsyncEvent(u64 display_id) { +ResultVal<Kernel::KReadableEvent*> NVFlinger::FindVsyncEvent(u64 display_id) { const auto lock_guard = Lock(); auto* const display = FindDisplay(display_id); if (display == nullptr) { - return nullptr; + return VI::ResultNotFound; } - return &display->GetVSyncEvent(); + return display->GetVSyncEvent(); } VI::Display* NVFlinger::FindDisplay(u64 display_id) { @@ -251,30 +269,24 @@ void NVFlinger::Compose() { return; // We are likely shutting down } - auto& gpu = system.GPU(); - const auto& multi_fence = buffer.fence; - guard->unlock(); - for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) { - const auto& fence = multi_fence.fences[fence_id]; - gpu.WaitFence(fence.id, fence.value); - } - guard->lock(); - - MicroProfileFlip(); - // Now send the buffer to the GPU for drawing. // TODO(Subv): Support more than just disp0. 
The display device selection is probably based // on which display we're drawing (Default, Internal, External, etc) - auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0"); + auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd); ASSERT(nvdisp); + guard->unlock(); Common::Rectangle<int> crop_rect{ static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()), static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())}; nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(), igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(), - static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect); + static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect, + buffer.fence.fences, buffer.fence.num_fences); + + MicroProfileFlip(); + guard->lock(); swap_interval = buffer.swap_interval; diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 4775597cc..99509bc5b 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h @@ -11,6 +11,7 @@ #include <vector> #include "common/common_types.h" +#include "core/hle/result.h" #include "core/hle/service/kernel_helpers.h" namespace Common { @@ -24,7 +25,6 @@ struct EventType; namespace Kernel { class KReadableEvent; -class KWritableEvent; } // namespace Kernel namespace Service::Nvidia { @@ -71,8 +71,9 @@ public: /// Gets the vsync event for the specified display. /// - /// If an invalid display ID is provided, then nullptr is returned. - [[nodiscard]] Kernel::KReadableEvent* FindVsyncEvent(u64 display_id); + /// If an invalid display ID is provided, then VI::ResultNotFound is returned. + /// If the vsync event has already been retrieved, then VI::ResultPermissionDenied is returned. + [[nodiscard]] ResultVal<Kernel::KReadableEvent*> FindVsyncEvent(u64 display_id); /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when /// finished. @@ -114,6 +115,7 @@ private: void SplitVSync(std::stop_token stop_token); std::shared_ptr<Nvidia::Module> nvdrv; + s32 disp_fd; std::list<VI::Display> displays; @@ -126,12 +128,15 @@ private: u32 swap_interval = 1; /// Event that handles screen composition. 
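// The multicore path above hands composition to a worker thread through a C++20
// atomic flag: the timing callback publishes a tick (store + notify_all) and the
// thread consumes it (wait + store(false)). A reduced, runnable model of that
// handshake, with Compose() elided:

#include <atomic>
#include <chrono>
#include <thread>

int main() {
    std::atomic<bool> vsync_signal{false};
    std::atomic<bool> stop{false};

    std::thread worker([&] {
        while (!stop.load()) {
            vsync_signal.wait(false);  // sleep until a tick is published
            vsync_signal.store(false); // consume the tick
            // ... Compose() would run here, under the guard mutex ...
        }
    });

    for (int frame = 0; frame < 3; ++frame) {
        std::this_thread::sleep_for(std::chrono::milliseconds(16));
        vsync_signal.store(true);
        vsync_signal.notify_all(); // wake the worker, like the timing event does
    }

    stop.store(true);
    vsync_signal.store(true); // unblock a final wait, mirroring the destructor
    vsync_signal.notify_all();
    worker.join();
}

// The members backing this split in the real header: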
- std::shared_ptr<Core::Timing::EventType> composition_event; + std::shared_ptr<Core::Timing::EventType> multi_composition_event; + std::shared_ptr<Core::Timing::EventType> single_composition_event; std::shared_ptr<std::mutex> guard; Core::System& system; + std::atomic<bool> vsync_signal; + std::jthread vsync_thread; KernelHelpers::ServiceContext service_context; diff --git a/src/core/hle/service/ptm/psm.cpp b/src/core/hle/service/ptm/psm.cpp index 2c31e9485..1ac97fe31 100644 --- a/src/core/hle/service/ptm/psm.cpp +++ b/src/core/hle/service/ptm/psm.cpp @@ -37,19 +37,19 @@ public: void SignalChargerTypeChanged() { if (should_signal && should_signal_charger_type) { - state_change_event->GetWritableEvent().Signal(); + state_change_event->Signal(); } } void SignalPowerSupplyChanged() { if (should_signal && should_signal_power_supply) { - state_change_event->GetWritableEvent().Signal(); + state_change_event->Signal(); } } void SignalBatteryVoltageStateChanged() { if (should_signal && should_signal_battery_voltage) { - state_change_event->GetWritableEvent().Signal(); + state_change_event->Signal(); } } diff --git a/src/core/hle/service/ptm/ts.cpp b/src/core/hle/service/ptm/ts.cpp index 65c3f135f..b1a0a5544 100644 --- a/src/core/hle/service/ptm/ts.cpp +++ b/src/core/hle/service/ptm/ts.cpp @@ -15,7 +15,7 @@ TS::TS(Core::System& system_) : ServiceFramework{system_, "ts"} { {0, nullptr, "GetTemperatureRange"}, {1, &TS::GetTemperature, "GetTemperature"}, {2, nullptr, "SetMeasurementMode"}, - {3, nullptr, "GetTemperatureMilliC"}, + {3, &TS::GetTemperatureMilliC, "GetTemperatureMilliC"}, {4, nullptr, "OpenSession"}, }; // clang-format on @@ -29,8 +29,6 @@ void TS::GetTemperature(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const auto location{rp.PopEnum<Location>()}; - LOG_WARNING(Service_HID, "(STUBBED) called. location={}", location); - const s32 temperature = location == Location::Internal ? 35 : 20; IPC::ResponseBuilder rb{ctx, 3}; @@ -38,4 +36,15 @@ void TS::GetTemperature(Kernel::HLERequestContext& ctx) { rb.Push(temperature); } +void TS::GetTemperatureMilliC(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto location{rp.PopEnum<Location>()}; + + const s32 temperature = location == Location::Internal ? 
35000 : 20000; + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(temperature); +} + } // namespace Service::PTM diff --git a/src/core/hle/service/ptm/ts.h b/src/core/hle/service/ptm/ts.h index 39a734ef7..39d51847e 100644 --- a/src/core/hle/service/ptm/ts.h +++ b/src/core/hle/service/ptm/ts.h @@ -20,6 +20,7 @@ private: }; void GetTemperature(Kernel::HLERequestContext& ctx); + void GetTemperatureMilliC(Kernel::HLERequestContext& ctx); }; } // namespace Service::PTM diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp index 2a0b812c1..d7cea6aac 100644 --- a/src/core/hle/service/set/set_sys.cpp +++ b/src/core/hle/service/set/set_sys.cpp @@ -101,6 +101,81 @@ void SET_SYS::SetColorSetId(Kernel::HLERequestContext& ctx) { rb.Push(ResultSuccess); } +// FIXME: implement support for the real system_settings.ini + +template <typename T> +static std::vector<u8> ToBytes(const T& value) { + static_assert(std::is_trivially_copyable_v<T>); + + const auto* begin = reinterpret_cast<const u8*>(&value); + const auto* end = begin + sizeof(T); + + return std::vector<u8>(begin, end); +} + +using Settings = + std::map<std::string, std::map<std::string, std::vector<u8>, std::less<>>, std::less<>>; + +static Settings GetSettings() { + Settings ret; + + ret["hbloader"]["applet_heap_size"] = ToBytes(u64{0x0}); + ret["hbloader"]["applet_heap_reservation_size"] = ToBytes(u64{0x8600000}); + + return ret; +} + +void SET_SYS::GetSettingsItemValueSize(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_SET, "called"); + + // The category of the setting. This corresponds to the top-level keys of + // system_settings.ini. + const auto setting_category_buf{ctx.ReadBuffer(0)}; + const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()}; + + // The name of the setting. This corresponds to the second-level keys of + // system_settings.ini. + const auto setting_name_buf{ctx.ReadBuffer(1)}; + const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()}; + + auto settings{GetSettings()}; + u64 response_size{0}; + + if (settings.contains(setting_category) && settings[setting_category].contains(setting_name)) { + response_size = settings[setting_category][setting_name].size(); + } + + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(response_size == 0 ? ResultUnknown : ResultSuccess); + rb.Push(response_size); +} + +void SET_SYS::GetSettingsItemValue(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_SET, "called"); + + // The category of the setting. This corresponds to the top-level keys of + // system_settings.ini. + const auto setting_category_buf{ctx.ReadBuffer(0)}; + const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()}; + + // The name of the setting. This corresponds to the second-level keys of + // system_settings.ini. 
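// Every value in the settings table above is stored as the raw little-endian
// bytes produced by ToBytes, so callers recover typed values by copying back at
// the declared width. A small self-contained round trip of that encoding
// (assumes a little-endian host, as the surrounding code does):

#include <cstdint>
#include <cstring>
#include <type_traits>
#include <vector>

template <typename T>
std::vector<std::uint8_t> ToBytes(const T& value) {
    static_assert(std::is_trivially_copyable_v<T>);
    const auto* begin = reinterpret_cast<const std::uint8_t*>(&value);
    return std::vector<std::uint8_t>(begin, begin + sizeof(T));
}

int main() {
    const auto bytes = ToBytes(std::uint64_t{0x8600000}); // applet_heap_reservation_size
    std::uint64_t round_tripped{};
    std::memcpy(&round_tripped, bytes.data(), sizeof(round_tripped));
    return round_tripped == 0x8600000 ? 0 : 1;
}

// The handler then reads the setting's name from the second buffer: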
+ const auto setting_name_buf{ctx.ReadBuffer(1)}; + const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()}; + + auto settings{GetSettings()}; + Result response{ResultUnknown}; + + if (settings.contains(setting_category) && settings[setting_category].contains(setting_name)) { + auto setting_value = settings[setting_category][setting_name]; + ctx.WriteBuffer(setting_value.data(), setting_value.size()); + response = ResultSuccess; + } + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(response); +} + SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} { // clang-format off static const FunctionInfo functions[] = { @@ -138,8 +213,8 @@ SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} { {32, nullptr, "SetAccountNotificationSettings"}, {35, nullptr, "GetVibrationMasterVolume"}, {36, nullptr, "SetVibrationMasterVolume"}, - {37, nullptr, "GetSettingsItemValueSize"}, - {38, nullptr, "GetSettingsItemValue"}, + {37, &SET_SYS::GetSettingsItemValueSize, "GetSettingsItemValueSize"}, + {38, &SET_SYS::GetSettingsItemValue, "GetSettingsItemValue"}, {39, nullptr, "GetTvSettings"}, {40, nullptr, "SetTvSettings"}, {41, nullptr, "GetEdid"}, diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h index ac97772b7..258ef8c57 100644 --- a/src/core/hle/service/set/set_sys.h +++ b/src/core/hle/service/set/set_sys.h @@ -23,6 +23,8 @@ private: BasicBlack = 1, }; + void GetSettingsItemValueSize(Kernel::HLERequestContext& ctx); + void GetSettingsItemValue(Kernel::HLERequestContext& ctx); void GetFirmwareVersion(Kernel::HLERequestContext& ctx); void GetFirmwareVersion2(Kernel::HLERequestContext& ctx); void GetColorSetId(Kernel::HLERequestContext& ctx); diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp index 246c94623..48e70f93c 100644 --- a/src/core/hle/service/sm/sm.cpp +++ b/src/core/hle/service/sm/sm.cpp @@ -156,7 +156,8 @@ ResultVal<Kernel::KClientSession*> SM::GetServiceImpl(Kernel::HLERequestContext& // Create a new session. 
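The sm changes below give each newly created client session its own Kernel::SessionRequestManager, and the controller no longer converts a session to a domain mid-request; ConvertToDomainOnRequestEnd only flags the conversion. A minimal sketch of that flag-now, apply-at-request-end pattern, using an invented stand-in class rather than the kernel's:

    #include <utility>

    // Hypothetical stand-in for the per-session request manager: the conversion
    // request is recorded immediately but only takes effect once the in-flight
    // IPC request has finished, mirroring ConvertToDomainOnRequestEnd().
    class RequestManagerSketch {
    public:
        void ConvertToDomainOnRequestEnd() {
            convert_on_request_end = true;
        }

        // Invoked by the session when the current request completes.
        void OnRequestFinished() {
            if (std::exchange(convert_on_request_end, false)) {
                is_domain = true;
            }
        }

        bool IsDomain() const {
            return is_domain;
        }

    private:
        bool convert_on_request_end = false;
        bool is_domain = false;
    };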
Kernel::KClientSession* session{}; - if (const auto result = port->GetClientPort().CreateSession(std::addressof(session)); + if (const auto result = port->GetClientPort().CreateSession( + std::addressof(session), std::make_shared<Kernel::SessionRequestManager>(kernel)); result.IsError()) { LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw); return result; diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp index 2a4bd64ab..273f79568 100644 --- a/src/core/hle/service/sm/sm_controller.cpp +++ b/src/core/hle/service/sm/sm_controller.cpp @@ -15,9 +15,10 @@ namespace Service::SM { void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) { - ASSERT_MSG(!ctx.Session()->IsDomain(), "Session is already a domain"); + ASSERT_MSG(!ctx.Session()->GetSessionRequestManager()->IsDomain(), + "Session is already a domain"); LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId()); - ctx.Session()->ConvertToDomain(); + ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd(); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp index cc679cc81..9e94a462f 100644 --- a/src/core/hle/service/sockets/bsd.cpp +++ b/src/core/hle/service/sockets/bsd.cpp @@ -929,7 +929,7 @@ BSD::BSD(Core::System& system_, const char* name) proxy_packet_received = room_member->BindOnProxyPacketReceived( [this](const Network::ProxyPacket& packet) { OnProxyPacketReceived(packet); }); } else { - LOG_ERROR(Service, "Network isn't initalized"); + LOG_ERROR(Service, "Network isn't initialized"); } } diff --git a/src/core/hle/service/time/system_clock_context_update_callback.cpp b/src/core/hle/service/time/system_clock_context_update_callback.cpp index a649bed3a..cafc04ee7 100644 --- a/src/core/hle/service/time/system_clock_context_update_callback.cpp +++ b/src/core/hle/service/time/system_clock_context_update_callback.cpp @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later -#include "core/hle/kernel/k_writable_event.h" +#include "core/hle/kernel/k_event.h" #include "core/hle/service/time/errors.h" #include "core/hle/service/time/system_clock_context_update_callback.h" @@ -20,13 +20,13 @@ bool SystemClockContextUpdateCallback::NeedUpdate(const SystemClockContext& valu } void SystemClockContextUpdateCallback::RegisterOperationEvent( - std::shared_ptr<Kernel::KWritableEvent>&& writable_event) { - operation_event_list.emplace_back(std::move(writable_event)); + std::shared_ptr<Kernel::KEvent>&& event) { + operation_event_list.emplace_back(std::move(event)); } void SystemClockContextUpdateCallback::BroadcastOperationEvent() { - for (const auto& writable_event : operation_event_list) { - writable_event->Signal(); + for (const auto& event : operation_event_list) { + event->Signal(); } } diff --git a/src/core/hle/service/time/system_clock_context_update_callback.h b/src/core/hle/service/time/system_clock_context_update_callback.h index 9c6caf196..bf657acd9 100644 --- a/src/core/hle/service/time/system_clock_context_update_callback.h +++ b/src/core/hle/service/time/system_clock_context_update_callback.h @@ -9,7 +9,7 @@ #include "core/hle/service/time/clock_types.h" namespace Kernel { -class KWritableEvent; +class KEvent; } namespace Service::Time::Clock { @@ -24,7 +24,7 @@ public: bool NeedUpdate(const SystemClockContext& value) const; - void 
RegisterOperationEvent(std::shared_ptr<Kernel::KWritableEvent>&& writable_event); + void RegisterOperationEvent(std::shared_ptr<Kernel::KEvent>&& event); void BroadcastOperationEvent(); @@ -37,7 +37,7 @@ protected: private: bool has_context{}; - std::vector<std::shared_ptr<Kernel::KWritableEvent>> operation_event_list; + std::vector<std::shared_ptr<Kernel::KEvent>> operation_event_list; }; } // namespace Service::Time::Clock diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp index b34febb50..8ef74f1f0 100644 --- a/src/core/hle/service/vi/display/vi_display.cpp +++ b/src/core/hle/service/vi/display/vi_display.cpp @@ -10,8 +10,8 @@ #include "core/core.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/service/kernel_helpers.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvflinger/buffer_item_consumer.h" #include "core/hle/service/nvflinger/buffer_queue_consumer.h" #include "core/hle/service/nvflinger/buffer_queue_core.h" @@ -19,6 +19,7 @@ #include "core/hle/service/nvflinger/hos_binder_driver_server.h" #include "core/hle/service/vi/display/vi_display.h" #include "core/hle/service/vi/layer/vi_layer.h" +#include "core/hle/service/vi/vi_results.h" namespace Service::VI { @@ -28,11 +29,13 @@ struct BufferQueue { std::unique_ptr<android::BufferQueueConsumer> consumer; }; -static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context) { +static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context, + Service::Nvidia::NvCore::NvMap& nvmap) { auto buffer_queue_core = std::make_shared<android::BufferQueueCore>(); - return {buffer_queue_core, - std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core), - std::make_unique<android::BufferQueueConsumer>(buffer_queue_core)}; + return { + buffer_queue_core, + std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core, nvmap), + std::make_unique<android::BufferQueueConsumer>(buffer_queue_core, nvmap)}; } Display::Display(u64 id, std::string name_, @@ -55,18 +58,29 @@ const Layer& Display::GetLayer(std::size_t index) const { return *layers.at(index); } -Kernel::KReadableEvent& Display::GetVSyncEvent() { - return vsync_event->GetReadableEvent(); +ResultVal<Kernel::KReadableEvent*> Display::GetVSyncEvent() { + if (got_vsync_event) { + return ResultPermissionDenied; + } + + got_vsync_event = true; + + return GetVSyncEventUnchecked(); +} + +Kernel::KReadableEvent* Display::GetVSyncEventUnchecked() { + return &vsync_event->GetReadableEvent(); } void Display::SignalVSyncEvent() { - vsync_event->GetWritableEvent().Signal(); + vsync_event->Signal(); } -void Display::CreateLayer(u64 layer_id, u32 binder_id) { +void Display::CreateLayer(u64 layer_id, u32 binder_id, + Service::Nvidia::NvCore::Container& nv_core) { ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment"); - auto [core, producer, consumer] = CreateBufferQueue(service_context); + auto [core, producer, consumer] = CreateBufferQueue(service_context, nv_core.GetNvMapFile()); auto buffer_item_consumer = std::make_shared<android::BufferItemConsumer>(std::move(consumer)); buffer_item_consumer->Connect(false); diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index 3838bb599..33d5f398c 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ 
b/src/core/hle/service/vi/display/vi_display.h @@ -9,6 +9,7 @@ #include "common/common_funcs.h" #include "common/common_types.h" +#include "core/hle/result.h" namespace Kernel { class KEvent; @@ -26,6 +27,11 @@ namespace Service::NVFlinger { class HosBinderDriverServer; } +namespace Service::Nvidia::NvCore { +class Container; +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::VI { class Layer; @@ -73,8 +79,16 @@ public: return layers.size(); } - /// Gets the readable vsync event. - Kernel::KReadableEvent& GetVSyncEvent(); + /** + * Gets the internal vsync event. + * + * @returns The internal Vsync event if it has not yet been retrieved, + * VI::ResultPermissionDenied otherwise. + */ + [[nodiscard]] ResultVal<Kernel::KReadableEvent*> GetVSyncEvent(); + + /// Gets the internal vsync event. + Kernel::KReadableEvent* GetVSyncEventUnchecked(); /// Signals the internal vsync event. void SignalVSyncEvent(); @@ -84,7 +98,7 @@ public: /// @param layer_id The ID to assign to the created layer. /// @param binder_id The ID assigned to the buffer queue. /// - void CreateLayer(u64 layer_id, u32 binder_id); + void CreateLayer(u64 layer_id, u32 binder_id, Service::Nvidia::NvCore::Container& core); /// Closes and removes a layer from this display with the given ID. /// @@ -118,6 +132,7 @@ private: std::vector<std::unique_ptr<Layer>> layers; Kernel::KEvent* vsync_event{}; + bool got_vsync_event{false}; }; } // namespace Service::VI diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index 546879648..9c917cacf 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp @@ -29,16 +29,12 @@ #include "core/hle/service/service.h" #include "core/hle/service/vi/vi.h" #include "core/hle/service/vi/vi_m.h" +#include "core/hle/service/vi/vi_results.h" #include "core/hle/service/vi/vi_s.h" #include "core/hle/service/vi/vi_u.h" namespace Service::VI { -constexpr Result ERR_OPERATION_FAILED{ErrorModule::VI, 1}; -constexpr Result ERR_PERMISSION_DENIED{ErrorModule::VI, 5}; -constexpr Result ERR_UNSUPPORTED{ErrorModule::VI, 6}; -constexpr Result ERR_NOT_FOUND{ErrorModule::VI, 7}; - struct DisplayInfo { /// The name of this particular display. char display_name[0x40]{"Default"}; @@ -62,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size"); class NativeWindow final { public: constexpr explicit NativeWindow(u32 id_) : id{id_} {} + constexpr explicit NativeWindow(const NativeWindow& other) = default; private: const u32 magic = 2; @@ -348,7 +345,7 @@ private: if (!layer_id) { LOG_ERROR(Service_VI, "Layer not found! display=0x{:016X}", display); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_NOT_FOUND); + rb.Push(ResultNotFound); return; } @@ -498,7 +495,7 @@ private: if (!display_id) { LOG_ERROR(Service_VI, "Display not found! display_name={}", name); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_NOT_FOUND); + rb.Push(ResultNotFound); return; } @@ -554,14 +551,14 @@ private: if (scaling_mode > NintendoScaleMode::PreserveAspectRatio) { LOG_ERROR(Service_VI, "Invalid scaling mode provided."); - rb.Push(ERR_OPERATION_FAILED); + rb.Push(ResultOperationFailed); return; } if (scaling_mode != NintendoScaleMode::ScaleToWindow && scaling_mode != NintendoScaleMode::PreserveAspectRatio) { LOG_ERROR(Service_VI, "Unsupported scaling mode supplied."); - rb.Push(ERR_UNSUPPORTED); + rb.Push(ResultNotSupported); return; } @@ -594,7 +591,7 @@ private: if (!display_id) { LOG_ERROR(Service_VI, "Layer not found! 
layer_id={}", layer_id); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_NOT_FOUND); + rb.Push(ResultNotFound); return; } @@ -602,7 +599,7 @@ private: if (!buffer_queue_id) { LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", *display_id); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_NOT_FOUND); + rb.Push(ResultNotFound); return; } @@ -640,7 +637,7 @@ private: if (!layer_id) { LOG_ERROR(Service_VI, "Layer not found! display_id={}", display_id); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_NOT_FOUND); + rb.Push(ResultNotFound); return; } @@ -648,7 +645,7 @@ private: if (!buffer_queue_id) { LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", display_id); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_NOT_FOUND); + rb.Push(ResultNotFound); return; } @@ -675,19 +672,23 @@ private: IPC::RequestParser rp{ctx}; const u64 display_id = rp.Pop<u64>(); - LOG_WARNING(Service_VI, "(STUBBED) called. display_id=0x{:016X}", display_id); + LOG_DEBUG(Service_VI, "called. display_id={}", display_id); const auto vsync_event = nv_flinger.FindVsyncEvent(display_id); - if (!vsync_event) { - LOG_ERROR(Service_VI, "Vsync event was not found for display_id={}", display_id); + if (vsync_event.Failed()) { + const auto result = vsync_event.Code(); + if (result == ResultNotFound) { + LOG_ERROR(Service_VI, "Vsync event was not found for display_id={}", display_id); + } + IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_NOT_FOUND); + rb.Push(result); return; } IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(ResultSuccess); - rb.PushCopyObjects(vsync_event); + rb.PushCopyObjects(*vsync_event); } void ConvertScalingMode(Kernel::HLERequestContext& ctx) { @@ -764,7 +765,7 @@ private: return ConvertedScaleMode::PreserveAspectRatio; default: LOG_ERROR(Service_VI, "Invalid scaling mode specified, mode={}", mode); - return ERR_OPERATION_FAILED; + return ResultOperationFailed; } } @@ -794,7 +795,7 @@ void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, Core::System& if (!IsValidServiceAccess(permission, policy)) { LOG_ERROR(Service_VI, "Permission denied for policy {}", policy); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERR_PERMISSION_DENIED); + rb.Push(ResultPermissionDenied); return; } diff --git a/src/core/hle/service/vi/vi_results.h b/src/core/hle/service/vi/vi_results.h new file mode 100644 index 000000000..22bac799f --- /dev/null +++ b/src/core/hle/service/vi/vi_results.h @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/hle/result.h" + +namespace Service::VI { + +constexpr Result ResultOperationFailed{ErrorModule::VI, 1}; +constexpr Result ResultPermissionDenied{ErrorModule::VI, 5}; +constexpr Result ResultNotSupported{ErrorModule::VI, 6}; +constexpr Result ResultNotFound{ErrorModule::VI, 7}; + +} // namespace Service::VI diff --git a/src/core/internal_network/network.cpp b/src/core/internal_network/network.cpp index cdf38a2a4..447fbffaa 100644 --- a/src/core/internal_network/network.cpp +++ b/src/core/internal_network/network.cpp @@ -364,7 +364,7 @@ std::pair<s32, Errno> Poll(std::vector<PollFD>& pollfds, s32 timeout) { std::vector<WSAPOLLFD> host_pollfds(pollfds.size()); std::transform(pollfds.begin(), pollfds.end(), host_pollfds.begin(), [](PollFD fd) { WSAPOLLFD result; - result.fd = fd.socket->fd; + result.fd = fd.socket->GetFD(); result.events = TranslatePollEvents(fd.events); result.revents = 0; return result; @@ -430,12 +430,12 @@ 
std::pair<SocketBase::AcceptResult, Errno> Socket::Accept() { return {AcceptResult{}, GetAndLogLastError()}; } - AcceptResult result; - result.socket = std::make_unique<Socket>(); - result.socket->fd = new_socket; - ASSERT(addrlen == sizeof(sockaddr_in)); - result.sockaddr_in = TranslateToSockAddrIn(addr); + + AcceptResult result{ + .socket = std::make_unique<Socket>(new_socket), + .sockaddr_in = TranslateToSockAddrIn(addr), + }; return {std::move(result), Errno::SUCCESS}; } diff --git a/src/core/internal_network/network_interface.cpp b/src/core/internal_network/network_interface.cpp index 0f0a66160..057fd3661 100644 --- a/src/core/internal_network/network_interface.cpp +++ b/src/core/internal_network/network_interface.cpp @@ -188,7 +188,7 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() { std::optional<NetworkInterface> GetSelectedNetworkInterface() { const auto& selected_network_interface = Settings::values.network_interface.GetValue(); const auto network_interfaces = Network::GetAvailableNetworkInterfaces(); - if (network_interfaces.size() == 0) { + if (network_interfaces.empty()) { LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces"); return std::nullopt; } @@ -206,4 +206,14 @@ std::optional<NetworkInterface> GetSelectedNetworkInterface() { return *res; } +void SelectFirstNetworkInterface() { + const auto network_interfaces = Network::GetAvailableNetworkInterfaces(); + + if (network_interfaces.empty()) { + return; + } + + Settings::values.network_interface.SetValue(network_interfaces[0].name); +} + } // namespace Network diff --git a/src/core/internal_network/network_interface.h b/src/core/internal_network/network_interface.h index 9b98b6b42..175e61b1f 100644 --- a/src/core/internal_network/network_interface.h +++ b/src/core/internal_network/network_interface.h @@ -24,5 +24,6 @@ struct NetworkInterface { std::vector<NetworkInterface> GetAvailableNetworkInterfaces(); std::optional<NetworkInterface> GetSelectedNetworkInterface(); +void SelectFirstNetworkInterface(); } // namespace Network diff --git a/src/core/internal_network/socket_proxy.cpp b/src/core/internal_network/socket_proxy.cpp index 0c746bd82..7d5d37bbc 100644 --- a/src/core/internal_network/socket_proxy.cpp +++ b/src/core/internal_network/socket_proxy.cpp @@ -6,6 +6,7 @@ #include "common/assert.h" #include "common/logging/log.h" +#include "common/zstd_compression.h" #include "core/internal_network/network.h" #include "core/internal_network/network_interface.h" #include "core/internal_network/socket_proxy.h" @@ -32,8 +33,11 @@ void ProxySocket::HandleProxyPacket(const ProxyPacket& packet) { return; } + auto decompressed = packet; + decompressed.data = Common::Compression::DecompressDataZSTD(packet.data); + std::lock_guard guard(packets_mutex); - received_packets.push(packet); + received_packets.push(decompressed); } template <typename T> @@ -185,6 +189,8 @@ std::pair<s32, Errno> ProxySocket::Send(const std::vector<u8>& message, int flag void ProxySocket::SendPacket(ProxyPacket& packet) { if (auto room_member = room_network.GetRoomMember().lock()) { if (room_member->IsConnected()) { + packet.data = Common::Compression::CompressDataZSTDDefault(packet.data.data(), + packet.data.size()); room_member->SendProxyPacket(packet); } } diff --git a/src/core/internal_network/sockets.h b/src/core/internal_network/sockets.h index a70429b19..2e328c645 100644 --- a/src/core/internal_network/sockets.h +++ b/src/core/internal_network/sockets.h @@ -32,6 +32,10 @@ public: std::unique_ptr<SocketBase> socket; 
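The socket_proxy hunk above compresses every proxy payload on send and decompresses it on receipt, so both peers must agree on the helper pair. A round-trip sketch built on the same calls the hunk makes, assuming the signatures implied there (vector in, vector out):

    #include <cassert>
    #include <vector>

    #include "common/common_types.h"      // for u8, yuzu's byte alias
    #include "common/zstd_compression.h"  // the helpers the hunk calls

    // Sender side compresses (ProxySocket::SendPacket); receiver side restores
    // (ProxySocket::HandleProxyPacket). The pair must stay symmetric, or proxy
    // payloads are silently corrupted in transit.
    void RoundTrip(const std::vector<u8>& payload) {
        const std::vector<u8> compressed =
            Common::Compression::CompressDataZSTDDefault(payload.data(), payload.size());
        const std::vector<u8> restored = Common::Compression::DecompressDataZSTD(compressed);
        assert(restored == payload);
    }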
SockAddrIn sockaddr_in; }; + + SocketBase() = default; + explicit SocketBase(SOCKET fd_) : fd{fd_} {} + virtual ~SocketBase() = default; virtual SocketBase& operator=(const SocketBase&) = delete; @@ -89,12 +93,19 @@ public: virtual void HandleProxyPacket(const ProxyPacket& packet) = 0; + [[nodiscard]] SOCKET GetFD() const { + return fd; + } + +protected: SOCKET fd = INVALID_SOCKET; }; class Socket : public SocketBase { public: Socket() = default; + explicit Socket(SOCKET fd_) : SocketBase{fd_} {} + ~Socket() override; Socket(const Socket&) = delete; diff --git a/src/core/loader/loader.cpp b/src/core/loader/loader.cpp index 104d16efa..f24474ed8 100644 --- a/src/core/loader/loader.cpp +++ b/src/core/loader/loader.cpp @@ -244,6 +244,10 @@ static std::unique_ptr<AppLoader> GetFileLoader(Core::System& system, FileSys::V std::unique_ptr<AppLoader> GetLoader(Core::System& system, FileSys::VirtualFile file, u64 program_id, std::size_t program_index) { + if (!file) { + return nullptr; + } + FileType type = IdentifyFile(file); const FileType filename_type = GuessFromFilename(file->GetName()); diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 34ad7cadd..3ca80c8ff 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -65,7 +65,7 @@ struct Memory::Impl { return {}; } - return system.DeviceMemory().GetPointer(paddr) + vaddr; + return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; } [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const { @@ -75,7 +75,7 @@ struct Memory::Impl { return {}; } - return system.DeviceMemory().GetPointer(paddr) + vaddr; + return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; } u8 Read8(const VAddr addr) { @@ -233,18 +233,17 @@ struct Memory::Impl { current_vaddr, src_addr, size); std::memset(dest_buffer, 0, copy_amount); }, - [&dest_buffer](const std::size_t copy_amount, const u8* const src_ptr) { + [&](const std::size_t copy_amount, const u8* const src_ptr) { std::memcpy(dest_buffer, src_ptr, copy_amount); }, - [&system = system, &dest_buffer](const VAddr current_vaddr, - const std::size_t copy_amount, - const u8* const host_ptr) { + [&](const VAddr current_vaddr, const std::size_t copy_amount, + const u8* const host_ptr) { if constexpr (!UNSAFE) { system.GPU().FlushRegion(current_vaddr, copy_amount); } std::memcpy(dest_buffer, host_ptr, copy_amount); }, - [&dest_buffer](const std::size_t copy_amount) { + [&](const std::size_t copy_amount) { dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; }); } @@ -267,17 +266,16 @@ struct Memory::Impl { "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", current_vaddr, dest_addr, size); }, - [&src_buffer](const std::size_t copy_amount, u8* const dest_ptr) { + [&](const std::size_t copy_amount, u8* const dest_ptr) { std::memcpy(dest_ptr, src_buffer, copy_amount); }, - [&system = system, &src_buffer](const VAddr current_vaddr, - const std::size_t copy_amount, u8* const host_ptr) { + [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) { if constexpr (!UNSAFE) { system.GPU().InvalidateRegion(current_vaddr, copy_amount); } std::memcpy(host_ptr, src_buffer, copy_amount); }, - [&src_buffer](const std::size_t copy_amount) { + [&](const std::size_t copy_amount) { src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; }); } @@ -301,8 +299,7 @@ struct Memory::Impl { [](const std::size_t copy_amount, u8* const dest_ptr) { std::memset(dest_ptr, 0, copy_amount); }, - [&system = system](const VAddr current_vaddr, const std::size_t copy_amount, - 
u8* const host_ptr) { + [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) { system.GPU().InvalidateRegion(current_vaddr, copy_amount); std::memset(host_ptr, 0, copy_amount); }, @@ -313,22 +310,20 @@ struct Memory::Impl { const std::size_t size) { WalkBlock( process, dest_addr, size, - [this, &process, &dest_addr, &src_addr, size](const std::size_t copy_amount, - const VAddr current_vaddr) { + [&](const std::size_t copy_amount, const VAddr current_vaddr) { LOG_ERROR(HW_Memory, "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", current_vaddr, src_addr, size); ZeroBlock(process, dest_addr, copy_amount); }, - [this, &process, &dest_addr](const std::size_t copy_amount, const u8* const src_ptr) { + [&](const std::size_t copy_amount, const u8* const src_ptr) { WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount); }, - [this, &system = system, &process, &dest_addr]( - const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) { + [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) { system.GPU().FlushRegion(current_vaddr, copy_amount); WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount); }, - [&dest_addr, &src_addr](const std::size_t copy_amount) { + [&](const std::size_t copy_amount) { dest_addr += static_cast<VAddr>(copy_amount); src_addr += static_cast<VAddr>(copy_amount); }); @@ -499,7 +494,7 @@ struct Memory::Impl { } else { while (base != end) { page_table.pointers[base].Store( - system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type); + system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type); page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS); ASSERT_MSG(page_table.pointers[base].Pointer(), @@ -551,6 +546,11 @@ struct Memory::Impl { []() {}); } + [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const { + return GetPointerImpl( + vaddr, []() {}, []() {}); + } + /** * Reads a particular data type out of memory at the given virtual address. 
* @@ -570,7 +570,7 @@ struct Memory::Impl { [vaddr]() { LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr); }, - [&system = system, vaddr]() { system.GPU().FlushRegion(vaddr, sizeof(T)); }); + [&]() { system.GPU().FlushRegion(vaddr, sizeof(T)); }); if (ptr) { std::memcpy(&result, ptr, sizeof(T)); } @@ -594,7 +594,7 @@ struct Memory::Impl { LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8, vaddr, static_cast<u64>(data)); }, - [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); }); + [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); }); if (ptr) { std::memcpy(ptr, &data, sizeof(T)); } @@ -608,7 +608,7 @@ struct Memory::Impl { LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8, vaddr, static_cast<u64>(data)); }, - [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); }); + [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); }); if (ptr) { const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr); return Common::AtomicCompareAndSwap(volatile_pointer, data, expected); @@ -623,7 +623,7 @@ struct Memory::Impl { LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}", vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0])); }, - [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); }); + [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); }); if (ptr) { const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr); return Common::AtomicCompareAndSwap(volatile_pointer, data, expected); @@ -686,6 +686,10 @@ u8* Memory::GetPointer(VAddr vaddr) { return impl->GetPointer(vaddr); } +u8* Memory::GetPointerSilent(VAddr vaddr) { + return impl->GetPointerSilent(vaddr); +} + const u8* Memory::GetPointer(VAddr vaddr) const { return impl->GetPointer(vaddr); } diff --git a/src/core/memory.h b/src/core/memory.h index a11ff8766..81eac448b 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -114,6 +114,7 @@ public: * If the address is not valid, nullptr will be returned. 
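GetPointerSilent above reuses GetPointerImpl, passing no-op callbacks where the plain GetPointer logs unmapped accesses; the error path is a parameter rather than being hard-coded. A simplified, self-contained illustration of that shape, with an invented map standing in for the real page table:

    #include <cstdint>
    #include <unordered_map>

    using VAddr = std::uint64_t;

    // The unmapped-address path is supplied by the caller, so a logging lookup
    // (GetPointer) and a quiet one (GetPointerSilent) share one implementation.
    template <typename OnUnmapped>
    std::uint8_t* LookupPointer(std::unordered_map<VAddr, std::uint8_t>& memory, VAddr vaddr,
                                OnUnmapped&& on_unmapped) {
        const auto it = memory.find(vaddr);
        if (it == memory.end()) {
            on_unmapped();
            return nullptr;
        }
        return &it->second;
    }

    // GetPointer-style:        LookupPointer(memory, addr, [] { /* log error */ });
    // GetPointerSilent-style:  LookupPointer(memory, addr, [] {});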
*/ u8* GetPointer(VAddr vaddr); + u8* GetPointerSilent(VAddr vaddr); template <typename T> T* GetPointer(VAddr vaddr) { diff --git a/src/dedicated_room/CMakeLists.txt b/src/dedicated_room/CMakeLists.txt index 1efdbc1f7..2d9731f19 100644 --- a/src/dedicated_room/CMakeLists.txt +++ b/src/dedicated_room/CMakeLists.txt @@ -23,5 +23,5 @@ endif() target_link_libraries(yuzu-room PRIVATE ${PLATFORM_LIBRARIES} Threads::Threads) if(UNIX AND NOT APPLE) - install(TARGETS yuzu-room RUNTIME DESTINATION "${CMAKE_INSTALL_PREFIX}/bin") + install(TARGETS yuzu-room) endif() diff --git a/src/dedicated_room/yuzu_room.cpp b/src/dedicated_room/yuzu_room.cpp index 7b6deba41..359891883 100644 --- a/src/dedicated_room/yuzu_room.cpp +++ b/src/dedicated_room/yuzu_room.cpp @@ -76,7 +76,18 @@ static constexpr char BanListMagic[] = "YuzuRoom-BanList-1"; static constexpr char token_delimiter{':'}; static void PadToken(std::string& token) { - while (token.size() % 4 != 0) { + std::size_t outlen = 0; + + std::array<unsigned char, 512> output{}; + std::array<unsigned char, 2048> roundtrip{}; + for (size_t i = 0; i < 3; i++) { + mbedtls_base64_decode(output.data(), output.size(), &outlen, + reinterpret_cast<const unsigned char*>(token.c_str()), + token.length()); + mbedtls_base64_encode(roundtrip.data(), roundtrip.size(), &outlen, output.data(), outlen); + if (memcmp(roundtrip.data(), token.data(), token.size()) == 0) { + break; + } token.push_back('='); } } diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt index 4b91b88ce..cc6f0ffc0 100644 --- a/src/input_common/CMakeLists.txt +++ b/src/input_common/CMakeLists.txt @@ -18,6 +18,8 @@ add_library(input_common STATIC drivers/touch_screen.h drivers/udp_client.cpp drivers/udp_client.h + drivers/virtual_amiibo.cpp + drivers/virtual_amiibo.h helpers/stick_from_buttons.cpp helpers/stick_from_buttons.h helpers/touch_from_buttons.cpp @@ -37,21 +39,14 @@ add_library(input_common STATIC if (MSVC) target_compile_options(input_common PRIVATE /W4 - /WX /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data - /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data - /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data + /we4800 # Implicit conversion from 'type' to bool. 
Possible information loss ) else() target_compile_options(input_common PRIVATE - -Werror -Werror=conversion - -Werror=ignored-qualifiers - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable> - -Werror=unused-variable ) endif() diff --git a/src/input_common/drivers/gc_adapter.cpp b/src/input_common/drivers/gc_adapter.cpp index 27a0ffb0d..f4dd24e7d 100644 --- a/src/input_common/drivers/gc_adapter.cpp +++ b/src/input_common/drivers/gc_adapter.cpp @@ -90,7 +90,7 @@ GCAdapter::~GCAdapter() { void GCAdapter::AdapterInputThread(std::stop_token stop_token) { LOG_DEBUG(Input, "Input thread started"); - Common::SetCurrentThreadName("yuzu:input:GCAdapter"); + Common::SetCurrentThreadName("GCAdapter"); s32 payload_size{}; AdapterPayload adapter_payload{}; @@ -214,7 +214,7 @@ void GCAdapter::UpdateStateAxes(std::size_t port, const AdapterPayload& adapter_ } void GCAdapter::AdapterScanThread(std::stop_token stop_token) { - Common::SetCurrentThreadName("yuzu:input:ScanGCAdapter"); + Common::SetCurrentThreadName("ScanGCAdapter"); usb_adapter_handle = nullptr; pads = {}; while (!stop_token.stop_requested() && !Setup()) { diff --git a/src/input_common/drivers/mouse.cpp b/src/input_common/drivers/mouse.cpp index 4909fa8d7..98c3157a8 100644 --- a/src/input_common/drivers/mouse.cpp +++ b/src/input_common/drivers/mouse.cpp @@ -37,7 +37,7 @@ Mouse::Mouse(std::string input_engine_) : InputEngine(std::move(input_engine_)) } void Mouse::UpdateThread(std::stop_token stop_token) { - Common::SetCurrentThreadName("yuzu:input:Mouse"); + Common::SetCurrentThreadName("Mouse"); constexpr int update_time = 10; while (!stop_token.stop_requested()) { if (Settings::values.mouse_panning && !Settings::values.mouse_enabled) { diff --git a/src/input_common/drivers/sdl_driver.cpp b/src/input_common/drivers/sdl_driver.cpp index 5cc1ccbd9..c175a8853 100644 --- a/src/input_common/drivers/sdl_driver.cpp +++ b/src/input_common/drivers/sdl_driver.cpp @@ -40,8 +40,8 @@ public: void EnableMotion() { if (sdl_controller) { SDL_GameController* controller = sdl_controller.get(); - has_accel = SDL_GameControllerHasSensor(controller, SDL_SENSOR_ACCEL); - has_gyro = SDL_GameControllerHasSensor(controller, SDL_SENSOR_GYRO); + has_accel = SDL_GameControllerHasSensor(controller, SDL_SENSOR_ACCEL) == SDL_TRUE; + has_gyro = SDL_GameControllerHasSensor(controller, SDL_SENSOR_GYRO) == SDL_TRUE; if (has_accel) { SDL_GameControllerSetSensorEnabled(controller, SDL_SENSOR_ACCEL, SDL_TRUE); } @@ -436,7 +436,7 @@ SDLDriver::SDLDriver(std::string input_engine_) : InputEngine(std::move(input_en initialized = true; if (start_thread) { poll_thread = std::thread([this] { - Common::SetCurrentThreadName("yuzu:input:SDL"); + Common::SetCurrentThreadName("SDL_MainLoop"); using namespace std::chrono_literals; while (initialized) { SDL_PumpEvents(); @@ -444,7 +444,7 @@ SDLDriver::SDLDriver(std::string input_engine_) : InputEngine(std::move(input_en } }); vibration_thread = std::thread([this] { - Common::SetCurrentThreadName("yuzu:input:SDL_Vibration"); + Common::SetCurrentThreadName("SDL_Vibration"); using namespace std::chrono_literals; while (initialized) { SendVibrations(); diff --git a/src/input_common/drivers/virtual_amiibo.cpp b/src/input_common/drivers/virtual_amiibo.cpp new file mode 100644 index 000000000..0cd5129da --- /dev/null +++ b/src/input_common/drivers/virtual_amiibo.cpp @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: 
GPL-3.0-or-later + +#include <cstring> +#include <fmt/format.h> + +#include "common/fs/file.h" +#include "common/fs/fs.h" +#include "common/fs/path_util.h" +#include "common/logging/log.h" +#include "common/settings.h" +#include "input_common/drivers/virtual_amiibo.h" + +namespace InputCommon { +constexpr PadIdentifier identifier = { + .guid = Common::UUID{}, + .port = 0, + .pad = 0, +}; + +VirtualAmiibo::VirtualAmiibo(std::string input_engine_) : InputEngine(std::move(input_engine_)) {} + +VirtualAmiibo::~VirtualAmiibo() = default; + +Common::Input::PollingError VirtualAmiibo::SetPollingMode( + [[maybe_unused]] const PadIdentifier& identifier_, + const Common::Input::PollingMode polling_mode_) { + polling_mode = polling_mode_; + + if (polling_mode == Common::Input::PollingMode::NFC) { + if (state == State::Initialized) { + state = State::WaitingForAmiibo; + } + } else { + if (state == State::AmiiboIsOpen) { + CloseAmiibo(); + } + } + + return Common::Input::PollingError::None; +} + +Common::Input::NfcState VirtualAmiibo::SupportsNfc( + [[maybe_unused]] const PadIdentifier& identifier_) const { + return Common::Input::NfcState::Success; +} + +Common::Input::NfcState VirtualAmiibo::WriteNfcData( + [[maybe_unused]] const PadIdentifier& identifier_, const std::vector<u8>& data) { + const Common::FS::IOFile amiibo_file{file_path, Common::FS::FileAccessMode::ReadWrite, + Common::FS::FileType::BinaryFile}; + + if (!amiibo_file.IsOpen()) { + LOG_ERROR(Core, "Amiibo is already in use"); + return Common::Input::NfcState::WriteFailed; + } + + if (!amiibo_file.Write(data)) { + LOG_ERROR(Service_NFP, "Error writing to file"); + return Common::Input::NfcState::WriteFailed; + } + + return Common::Input::NfcState::Success; +} + +VirtualAmiibo::State VirtualAmiibo::GetCurrentState() const { + return state; +} + +VirtualAmiibo::Info VirtualAmiibo::LoadAmiibo(const std::string& filename) { + const Common::FS::IOFile amiibo_file{filename, Common::FS::FileAccessMode::Read, + Common::FS::FileType::BinaryFile}; + + if (state != State::WaitingForAmiibo) { + return Info::WrongDeviceState; + } + + if (!amiibo_file.IsOpen()) { + return Info::UnableToLoad; + } + + amiibo_data.resize(amiibo_size); + + if (amiibo_file.Read(amiibo_data) < amiibo_size_without_password) { + return Info::NotAnAmiibo; + } + + file_path = filename; + state = State::AmiiboIsOpen; + SetNfc(identifier, {Common::Input::NfcState::NewAmiibo, amiibo_data}); + return Info::Success; +} + +VirtualAmiibo::Info VirtualAmiibo::CloseAmiibo() { + state = polling_mode == Common::Input::PollingMode::NFC ? 
State::WaitingForAmiibo + : State::Initialized; + SetNfc(identifier, {Common::Input::NfcState::AmiiboRemoved, {}}); + return Info::Success; +} + +} // namespace InputCommon diff --git a/src/input_common/drivers/virtual_amiibo.h b/src/input_common/drivers/virtual_amiibo.h new file mode 100644 index 000000000..9eac07544 --- /dev/null +++ b/src/input_common/drivers/virtual_amiibo.h @@ -0,0 +1,61 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <array> +#include <string> +#include <vector> + +#include "common/common_types.h" +#include "input_common/input_engine.h" + +namespace Common::FS { +class IOFile; +} + +namespace InputCommon { + +class VirtualAmiibo final : public InputEngine { +public: + enum class State { + Initialized, + WaitingForAmiibo, + AmiiboIsOpen, + }; + + enum class Info { + Success, + UnableToLoad, + NotAnAmiibo, + WrongDeviceState, + Unknown, + }; + + explicit VirtualAmiibo(std::string input_engine_); + ~VirtualAmiibo() override; + + // Sets polling mode to a controller + Common::Input::PollingError SetPollingMode( + const PadIdentifier& identifier_, const Common::Input::PollingMode polling_mode_) override; + + Common::Input::NfcState SupportsNfc(const PadIdentifier& identifier_) const override; + + Common::Input::NfcState WriteNfcData(const PadIdentifier& identifier_, + const std::vector<u8>& data) override; + + State GetCurrentState() const; + + Info LoadAmiibo(const std::string& amiibo_file); + Info CloseAmiibo(); + +private: + static constexpr std::size_t amiibo_size = 0x21C; + static constexpr std::size_t amiibo_size_without_password = amiibo_size - 0x8; + + std::string file_path{}; + State state{State::Initialized}; + std::vector<u8> amiibo_data; + Common::Input::PollingMode polling_mode{Common::Input::PollingMode::Pasive}; +}; +} // namespace InputCommon diff --git a/src/input_common/input_engine.cpp b/src/input_common/input_engine.cpp index 6ede0e4b0..61cfd0911 100644 --- a/src/input_common/input_engine.cpp +++ b/src/input_common/input_engine.cpp @@ -102,6 +102,17 @@ void InputEngine::SetCamera(const PadIdentifier& identifier, TriggerOnCameraChange(identifier, value); } +void InputEngine::SetNfc(const PadIdentifier& identifier, const Common::Input::NfcStatus& value) { + { + std::scoped_lock lock{mutex}; + ControllerData& controller = controller_list.at(identifier); + if (!configuring) { + controller.nfc = value; + } + } + TriggerOnNfcChange(identifier, value); +} + bool InputEngine::GetButton(const PadIdentifier& identifier, int button) const { std::scoped_lock lock{mutex}; const auto controller_iter = controller_list.find(identifier); @@ -189,6 +200,18 @@ Common::Input::CameraStatus InputEngine::GetCamera(const PadIdentifier& identifi return controller.camera; } +Common::Input::NfcStatus InputEngine::GetNfc(const PadIdentifier& identifier) const { + std::scoped_lock lock{mutex}; + const auto controller_iter = controller_list.find(identifier); + if (controller_iter == controller_list.cend()) { + LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(), + identifier.pad, identifier.port); + return {}; + } + const ControllerData& controller = controller_iter->second; + return controller.nfc; +} + void InputEngine::ResetButtonState() { for (const auto& controller : controller_list) { for (const auto& button : controller.second.buttons) { @@ -355,6 +378,20 @@ void InputEngine::TriggerOnCameraChange(const PadIdentifier& identifier, } } +void 
InputEngine::TriggerOnNfcChange(const PadIdentifier& identifier, + [[maybe_unused]] const Common::Input::NfcStatus& value) { + std::scoped_lock lock{mutex_callback}; + for (const auto& poller_pair : callback_list) { + const InputIdentifier& poller = poller_pair.second; + if (!IsInputIdentifierEqual(poller, identifier, EngineInputType::Nfc, 0)) { + continue; + } + if (poller.callback.on_change) { + poller.callback.on_change(); + } + } +} + bool InputEngine::IsInputIdentifierEqual(const InputIdentifier& input_identifier, const PadIdentifier& identifier, EngineInputType type, int index) const { diff --git a/src/input_common/input_engine.h b/src/input_common/input_engine.h index f6b3c4610..cfbdb26bd 100644 --- a/src/input_common/input_engine.h +++ b/src/input_common/input_engine.h @@ -42,6 +42,7 @@ enum class EngineInputType { Camera, HatButton, Motion, + Nfc, }; namespace std { @@ -127,6 +128,18 @@ public: return Common::Input::CameraError::NotSupported; } + // Request nfc data from a controller + virtual Common::Input::NfcState SupportsNfc( + [[maybe_unused]] const PadIdentifier& identifier) const { + return Common::Input::NfcState::NotSupported; + } + + // Writes data to an nfc tag + virtual Common::Input::NfcState WriteNfcData([[maybe_unused]] const PadIdentifier& identifier, + [[maybe_unused]] const std::vector<u8>& data) { + return Common::Input::NfcState::NotSupported; + } + // Returns the engine name [[nodiscard]] const std::string& GetEngineName() const; @@ -183,6 +196,7 @@ public: Common::Input::BatteryLevel GetBattery(const PadIdentifier& identifier) const; BasicMotion GetMotion(const PadIdentifier& identifier, int motion) const; Common::Input::CameraStatus GetCamera(const PadIdentifier& identifier) const; + Common::Input::NfcStatus GetNfc(const PadIdentifier& identifier) const; int SetCallback(InputIdentifier input_identifier); void SetMappingCallback(MappingCallback callback); @@ -195,6 +209,7 @@ protected: void SetBattery(const PadIdentifier& identifier, Common::Input::BatteryLevel value); void SetMotion(const PadIdentifier& identifier, int motion, const BasicMotion& value); void SetCamera(const PadIdentifier& identifier, const Common::Input::CameraStatus& value); + void SetNfc(const PadIdentifier& identifier, const Common::Input::NfcStatus& value); virtual std::string GetHatButtonName([[maybe_unused]] u8 direction_value) const { return "Unknown"; @@ -208,6 +223,7 @@ private: std::unordered_map<int, BasicMotion> motions; Common::Input::BatteryLevel battery{}; Common::Input::CameraStatus camera{}; + Common::Input::NfcStatus nfc{}; }; void TriggerOnButtonChange(const PadIdentifier& identifier, int button, bool value); @@ -218,6 +234,7 @@ private: const BasicMotion& value); void TriggerOnCameraChange(const PadIdentifier& identifier, const Common::Input::CameraStatus& value); + void TriggerOnNfcChange(const PadIdentifier& identifier, const Common::Input::NfcStatus& value); bool IsInputIdentifierEqual(const InputIdentifier& input_identifier, const PadIdentifier& identifier, EngineInputType type, diff --git a/src/input_common/input_poller.cpp b/src/input_common/input_poller.cpp index ffb9b945e..ccc3076ca 100644 --- a/src/input_common/input_poller.cpp +++ b/src/input_common/input_poller.cpp @@ -691,9 +691,56 @@ public: } void OnChange() { + const auto camera_status = GetStatus(); + const Common::Input::CallbackStatus status{ .type = Common::Input::InputType::IrSensor, - .camera_status = GetStatus(), + .camera_status = camera_status.format, + .raw_data = camera_status.data, + }; + + 
TriggerOnChange(status); + } + +private: + const PadIdentifier identifier; + int callback_key; + InputEngine* input_engine; +}; + +class InputFromNfc final : public Common::Input::InputDevice { +public: + explicit InputFromNfc(PadIdentifier identifier_, InputEngine* input_engine_) + : identifier(identifier_), input_engine(input_engine_) { + UpdateCallback engine_callback{[this]() { OnChange(); }}; + const InputIdentifier input_identifier{ + .identifier = identifier, + .type = EngineInputType::Nfc, + .index = 0, + .callback = engine_callback, + }; + callback_key = input_engine->SetCallback(input_identifier); + } + + ~InputFromNfc() override { + input_engine->DeleteCallback(callback_key); + } + + Common::Input::NfcStatus GetStatus() const { + return input_engine->GetNfc(identifier); + } + + void ForceUpdate() override { + OnChange(); + } + + void OnChange() { + const auto nfc_status = GetStatus(); + + const Common::Input::CallbackStatus status{ + .type = Common::Input::InputType::Nfc, + .nfc_status = nfc_status.state, + .raw_data = nfc_status.data, }; TriggerOnChange(status); @@ -727,6 +774,14 @@ public: return input_engine->SetCameraFormat(identifier, camera_format); } + Common::Input::NfcState SupportsNfc() const override { + return input_engine->SupportsNfc(identifier); + } + + Common::Input::NfcState WriteNfcData(const std::vector<u8>& data) override { + return input_engine->WriteNfcData(identifier, data); + } + private: const PadIdentifier identifier; InputEngine* input_engine; @@ -742,8 +797,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateButtonDevice( const auto button_id = params.Get("button", 0); const auto keyboard_key = params.Get("code", 0); - const auto toggle = params.Get("toggle", false); - const auto inverted = params.Get("inverted", false); + const auto toggle = params.Get("toggle", false) != 0; + const auto inverted = params.Get("inverted", false) != 0; input_engine->PreSetController(identifier); input_engine->PreSetButton(identifier, button_id); input_engine->PreSetButton(identifier, keyboard_key); @@ -765,8 +820,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateHatButtonDevice( const auto button_id = params.Get("hat", 0); const auto direction = input_engine->GetHatButtonId(params.Get("direction", "")); - const auto toggle = params.Get("toggle", false); - const auto inverted = params.Get("inverted", false); + const auto toggle = params.Get("toggle", false) != 0; + const auto inverted = params.Get("inverted", false) != 0; input_engine->PreSetController(identifier); input_engine->PreSetHatButton(identifier, button_id); @@ -824,7 +879,7 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateAnalogDevice( .threshold = std::clamp(params.Get("threshold", 0.5f), 0.0f, 1.0f), .offset = std::clamp(params.Get("offset", 0.0f), -1.0f, 1.0f), .inverted = params.Get("invert", "+") == "-", - .toggle = static_cast<bool>(params.Get("toggle", false)), + .toggle = params.Get("toggle", false) != 0, }; input_engine->PreSetController(identifier); input_engine->PreSetAxis(identifier, axis); @@ -840,8 +895,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateTriggerDevice( }; const auto button = params.Get("button", 0); - const auto toggle = params.Get("toggle", false); - const auto inverted = params.Get("inverted", false); + const auto toggle = params.Get("toggle", false) != 0; + const auto inverted = params.Get("inverted", false) != 0; const auto axis = params.Get("axis", 0); const Common::Input::AnalogProperties properties = { 
@@ -871,8 +926,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateTouchDevice( }; const auto button = params.Get("button", 0); - const auto toggle = params.Get("toggle", false); - const auto inverted = params.Get("inverted", false); + const auto toggle = params.Get("toggle", false) != 0; + const auto inverted = params.Get("inverted", false) != 0; const auto axis_x = params.Get("axis_x", 0); const Common::Input::AnalogProperties properties_x = { @@ -978,6 +1033,18 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateCameraDevice( return std::make_unique<InputFromCamera>(identifier, input_engine.get()); } +std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateNfcDevice( + const Common::ParamPackage& params) { + const PadIdentifier identifier = { + .guid = Common::UUID{params.Get("guid", "")}, + .port = static_cast<std::size_t>(params.Get("port", 0)), + .pad = static_cast<std::size_t>(params.Get("pad", 0)), + }; + + input_engine->PreSetController(identifier); + return std::make_unique<InputFromNfc>(identifier, input_engine.get()); +} + InputFactory::InputFactory(std::shared_ptr<InputEngine> input_engine_) : input_engine(std::move(input_engine_)) {} @@ -989,6 +1056,9 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::Create( if (params.Has("camera")) { return CreateCameraDevice(params); } + if (params.Has("nfc")) { + return CreateNfcDevice(params); + } if (params.Has("button") && params.Has("axis")) { return CreateTriggerDevice(params); } diff --git a/src/input_common/input_poller.h b/src/input_common/input_poller.h index 4410a8415..d7db13ce4 100644 --- a/src/input_common/input_poller.h +++ b/src/input_common/input_poller.h @@ -222,6 +222,16 @@ private: std::unique_ptr<Common::Input::InputDevice> CreateCameraDevice( const Common::ParamPackage& params); + /** + * Creates an NFC device from the parameters given. 
+ * @param params contains parameters for creating the device: + * - "guid": text string for identifying controllers + * - "port": port of the connected device + * - "pad": slot of the connected controller + * @returns a unique input device with the parameters specified + */ + std::unique_ptr<Common::Input::InputDevice> CreateNfcDevice(const Common::ParamPackage& params); + std::shared_ptr<InputEngine> input_engine; }; } // namespace InputCommon diff --git a/src/input_common/main.cpp b/src/input_common/main.cpp index 75a57b9fc..b2064ef95 100644 --- a/src/input_common/main.cpp +++ b/src/input_common/main.cpp @@ -11,6 +11,7 @@ #include "input_common/drivers/tas_input.h" #include "input_common/drivers/touch_screen.h" #include "input_common/drivers/udp_client.h" +#include "input_common/drivers/virtual_amiibo.h" #include "input_common/helpers/stick_from_buttons.h" #include "input_common/helpers/touch_from_buttons.h" #include "input_common/input_engine.h" @@ -87,6 +88,15 @@ struct InputSubsystem::Impl { Common::Input::RegisterFactory<Common::Input::OutputDevice>(camera->GetEngineName(), camera_output_factory); + virtual_amiibo = std::make_shared<VirtualAmiibo>("virtual_amiibo"); + virtual_amiibo->SetMappingCallback(mapping_callback); + virtual_amiibo_input_factory = std::make_shared<InputFactory>(virtual_amiibo); + virtual_amiibo_output_factory = std::make_shared<OutputFactory>(virtual_amiibo); + Common::Input::RegisterFactory<Common::Input::InputDevice>(virtual_amiibo->GetEngineName(), + virtual_amiibo_input_factory); + Common::Input::RegisterFactory<Common::Input::OutputDevice>(virtual_amiibo->GetEngineName(), + virtual_amiibo_output_factory); + #ifdef HAVE_SDL2 sdl = std::make_shared<SDLDriver>("sdl"); sdl->SetMappingCallback(mapping_callback); @@ -327,6 +337,7 @@ struct InputSubsystem::Impl { std::shared_ptr<TasInput::Tas> tas_input; std::shared_ptr<CemuhookUDP::UDPClient> udp_client; std::shared_ptr<Camera> camera; + std::shared_ptr<VirtualAmiibo> virtual_amiibo; std::shared_ptr<InputFactory> keyboard_factory; std::shared_ptr<InputFactory> mouse_factory; @@ -335,6 +346,7 @@ struct InputSubsystem::Impl { std::shared_ptr<InputFactory> udp_client_input_factory; std::shared_ptr<InputFactory> tas_input_factory; std::shared_ptr<InputFactory> camera_input_factory; + std::shared_ptr<InputFactory> virtual_amiibo_input_factory; std::shared_ptr<OutputFactory> keyboard_output_factory; std::shared_ptr<OutputFactory> mouse_output_factory; @@ -342,6 +354,7 @@ struct InputSubsystem::Impl { std::shared_ptr<OutputFactory> udp_client_output_factory; std::shared_ptr<OutputFactory> tas_output_factory; std::shared_ptr<OutputFactory> camera_output_factory; + std::shared_ptr<OutputFactory> virtual_amiibo_output_factory; #ifdef HAVE_SDL2 std::shared_ptr<SDLDriver> sdl; @@ -402,6 +415,14 @@ const Camera* InputSubsystem::GetCamera() const { return impl->camera.get(); } +VirtualAmiibo* InputSubsystem::GetVirtualAmiibo() { + return impl->virtual_amiibo.get(); +} + +const VirtualAmiibo* InputSubsystem::GetVirtualAmiibo() const { + return impl->virtual_amiibo.get(); +} + std::vector<Common::ParamPackage> InputSubsystem::GetInputDevices() const { return impl->GetInputDevices(); } diff --git a/src/input_common/main.h b/src/input_common/main.h index 9a969e747..ced252383 100644 --- a/src/input_common/main.h +++ b/src/input_common/main.h @@ -33,6 +33,7 @@ class Camera; class Keyboard; class Mouse; class TouchScreen; +class VirtualAmiibo; struct MappingData; } // namespace InputCommon @@ -101,6 +102,12 @@ public: /// Retrieves 
the underlying camera input device. [[nodiscard]] const Camera* GetCamera() const; + /// Retrieves the underlying virtual amiibo input device. + [[nodiscard]] VirtualAmiibo* GetVirtualAmiibo(); + + /// Retrieves the underlying virtual amiibo input device. + [[nodiscard]] const VirtualAmiibo* GetVirtualAmiibo() const; + /** * Returns all available input devices that this Factory can create a new device with. * Each returned ParamPackage should have a `display` field used for display, a `engine` field diff --git a/src/network/network.cpp b/src/network/network.cpp index 0841e4134..6652a186b 100644 --- a/src/network/network.cpp +++ b/src/network/network.cpp @@ -15,7 +15,7 @@ RoomNetwork::RoomNetwork() { bool RoomNetwork::Init() { if (enet_initialize() != 0) { - LOG_ERROR(Network, "Error initalizing ENet"); + LOG_ERROR(Network, "Error initializing ENet"); return false; } m_room = std::make_shared<Room>(); diff --git a/src/network/room.cpp b/src/network/room.cpp index 8c63b255b..dc5dbce7f 100644 --- a/src/network/room.cpp +++ b/src/network/room.cpp @@ -212,6 +212,12 @@ public: void HandleProxyPacket(const ENetEvent* event); /** + * Broadcasts this packet to all members except the sender. + * @param event The ENet event containing the data + */ + void HandleLdnPacket(const ENetEvent* event); + + /** * Extracts a chat entry from a received ENet packet and adds it to the chat queue. * @param event The ENet event that was received. */ @@ -247,6 +253,9 @@ void Room::RoomImpl::ServerLoop() { case IdProxyPacket: HandleProxyPacket(&event); break; + case IdLdnPacket: + HandleLdnPacket(&event); + break; case IdChatMessage: HandleChatPacket(&event); break; @@ -861,6 +870,60 @@ void Room::RoomImpl::HandleProxyPacket(const ENetEvent* event) { enet_host_flush(server); } +void Room::RoomImpl::HandleLdnPacket(const ENetEvent* event) { + Packet in_packet; + in_packet.Append(event->packet->data, event->packet->dataLength); + + in_packet.IgnoreBytes(sizeof(u8)); // Message type + + in_packet.IgnoreBytes(sizeof(u8)); // LAN packet type + in_packet.IgnoreBytes(sizeof(IPv4Address)); // Local IP + + IPv4Address remote_ip; + in_packet.Read(remote_ip); // Remote IP + + bool broadcast; + in_packet.Read(broadcast); // Broadcast + + Packet out_packet; + out_packet.Append(event->packet->data, event->packet->dataLength); + ENetPacket* enet_packet = enet_packet_create(out_packet.GetData(), out_packet.GetDataSize(), + ENET_PACKET_FLAG_RELIABLE); + + const auto& destination_address = remote_ip; + if (broadcast) { // Send the data to everyone except the sender + std::lock_guard lock(member_mutex); + bool sent_packet = false; + for (const auto& member : members) { + if (member.peer != event->peer) { + sent_packet = true; + enet_peer_send(member.peer, 0, enet_packet); + } + } + + if (!sent_packet) { + enet_packet_destroy(enet_packet); + } + } else { + std::lock_guard lock(member_mutex); + auto member = std::find_if(members.begin(), members.end(), + [destination_address](const Member& member_entry) -> bool { + return member_entry.fake_ip == destination_address; + }); + if (member != members.end()) { + enet_peer_send(member->peer, 0, enet_packet); + } else { + LOG_ERROR(Network, + "Attempting to send to unknown IP address: " + "{}.{}.{}.{}", + destination_address[0], destination_address[1], destination_address[2], + destination_address[3]); + enet_packet_destroy(enet_packet); + } + } + enet_host_flush(server); +} + void Room::RoomImpl::HandleChatPacket(const ENetEvent* event) { Packet in_packet; in_packet.Append(event->packet->data, 
event->packet->dataLength); diff --git a/src/network/room.h b/src/network/room.h index c2a4b1a70..edbd3ecfb 100644 --- a/src/network/room.h +++ b/src/network/room.h @@ -40,6 +40,7 @@ enum RoomMessageTypes : u8 { IdRoomInformation, IdSetGameInfo, IdProxyPacket, + IdLdnPacket, IdChatMessage, IdNameCollision, IdIpCollision, diff --git a/src/network/room_member.cpp b/src/network/room_member.cpp index 06818af78..b94cb24ad 100644 --- a/src/network/room_member.cpp +++ b/src/network/room_member.cpp @@ -58,6 +58,7 @@ public: private: CallbackSet<ProxyPacket> callback_set_proxy_packet; + CallbackSet<LDNPacket> callback_set_ldn_packet; CallbackSet<ChatEntry> callback_set_chat_messages; CallbackSet<StatusMessageEntry> callback_set_status_messages; CallbackSet<RoomInformation> callback_set_room_information; @@ -108,6 +109,12 @@ public: void HandleProxyPackets(const ENetEvent* event); /** + * Extracts an LdnPacket from a received ENet packet. + * @param event The ENet event that was received. + */ + void HandleLdnPackets(const ENetEvent* event); + + /** * Extracts a chat entry from a received ENet packet and adds it to the chat queue. * @param event The ENet event that was received. */ @@ -166,6 +173,9 @@ void RoomMember::RoomMemberImpl::MemberLoop() { case IdProxyPacket: HandleProxyPackets(&event); break; + case IdLdnPacket: + HandleLdnPackets(&event); + break; case IdChatMessage: HandleChatPacket(&event); break; @@ -372,6 +382,27 @@ void RoomMember::RoomMemberImpl::HandleProxyPackets(const ENetEvent* event) { Invoke<ProxyPacket>(proxy_packet); } +void RoomMember::RoomMemberImpl::HandleLdnPackets(const ENetEvent* event) { + LDNPacket ldn_packet{}; + Packet packet; + packet.Append(event->packet->data, event->packet->dataLength); + + // Ignore the first byte, which is the message id. 
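// The reads below must mirror, field for field, what SendLdnPacket writes
// later in this file: u8 message id (IdLdnPacket), u8 packet type
// (LDNPacketType), IPv4Address local_ip, IPv4Address remote_ip,
// bool broadcast, then the variable-length data payload.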
+ packet.IgnoreBytes(sizeof(u8)); // Ignore the message type + + u8 packet_type; + packet.Read(packet_type); + ldn_packet.type = static_cast<LDNPacketType>(packet_type); + + packet.Read(ldn_packet.local_ip); + packet.Read(ldn_packet.remote_ip); + packet.Read(ldn_packet.broadcast); + + packet.Read(ldn_packet.data); + + Invoke<LDNPacket>(ldn_packet); +} + void RoomMember::RoomMemberImpl::HandleChatPacket(const ENetEvent* event) { Packet packet; packet.Append(event->packet->data, event->packet->dataLength); @@ -450,6 +481,11 @@ RoomMember::RoomMemberImpl::CallbackSet<ProxyPacket>& RoomMember::RoomMemberImpl } template <> +RoomMember::RoomMemberImpl::CallbackSet<LDNPacket>& RoomMember::RoomMemberImpl::Callbacks::Get() { + return callback_set_ldn_packet; +} + +template <> RoomMember::RoomMemberImpl::CallbackSet<RoomMember::State>& RoomMember::RoomMemberImpl::Callbacks::Get() { return callback_set_state; @@ -607,6 +643,21 @@ void RoomMember::SendProxyPacket(const ProxyPacket& proxy_packet) { room_member_impl->Send(std::move(packet)); } +void RoomMember::SendLdnPacket(const LDNPacket& ldn_packet) { + Packet packet; + packet.Write(static_cast<u8>(IdLdnPacket)); + + packet.Write(static_cast<u8>(ldn_packet.type)); + + packet.Write(ldn_packet.local_ip); + packet.Write(ldn_packet.remote_ip); + packet.Write(ldn_packet.broadcast); + + packet.Write(ldn_packet.data); + + room_member_impl->Send(std::move(packet)); +} + void RoomMember::SendChatMessage(const std::string& message) { Packet packet; packet.Write(static_cast<u8>(IdChatMessage)); @@ -663,6 +714,11 @@ RoomMember::CallbackHandle<ProxyPacket> RoomMember::BindOnProxyPacketReceived( return room_member_impl->Bind(callback); } +RoomMember::CallbackHandle<LDNPacket> RoomMember::BindOnLdnPacketReceived( + std::function<void(const LDNPacket&)> callback) { + return room_member_impl->Bind(std::move(callback)); +} + RoomMember::CallbackHandle<RoomInformation> RoomMember::BindOnRoomInformationChanged( std::function<void(const RoomInformation&)> callback) { return room_member_impl->Bind(callback); @@ -699,6 +755,7 @@ void RoomMember::Leave() { } template void RoomMember::Unbind(CallbackHandle<ProxyPacket>); +template void RoomMember::Unbind(CallbackHandle<LDNPacket>); template void RoomMember::Unbind(CallbackHandle<RoomMember::State>); template void RoomMember::Unbind(CallbackHandle<RoomMember::Error>); template void RoomMember::Unbind(CallbackHandle<RoomInformation>); diff --git a/src/network/room_member.h b/src/network/room_member.h index f578f7f6a..0d6417294 100644 --- a/src/network/room_member.h +++ b/src/network/room_member.h @@ -17,7 +17,24 @@ namespace Network { using AnnounceMultiplayerRoom::GameInfo; using AnnounceMultiplayerRoom::RoomInformation; -/// Information about the received WiFi packets. +enum class LDNPacketType : u8 { + Scan, + ScanResp, + Connect, + SyncNetwork, + Disconnect, + DestroyNetwork, +}; + +struct LDNPacket { + LDNPacketType type; + IPv4Address local_ip; + IPv4Address remote_ip; + bool broadcast; + std::vector<u8> data; +}; + +/// Information about the received proxy packets. struct ProxyPacket { SockAddrIn local_endpoint; SockAddrIn remote_endpoint; @@ -152,6 +169,12 @@ public: void SendProxyPacket(const ProxyPacket& packet); /** + * Sends an LDN packet to the room. + * @param packet The LDN packet to send. + */ + void SendLdnPacket(const LDNPacket& packet); + + /** * Sends a chat message to the room. * @param message The contents of the message.
*/ @@ -205,6 +228,16 @@ public: std::function<void(const ProxyPacket&)> callback); /** + * Binds a function to an event that will be triggered every time an LDNPacket is received. + * The function will be called every time the event is triggered. + * The callback function must not bind or unbind a function. Doing so will cause a deadlock. + * @param callback The function to call + * @return A handle used for removing the function from the registered list + */ + CallbackHandle<LDNPacket> BindOnLdnPacketReceived( + std::function<void(const LDNPacket&)> callback); + + /** * Binds a function to an event that will be triggered every time the RoomInformation changes. * The function will be called every time the event is triggered. * The callback function must not bind or unbind a function. Doing so will cause a deadlock. diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt index af8e51fe8..bcdd60db9 100644 --- a/src/shader_recompiler/CMakeLists.txt +++ b/src/shader_recompiler/CMakeLists.txt @@ -241,24 +241,14 @@ target_link_libraries(shader_recompiler PUBLIC common fmt::fmt sirit) if (MSVC) target_compile_options(shader_recompiler PRIVATE /W4 - /WX - /we4018 # 'expression' : signed/unsigned mismatch - /we4244 # 'argument' : conversion from 'type1' to 'type2', possible loss of data (floating-point) - /we4245 # 'conversion' : conversion from 'type1' to 'type2', signed/unsigned mismatch + + /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data - /we4267 # 'var' : conversion from 'size_t' to 'type', possible loss of data - /we4305 # 'context' : truncation from 'type1' to 'type2' /we4800 # Implicit conversion from 'type' to bool. Possible information loss - /we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior. ) else() target_compile_options(shader_recompiler PRIVATE - -Werror -Werror=conversion - -Werror=ignored-qualifiers - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable> - -Werror=unused-variable # Bracket depth determines maximum size of a fold expression in Clang since 9c9974c3ccb6. # And this in turn limits the size of a std::array.
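Stepping back to the network changes above: together they add a small LDN relay API to RoomMember. SendLdnPacket serializes an IdLdnPacket header followed by type, local_ip, remote_ip, broadcast and data; the room server either rebroadcasts the packet to everyone but the sender or routes it to the member whose fake_ip matches remote_ip; and BindOnLdnPacketReceived delivers the decoded LDNPacket to subscribers. A minimal caller might look like the following sketch (hypothetical code, not part of this diff; it assumes an already-joined RoomMember):

    // Hypothetical client of the LDN API introduced above (illustrative only).
    #include "network/room_member.h"

    void ExampleLdnScan(Network::RoomMember& member) {
        // Subscribe to incoming LDN traffic. As the header comment above warns,
        // the callback must not bind or unbind handlers itself (deadlock).
        auto handle = member.BindOnLdnPacketReceived([](const Network::LDNPacket& packet) {
            if (packet.type == Network::LDNPacketType::ScanResp) {
                // ... record the responding network described by packet.data ...
            }
        });

        // Broadcast a scan request; the room relays it to every other member.
        Network::LDNPacket scan{};
        scan.type = Network::LDNPacketType::Scan;
        scan.broadcast = true;
        member.SendLdnPacket(scan);

        member.Unbind(handle); // Unsubscribe explicitly when done.
    }

The lifecycle deliberately mirrors the existing ProxyPacket path, so the Bind/Unbind semantics are unchanged.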
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp index 97a6b383b..01f9abc71 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp @@ -175,7 +175,7 @@ bool IsReference(IR::Inst& inst) { } void PrecolorInst(IR::Inst& phi) { - // Insert phi moves before references to avoid overwritting other phis + // Insert phi moves before references to avoid overwriting other phis const size_t num_args{phi.NumArgs()}; for (size_t i = 0; i < num_args; ++i) { IR::Block& phi_block{*phi.PhiBlock(i)}; diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp index b5c08d611..7e8f37563 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp @@ -13,9 +13,6 @@ namespace Shader::Backend::GLASM { namespace { void GetCbuf(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset, std::string_view size) { - if (!binding.IsImmediate()) { - throw NotImplementedException("Indirect constant buffer loading"); - } const Register ret{ctx.reg_alloc.Define(inst)}; if (offset.type == Type::U32) { // Avoid reading arrays out of bounds, matching hardware's behavior @@ -24,7 +21,27 @@ void GetCbuf(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU return; } } - ctx.Add("LDC.{} {},c{}[{}];", size, ret, binding.U32(), offset); + + if (binding.IsImmediate()) { + ctx.Add("LDC.{} {},c{}[{}];", size, ret, binding.U32(), offset); + return; + } + + const ScalarU32 idx{ctx.reg_alloc.Consume(binding)}; + for (u32 i = 0; i < Info::MAX_INDIRECT_CBUFS; i++) { + ctx.Add("SEQ.S.CC RC.x,{},{};" + "IF NE.x;" + "LDC.{} {},c{}[{}];", + idx, i, size, ret, i, offset); + + if (i != Info::MAX_INDIRECT_CBUFS - 1) { + ctx.Add("ELSE;"); + } + } + + for (u32 i = 0; i < Info::MAX_INDIRECT_CBUFS; i++) { + ctx.Add("ENDIF;"); + } } bool IsInputArray(Stage stage) { diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp index 7094d8e42..1f4ffdd62 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp @@ -5,10 +5,6 @@ #include "shader_recompiler/backend/glasm/glasm_emit_context.h" #include "shader_recompiler/frontend/ir/value.h" -#ifdef _MSC_VER -#pragma warning(disable : 4100) -#endif - namespace Shader::Backend::GLASM { #define NotImplemented() throw NotImplementedException("GLASM instruction {}", __LINE__) diff --git a/src/shader_recompiler/backend/glsl/emit_glsl.cpp b/src/shader_recompiler/backend/glsl/emit_glsl.cpp index 76c18e488..e8a4390f6 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl.cpp @@ -101,7 +101,7 @@ bool IsReference(IR::Inst& inst) { } void PrecolorInst(IR::Inst& phi) { - // Insert phi moves before references to avoid overwritting other phis + // Insert phi moves before references to avoid overwriting other phis const size_t num_args{phi.NumArgs()}; for (size_t i = 0; i < num_args; ++i) { IR::Block& phi_block{*phi.PhiBlock(i)}; diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp index b03a8ba1e..9f1ed95a4 100644 --- 
a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp @@ -7,10 +7,6 @@ #include "shader_recompiler/backend/glsl/glsl_emit_context.h" #include "shader_recompiler/frontend/ir/value.h" -#ifdef _MSC_VER -#pragma warning(disable : 4100) -#endif - namespace Shader::Backend::GLSL { void EmitGetRegister(EmitContext& ctx) { diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp index 578bc8c1b..ce42475d4 100644 --- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp +++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp @@ -964,9 +964,9 @@ private: demote_endif_node.type = Type::EndIf; demote_endif_node.data.end_if.merge = return_block_it->data.block; - asl.insert(return_block_it, demote_endif_node); - asl.insert(return_block_it, demote_node); - asl.insert(return_block_it, demote_if_node); + const auto next_it_1 = asl.insert(return_block_it, demote_endif_node); + const auto next_it_2 = asl.insert(next_it_1, demote_node); + asl.insert(next_it_2, demote_if_node); } ObjectPool<Statement>& stmt_pool; diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp index 597112ba4..e8be58357 100644 --- a/src/shader_recompiler/ir_opt/texture_pass.cpp +++ b/src/shader_recompiler/ir_opt/texture_pass.cpp @@ -19,8 +19,10 @@ namespace { struct ConstBufferAddr { u32 index; u32 offset; + u32 shift_left; u32 secondary_index; u32 secondary_offset; + u32 secondary_shift_left; IR::U32 dynamic_offset; u32 count; bool has_secondary; @@ -172,19 +174,41 @@ bool IsTextureInstruction(const IR::Inst& inst) { return IndexedInstruction(inst) != IR::Opcode::Void; } -std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst); +std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env); -std::optional<ConstBufferAddr> Track(const IR::Value& value) { - return IR::BreadthFirstSearch(value, TryGetConstBuffer); +std::optional<ConstBufferAddr> Track(const IR::Value& value, Environment& env) { + return IR::BreadthFirstSearch( + value, [&env](const IR::Inst* inst) { return TryGetConstBuffer(inst, env); }); } -std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { +std::optional<u32> TryGetConstant(IR::Value& value, Environment& env) { + const IR::Inst* inst = value.InstRecursive(); + if (inst->GetOpcode() != IR::Opcode::GetCbufU32) { + return std::nullopt; + } + const IR::Value index{inst->Arg(0)}; + const IR::Value offset{inst->Arg(1)}; + if (!index.IsImmediate()) { + return std::nullopt; + } + if (!offset.IsImmediate()) { + return std::nullopt; + } + const auto index_number = index.U32(); + if (index_number != 1) { + return std::nullopt; + } + const auto offset_number = offset.U32(); + return env.ReadCbufValue(index_number, offset_number); +} + +std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env) { switch (inst->GetOpcode()) { default: return std::nullopt; case IR::Opcode::BitwiseOr32: { - std::optional lhs{Track(inst->Arg(0))}; - std::optional rhs{Track(inst->Arg(1))}; + std::optional lhs{Track(inst->Arg(0), env)}; + std::optional rhs{Track(inst->Arg(1), env)}; if (!lhs || !rhs) { return std::nullopt; } @@ -194,19 +218,62 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { if (lhs->count > 1 || rhs->count > 1) { return std::nullopt; } - if (lhs->index > 
rhs->index || lhs->offset > rhs->offset) { + if (lhs->shift_left > 0 || lhs->index > rhs->index || lhs->offset > rhs->offset) { std::swap(lhs, rhs); } return ConstBufferAddr{ .index = lhs->index, .offset = lhs->offset, + .shift_left = lhs->shift_left, .secondary_index = rhs->index, .secondary_offset = rhs->offset, + .secondary_shift_left = rhs->shift_left, .dynamic_offset = {}, .count = 1, .has_secondary = true, }; } + case IR::Opcode::ShiftLeftLogical32: { + const IR::Value shift{inst->Arg(1)}; + if (!shift.IsImmediate()) { + return std::nullopt; + } + std::optional lhs{Track(inst->Arg(0), env)}; + if (lhs) { + lhs->shift_left = shift.U32(); + } + return lhs; + break; + } + case IR::Opcode::BitwiseAnd32: { + IR::Value op1{inst->Arg(0)}; + IR::Value op2{inst->Arg(1)}; + if (op1.IsImmediate()) { + std::swap(op1, op2); + } + if (!op2.IsImmediate() && !op1.IsImmediate()) { + do { + auto try_index = TryGetConstant(op1, env); + if (try_index) { + op1 = op2; + op2 = IR::Value{*try_index}; + break; + } + auto try_index_2 = TryGetConstant(op2, env); + if (try_index_2) { + op2 = IR::Value{*try_index_2}; + break; + } + return std::nullopt; + } while (false); + } + std::optional lhs{Track(op1, env)}; + if (lhs) { + lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32())); + } + return lhs; + break; + } case IR::Opcode::GetCbufU32x2: case IR::Opcode::GetCbufU32: break; @@ -222,8 +289,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { return ConstBufferAddr{ .index = index.U32(), .offset = offset.U32(), + .shift_left = 0, .secondary_index = 0, .secondary_offset = 0, + .secondary_shift_left = 0, .dynamic_offset = {}, .count = 1, .has_secondary = false, @@ -247,8 +316,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { return ConstBufferAddr{ .index = index.U32(), .offset = base_offset, + .shift_left = 0, .secondary_index = 0, .secondary_offset = 0, + .secondary_shift_left = 0, .dynamic_offset = dynamic_offset, .count = 8, .has_secondary = false, @@ -258,7 +329,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { ConstBufferAddr addr; if (IsBindless(inst)) { - const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0))}; + const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0), env)}; if (!track_addr) { throw NotImplementedException("Failed to track bindless texture constant buffer"); } @@ -267,8 +338,10 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { addr = ConstBufferAddr{ .index = env.TextureBoundBuffer(), .offset = inst.Arg(0).U32(), + .shift_left = 0, .secondary_index = 0, .secondary_offset = 0, + .secondary_shift_left = 0, .dynamic_offset = {}, .count = 1, .has_secondary = false, @@ -284,8 +357,9 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) { const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index}; const u32 secondary_offset{cbuf.has_secondary ? 
cbuf.secondary_offset : cbuf.offset}; - const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)}; - const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)}; + const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left}; + const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset) + << cbuf.secondary_shift_left}; return env.ReadTextureType(lhs_raw | rhs_raw); } @@ -487,8 +561,10 @@ void TexturePass(Environment& env, IR::Program& program) { .has_secondary = cbuf.has_secondary, .cbuf_index = cbuf.index, .cbuf_offset = cbuf.offset, + .shift_left = cbuf.shift_left, .secondary_cbuf_index = cbuf.secondary_index, .secondary_cbuf_offset = cbuf.secondary_offset, + .secondary_shift_left = cbuf.secondary_shift_left, .count = cbuf.count, .size_shift = DESCRIPTOR_SIZE_SHIFT, }); @@ -499,8 +575,10 @@ void TexturePass(Environment& env, IR::Program& program) { .has_secondary = cbuf.has_secondary, .cbuf_index = cbuf.index, .cbuf_offset = cbuf.offset, + .shift_left = cbuf.shift_left, .secondary_cbuf_index = cbuf.secondary_index, .secondary_cbuf_offset = cbuf.secondary_offset, + .secondary_shift_left = cbuf.secondary_shift_left, .count = cbuf.count, .size_shift = DESCRIPTOR_SIZE_SHIFT, }); diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h index c3c586ada..81097bf1a 100644 --- a/src/shader_recompiler/shader_info.h +++ b/src/shader_recompiler/shader_info.h @@ -62,8 +62,10 @@ struct TextureBufferDescriptor { bool has_secondary; u32 cbuf_index; u32 cbuf_offset; + u32 shift_left; u32 secondary_cbuf_index; u32 secondary_cbuf_offset; + u32 secondary_shift_left; u32 count; u32 size_shift; }; @@ -86,8 +88,10 @@ struct TextureDescriptor { bool has_secondary; u32 cbuf_index; u32 cbuf_offset; + u32 shift_left; u32 secondary_cbuf_index; u32 secondary_cbuf_offset; + u32 secondary_shift_left; u32 count; u32 size_shift; }; diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp index 7c432a63c..284b2ae66 100644 --- a/src/tests/core/core_timing.cpp +++ b/src/tests/core/core_timing.cpp @@ -40,9 +40,6 @@ struct ScopeInit final { core_timing.SetMulticore(true); core_timing.Initialize([]() {}); } - ~ScopeInit() { - core_timing.Shutdown(); - } Core::Timing::CoreTiming core_timing; }; diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 5b3808351..106991969 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -4,7 +4,7 @@ add_subdirectory(host_shaders) if(LIBVA_FOUND) - set_source_files_properties(command_classes/codecs/codec.cpp + set_source_files_properties(host1x/codecs/codec.cpp PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1) list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES}) endif() @@ -15,26 +15,14 @@ add_library(video_core STATIC buffer_cache/buffer_cache.h cdma_pusher.cpp cdma_pusher.h - command_classes/codecs/codec.cpp - command_classes/codecs/codec.h - command_classes/codecs/h264.cpp - command_classes/codecs/h264.h - command_classes/codecs/vp8.cpp - command_classes/codecs/vp8.h - command_classes/codecs/vp9.cpp - command_classes/codecs/vp9.h - command_classes/codecs/vp9_types.h - command_classes/host1x.cpp - command_classes/host1x.h - command_classes/nvdec.cpp - command_classes/nvdec.h - command_classes/nvdec_common.h - command_classes/sync_manager.cpp - command_classes/sync_manager.h - command_classes/vic.cpp - command_classes/vic.h compatible_formats.cpp compatible_formats.h + control/channel_state.cpp + control/channel_state.h + 
control/channel_state_cache.cpp + control/channel_state_cache.h + control/scheduler.cpp + control/scheduler.h delayed_destruction_ring.h dirty_flags.cpp dirty_flags.h @@ -54,7 +42,31 @@ add_library(video_core STATIC engines/maxwell_3d.h engines/maxwell_dma.cpp engines/maxwell_dma.h + engines/puller.cpp + engines/puller.h framebuffer_config.h + host1x/codecs/codec.cpp + host1x/codecs/codec.h + host1x/codecs/h264.cpp + host1x/codecs/h264.h + host1x/codecs/vp8.cpp + host1x/codecs/vp8.h + host1x/codecs/vp9.cpp + host1x/codecs/vp9.h + host1x/codecs/vp9_types.h + host1x/control.cpp + host1x/control.h + host1x/host1x.cpp + host1x/host1x.h + host1x/nvdec.cpp + host1x/nvdec.h + host1x/nvdec_common.h + host1x/sync_manager.cpp + host1x/sync_manager.h + host1x/syncpoint_manager.cpp + host1x/syncpoint_manager.h + host1x/vic.cpp + host1x/vic.h macro/macro.cpp macro/macro.h macro/macro_hle.cpp @@ -70,6 +82,7 @@ add_library(video_core STATIC gpu_thread.h memory_manager.cpp memory_manager.h + pte_kind.h query_cache.h rasterizer_accelerated.cpp rasterizer_accelerated.h @@ -195,6 +208,7 @@ add_library(video_core STATIC texture_cache/render_targets.h texture_cache/samples_helper.h texture_cache/slot_vector.h + texture_cache/texture_cache.cpp texture_cache/texture_cache.h texture_cache/texture_cache_base.h texture_cache/types.h @@ -265,14 +279,8 @@ if (MSVC) else() target_compile_options(video_core PRIVATE -Werror=conversion - -Wno-error=sign-conversion - -Werror=pessimizing-move - -Werror=redundant-move - -Werror=type-limits - $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess> - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> - $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable> + -Wno-sign-conversion ) endif() diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index f015dae56..2ba33543c 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -5,7 +5,6 @@ #include <algorithm> #include <array> -#include <deque> #include <memory> #include <mutex> #include <numeric> @@ -23,6 +22,7 @@ #include "common/settings.h" #include "core/memory.h" #include "video_core/buffer_cache/buffer_base.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/delayed_destruction_ring.h" #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" @@ -56,7 +56,7 @@ using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFE using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>; template <typename P> -class BufferCache { +class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { // Page size for caching purposes. // This is unrelated to the CPU page size and it can be changed as it seems optimal. 
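The one-line class change above is the core of the buffer-cache refactor: BufferCache no longer holds references to a single Maxwell3D, KeplerCompute and MemoryManager, but inherits per-channel pointers from ChannelSetupCaches (added in video_core/control/channel_state_cache.h further down), which is why the hunks that follow turn maxwell3d. and gpu_memory. into maxwell3d-> and gpu_memory->. A schematic sketch of the pattern, illustrative only and not code from this diff:

    // Illustrative sketch: a cache derived from ChannelSetupCaches reads engine
    // state through inherited pointers that BindToChannel(id) repoints at the
    // currently bound channel; they are only valid while a channel is bound
    // (current_channel_id != UNSET_CHANNEL).
    #include "video_core/control/channel_state_cache.h"
    #include "video_core/engines/maxwell_3d.h"

    class ExampleCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
    public:
        u32 CurrentVertexStride(u32 index) const {
            return maxwell3d->regs.vertex_streams[index].stride;
        }
    };

Channel creation, binding and teardown go through CreateChannel, BindToChannel and EraseChannel, whose definitions appear in channel_state_cache.h/.inc later in this diff.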
@@ -116,10 +116,7 @@ public: static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB); explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, - Runtime& runtime_); + Core::Memory::Memory& cpu_memory_, Runtime& runtime_); void TickFrame(); @@ -129,7 +126,7 @@ public: void DownloadMemory(VAddr cpu_addr, u64 size); - bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer); + bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer); void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); @@ -353,7 +350,7 @@ private: void NotifyBufferDeletion(); - [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const; + [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const; [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, PixelFormat format); @@ -367,9 +364,6 @@ private: void ClearDownload(IntervalType subtract_interval); VideoCore::RasterizerInterface& rasterizer; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; - Tegra::MemoryManager& gpu_memory; Core::Memory::Memory& cpu_memory; SlotVector<Buffer> slot_buffers; @@ -444,12 +438,8 @@ private: template <class P> BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, - Runtime& runtime_) - : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, - kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} { + Core::Memory::Memory& cpu_memory_, Runtime& runtime_) + : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} { // Ensure the first slot is used for the null buffer void(slot_buffers.insert(runtime, NullBufferParams{})); common_ranges.clear(); @@ -552,8 +542,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) { template <class P> bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { - const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address); - const std::optional<VAddr> cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address); + const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address); + const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address); if (!cpu_src_address || !cpu_dest_address) { return false; } @@ -611,7 +601,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am template <class P> bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { - const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address); + const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address); if (!cpu_dst_address) { return false; } @@ -635,7 +625,7 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { template <class P> void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) { - const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); 
const Binding binding{ .cpu_addr = *cpu_addr, .size = size, @@ -673,7 +663,7 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) { if (is_indexed) { BindHostIndexBuffer(); } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) { runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count); } @@ -733,9 +723,9 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index, enabled_storage_buffers[stage] |= 1U << ssbo_index; written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index; - const auto& cbufs = maxwell3d.state.shader_stages[stage]; + const auto& cbufs = maxwell3d->state.shader_stages[stage]; const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset; - storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr); + storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written); } template <class P> @@ -770,12 +760,12 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index, enabled_compute_storage_buffers |= 1U << ssbo_index; written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index; - const auto& launch_desc = kepler_compute.launch_description; + const auto& launch_desc = kepler_compute->launch_description; ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0); const auto& cbufs = launch_desc.const_buffer_config; const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset; - compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr); + compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written); } template <class P> @@ -836,6 +826,19 @@ void BufferCache<P>::CommitAsyncFlushesHigh() { const bool is_accuracy_normal = Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal; + auto it = committed_ranges.begin(); + while (it != committed_ranges.end()) { + auto& current_intervals = *it; + auto next_it = std::next(it); + while (next_it != committed_ranges.end()) { + for (auto& interval : *next_it) { + current_intervals.subtract(interval); + } + next_it++; + } + it++; + } + boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads; u64 total_size_bytes = 0; u64 largest_copy = 0; @@ -991,19 +994,19 @@ void BufferCache<P>::BindHostIndexBuffer() { const u32 size = index_buffer.size; SynchronizeBuffer(buffer, index_buffer.cpu_addr, size); if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { - const u32 new_offset = offset + maxwell3d.regs.index_array.first * - maxwell3d.regs.index_array.FormatSizeInBytes(); + const u32 new_offset = offset + maxwell3d->regs.index_buffer.first * + maxwell3d->regs.index_buffer.FormatSizeInBytes(); runtime.BindIndexBuffer(buffer, new_offset, size); } else { - runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format, - maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count, - buffer, offset, size); + runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_buffer.format, + maxwell3d->regs.index_buffer.first, + maxwell3d->regs.index_buffer.count, buffer, offset, size); } } template <class P> void BufferCache<P>::BindHostVertexBuffers() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { const Binding& binding = 
vertex_buffers[index]; Buffer& buffer = slot_buffers[binding.buffer_id]; @@ -1014,7 +1017,7 @@ void BufferCache<P>::BindHostVertexBuffers() { } flags[Dirty::VertexBuffer0 + index] = false; - const u32 stride = maxwell3d.regs.vertex_array[index].stride; + const u32 stride = maxwell3d->regs.vertex_streams[index].stride; const u32 offset = buffer.Offset(binding.cpu_addr); runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride); } @@ -1154,7 +1157,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) { template <class P> void BufferCache<P>::BindHostTransformFeedbackBuffers() { - if (maxwell3d.regs.tfb_enabled == 0) { + if (maxwell3d->regs.transform_feedback_enabled == 0) { return; } for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { @@ -1239,16 +1242,19 @@ void BufferCache<P>::BindHostComputeTextureBuffers() { template <class P> void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) { - if (is_indexed) { - UpdateIndexBuffer(); - } - UpdateVertexBuffers(); - UpdateTransformFeedbackBuffers(); - for (size_t stage = 0; stage < NUM_STAGES; ++stage) { - UpdateUniformBuffers(stage); - UpdateStorageBuffers(stage); - UpdateTextureBuffers(stage); - } + do { + has_deleted_buffers = false; + if (is_indexed) { + UpdateIndexBuffer(); + } + UpdateVertexBuffers(); + UpdateTransformFeedbackBuffers(); + for (size_t stage = 0; stage < NUM_STAGES; ++stage) { + UpdateUniformBuffers(stage); + UpdateStorageBuffers(stage); + UpdateTextureBuffers(stage); + } + } while (has_deleted_buffers); } template <class P> @@ -1262,8 +1268,8 @@ template <class P> void BufferCache<P>::UpdateIndexBuffer() { // We have to check for the dirty flags and index count // The index count is currently changed without updating the dirty flags - const auto& index_array = maxwell3d.regs.index_array; - auto& flags = maxwell3d.dirty.flags; + const auto& index_array = maxwell3d->regs.index_buffer; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) { return; } @@ -1272,7 +1278,7 @@ void BufferCache<P>::UpdateIndexBuffer() { const GPUVAddr gpu_addr_begin = index_array.StartAddress(); const GPUVAddr gpu_addr_end = index_array.EndAddress(); - const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes(); const u32 size = std::min(address_size, draw_size); @@ -1289,8 +1295,8 @@ void BufferCache<P>::UpdateIndexBuffer() { template <class P> void BufferCache<P>::UpdateVertexBuffers() { - auto& flags = maxwell3d.dirty.flags; - if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) { + auto& flags = maxwell3d->dirty.flags; + if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) { return; } flags[Dirty::VertexBuffers] = false; @@ -1302,33 +1308,25 @@ void BufferCache<P>::UpdateVertexBuffers() { template <class P> void BufferCache<P>::UpdateVertexBuffer(u32 index) { - if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) { + if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) { return; } - const auto& array = maxwell3d.regs.vertex_array[index]; - const auto& limit = maxwell3d.regs.vertex_array_limit[index]; - const GPUVAddr gpu_addr_begin = array.StartAddress(); - const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1; - const std::optional<VAddr> cpu_addr = 
gpu_memory.GpuToCpuAddress(gpu_addr_begin); - u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); - if (address_size >= 64_MiB) { - // Reported vertex buffer size is very large, cap to mapped buffer size - GPUVAddr submapped_addr_end = gpu_addr_begin; - - const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)}; - if (ranges.size() > 0) { - const auto& [addr, size] = *ranges.begin(); - submapped_addr_end = addr + size; - } - - address_size = - std::min(address_size, static_cast<u32>(submapped_addr_end - gpu_addr_begin)); - } - const u32 size = address_size; // TODO: Analyze stride and number of vertices - if (array.enable == 0 || size == 0 || !cpu_addr) { + const auto& array = maxwell3d->regs.vertex_streams[index]; + const auto& limit = maxwell3d->regs.vertex_stream_limits[index]; + const GPUVAddr gpu_addr_begin = array.Address(); + const GPUVAddr gpu_addr_end = limit.Address() + 1; + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); + u32 address_size = static_cast<u32>( + std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max()))); + if (array.enable == 0 || address_size == 0 || !cpu_addr) { vertex_buffers[index] = NULL_BINDING; return; } + if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) { + address_size = + static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size)); + } + const u32 size = address_size; // TODO: Analyze stride and number of vertices vertex_buffers[index] = Binding{ .cpu_addr = *cpu_addr, .size = size, @@ -1382,7 +1380,7 @@ void BufferCache<P>::UpdateTextureBuffers(size_t stage) { template <class P> void BufferCache<P>::UpdateTransformFeedbackBuffers() { - if (maxwell3d.regs.tfb_enabled == 0) { + if (maxwell3d->regs.transform_feedback_enabled == 0) { return; } for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { @@ -1392,11 +1390,11 @@ void BufferCache<P>::UpdateTransformFeedbackBuffers() { template <class P> void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) { - const auto& binding = maxwell3d.regs.tfb_bindings[index]; - const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset; - const u32 size = binding.buffer_size; - const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); - if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) { + const auto& binding = maxwell3d->regs.transform_feedback.buffers[index]; + const GPUVAddr gpu_addr = binding.Address() + binding.start_offset; + const u32 size = binding.size; + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + if (binding.enable == 0 || size == 0 || !cpu_addr) { transform_feedback_buffers[index] = NULL_BINDING; return; } @@ -1414,10 +1412,10 @@ void BufferCache<P>::UpdateComputeUniformBuffers() { ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) { Binding& binding = compute_uniform_buffers[index]; binding = NULL_BINDING; - const auto& launch_desc = kepler_compute.launch_description; + const auto& launch_desc = kepler_compute->launch_description; if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { const auto& cbuf = launch_desc.const_buffer_config[index]; - const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address()); + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address()); if (cpu_addr) { binding.cpu_addr = *cpu_addr; binding.size = cbuf.size; @@ -1567,6 +1565,8 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 
wanted_size) { const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); const u32 size = static_cast<u32>(overlap.end - overlap.begin); const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); + auto& new_buffer = slot_buffers[new_buffer_id]; + runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0); for (const BufferId overlap_id : overlap.ids) { JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap); } @@ -1695,7 +1695,7 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes, template <class P> bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size, - std::span<u8> inlined_buffer) { + std::span<const u8> inlined_buffer) { const bool is_dirty = IsRegionRegistered(dest_address, copy_size); if (!is_dirty) { return false; @@ -1831,7 +1831,7 @@ void BufferCache<P>::NotifyBufferDeletion() { dirty_uniform_buffers.fill(~u32{0}); uniform_buffer_binding_sizes.fill({}); } - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; flags[Dirty::IndexBuffer] = true; flags[Dirty::VertexBuffers] = true; for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { @@ -1841,16 +1841,18 @@ void BufferCache<P>::NotifyBufferDeletion() { } template <class P> -typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr) const { - const GPUVAddr gpu_addr = gpu_memory.Read<u64>(ssbo_addr); - const u32 size = gpu_memory.Read<u32>(ssbo_addr + 8); - const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); +typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, + bool is_written) const { + const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr); + const u32 size = gpu_memory->Read<u32>(ssbo_addr + 8); + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr || size == 0) { return NULL_BINDING; } + const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE); const Binding binding{ .cpu_addr = *cpu_addr, - .size = size, + .size = is_written ? 
size : static_cast<u32>(cpu_end - *cpu_addr), .buffer_id = BufferId{}, }; return binding; @@ -1859,7 +1861,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s template <class P> typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding( GPUVAddr gpu_addr, u32 size, PixelFormat format) { - const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); TextureBufferBinding binding; if (!cpu_addr || size == 0) { binding.cpu_addr = 0; diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp index 8e890a85e..28a2d2090 100644 --- a/src/video_core/cdma_pusher.cpp +++ b/src/video_core/cdma_pusher.cpp @@ -2,20 +2,22 @@ // SPDX-License-Identifier: MIT #include <bit> -#include "command_classes/host1x.h" -#include "command_classes/nvdec.h" -#include "command_classes/vic.h" #include "video_core/cdma_pusher.h" -#include "video_core/command_classes/sync_manager.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/gpu.h" +#include "video_core/host1x/control.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/nvdec.h" +#include "video_core/host1x/nvdec_common.h" +#include "video_core/host1x/sync_manager.h" +#include "video_core/host1x/vic.h" +#include "video_core/memory_manager.h" namespace Tegra { -CDmaPusher::CDmaPusher(GPU& gpu_) - : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)), - vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)), - host1x_processor(std::make_unique<Host1x>(gpu)), - sync_manager(std::make_unique<SyncptIncrManager>(gpu)) {} +CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_) + : host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)), + vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)), + host1x_processor(std::make_unique<Host1x::Control>(host1x)), + sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {} CDmaPusher::~CDmaPusher() = default; @@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) { case ThiMethod::SetMethod1: LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})", static_cast<u32>(vic_thi_state.method_0), data); - vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data); + vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0), + data); break; default: break; } break; - case ChClassId::Host1x: + case ChClassId::Control: // This device is mainly for syncpoint synchronization LOG_DEBUG(Service_NVDRV, "Host1X Class Method"); - host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data); + host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data); break; default: UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class)); diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h index d6ffef95f..83112dfce 100644 --- a/src/video_core/cdma_pusher.h +++ b/src/video_core/cdma_pusher.h @@ -12,11 +12,13 @@ namespace Tegra { -class GPU; +namespace Host1x { +class Control; class Host1x; class Nvdec; class SyncptIncrManager; class Vic; +} // namespace Host1x enum class ChSubmissionMode : u32 { SetClass = 0, @@ -30,7 +32,7 @@ enum class ChSubmissionMode : u32 { enum class ChClassId : u32 { NoClass = 0x0, - Host1x = 0x1, + Control = 0x1, VideoEncodeMpeg = 0x20, VideoEncodeNvEnc = 0x21, VideoStreamingVi = 0x30, @@ -88,7 +90,7 @@ enum class 
ThiMethod : u32 { class CDmaPusher { public: - explicit CDmaPusher(GPU& gpu_); + explicit CDmaPusher(Host1x::Host1x& host1x); ~CDmaPusher(); /// Process the command entry @@ -101,11 +103,11 @@ private: /// Write arguments value to the ThiRegisters member at the specified offset void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument); - GPU& gpu; - std::shared_ptr<Tegra::Nvdec> nvdec_processor; - std::unique_ptr<Tegra::Vic> vic_processor; - std::unique_ptr<Tegra::Host1x> host1x_processor; - std::unique_ptr<SyncptIncrManager> sync_manager; + Host1x::Host1x& host1x; + std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor; + std::unique_ptr<Tegra::Host1x::Vic> vic_processor; + std::unique_ptr<Tegra::Host1x::Control> host1x_processor; + std::unique_ptr<Host1x::SyncptIncrManager> sync_manager; ChClassId current_class{}; ThiRegisters vic_thi_state{}; ThiRegisters nvdec_thi_state{}; diff --git a/src/video_core/command_classes/host1x.cpp b/src/video_core/command_classes/host1x.cpp deleted file mode 100644 index 11855fe10..000000000 --- a/src/video_core/command_classes/host1x.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#include "common/assert.h" -#include "video_core/command_classes/host1x.h" -#include "video_core/gpu.h" - -Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {} - -Tegra::Host1x::~Host1x() = default; - -void Tegra::Host1x::ProcessMethod(Method method, u32 argument) { - switch (method) { - case Method::LoadSyncptPayload32: - syncpoint_value = argument; - break; - case Method::WaitSyncpt: - case Method::WaitSyncpt32: - Execute(argument); - break; - default: - UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method)); - break; - } -} - -void Tegra::Host1x::Execute(u32 data) { - gpu.WaitFence(data, syncpoint_value); -} diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp new file mode 100644 index 000000000..cdecc3a91 --- /dev/null +++ b/src/video_core/control/channel_state.cpp @@ -0,0 +1,40 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common/assert.h" +#include "video_core/control/channel_state.h" +#include "video_core/dma_pusher.h" +#include "video_core/engines/fermi_2d.h" +#include "video_core/engines/kepler_compute.h" +#include "video_core/engines/kepler_memory.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/maxwell_dma.h" +#include "video_core/engines/puller.h" +#include "video_core/memory_manager.h" + +namespace Tegra::Control { + +ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {} + +void ChannelState::Init(Core::System& system, GPU& gpu) { + ASSERT(memory_manager); + dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this); + maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager); + fermi_2d = std::make_unique<Engines::Fermi2D>(); + kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager); + maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager); + kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager); + initialized = true; +} + +void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) { + dma_pusher->BindRasterizer(rasterizer); + memory_manager->BindRasterizer(rasterizer); + maxwell_3d->BindRasterizer(rasterizer); + 
fermi_2d->BindRasterizer(rasterizer); + kepler_memory->BindRasterizer(rasterizer); + kepler_compute->BindRasterizer(rasterizer); + maxwell_dma->BindRasterizer(rasterizer); +} + +} // namespace Tegra::Control diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h new file mode 100644 index 000000000..3a7b9872c --- /dev/null +++ b/src/video_core/control/channel_state.h @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <memory> + +#include "common/common_types.h" + +namespace Core { +class System; +} + +namespace VideoCore { +class RasterizerInterface; +} + +namespace Tegra { + +class GPU; + +namespace Engines { +class Puller; +class Fermi2D; +class Maxwell3D; +class MaxwellDMA; +class KeplerCompute; +class KeplerMemory; +} // namespace Engines + +class MemoryManager; +class DmaPusher; + +namespace Control { + +struct ChannelState { + explicit ChannelState(s32 bind_id); + ChannelState(const ChannelState& state) = delete; + ChannelState& operator=(const ChannelState&) = delete; + ChannelState(ChannelState&& other) noexcept = default; + ChannelState& operator=(ChannelState&& other) noexcept = default; + + void Init(Core::System& system, GPU& gpu); + + void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); + + s32 bind_id = -1; + /// 3D engine + std::unique_ptr<Engines::Maxwell3D> maxwell_3d; + /// 2D engine + std::unique_ptr<Engines::Fermi2D> fermi_2d; + /// Compute engine + std::unique_ptr<Engines::KeplerCompute> kepler_compute; + /// DMA engine + std::unique_ptr<Engines::MaxwellDMA> maxwell_dma; + /// Inline memory engine + std::unique_ptr<Engines::KeplerMemory> kepler_memory; + + std::shared_ptr<MemoryManager> memory_manager; + + std::unique_ptr<DmaPusher> dma_pusher; + + bool initialized{}; +}; + +} // namespace Control + +} // namespace Tegra diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp new file mode 100644 index 000000000..4ebeb6356 --- /dev/null +++ b/src/video_core/control/channel_state_cache.cpp @@ -0,0 +1,14 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "video_core/control/channel_state_cache.inc" + +namespace VideoCommon { + +ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state) + : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute}, + gpu_memory{*channel_state.memory_manager} {} + +template class VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>; + +} // namespace VideoCommon diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h new file mode 100644 index 000000000..584a0c26c --- /dev/null +++ b/src/video_core/control/channel_state_cache.h @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <deque> +#include <limits> +#include <mutex> +#include <optional> +#include <unordered_map> +#include <vector> + +#include "common/common_types.h" + +namespace Tegra { + +namespace Engines { +class Maxwell3D; +class KeplerCompute; +} // namespace Engines + +class MemoryManager; + +namespace Control { +struct ChannelState; +} + +} // namespace Tegra + +namespace VideoCommon { + +class ChannelInfo { +public: + ChannelInfo() = delete; + explicit ChannelInfo(Tegra::Control::ChannelState& state); + ChannelInfo(const ChannelInfo& 
state) = delete; + ChannelInfo& operator=(const ChannelInfo&) = delete; + ChannelInfo(ChannelInfo&& other) = default; + ChannelInfo& operator=(ChannelInfo&& other) = default; + + Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::Engines::KeplerCompute& kepler_compute; + Tegra::MemoryManager& gpu_memory; +}; + +template <class P> +class ChannelSetupCaches { +public: + /// Operations for setting the channel of execution. + virtual ~ChannelSetupCaches(); + + /// Create channel state. + virtual void CreateChannel(Tegra::Control::ChannelState& channel); + + /// Bind a channel for execution. + void BindToChannel(s32 id); + + /// Erase channel's state. + void EraseChannel(s32 id); + + Tegra::MemoryManager* GetFromID(size_t id) const { + std::unique_lock<std::mutex> lk(config_mutex); + const auto ref = address_spaces.find(id); + return ref->second.gpu_memory; + } + + std::optional<size_t> getStorageID(size_t id) const { + std::unique_lock<std::mutex> lk(config_mutex); + const auto ref = address_spaces.find(id); + if (ref == address_spaces.end()) { + return std::nullopt; + } + return ref->second.storage_id; + } + +protected: + static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()}; + + P* channel_state; + size_t current_channel_id{UNSET_CHANNEL}; + size_t current_address_space{}; + Tegra::Engines::Maxwell3D* maxwell3d; + Tegra::Engines::KeplerCompute* kepler_compute; + Tegra::MemoryManager* gpu_memory; + + std::deque<P> channel_storage; + std::deque<size_t> free_channel_ids; + std::unordered_map<s32, size_t> channel_map; + std::vector<size_t> active_channel_ids; + struct AddresSpaceRef { + size_t ref_count; + size_t storage_id; + Tegra::MemoryManager* gpu_memory; + }; + std::unordered_map<size_t, AddresSpaceRef> address_spaces; + mutable std::mutex config_mutex; + + virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {} +}; + +} // namespace VideoCommon diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc new file mode 100644 index 000000000..460313893 --- /dev/null +++ b/src/video_core/control/channel_state_cache.inc @@ -0,0 +1,86 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include <algorithm> + +#include "video_core/control/channel_state.h" +#include "video_core/control/channel_state_cache.h" +#include "video_core/engines/kepler_compute.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/memory_manager.h" + +namespace VideoCommon { + +template <class P> +ChannelSetupCaches<P>::~ChannelSetupCaches() = default; + +template <class P> +void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) { + std::unique_lock<std::mutex> lk(config_mutex); + ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0); + auto new_id = [this, &channel]() { + if (!free_channel_ids.empty()) { + auto id = free_channel_ids.front(); + free_channel_ids.pop_front(); + new (&channel_storage[id]) P(channel); + return id; + } + channel_storage.emplace_back(channel); + return channel_storage.size() - 1; + }(); + channel_map.emplace(channel.bind_id, new_id); + if (current_channel_id != UNSET_CHANNEL) { + channel_state = &channel_storage[current_channel_id]; + } + active_channel_ids.push_back(new_id); + auto as_it = address_spaces.find(channel.memory_manager->GetID()); + if (as_it != address_spaces.end()) { + as_it->second.ref_count++; + return; + } + AddresSpaceRef new_gpu_mem_ref{ + .ref_count = 1, + .storage_id
= address_spaces.size(), + .gpu_memory = channel.memory_manager.get(), + }; + address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref); + OnGPUASRegister(channel.memory_manager->GetID()); +} + +/// Bind a channel for execution. +template <class P> +void ChannelSetupCaches<P>::BindToChannel(s32 id) { + std::unique_lock<std::mutex> lk(config_mutex); + auto it = channel_map.find(id); + ASSERT(it != channel_map.end() && id >= 0); + current_channel_id = it->second; + channel_state = &channel_storage[current_channel_id]; + maxwell3d = &channel_state->maxwell3d; + kepler_compute = &channel_state->kepler_compute; + gpu_memory = &channel_state->gpu_memory; + current_address_space = gpu_memory->GetID(); +} + +/// Erase channel's channel_state. +template <class P> +void ChannelSetupCaches<P>::EraseChannel(s32 id) { + std::unique_lock<std::mutex> lk(config_mutex); + const auto it = channel_map.find(id); + ASSERT(it != channel_map.end() && id >= 0); + const auto this_id = it->second; + free_channel_ids.push_back(this_id); + channel_map.erase(it); + if (this_id == current_channel_id) { + current_channel_id = UNSET_CHANNEL; + channel_state = nullptr; + maxwell3d = nullptr; + kepler_compute = nullptr; + gpu_memory = nullptr; + } else if (current_channel_id != UNSET_CHANNEL) { + channel_state = &channel_storage[current_channel_id]; + } + active_channel_ids.erase( + std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id)); +} + +} // namespace VideoCommon diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp new file mode 100644 index 000000000..f7cbe204e --- /dev/null +++ b/src/video_core/control/scheduler.cpp @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include <memory> + +#include "common/assert.h" +#include "video_core/control/channel_state.h" +#include "video_core/control/scheduler.h" +#include "video_core/gpu.h" + +namespace Tegra::Control { +Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {} + +Scheduler::~Scheduler() = default; + +void Scheduler::Push(s32 channel, CommandList&& entries) { + std::unique_lock lk(scheduling_guard); + auto it = channels.find(channel); + ASSERT(it != channels.end()); + auto channel_state = it->second; + gpu.BindChannel(channel_state->bind_id); + channel_state->dma_pusher->Push(std::move(entries)); + channel_state->dma_pusher->DispatchCalls(); +} + +void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) { + s32 channel = new_channel->bind_id; + std::unique_lock lk(scheduling_guard); + channels.emplace(channel, new_channel); +} + +} // namespace Tegra::Control diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h new file mode 100644 index 000000000..44addf61c --- /dev/null +++ b/src/video_core/control/scheduler.h @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <memory> +#include <mutex> +#include <unordered_map> + +#include "video_core/dma_pusher.h" + +namespace Tegra { + +class GPU; + +namespace Control { + +struct ChannelState; + +class Scheduler { +public: + explicit Scheduler(GPU& gpu_); + ~Scheduler(); + + void Push(s32 channel, CommandList&& entries); + + void DeclareChannel(std::shared_ptr<ChannelState> new_channel); + +private: + std::unordered_map<s32, std::shared_ptr<ChannelState>> channels; + std::mutex scheduling_guard; + GPU& gpu; +}; + +} // namespace Control + +} // 
namespace Tegra diff --git a/src/video_core/dirty_flags.cpp b/src/video_core/dirty_flags.cpp index 9dc4341f0..c2ecc12f5 100644 --- a/src/video_core/dirty_flags.cpp +++ b/src/video_core/dirty_flags.cpp @@ -17,21 +17,23 @@ using Tegra::Engines::Maxwell3D; void SetupDirtyVertexBuffers(Maxwell3D::DirtyState::Tables& tables) { static constexpr std::size_t num_array = 3; for (std::size_t i = 0; i < Maxwell3D::Regs::NumVertexArrays; ++i) { - const std::size_t array_offset = OFF(vertex_array) + i * NUM(vertex_array[0]); - const std::size_t limit_offset = OFF(vertex_array_limit) + i * NUM(vertex_array_limit[0]); + const std::size_t array_offset = OFF(vertex_streams) + i * NUM(vertex_streams[0]); + const std::size_t limit_offset = + OFF(vertex_stream_limits) + i * NUM(vertex_stream_limits[0]); FillBlock(tables, array_offset, num_array, VertexBuffer0 + i, VertexBuffers); - FillBlock(tables, limit_offset, NUM(vertex_array_limit), VertexBuffer0 + i, VertexBuffers); + FillBlock(tables, limit_offset, NUM(vertex_stream_limits), VertexBuffer0 + i, + VertexBuffers); } } void SetupIndexBuffer(Maxwell3D::DirtyState::Tables& tables) { - FillBlock(tables[0], OFF(index_array), NUM(index_array), IndexBuffer); + FillBlock(tables[0], OFF(index_buffer), NUM(index_buffer), IndexBuffer); } void SetupDirtyDescriptors(Maxwell3D::DirtyState::Tables& tables) { - FillBlock(tables[0], OFF(tic), NUM(tic), Descriptors); - FillBlock(tables[0], OFF(tsc), NUM(tsc), Descriptors); + FillBlock(tables[0], OFF(tex_header), NUM(tex_header), Descriptors); + FillBlock(tables[0], OFF(tex_sampler), NUM(tex_sampler), Descriptors); } void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) { @@ -42,7 +44,7 @@ void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) { FillBlock(tables[0], begin + rt * num_per_rt, num_per_rt, ColorBuffer0 + rt); } FillBlock(tables[1], begin, num, RenderTargets); - FillBlock(tables[0], OFF(render_area), NUM(render_area), RenderTargets); + FillBlock(tables[0], OFF(surface_clip), NUM(surface_clip), RenderTargets); tables[0][OFF(rt_control)] = RenderTargets; tables[1][OFF(rt_control)] = RenderTargetControl; @@ -52,15 +54,15 @@ void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) { const u8 flag = zeta_flags[i]; auto& table = tables[i]; table[OFF(zeta_enable)] = flag; - table[OFF(zeta_width)] = flag; - table[OFF(zeta_height)] = flag; + table[OFF(zeta_size.width)] = flag; + table[OFF(zeta_size.height)] = flag; FillBlock(table, OFF(zeta), NUM(zeta), flag); } } void SetupDirtyShaders(Maxwell3D::DirtyState::Tables& tables) { - FillBlock(tables[0], OFF(shader_config[0]), - NUM(shader_config[0]) * Maxwell3D::Regs::MaxShaderProgram, Shaders); + FillBlock(tables[0], OFF(pipelines), NUM(pipelines[0]) * Maxwell3D::Regs::MaxShaderProgram, + Shaders); } } // Anonymous namespace diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index 29b8582ab..9835e3ac1 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -12,7 +12,10 @@ namespace Tegra { -DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {} +DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_, + Control::ChannelState& channel_state_) + : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_, + *this, channel_state_} {} DmaPusher::~DmaPusher() = default; @@ -21,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128, void DmaPusher::DispatchCalls() { 
MICROPROFILE_SCOPE(DispatchCalls); - gpu.SyncGuestHost(); - dma_pushbuffer_subindex = 0; dma_state.is_last_call = true; @@ -33,7 +34,6 @@ void DmaPusher::DispatchCalls() { } } gpu.FlushCommands(); - gpu.SyncGuestHost(); gpu.OnCommandListEnd(); } @@ -76,11 +76,11 @@ bool DmaPusher::Step() { // Push buffer non-empty, read a word command_headers.resize(command_list_header.size); if (Settings::IsGPULevelHigh()) { - gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(), - command_list_header.size * sizeof(u32)); + memory_manager.ReadBlock(dma_get, command_headers.data(), + command_list_header.size * sizeof(u32)); } else { - gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(), - command_list_header.size * sizeof(u32)); + memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(), + command_list_header.size * sizeof(u32)); } } for (std::size_t index = 0; index < command_headers.size();) { @@ -154,7 +154,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) { void DmaPusher::CallMethod(u32 argument) const { if (dma_state.method < non_puller_methods) { - gpu.CallMethod(GPU::MethodCall{ + puller.CallPullerMethod(Engines::Puller::MethodCall{ dma_state.method, argument, dma_state.subchannel, @@ -168,12 +168,16 @@ void DmaPusher::CallMethod(u32 argument) const { void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const { if (dma_state.method < non_puller_methods) { - gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, - dma_state.method_count); + puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, + dma_state.method_count); } else { subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start, num_methods, dma_state.method_count); } } +void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) { + puller.BindRasterizer(rasterizer); +} + } // namespace Tegra diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 872fd146a..938f0f11c 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -10,6 +10,7 @@ #include "common/bit_field.h" #include "common/common_types.h" #include "video_core/engines/engine_interface.h" +#include "video_core/engines/puller.h" namespace Core { class System; @@ -17,7 +18,12 @@ class System; namespace Tegra { +namespace Control { +struct ChannelState; +} + class GPU; +class MemoryManager; enum class SubmissionMode : u32 { IncreasingOld = 0, @@ -31,24 +37,32 @@ enum class SubmissionMode : u32 { // Note that, traditionally, methods are treated as 4-byte addressable locations, and hence // their numbers are written down multiplied by 4 in Docs. Here we are not multiply by 4. // So the values you see in docs might be multiplied by 4. 
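// A small sketch of the addressing convention in the note above, assuming the
// open-gpu-doc listings give byte offsets while this enum holds word-sized
// method numbers; converting is a divide by four:
#include <cstdint>

constexpr std::uint32_t DocByteOffsetToMethod(std::uint32_t byte_offset) {
    return byte_offset / 4;
}
// e.g. a method written as 0x10 in the docs corresponds to 0x4 here.
static_assert(DocByteOffsetToMethod(0x10) == 0x4);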
+// Register documentation: +// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h +// +// Register Description (approx): +// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt enum class BufferMethods : u32 { BindObject = 0x0, + Illegal = 0x1, Nop = 0x2, SemaphoreAddressHigh = 0x4, SemaphoreAddressLow = 0x5, - SemaphoreSequence = 0x6, - SemaphoreTrigger = 0x7, - NotifyIntr = 0x8, + SemaphoreSequencePayload = 0x6, + SemaphoreOperation = 0x7, + NonStallInterrupt = 0x8, WrcacheFlush = 0x9, - Unk28 = 0xA, - UnkCacheFlush = 0xB, + MemOpA = 0xA, + MemOpB = 0xB, + MemOpC = 0xC, + MemOpD = 0xD, RefCnt = 0x14, SemaphoreAcquire = 0x1A, SemaphoreRelease = 0x1B, - FenceValue = 0x1C, - FenceAction = 0x1D, - WaitForInterrupt = 0x1E, - Unk7c = 0x1F, + SyncpointPayload = 0x1C, + SyncpointOperation = 0x1D, + WaitForIdle = 0x1E, + CRCCheck = 0x1F, Yield = 0x20, NonPullerMethods = 0x40, }; @@ -102,7 +116,8 @@ struct CommandList final { */ class DmaPusher final { public: - explicit DmaPusher(Core::System& system_, GPU& gpu_); + explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_, + Control::ChannelState& channel_state_); ~DmaPusher(); void Push(CommandList&& entries) { @@ -115,6 +130,8 @@ public: subchannels[subchannel_id] = engine; } + void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); + private: static constexpr u32 non_puller_methods = 0x40; static constexpr u32 max_subchannels = 8; @@ -148,6 +165,8 @@ private: GPU& gpu; Core::System& system; + MemoryManager& memory_manager; + mutable Engines::Puller puller; }; } // namespace Tegra diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp index 6ff5b1eca..a34819234 100644 --- a/src/video_core/engines/engine_upload.cpp +++ b/src/video_core/engines/engine_upload.cpp @@ -3,6 +3,7 @@ #include <cstring> +#include "common/algorithm.h" #include "common/assert.h" #include "video_core/engines/engine_upload.h" #include "video_core/memory_manager.h" @@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) { if (!is_last_call) { return; } + ProcessData(inner_buffer); +} + +void State::ProcessData(const u32* data, size_t num_data) { + std::span<const u8> read_buffer(reinterpret_cast<const u8*>(data), num_data * sizeof(u32)); + ProcessData(read_buffer); +} + +void State::ProcessData(std::span<const u8> read_buffer) { const GPUVAddr address{regs.dest.Address()}; if (is_linear) { - rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer); + if (regs.line_count == 1) { + rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer); + } else { + for (u32 line = 0; line < regs.line_count; ++line) { + const GPUVAddr dest_line = address + static_cast<size_t>(line) * regs.dest.pitch; + memory_manager.WriteBlockUnsafe( + dest_line, read_buffer.data() + static_cast<size_t>(line) * regs.line_length_in, + regs.line_length_in); + } + memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count); + } } else { - UNIMPLEMENTED_IF(regs.dest.z != 0); - UNIMPLEMENTED_IF(regs.dest.depth != 1); - UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0); - UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0); + u32 width = regs.dest.width; + u32 x_elements = regs.line_length_in; + u32 x_offset = regs.dest.x; + const u32 bpp_shift = Common::FoldRight( + 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); }, 
+ width, x_elements, x_offset, static_cast<u32>(address)); + width >>= bpp_shift; + x_elements >>= bpp_shift; + x_offset >>= bpp_shift; + const u32 bytes_per_pixel = 1U << bpp_shift; const std::size_t dst_size = Tegra::Texture::CalculateSize( - true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0); + true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth, + regs.dest.BlockHeight(), regs.dest.BlockDepth()); tmp_buffer.resize(dst_size); memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size); - Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y, - regs.dest.BlockHeight(), copy_size, inner_buffer.data(), - tmp_buffer.data()); + Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width, + regs.dest.height, regs.dest.depth, x_offset, regs.dest.y, + x_elements, regs.line_count, regs.dest.BlockHeight(), + regs.dest.BlockDepth(), regs.line_length_in); memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size); } } diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h index 94ff3314a..f08f6e36a 100644 --- a/src/video_core/engines/engine_upload.h +++ b/src/video_core/engines/engine_upload.h @@ -3,6 +3,7 @@ #pragma once +#include <span> #include <vector> #include "common/bit_field.h" #include "common/common_types.h" @@ -33,7 +34,7 @@ struct Registers { u32 width; u32 height; u32 depth; - u32 z; + u32 layer; u32 x; u32 y; @@ -62,11 +63,14 @@ public: void ProcessExec(bool is_linear_); void ProcessData(u32 data, bool is_last_call); + void ProcessData(const u32* data, size_t num_data); /// Binds a rasterizer to this engine. void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); private: + void ProcessData(std::span<const u8> read_buffer); + u32 write_offset = 0; u32 copy_size = 0; std::vector<u8> inner_buffer; diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 5db254d94..7c50bdbe0 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp @@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal } case KEPLER_COMPUTE_REG_INDEX(data_upload): { upload_state.ProcessData(method_argument, is_last_call); - if (is_last_call) { - } break; } case KEPLER_COMPUTE_REG_INDEX(launch): @@ -50,8 +48,15 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) { - for (std::size_t i = 0; i < amount; i++) { - CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); + switch (method) { + case KEPLER_COMPUTE_REG_INDEX(data_upload): + upload_state.ProcessData(base_start, static_cast<size_t>(amount)); + return; + default: + for (std::size_t i = 0; i < amount; i++) { + CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); + } + break; } } diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index e2b029542..a3fbab1e5 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp @@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call } case KEPLERMEMORY_REG_INDEX(data): { upload_state.ProcessData(method_argument, is_last_call); - if (is_last_call) { - } break; } } @@ -42,8 +40,15 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, 
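// A sketch of the alignment trick in State::ProcessData above: the widest
// power-of-two texel size (capped at 16 bytes) that divides the width, the x
// extent, the x offset, and the destination address is found by folding
// std::countr_zero over those values. Fold below is a stand-in for
// Common::FoldRight:
#include <algorithm>
#include <bit>
#include <cstdint>

template <typename T, typename F, typename... Args>
constexpr T Fold(T init, F&& f, Args... args) {
    ((init = f(init, args)), ...);
    return init;
}

constexpr std::uint32_t BppShift(std::uint32_t width, std::uint32_t x_elements,
                                 std::uint32_t x_offset, std::uint32_t address) {
    return Fold(4U, [](std::uint32_t acc, std::uint32_t v) {
        return std::min(acc, static_cast<std::uint32_t>(std::countr_zero(v)));
    }, width, x_elements, x_offset, address);
}
// countr_zero(0) saturates far above the 16-byte cap, so zero values never shrink it.
static_assert(BppShift(64, 16, 0, 0) == 4);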
bool is_last_call void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) { - for (std::size_t i = 0; i < amount; i++) { - CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); + switch (method) { + case KEPLERMEMORY_REG_INDEX(data): + upload_state.ProcessData(base_start, static_cast<size_t>(amount)); + return; + default: + for (std::size_t i = 0; i < amount; i++) { + CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); + } + break; } } diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 3a4646289..b1a22b76c 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -56,37 +56,37 @@ void Maxwell3D::InitializeRegisterDefaults() { // Doom and Bomberman seems to use the uninitialized registers and just enable blend // so initialize blend registers with sane values - regs.blend.equation_rgb = Regs::Blend::Equation::Add; - regs.blend.factor_source_rgb = Regs::Blend::Factor::One; - regs.blend.factor_dest_rgb = Regs::Blend::Factor::Zero; - regs.blend.equation_a = Regs::Blend::Equation::Add; - regs.blend.factor_source_a = Regs::Blend::Factor::One; - regs.blend.factor_dest_a = Regs::Blend::Factor::Zero; - for (auto& blend : regs.independent_blend) { - blend.equation_rgb = Regs::Blend::Equation::Add; - blend.factor_source_rgb = Regs::Blend::Factor::One; - blend.factor_dest_rgb = Regs::Blend::Factor::Zero; - blend.equation_a = Regs::Blend::Equation::Add; - blend.factor_source_a = Regs::Blend::Factor::One; - blend.factor_dest_a = Regs::Blend::Factor::Zero; - } - regs.stencil_front_op_fail = Regs::StencilOp::Keep; - regs.stencil_front_op_zfail = Regs::StencilOp::Keep; - regs.stencil_front_op_zpass = Regs::StencilOp::Keep; - regs.stencil_front_func_func = Regs::ComparisonOp::Always; + regs.blend.color_op = Regs::Blend::Equation::Add_D3D; + regs.blend.color_source = Regs::Blend::Factor::One_D3D; + regs.blend.color_dest = Regs::Blend::Factor::Zero_D3D; + regs.blend.alpha_op = Regs::Blend::Equation::Add_D3D; + regs.blend.alpha_source = Regs::Blend::Factor::One_D3D; + regs.blend.alpha_dest = Regs::Blend::Factor::Zero_D3D; + for (auto& blend : regs.blend_per_target) { + blend.color_op = Regs::Blend::Equation::Add_D3D; + blend.color_source = Regs::Blend::Factor::One_D3D; + blend.color_dest = Regs::Blend::Factor::Zero_D3D; + blend.alpha_op = Regs::Blend::Equation::Add_D3D; + blend.alpha_source = Regs::Blend::Factor::One_D3D; + blend.alpha_dest = Regs::Blend::Factor::Zero_D3D; + } + regs.stencil_front_op.fail = Regs::StencilOp::Op::Keep_D3D; + regs.stencil_front_op.zfail = Regs::StencilOp::Op::Keep_D3D; + regs.stencil_front_op.zpass = Regs::StencilOp::Op::Keep_D3D; + regs.stencil_front_op.func = Regs::ComparisonOp::Always_GL; regs.stencil_front_func_mask = 0xFFFFFFFF; regs.stencil_front_mask = 0xFFFFFFFF; regs.stencil_two_side_enable = 1; - regs.stencil_back_op_fail = Regs::StencilOp::Keep; - regs.stencil_back_op_zfail = Regs::StencilOp::Keep; - regs.stencil_back_op_zpass = Regs::StencilOp::Keep; - regs.stencil_back_func_func = Regs::ComparisonOp::Always; + regs.stencil_back_op.fail = Regs::StencilOp::Op::Keep_D3D; + regs.stencil_back_op.zfail = Regs::StencilOp::Op::Keep_D3D; + regs.stencil_back_op.zpass = Regs::StencilOp::Op::Keep_D3D; + regs.stencil_back_op.func = Regs::ComparisonOp::Always_GL; regs.stencil_back_func_mask = 0xFFFFFFFF; regs.stencil_back_mask = 0xFFFFFFFF; - regs.depth_test_func = Regs::ComparisonOp::Always; - 
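// A sketch of the CallMultiMethod fast path added for both Kepler engines
// above: a burst of writes to the data-upload method is forwarded to the
// upload state as one contiguous block rather than word by word. The engine
// below and the 0x6D register index are hypothetical stand-ins:
#include <cstdint>
#include <span>
#include <vector>

struct UploadState {
    std::vector<std::uint32_t> buffer;
    void ProcessData(std::span<const std::uint32_t> words) {
        buffer.insert(buffer.end(), words.begin(), words.end());
    }
};

void CallMultiMethod(UploadState& upload, std::uint32_t method,
                     const std::uint32_t* base, std::uint32_t amount) {
    constexpr std::uint32_t DATA_METHOD = 0x6D;  // hypothetical data register index
    if (method == DATA_METHOD) {
        upload.ProcessData({base, amount});  // one call for the whole burst
        return;
    }
    // any other method falls back to per-word CallMethod dispatch (omitted)
}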
regs.front_face = Regs::FrontFace::CounterClockWise; - regs.cull_face = Regs::CullFace::Back; + regs.depth_test_func = Regs::ComparisonOp::Always_GL; + regs.gl_front_face = Regs::FrontFace::CounterClockWise; + regs.gl_cull_face = Regs::CullFace::Back; // TODO(Rodrigo): Most games do not set a point size. I think this is a case of a // register carrying a default value. Assume it's OpenGL's default (1). @@ -107,20 +107,20 @@ void Maxwell3D::InitializeRegisterDefaults() { // NVN games expect these values to be enabled at boot regs.rasterize_enable = 1; - regs.rt_separate_frag_data = 1; + regs.color_target_mrt_enable = 1; regs.framebuffer_srgb = 1; regs.line_width_aliased = 1.0f; regs.line_width_smooth = 1.0f; - regs.front_face = Maxwell3D::Regs::FrontFace::ClockWise; + regs.gl_front_face = Maxwell3D::Regs::FrontFace::ClockWise; regs.polygon_mode_back = Maxwell3D::Regs::PolygonMode::Fill; regs.polygon_mode_front = Maxwell3D::Regs::PolygonMode::Fill; shadow_state = regs; - mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_end_gl)] = true; - mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_begin_gl)] = true; + mme_inline[MAXWELL3D_REG_INDEX(draw.end)] = true; + mme_inline[MAXWELL3D_REG_INDEX(draw.begin)] = true; mme_inline[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true; - mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true; + mme_inline[MAXWELL3D_REG_INDEX(index_buffer.count)] = true; } void Maxwell3D::ProcessMacro(u32 method, const u32* base_start, u32 amount, bool is_last_call) { @@ -173,77 +173,81 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume case MAXWELL3D_REG_INDEX(shadow_ram_control): shadow_state.shadow_ram_control = static_cast<Regs::ShadowRamControl>(nonshadow_argument); return; - case MAXWELL3D_REG_INDEX(macros.upload_address): - return macro_engine->ClearCode(regs.macros.upload_address); - case MAXWELL3D_REG_INDEX(macros.data): - return macro_engine->AddCode(regs.macros.upload_address, argument); - case MAXWELL3D_REG_INDEX(macros.bind): + case MAXWELL3D_REG_INDEX(load_mme.instruction_ptr): + return macro_engine->ClearCode(regs.load_mme.instruction_ptr); + case MAXWELL3D_REG_INDEX(load_mme.instruction): + return macro_engine->AddCode(regs.load_mme.instruction_ptr, argument); + case MAXWELL3D_REG_INDEX(load_mme.start_address): return ProcessMacroBind(argument); - case MAXWELL3D_REG_INDEX(firmware[4]): + case MAXWELL3D_REG_INDEX(falcon[4]): return ProcessFirmwareCall4(); - case MAXWELL3D_REG_INDEX(const_buffer.cb_data): - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 1: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 2: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 3: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 4: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 5: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 6: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 7: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 8: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 9: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 10: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 11: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 12: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 13: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 14: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15: + case MAXWELL3D_REG_INDEX(const_buffer.buffer): + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 1: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 2: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 3: + case 
MAXWELL3D_REG_INDEX(const_buffer.buffer) + 4: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 5: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 6: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 7: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 8: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 9: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 10: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 11: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 12: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 13: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 14: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 15: return ProcessCBData(argument); - case MAXWELL3D_REG_INDEX(cb_bind[0]): + case MAXWELL3D_REG_INDEX(bind_groups[0].raw_config): return ProcessCBBind(0); - case MAXWELL3D_REG_INDEX(cb_bind[1]): + case MAXWELL3D_REG_INDEX(bind_groups[1].raw_config): return ProcessCBBind(1); - case MAXWELL3D_REG_INDEX(cb_bind[2]): + case MAXWELL3D_REG_INDEX(bind_groups[2].raw_config): return ProcessCBBind(2); - case MAXWELL3D_REG_INDEX(cb_bind[3]): + case MAXWELL3D_REG_INDEX(bind_groups[3].raw_config): return ProcessCBBind(3); - case MAXWELL3D_REG_INDEX(cb_bind[4]): + case MAXWELL3D_REG_INDEX(bind_groups[4].raw_config): return ProcessCBBind(4); - case MAXWELL3D_REG_INDEX(draw.vertex_end_gl): + case MAXWELL3D_REG_INDEX(draw.end): return DrawArrays(); - case MAXWELL3D_REG_INDEX(small_index): - regs.index_array.count = regs.small_index.count; - regs.index_array.first = regs.small_index.first; + case MAXWELL3D_REG_INDEX(index_buffer32_first): + regs.index_buffer.count = regs.index_buffer32_first.count; + regs.index_buffer.first = regs.index_buffer32_first.first; dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; return DrawArrays(); - case MAXWELL3D_REG_INDEX(small_index_2): - regs.index_array.count = regs.small_index_2.count; - regs.index_array.first = regs.small_index_2.first; + case MAXWELL3D_REG_INDEX(index_buffer16_first): + regs.index_buffer.count = regs.index_buffer16_first.count; + regs.index_buffer.first = regs.index_buffer16_first.first; dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; return DrawArrays(); + case MAXWELL3D_REG_INDEX(index_buffer8_first): + regs.index_buffer.count = regs.index_buffer8_first.count; + regs.index_buffer.first = regs.index_buffer8_first.first; + dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; + // a macro calls this one over and over, should it increase instancing? + // Used by Hades and likely other Vulkan games. 
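// The sixteen consecutive case labels above exist because const_buffer.buffer
// is a sixteen-word window: any method landing inside it carries constant
// buffer data. A sketch of the equivalent range test, with `base` standing
// for MAXWELL3D_REG_INDEX(const_buffer.buffer):
#include <cstdint>

constexpr bool IsCBDataMethod(std::uint32_t method, std::uint32_t base) {
    return method >= base && method < base + 16;
}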
+ return DrawArrays(); case MAXWELL3D_REG_INDEX(topology_override): use_topology_override = true; return; - case MAXWELL3D_REG_INDEX(clear_buffers): + case MAXWELL3D_REG_INDEX(clear_surface): return ProcessClearBuffers(); - case MAXWELL3D_REG_INDEX(query.query_get): + case MAXWELL3D_REG_INDEX(report_semaphore.query): return ProcessQueryGet(); - case MAXWELL3D_REG_INDEX(condition.mode): + case MAXWELL3D_REG_INDEX(render_enable.mode): return ProcessQueryCondition(); - case MAXWELL3D_REG_INDEX(counter_reset): + case MAXWELL3D_REG_INDEX(clear_report_value): return ProcessCounterReset(); case MAXWELL3D_REG_INDEX(sync_info): return ProcessSyncPoint(); - case MAXWELL3D_REG_INDEX(exec_upload): - return upload_state.ProcessExec(regs.exec_upload.linear != 0); - case MAXWELL3D_REG_INDEX(data_upload): + case MAXWELL3D_REG_INDEX(launch_dma): + return upload_state.ProcessExec(regs.launch_dma.memory_layout.Value() == + Regs::LaunchDMA::Layout::Pitch); + case MAXWELL3D_REG_INDEX(inline_data): upload_state.ProcessData(argument, is_last_call); - if (is_last_call) { - } return; case MAXWELL3D_REG_INDEX(fragment_barrier): return rasterizer->FragmentBarrier(); - case MAXWELL3D_REG_INDEX(tiled_cache_barrier): - return rasterizer->TiledCacheBarrier(); } } @@ -293,24 +297,27 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, return; } switch (method) { - case MAXWELL3D_REG_INDEX(const_buffer.cb_data): - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 1: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 2: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 3: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 4: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 5: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 6: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 7: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 8: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 9: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 10: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 11: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 12: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 13: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 14: - case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15: + case MAXWELL3D_REG_INDEX(const_buffer.buffer): + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 1: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 2: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 3: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 4: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 5: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 6: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 7: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 8: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 9: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 10: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 11: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 12: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 13: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 14: + case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 15: ProcessCBMultiData(base_start, amount); break; + case MAXWELL3D_REG_INDEX(inline_data): + upload_state.ProcessData(base_start, static_cast<size_t>(amount)); + return; default: for (std::size_t i = 0; i < amount; i++) { CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); @@ -347,14 +354,15 @@ void Maxwell3D::CallMethodFromMME(u32 method, u32 method_argument) { if (mme_inline[method]) { regs.reg_array[method] = method_argument; 
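// A sketch of the launch_dma check above: bit 0 of LAUNCH_DMA selects the
// destination layout (per the BitField<0, 1, Layout> declaration later in
// this patch), and the upload state takes its linear path only for Pitch:
#include <cstdint>

enum class Layout : std::uint32_t { Blocklinear = 0, Pitch = 1 };

constexpr Layout MemoryLayout(std::uint32_t launch_dma_raw) {
    return static_cast<Layout>(launch_dma_raw & 1U);
}
static_assert(MemoryLayout(0x0) == Layout::Blocklinear);
static_assert(MemoryLayout(0x1) == Layout::Pitch);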
if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count) || - method == MAXWELL3D_REG_INDEX(index_array.count)) { + method == MAXWELL3D_REG_INDEX(index_buffer.count)) { const MMEDrawMode expected_mode = method == MAXWELL3D_REG_INDEX(vertex_buffer.count) ? MMEDrawMode::Array : MMEDrawMode::Indexed; StepInstance(expected_mode, method_argument); - } else if (method == MAXWELL3D_REG_INDEX(draw.vertex_begin_gl)) { + } else if (method == MAXWELL3D_REG_INDEX(draw.begin)) { mme_draw.instance_mode = - (regs.draw.instance_next != 0) || (regs.draw.instance_cont != 0); + (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) || + (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged); mme_draw.gl_begin_consume = true; } else { mme_draw.gl_end_count++; @@ -399,11 +407,12 @@ void Maxwell3D::ProcessTopologyOverride() { void Maxwell3D::FlushMMEInlineDraw() { LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(), regs.vertex_buffer.count); - ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); + ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?"); ASSERT(mme_draw.instance_count == mme_draw.gl_end_count); // Both instance configuration registers can not be set at the same time. - ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont, + ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First || + regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged, "Illegal combination of instancing parameters"); ProcessTopologyOverride(); @@ -418,7 +427,7 @@ void Maxwell3D::FlushMMEInlineDraw() { // it's possible that it is incorrect and that there is some other register used to specify the // drawing mode. if (is_indexed) { - regs.index_array.count = 0; + regs.index_buffer.count = 0; } else { regs.vertex_buffer.count = 0; } @@ -431,11 +440,11 @@ void Maxwell3D::FlushMMEInlineDraw() { } void Maxwell3D::ProcessMacroUpload(u32 data) { - macro_engine->AddCode(regs.macros.upload_address++, data); + macro_engine->AddCode(regs.load_mme.instruction_ptr++, data); } void Maxwell3D::ProcessMacroBind(u32 data) { - macro_positions[regs.macros.entry++] = data; + macro_positions[regs.load_mme.start_address_ptr++] = data; } void Maxwell3D::ProcessFirmwareCall4() { @@ -443,22 +452,14 @@ void Maxwell3D::ProcessFirmwareCall4() { // Firmware call 4 is a blob that changes some registers depending on its parameters. // These registers don't affect emulation and so are stubbed by setting 0xd00 to 1. - regs.reg_array[0xd00] = 1; + regs.shadow_scratch[0] = 1; } void Maxwell3D::StampQueryResult(u64 payload, bool long_query) { - struct LongQueryResult { - u64_le value; - u64_le timestamp; - }; - static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size"); - const GPUVAddr sequence_address{regs.query.QueryAddress()}; + const GPUVAddr sequence_address{regs.report_semaphore.Address()}; if (long_query) { - // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast - // GPU, this command may actually take a while to complete in real hardware due to GPU - // wait queues. 
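// A sketch of the long-report record stamped above: a 16-byte payload plus
// timestamp pair. The rewritten code writes the timestamp word first and the
// payload word second, presumably so a host reader polling the payload never
// observes it paired with a stale timestamp (assuming the two writes become
// visible in order):
#include <cstdint>

struct LongQueryResult {
    std::uint64_t value;      // written last; the word the CPU polls on
    std::uint64_t timestamp;  // written first; GPU tick count
};
static_assert(sizeof(LongQueryResult) == 16);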
- LongQueryResult query_result{payload, system.GPU().GetTicks()}; - memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result)); + memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks()); + memory_manager.Write<u64>(sequence_address, payload); } else { memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload)); } @@ -466,31 +467,45 @@ void Maxwell3D::StampQueryResult(u64 payload, bool long_query) { void Maxwell3D::ProcessQueryGet() { // TODO(Subv): Support the other query units. - if (regs.query.query_get.unit != Regs::QueryUnit::Crop) { - LOG_DEBUG(HW_GPU, "Units other than CROP are unimplemented"); - } - - switch (regs.query.query_get.operation) { - case Regs::QueryOperation::Release: - if (regs.query.query_get.fence == 1) { - rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence); + if (regs.report_semaphore.query.location != Regs::ReportSemaphore::Location::All) { + LOG_DEBUG(HW_GPU, "Locations other than ALL are unimplemented"); + } + + switch (regs.report_semaphore.query.operation) { + case Regs::ReportSemaphore::Operation::Release: + if (regs.report_semaphore.query.short_query != 0) { + const GPUVAddr sequence_address{regs.report_semaphore.Address()}; + const u32 payload = regs.report_semaphore.payload; + std::function<void()> operation([this, sequence_address, payload] { + memory_manager.Write<u32>(sequence_address, payload); + }); + rasterizer->SignalFence(std::move(operation)); } else { - StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0); + struct LongQueryResult { + u64_le value; + u64_le timestamp; + }; + const GPUVAddr sequence_address{regs.report_semaphore.Address()}; + const u32 payload = regs.report_semaphore.payload; + [this, sequence_address, payload] { + memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks()); + memory_manager.Write<u64>(sequence_address, payload); + }(); } break; - case Regs::QueryOperation::Acquire: + case Regs::ReportSemaphore::Operation::Acquire: // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that // matches the current payload. UNIMPLEMENTED_MSG("Unimplemented query operation ACQUIRE"); break; - case Regs::QueryOperation::Counter: + case Regs::ReportSemaphore::Operation::ReportOnly: if (const std::optional<u64> result = GetQueryResult()) { // If the query returns an empty optional it means it's cached and deferred. // In this case we have a non-empty result, so we stamp it immediately. 
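// A sketch of the deferred-release pattern above: rather than writing the
// 32-bit payload immediately, the write is captured in a std::function and
// handed to the rasterizer as a fence operation to run once earlier GPU work
// finishes. FenceQueue below is a hypothetical stand-in for that interface:
#include <functional>
#include <queue>

struct FenceQueue {
    std::queue<std::function<void()>> pending;
    void SignalFence(std::function<void()> op) { pending.push(std::move(op)); }
    void Complete() {  // invoked once the GPU reaches the fence
        while (!pending.empty()) {
            pending.front()();
            pending.pop();
        }
    }
};
// Usage (Write32 being a hypothetical memory write):
//   queue.SignalFence([=] { Write32(sequence_address, payload); });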
- StampQueryResult(*result, regs.query.query_get.short_query == 0); + StampQueryResult(*result, regs.report_semaphore.query.short_query == 0); } break; - case Regs::QueryOperation::Trap: + case Regs::ReportSemaphore::Operation::Trap: UNIMPLEMENTED_MSG("Unimplemented query operation TRAP"); break; default: @@ -500,31 +515,31 @@ void Maxwell3D::ProcessQueryGet() { } void Maxwell3D::ProcessQueryCondition() { - const GPUVAddr condition_address{regs.condition.Address()}; - switch (regs.condition.mode) { - case Regs::ConditionMode::Always: { + const GPUVAddr condition_address{regs.render_enable.Address()}; + switch (regs.render_enable.mode) { + case Regs::RenderEnable::Mode::True: { execute_on = true; break; } - case Regs::ConditionMode::Never: { + case Regs::RenderEnable::Mode::False: { execute_on = false; break; } - case Regs::ConditionMode::ResNonZero: { - Regs::QueryCompare cmp; + case Regs::RenderEnable::Mode::Conditional: { + Regs::ReportSemaphore::Compare cmp; memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp)); execute_on = cmp.initial_sequence != 0U && cmp.initial_mode != 0U; break; } - case Regs::ConditionMode::Equal: { - Regs::QueryCompare cmp; + case Regs::RenderEnable::Mode::IfEqual: { + Regs::ReportSemaphore::Compare cmp; memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp)); execute_on = cmp.initial_sequence == cmp.current_sequence && cmp.initial_mode == cmp.current_mode; break; } - case Regs::ConditionMode::NotEqual: { - Regs::QueryCompare cmp; + case Regs::RenderEnable::Mode::IfNotEqual: { + Regs::ReportSemaphore::Compare cmp; memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp)); execute_on = cmp.initial_sequence != cmp.current_sequence || cmp.initial_mode != cmp.current_mode; @@ -539,45 +554,46 @@ void Maxwell3D::ProcessQueryCondition() { } void Maxwell3D::ProcessCounterReset() { - switch (regs.counter_reset) { - case Regs::CounterReset::SampleCnt: + switch (regs.clear_report_value) { + case Regs::ClearReport::ZPassPixelCount: rasterizer->ResetCounter(QueryType::SamplesPassed); break; default: - LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.counter_reset); + LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.clear_report_value); break; } } void Maxwell3D::ProcessSyncPoint() { const u32 sync_point = regs.sync_info.sync_point.Value(); - const u32 increment = regs.sync_info.increment.Value(); - [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value(); - if (increment) { - rasterizer->SignalSyncPoint(sync_point); + const u32 cache_flush = regs.sync_info.clean_l2.Value(); + if (cache_flush != 0) { + rasterizer->InvalidateGPUCache(); } + rasterizer->SignalSyncPoint(sync_point); } void Maxwell3D::DrawArrays() { LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(), regs.vertex_buffer.count); - ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); + ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?"); // Both instance configuration registers can not be set at the same time. 
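// A sketch of the conditional-render comparison above, using the same record
// layout ProcessQueryCondition reads from GPU memory (the former QueryCompare,
// now ReportSemaphore::Compare):
#include <cstdint>

struct Compare {
    std::uint32_t initial_sequence;
    std::uint32_t initial_mode;
    std::uint32_t unknown1;
    std::uint32_t unknown2;
    std::uint32_t current_sequence;
    std::uint32_t current_mode;
};

constexpr bool ExecuteIfEqual(const Compare& cmp) {
    return cmp.initial_sequence == cmp.current_sequence &&
           cmp.initial_mode == cmp.current_mode;
}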
- ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont, + ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First || + regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged, "Illegal combination of instancing parameters"); ProcessTopologyOverride(); - if (regs.draw.instance_next) { + if (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) { // Increment the current instance *before* drawing. - state.current_instance += 1; - } else if (!regs.draw.instance_cont) { + state.current_instance++; + } else if (regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged) { // Reset the current instance to 0. state.current_instance = 0; } - const bool is_indexed{regs.index_array.count && !regs.vertex_buffer.count}; + const bool is_indexed{regs.index_buffer.count && !regs.vertex_buffer.count}; if (ShouldExecute()) { rasterizer->Draw(is_indexed, false); } @@ -587,60 +603,60 @@ void Maxwell3D::DrawArrays() { // it's possible that it is incorrect and that there is some other register used to specify the // drawing mode. if (is_indexed) { - regs.index_array.count = 0; + regs.index_buffer.count = 0; } else { regs.vertex_buffer.count = 0; } } std::optional<u64> Maxwell3D::GetQueryResult() { - switch (regs.query.query_get.select) { - case Regs::QuerySelect::Payload: - return regs.query.query_sequence; - case Regs::QuerySelect::SamplesPassed: + switch (regs.report_semaphore.query.report) { + case Regs::ReportSemaphore::Report::Payload: + return regs.report_semaphore.payload; + case Regs::ReportSemaphore::Report::ZPassPixelCount64: // Deferred. - rasterizer->Query(regs.query.QueryAddress(), QueryType::SamplesPassed, + rasterizer->Query(regs.report_semaphore.Address(), QueryType::SamplesPassed, system.GPU().GetTicks()); return std::nullopt; default: - LOG_DEBUG(HW_GPU, "Unimplemented query select type {}", - regs.query.query_get.select.Value()); + LOG_DEBUG(HW_GPU, "Unimplemented query report type {}", + regs.report_semaphore.query.report.Value()); return 1; } } void Maxwell3D::ProcessCBBind(size_t stage_index) { // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader stage. - const auto& bind_data = regs.cb_bind[stage_index]; - auto& buffer = state.shader_stages[stage_index].const_buffers[bind_data.index]; + const auto& bind_data = regs.bind_groups[stage_index]; + auto& buffer = state.shader_stages[stage_index].const_buffers[bind_data.shader_slot]; buffer.enabled = bind_data.valid.Value() != 0; - buffer.address = regs.const_buffer.BufferAddress(); - buffer.size = regs.const_buffer.cb_size; + buffer.address = regs.const_buffer.Address(); + buffer.size = regs.const_buffer.size; const bool is_enabled = bind_data.valid.Value() != 0; if (!is_enabled) { - rasterizer->DisableGraphicsUniformBuffer(stage_index, bind_data.index); + rasterizer->DisableGraphicsUniformBuffer(stage_index, bind_data.shader_slot); return; } - const GPUVAddr gpu_addr = regs.const_buffer.BufferAddress(); - const u32 size = regs.const_buffer.cb_size; - rasterizer->BindGraphicsUniformBuffer(stage_index, bind_data.index, gpu_addr, size); + const GPUVAddr gpu_addr = regs.const_buffer.Address(); + const u32 size = regs.const_buffer.size; + rasterizer->BindGraphicsUniformBuffer(stage_index, bind_data.shader_slot, gpu_addr, size); } void Maxwell3D::ProcessCBMultiData(const u32* start_base, u32 amount) { // Write the input value to the current const buffer at the current position. 
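// A sketch of the InstanceId handling above in DrawArrays, assuming the
// three-valued field (numeric values assumed here) replaces the old
// instance_next/instance_cont flag pair:
#include <cstdint>

enum class InstanceId : std::uint32_t { First = 0, Subsequent = 1, Unchanged = 2 };

// Returns the instance index to draw with, updating `current` in place.
std::uint32_t NextInstance(InstanceId id, std::uint32_t& current) {
    if (id == InstanceId::Subsequent) {
        ++current;    // advance the instance counter before drawing
    } else if (id != InstanceId::Unchanged) {
        current = 0;  // First: restart instancing at zero
    }
    return current;
}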
- const GPUVAddr buffer_address = regs.const_buffer.BufferAddress(); + const GPUVAddr buffer_address = regs.const_buffer.Address(); ASSERT(buffer_address != 0); // Don't allow writing past the end of the buffer. - ASSERT(regs.const_buffer.cb_pos <= regs.const_buffer.cb_size); + ASSERT(regs.const_buffer.offset <= regs.const_buffer.size); - const GPUVAddr address{buffer_address + regs.const_buffer.cb_pos}; + const GPUVAddr address{buffer_address + regs.const_buffer.offset}; const size_t copy_size = amount * sizeof(u32); memory_manager.WriteBlock(address, start_base, copy_size); // Increment the current buffer position. - regs.const_buffer.cb_pos += static_cast<u32>(copy_size); + regs.const_buffer.offset += static_cast<u32>(copy_size); } void Maxwell3D::ProcessCBData(u32 value) { @@ -648,7 +664,8 @@ void Maxwell3D::ProcessCBData(u32 value) { } Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { - const GPUVAddr tic_address_gpu{regs.tic.Address() + tic_index * sizeof(Texture::TICEntry)}; + const GPUVAddr tic_address_gpu{regs.tex_header.Address() + + tic_index * sizeof(Texture::TICEntry)}; Texture::TICEntry tic_entry; memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry)); @@ -657,7 +674,8 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { } Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const { - const GPUVAddr tsc_address_gpu{regs.tsc.Address() + tsc_index * sizeof(Texture::TSCEntry)}; + const GPUVAddr tsc_address_gpu{regs.tex_sampler.Address() + + tsc_index * sizeof(Texture::TSCEntry)}; Texture::TSCEntry tsc_entry; memory_manager.ReadBlockUnsafe(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry)); diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 5f9eb208c..75e3b868d 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -39,12 +39,15 @@ namespace Tegra::Engines { /** * This Engine is known as GF100_3D. Documentation can be found in: + * https://github.com/NVIDIA/open-gpu-doc/blob/master/classes/3d/clb197.h * https://github.com/envytools/envytools/blob/master/rnndb/graph/gf100_3d.xml * https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nvc0/nvc0_3d.xml.h + * + * Note: nVidia have confirmed that their open docs have had parts redacted, so this list is + * currently incomplete, and the gaps are still worth exploring. */ -#define MAXWELL3D_REG_INDEX(field_name) \ - (offsetof(Tegra::Engines::Maxwell3D::Regs, field_name) / sizeof(u32)) +#define MAXWELL3D_REG_INDEX(field_name) (offsetof(Maxwell3D::Regs, field_name) / sizeof(u32)) class Maxwell3D final : public EngineInterface { public: @@ -55,7 +58,6 @@ public: void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); /// Register structure of the Maxwell3D engine. - /// TODO(Subv): This structure will need to be made bigger as more registers are discovered. 
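// A sketch of how the MAXWELL3D_REG_INDEX macro defined above works: a
// register's method number is its byte offset inside Regs divided by the
// four-byte register size. ToyRegs is an illustrative stand-in:
#include <cstddef>
#include <cstdint>

struct ToyRegs {
    std::uint32_t a;  // method 0
    std::uint32_t b;  // method 1
    std::uint32_t c;  // method 2
};
#define TOY_REG_INDEX(field) (offsetof(ToyRegs, field) / sizeof(std::uint32_t))
static_assert(TOY_REG_INDEX(c) == 2);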
struct Regs { static constexpr std::size_t NUM_REGS = 0xE00; @@ -74,90 +76,515 @@ public: static constexpr std::size_t MaxConstBuffers = 18; static constexpr std::size_t MaxConstBufferSize = 0x10000; - enum class QueryOperation : u32 { - Release = 0, - Acquire = 1, - Counter = 2, - Trap = 3, + struct ID { + union { + BitField<0, 16, u32> cls; + BitField<16, 5, u32> engine; + }; }; - enum class QueryUnit : u32 { - VFetch = 1, - VP = 2, - Rast = 4, - StrmOut = 5, - GP = 6, - ZCull = 7, - Prop = 10, - Crop = 15, + struct LoadMME { + u32 instruction_ptr; + u32 instruction; + u32 start_address_ptr; + u32 start_address; }; - enum class QuerySelect : u32 { - Payload = 0, - TimeElapsed = 2, - TransformFeedbackPrimitivesGenerated = 11, - PrimitivesGenerated = 18, - SamplesPassed = 21, - TransformFeedbackUnknown = 26, + struct Notify { + u32 address_high; + u32 address_low; + u32 type; + + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } }; - struct QueryCompare { - u32 initial_sequence; - u32 initial_mode; - u32 unknown1; - u32 unknown2; - u32 current_sequence; - u32 current_mode; + struct PeerSemaphore { + u32 address_high; + u32 address_low; + + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } }; - enum class QuerySyncCondition : u32 { - NotEqual = 0, - GreaterThan = 1, + struct GlobalRender { + enum class Mode : u32 { + False = 0, + True = 1, + Conditional = 2, + IfEqual = 3, + IfNotEqual = 4, + }; + u32 offset_high; + u32 offset_low; + Mode mode; + + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(offset_high) << 32) | + offset_low); + } }; - enum class ConditionMode : u32 { - Never = 0, - Always = 1, - ResNonZero = 2, - Equal = 3, - NotEqual = 4, + enum class ReductionOp : u32 { + RedAdd = 0, + RedMin = 1, + RedMax = 2, + RedInc = 3, + RedDec = 4, + RedAnd = 5, + RedOr = 6, + RedXor = 7, }; - enum class ShaderProgram : u32 { - VertexA = 0, - VertexB = 1, - TesselationControl = 2, - TesselationEval = 3, - Geometry = 4, - Fragment = 5, + struct LaunchDMA { + enum class Layout : u32 { + Blocklinear = 0, + Pitch = 1, + }; + + enum class CompletionType : u32 { + FlushDisable = 0, + FlushOnly = 1, + Release = 2, + }; + + union { + BitField<0, 1, Layout> memory_layout; + BitField<4, 2, CompletionType> completion_type; + BitField<8, 2, u32> interrupt_type; + BitField<12, 1, u32> sem_size; + BitField<1, 1, u32> reduction_enable; + BitField<13, 3, ReductionOp> reduction_op; + BitField<2, 2, u32> reduction_format; + BitField<6, 1, u32> sysmembar_disable; + }; + }; + + struct I2M { + u32 address_high; + u32 address_low; + u32 payload; + INSERT_PADDING_BYTES_NOINIT(0x8); + u32 nop0; + u32 nop1; + u32 nop2; + u32 nop3; + }; + + struct OpportunisticEarlyZ { + BitField<0, 5, u32> threshold; + + u32 Threshold() const { + switch (threshold) { + case 0x0: + return 0; + case 0x1F: + return 0x1F; + default: + // Thresholds begin at 0x10 (1 << 4) + // Threshold is in the range 0x1 to 0x13 + return 1 << (4 + threshold.Value() - 1); + } + } + }; + + struct GeometryShaderDmFifo { + union { + BitField<0, 13, u32> raster_on; + BitField<16, 13, u32> raster_off; + BitField<31, 1, u32> spill_enabled; + }; + }; + + struct L2CacheControl { + enum class EvictPolicy : u32 { + First = 0, + Normal = 1, + Last = 2, + }; + + union { + BitField<4, 2, EvictPolicy> policy; + }; + }; + + struct InvalidateShaderCache { + union { + BitField<0, 1, u32> 
instruction; + BitField<4, 1, u32> data; + BitField<12, 1, u32> constant; + BitField<1, 1, u32> locks; + BitField<2, 1, u32> flush_data; + }; + }; + + struct SyncInfo { + enum class Condition : u32 { + StreamOutWritesDone = 0, + RopWritesDone = 1, + }; + + union { + BitField<0, 16, u32> sync_point; + BitField<16, 1, u32> clean_l2; + BitField<20, 1, Condition> condition; + }; + }; + + struct SurfaceClipBlockId { + union { + BitField<0, 4, u32> block_width; + BitField<4, 4, u32> block_height; + BitField<8, 4, u32> block_depth; + }; + }; + + struct DecompressSurface { + union { + BitField<0, 3, u32> mrt_select; + BitField<4, 16, u32> rt_array_index; + }; + }; + + struct ZCullRopBypass { + union { + BitField<0, 1, u32> enable; + BitField<4, 1, u32> no_stall; + BitField<8, 1, u32> cull_everything; + BitField<12, 4, u32> threshold; + }; + }; + + struct ZCullSubregion { + union { + BitField<0, 1, u32> enable; + BitField<4, 24, u32> normalized_aliquots; + }; + }; + + struct RasterBoundingBox { + enum class Mode : u32 { + BoundingBox = 0, + FullViewport = 1, + }; + + union { + BitField<0, 1, Mode> mode; + BitField<4, 8, u32> pad; + }; + }; + + struct IteratedBlendOptimization { + enum class Noop : u32 { + Never = 0, + SourceRGBA0000 = 1, + SourceAlpha = 2, + SourceRGBA0001 = 3, + }; + + union { + BitField<0, 1, Noop> noop; + }; + }; + + struct ZCullSubregionAllocation { + enum class Format : u32 { + Z_16x16x2_4x4 = 0, + ZS_16x16_4x4 = 1, + Z_16x16_4x2 = 2, + Z_16x16_2x4 = 3, + Z_16x8_4x4 = 4, + Z_8x8_4x2 = 5, + Z_8x8_2x4 = 6, + Z_16x16_4x8 = 7, + Z_4x8_2x2 = 8, + ZS_16x8_4x2 = 9, + ZS_16x8_2x4 = 10, + ZS_8x8_2x2 = 11, + Z_4x8_1x1 = 12, + None = 15, + }; + + union { + BitField<0, 8, u32> id; + BitField<8, 16, u32> aliquots; + BitField<24, 4, Format> format; + }; + }; + + enum class ZCullSubregionAlgorithm : u32 { + Static = 0, + Adaptive = 1, + }; + + struct PixelShaderOutputSampleMaskUsage { + union { + BitField<0, 1, u32> enable; + BitField<1, 1, u32> qualify_by_aa; + }; + }; + + struct L1Configuration { + enum class AddressableMemory : u32 { + Size16Kb = 0, + Size48Kb = 3, + }; + union { + BitField<0, 3, AddressableMemory> direct_addressable_memory; + }; + }; + + struct SPAVersion { + union { + BitField<0, 8, u32> minor; + BitField<8, 8, u32> major; + }; + }; + + struct SnapGrid { + enum class Location : u32 { + Pixel2x2 = 1, + Pixel4x4 = 2, + Pixel8x8 = 3, + Pixel16x16 = 4, + Pixel32x32 = 5, + Pixel64x64 = 6, + Pixel128x128 = 7, + Pixel256x256 = 8, + }; + + enum class Mode : u32 { + RTNE = 0, + Tesla = 1, + }; + + struct { + union { + BitField<0, 4, Location> location; + BitField<8, 1, Mode> rounding_mode; + }; + } line; + + struct { + union { + BitField<0, 4, Location> location; + BitField<8, 1, Mode> rounding_mode; + }; + } non_line; + }; + + struct Tessellation { + enum class DomainType : u32 { + Isolines = 0, + Triangles = 1, + Quads = 2, + }; + + enum class Spacing : u32 { + Integer = 0, + FractionalOdd = 1, + FractionalEven = 2, + }; + + enum class OutputPrimitives : u32 { + Points = 0, + Lines = 1, + Triangles_CW = 2, + Triangles_CCW = 3, + }; + + struct Parameters { + union { + BitField<0, 2, DomainType> domain_type; + BitField<4, 2, Spacing> spacing; + BitField<8, 2, OutputPrimitives> output_primitives; + }; + } params; + + struct LOD { + std::array<f32, 4> outer; + std::array<f32, 2> inner; + } lod; + + std::array<u32, 9> reserved; + }; + + struct SubTilingPerf { + struct { + union { + BitField<0, 8, u32> spm_triangle_register_file_per; + BitField<8, 8, u32> 
spm_pixel_output_buffer_per; + BitField<16, 8, u32> spm_triangle_ram_per; + BitField<24, 8, u32> max_quads_per; + }; + } knob_a; + + struct { + union { + BitField<0, 8, u32> max_primitives_per; + }; + } knob_b; + + u32 knob_c; + }; + + struct ZCullSubregionReport { + enum class ReportType : u32 { + DepthTest = 0, + DepthTestNoAccept = 1, + DepthTestLateZ = 2, + StencilTest = 3, + }; + + union { + BitField<0, 1, u32> enabled; + BitField<4, 8, u32> subregion_id; + } to_report; + + union { + BitField<0, 1, u32> enabled; + BitField<4, 3, ReportType> type; + } report_type; + }; + + struct BalancedPrimitiveWorkload { + union { + BitField<0, 1, u32> unpartitioned_mode; + BitField<4, 1, u32> timesliced_mode; + }; + }; + + struct TransformFeedback { + struct Buffer { + u32 enable; + u32 address_high; + u32 address_low; + s32 size; + s32 start_offset; + INSERT_PADDING_BYTES_NOINIT(0xC); + + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; + static_assert(sizeof(Buffer) == 0x20); + + struct Control { + u32 stream; + u32 varying_count; + u32 stride; + INSERT_PADDING_BYTES_NOINIT(0x4); + }; + static_assert(sizeof(Control) == 0x10); + + std::array<TransformFeedback::Buffer, NumTransformFeedbackBuffers> buffers; + + INSERT_PADDING_BYTES_NOINIT(0x300); + + std::array<TransformFeedback::Control, NumTransformFeedbackBuffers> controls; + }; + + struct HybridAntiAliasControl { + enum class Centroid : u32 { + PerFragment = 0, + PerPass = 1, + }; + union { + BitField<0, 4, u32> passes; + BitField<4, 1, Centroid> centroid; + BitField<5, 1, u32> passes_extended; + }; + }; + + struct ShaderLocalMemory { + u32 base_address; + INSERT_PADDING_BYTES_NOINIT(0x10); + u32 address_high; + u32 address_low; + u32 size_high; + u32 size_low; + u32 default_size_per_warp; + + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + + u64 Size() const { + return (static_cast<u64>(size_high) << 32) | size_low; + } + }; + + struct ZCullRegion { + u32 width; + u32 height; + u32 depth; + u32 offset; + INSERT_PADDING_BYTES_NOINIT(0xC); + u32 fetch_streams_once; + union { + BitField<0, 16, u32> start_aliquot; + BitField<16, 16, u32> aliquot_count; + } location; + u32 aliquots_per_layer; + u32 storage_address_high; + u32 storage_address_low; + u32 storage_limit_address_high; + u32 storage_limit_address_low; + + GPUVAddr StorageAddress() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(storage_address_high) << 32) | + storage_address_low); + } + GPUVAddr StorageLimitAddress() const { + return static_cast<GPUVAddr>( + (static_cast<GPUVAddr>(storage_limit_address_high) << 32) | + storage_limit_address_low); + } + }; + + struct ZetaReadOnly { + union { + BitField<0, 1, u32> enable_z; + BitField<4, 1, u32> enable_stencil; + }; }; struct VertexAttribute { enum class Size : u32 { Invalid = 0x0, - Size_32_32_32_32 = 0x01, - Size_32_32_32 = 0x02, - Size_16_16_16_16 = 0x03, - Size_32_32 = 0x04, - Size_16_16_16 = 0x05, - Size_8_8_8_8 = 0x0a, - Size_16_16 = 0x0f, - Size_32 = 0x12, - Size_8_8_8 = 0x13, - Size_8_8 = 0x18, - Size_16 = 0x1b, - Size_8 = 0x1d, - Size_10_10_10_2 = 0x30, - Size_11_11_10 = 0x31, + Size_R32_G32_B32_A32 = 0x01, + Size_R32_G32_B32 = 0x02, + Size_R16_G16_B16_A16 = 0x03, + Size_R32_G32 = 0x04, + Size_R16_G16_B16 = 0x05, + Size_R8_G8_B8_A8 = 0x0A, + Size_R16_G16 = 0x0F, + Size_R32 = 0x12, + Size_R8_G8_B8 = 0x13, + Size_R8_G8 = 0x18, + Size_R16 = 0x1B, + Size_R8 = 0x1D, 
+ Size_A2_B10_G10_R10 = 0x30, + Size_B10_G11_R11 = 0x31, + Size_G8_R8 = 0x32, + Size_X8_B8_G8_R8 = 0x33, + Size_A8 = 0x34, }; enum class Type : u32 { - SignedNorm = 1, - UnsignedNorm = 2, - SignedInt = 3, - UnsignedInt = 4, - UnsignedScaled = 5, - SignedScaled = 6, + UnusedEnumDoNotUseBecauseItWillGoAway = 0, + SNorm = 1, + UNorm = 2, + SInt = 3, + UInt = 4, + UScaled = 5, + SScaled = 6, Float = 7, }; @@ -173,33 +600,36 @@ public: u32 ComponentCount() const { switch (size) { - case Size::Size_32_32_32_32: + case Size::Size_R32_G32_B32_A32: return 4; - case Size::Size_32_32_32: + case Size::Size_R32_G32_B32: return 3; - case Size::Size_16_16_16_16: + case Size::Size_R16_G16_B16_A16: return 4; - case Size::Size_32_32: + case Size::Size_R32_G32: return 2; - case Size::Size_16_16_16: + case Size::Size_R16_G16_B16: return 3; - case Size::Size_8_8_8_8: + case Size::Size_R8_G8_B8_A8: + case Size::Size_X8_B8_G8_R8: return 4; - case Size::Size_16_16: + case Size::Size_R16_G16: return 2; - case Size::Size_32: + case Size::Size_R32: return 1; - case Size::Size_8_8_8: + case Size::Size_R8_G8_B8: return 3; - case Size::Size_8_8: + case Size::Size_R8_G8: + case Size::Size_G8_R8: return 2; - case Size::Size_16: + case Size::Size_R16: return 1; - case Size::Size_8: + case Size::Size_R8: + case Size::Size_A8: return 1; - case Size::Size_10_10_10_2: + case Size::Size_A2_B10_G10_R10: return 4; - case Size::Size_11_11_10: + case Size::Size_B10_G11_R11: return 3; default: ASSERT(false); @@ -209,33 +639,36 @@ public: u32 SizeInBytes() const { switch (size) { - case Size::Size_32_32_32_32: + case Size::Size_R32_G32_B32_A32: return 16; - case Size::Size_32_32_32: + case Size::Size_R32_G32_B32: return 12; - case Size::Size_16_16_16_16: + case Size::Size_R16_G16_B16_A16: return 8; - case Size::Size_32_32: + case Size::Size_R32_G32: return 8; - case Size::Size_16_16_16: + case Size::Size_R16_G16_B16: return 6; - case Size::Size_8_8_8_8: + case Size::Size_R8_G8_B8_A8: + case Size::Size_X8_B8_G8_R8: return 4; - case Size::Size_16_16: + case Size::Size_R16_G16: return 4; - case Size::Size_32: + case Size::Size_R32: return 4; - case Size::Size_8_8_8: + case Size::Size_R8_G8_B8: return 3; - case Size::Size_8_8: + case Size::Size_R8_G8: + case Size::Size_G8_R8: return 2; - case Size::Size_16: + case Size::Size_R16: return 2; - case Size::Size_8: + case Size::Size_R8: + case Size::Size_A8: return 1; - case Size::Size_10_10_10_2: + case Size::Size_A2_B10_G10_R10: return 4; - case Size::Size_11_11_10: + case Size::Size_B10_G11_R11: return 4; default: ASSERT(false); @@ -245,34 +678,36 @@ public: std::string SizeString() const { switch (size) { - case Size::Size_32_32_32_32: + case Size::Size_R32_G32_B32_A32: return "32_32_32_32"; - case Size::Size_32_32_32: + case Size::Size_R32_G32_B32: return "32_32_32"; - case Size::Size_16_16_16_16: + case Size::Size_R16_G16_B16_A16: return "16_16_16_16"; - case Size::Size_32_32: + case Size::Size_R32_G32: return "32_32"; - case Size::Size_16_16_16: + case Size::Size_R16_G16_B16: return "16_16_16"; - case Size::Size_8_8_8_8: + case Size::Size_R8_G8_B8_A8: return "8_8_8_8"; - case Size::Size_16_16: + case Size::Size_R16_G16: return "16_16"; - case Size::Size_32: + case Size::Size_R32: return "32"; - case Size::Size_8_8_8: + case Size::Size_R8_G8_B8: return "8_8_8"; - case Size::Size_8_8: + case Size::Size_R8_G8: + case Size::Size_G8_R8: return "8_8"; - case Size::Size_16: + case Size::Size_R16: return "16"; - case Size::Size_8: + case Size::Size_R8: + case Size::Size_A8: return "8"; - case 
Size::Size_10_10_10_2: - return "10_10_10_2"; - case Size::Size_11_11_10: - return "11_11_10"; + case Size::Size_A2_B10_G10_R10: + return "2_10_10_10"; + case Size::Size_B10_G11_R11: + return "10_11_12"; default: ASSERT(false); return {}; @@ -281,17 +716,19 @@ public: std::string TypeString() const { switch (type) { - case Type::SignedNorm: + case Type::UnusedEnumDoNotUseBecauseItWillGoAway: + return "Unused"; + case Type::SNorm: return "SNORM"; - case Type::UnsignedNorm: + case Type::UNorm: return "UNORM"; - case Type::SignedInt: + case Type::SInt: return "SINT"; - case Type::UnsignedInt: + case Type::UInt: return "UINT"; - case Type::UnsignedScaled: + case Type::UScaled: return "USCALED"; - case Type::SignedScaled: + case Type::SScaled: return "SSCALED"; case Type::Float: return "FLOAT"; @@ -301,7 +738,7 @@ public: } bool IsNormalized() const { - return (type == Type::SignedNorm) || (type == Type::UnsignedNorm); + return (type == Type::SNorm) || (type == Type::UNorm); } bool IsValid() const { @@ -312,6 +749,7 @@ public: return hex < other.hex; } }; + static_assert(sizeof(VertexAttribute) == 0x4); struct MsaaSampleLocation { union { @@ -342,9 +780,96 @@ public: } }; - enum class DepthMode : u32 { - MinusOneToOne = 0, - ZeroToOne = 1, + struct MultisampleCoverageToColor { + union { + BitField<0, 1, u32> enable; + BitField<4, 3, u32> target; + }; + }; + + struct DecompressZetaSurface { + union { + BitField<0, 1, u32> z_enable; + BitField<4, 1, u32> stencil_enable; + }; + }; + + struct ZetaSparse { + enum class UnmappedCompare : u32 { + Unmapped = 0, + FailAlways = 1, + }; + union { + BitField<0, 1, u32> enable; + BitField<1, 1, UnmappedCompare> unmapped_compare; + }; + }; + + struct RtControl { + union { + BitField<0, 4, u32> count; + BitField<4, 3, u32> target0; + BitField<7, 3, u32> target1; + BitField<10, 3, u32> target2; + BitField<13, 3, u32> target3; + BitField<16, 3, u32> target4; + BitField<19, 3, u32> target5; + BitField<22, 3, u32> target6; + BitField<25, 3, u32> target7; + }; + + u32 Map(std::size_t index) const { + const std::array<u32, NumRenderTargets> maps{target0, target1, target2, target3, + target4, target5, target6, target7}; + ASSERT(index < maps.size()); + return maps[index]; + } + }; + + struct CompressionThresholdSamples { + u32 samples; + + u32 Samples() { + if (samples == 0) { + return 0; + } + return 1 << (samples - 1); + } + }; + + struct PixelShaderInterlockControl { + enum class TileMode : u32 { + NoConflictDetect = 0, + DetectSampleConflict = 1, + DetectPixelConflict = 2, + }; + enum class TileSize : u32 { + Size_16x16 = 0, + Size_8x8 = 1, + }; + enum class FragmentOrder : u32 { + FragmentOrdered = 0, + FragmentUnordered = 1, + }; + union { + BitField<0, 2, TileMode> tile_mode; + BitField<2, 1, TileSize> tile_size; + BitField<3, 1, FragmentOrder> fragment_order; + }; + }; + + struct ZetaSize { + enum class DimensionControl : u32 { + DepthDefinesArray = 0, + ArraySizeOne = 1, + }; + + u32 width; + u32 height; + union { + BitField<0, 16, u32> depth; + BitField<16, 1, DimensionControl> dim_control; + }; }; enum class PrimitiveTopology : u32 { @@ -358,15 +883,21 @@ public: Quads = 0x7, QuadStrip = 0x8, Polygon = 0x9, - LinesAdjacency = 0xa, - LineStripAdjacency = 0xb, - TrianglesAdjacency = 0xc, - TriangleStripAdjacency = 0xd, - Patches = 0xe, + LinesAdjacency = 0xA, + LineStripAdjacency = 0xB, + TrianglesAdjacency = 0xC, + TriangleStripAdjacency = 0xD, + Patches = 0xE, + }; + + struct VertexArray { + union { + BitField<0, 16, u32> start; + BitField<16, 12, u32> 
count; + BitField<28, 3, PrimitiveTopology> topology; + }; }; - // Constants as from NVC0_3D_UNK1970_D3D - // https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/gallium/drivers/nouveau/nvc0/nvc0_3d.xml.h#L1598 enum class PrimitiveTopologyOverride : u32 { None = 0x0, Points = 0x1, @@ -374,11 +905,32 @@ public: LineStrip = 0x3, Triangles = 0x4, TriangleStrip = 0x5, - LinesAdjacency = 0xa, - LineStripAdjacency = 0xb, - TrianglesAdjacency = 0xc, - TriangleStripAdjacency = 0xd, - Patches = 0xe, + LinesAdjacency = 0xA, + LineStripAdjacency = 0xB, + TrianglesAdjacency = 0xC, + TriangleStripAdjacency = 0xD, + Patches = 0xE, + + LegacyPoints = 0x1001, + LegacyIndexedLines = 0x1002, + LegacyIndexedTriangles = 0x1003, + LegacyLines = 0x100F, + LegacyLineStrip = 0x1010, + LegacyIndexedLineStrip = 0x1011, + LegacyTriangles = 0x1012, + LegacyTriangleStrip = 0x1013, + LegacyIndexedTriangleStrip = 0x1014, + LegacyTriangleFan = 0x1015, + LegacyIndexedTriangleFan = 0x1016, + LegacyTriangleFanImm = 0x1017, + LegacyLinesImm = 0x1018, + LegacyIndexedTriangles2 = 0x101A, + LegacyIndexedLines2 = 0x101B, + }; + + enum class DepthMode : u32 { + MinusOneToOne = 0, + ZeroToOne = 1, }; enum class IndexFormat : u32 { @@ -388,183 +940,143 @@ public: }; enum class ComparisonOp : u32 { - // These values are used by Nouveau and most games, they correspond to the OpenGL token - // values for these operations. - Never = 0x200, - Less = 0x201, - Equal = 0x202, - LessEqual = 0x203, - Greater = 0x204, - NotEqual = 0x205, - GreaterEqual = 0x206, - Always = 0x207, - - // These values are used by some games, they seem to be NV04 values. - NeverOld = 1, - LessOld = 2, - EqualOld = 3, - LessEqualOld = 4, - GreaterOld = 5, - NotEqualOld = 6, - GreaterEqualOld = 7, - AlwaysOld = 8, - }; - - enum class LogicOperation : u32 { - Clear = 0x1500, - And = 0x1501, - AndReverse = 0x1502, - Copy = 0x1503, - AndInverted = 0x1504, - NoOp = 0x1505, - Xor = 0x1506, - Or = 0x1507, - Nor = 0x1508, - Equiv = 0x1509, - Invert = 0x150A, - OrReverse = 0x150B, - CopyInverted = 0x150C, - OrInverted = 0x150D, - Nand = 0x150E, - Set = 0x150F, - }; - - enum class StencilOp : u32 { - Keep = 1, - Zero = 2, - Replace = 3, - Incr = 4, - Decr = 5, - Invert = 6, - IncrWrap = 7, - DecrWrap = 8, - KeepOGL = 0x1E00, - ZeroOGL = 0, - ReplaceOGL = 0x1E01, - IncrOGL = 0x1E02, - DecrOGL = 0x1E03, - InvertOGL = 0x150A, - IncrWrapOGL = 0x8507, - DecrWrapOGL = 0x8508, - }; - - enum class CounterReset : u32 { - SampleCnt = 0x01, - Unk02 = 0x02, - Unk03 = 0x03, - Unk04 = 0x04, - EmittedPrimitives = 0x10, // Not tested - Unk11 = 0x11, - Unk12 = 0x12, - Unk13 = 0x13, - Unk15 = 0x15, - Unk16 = 0x16, - Unk17 = 0x17, - Unk18 = 0x18, - Unk1A = 0x1A, - Unk1B = 0x1B, - Unk1C = 0x1C, - Unk1D = 0x1D, - Unk1E = 0x1E, - GeneratedPrimitives = 0x1F, + Never_D3D = 1, + Less_D3D = 2, + Equal_D3D = 3, + LessEqual_D3D = 4, + Greater_D3D = 5, + NotEqual_D3D = 6, + GreaterEqual_D3D = 7, + Always_D3D = 8, + + Never_GL = 0x200, + Less_GL = 0x201, + Equal_GL = 0x202, + LessEqual_GL = 0x203, + Greater_GL = 0x204, + NotEqual_GL = 0x205, + GreaterEqual_GL = 0x206, + Always_GL = 0x207, + }; + + enum class ClearReport : u32 { + ZPassPixelCount = 0x01, + ZCullStats = 0x02, + StreamingPrimitvesNeededMinusSucceeded = 0x03, + AlphaBetaClocks = 0x04, + StreamingPrimitivesSucceeded = 0x10, + StreamingPrimitivesNeeded = 0x11, + VerticesGenerated = 0x12, + PrimitivesGenerated = 0x13, + VertexShaderInvocations = 0x15, + TessellationInitInvocations = 0x16, + TessellationShaderInvocations = 0x17, + 
TessellationShaderPrimitivesGenerated = 0x18, + GeometryShaderInvocations = 0x1A, + GeometryShaderPrimitivesGenerated = 0x1B, + ClipperInvocations = 0x1C, + ClipperPrimitivesGenerated = 0x1D, + PixelShaderInvocations = 0x1E, + VtgPrimitivesOut = 0x1F, }; enum class FrontFace : u32 { - ClockWise = 0x0900, - CounterClockWise = 0x0901, + ClockWise = 0x900, + CounterClockWise = 0x901, }; enum class CullFace : u32 { - Front = 0x0404, - Back = 0x0405, - FrontAndBack = 0x0408, + Front = 0x404, + Back = 0x405, + FrontAndBack = 0x408, }; struct Blend { enum class Equation : u32 { - Add = 1, - Subtract = 2, - ReverseSubtract = 3, - Min = 4, - Max = 5, - - // These values are used by Nouveau and some games. - AddGL = 0x8006, - MinGL = 0x8007, - MaxGL = 0x8008, - SubtractGL = 0x800a, - ReverseSubtractGL = 0x800b + Add_D3D = 1, + Subtract_D3D = 2, + ReverseSubtract_D3D = 3, + Min_D3D = 4, + Max_D3D = 5, + + Add_GL = 0x8006, + Min_GL = 0x8007, + Max_GL = 0x8008, + Subtract_GL = 0x800A, + ReverseSubtract_GL = 0x800B }; enum class Factor : u32 { - Zero = 0x1, - One = 0x2, - SourceColor = 0x3, - OneMinusSourceColor = 0x4, - SourceAlpha = 0x5, - OneMinusSourceAlpha = 0x6, - DestAlpha = 0x7, - OneMinusDestAlpha = 0x8, - DestColor = 0x9, - OneMinusDestColor = 0xa, - SourceAlphaSaturate = 0xb, - Source1Color = 0x10, - OneMinusSource1Color = 0x11, - Source1Alpha = 0x12, - OneMinusSource1Alpha = 0x13, - ConstantColor = 0x61, - OneMinusConstantColor = 0x62, - ConstantAlpha = 0x63, - OneMinusConstantAlpha = 0x64, - - // These values are used by Nouveau and some games. - ZeroGL = 0x4000, - OneGL = 0x4001, - SourceColorGL = 0x4300, - OneMinusSourceColorGL = 0x4301, - SourceAlphaGL = 0x4302, - OneMinusSourceAlphaGL = 0x4303, - DestAlphaGL = 0x4304, - OneMinusDestAlphaGL = 0x4305, - DestColorGL = 0x4306, - OneMinusDestColorGL = 0x4307, - SourceAlphaSaturateGL = 0x4308, - ConstantColorGL = 0xc001, - OneMinusConstantColorGL = 0xc002, - ConstantAlphaGL = 0xc003, - OneMinusConstantAlphaGL = 0xc004, - Source1ColorGL = 0xc900, - OneMinusSource1ColorGL = 0xc901, - Source1AlphaGL = 0xc902, - OneMinusSource1AlphaGL = 0xc903, + Zero_D3D = 0x1, + One_D3D = 0x2, + SourceColor_D3D = 0x3, + OneMinusSourceColor_D3D = 0x4, + SourceAlpha_D3D = 0x5, + OneMinusSourceAlpha_D3D = 0x6, + DestAlpha_D3D = 0x7, + OneMinusDestAlpha_D3D = 0x8, + DestColor_D3D = 0x9, + OneMinusDestColor_D3D = 0xA, + SourceAlphaSaturate_D3D = 0xB, + BothSourceAlpha_D3D = 0xC, + OneMinusBothSourceAlpha_D3D = 0xD, + BlendFactor_D3D = 0xE, + OneMinusBlendFactor_D3D = 0xF, + Source1Color_D3D = 0x10, + OneMinusSource1Color_D3D = 0x11, + Source1Alpha_D3D = 0x12, + OneMinusSource1Alpha_D3D = 0x13, + + Zero_GL = 0x4000, + One_GL = 0x4001, + SourceColor_GL = 0x4300, + OneMinusSourceColor_GL = 0x4301, + SourceAlpha_GL = 0x4302, + OneMinusSourceAlpha_GL = 0x4303, + DestAlpha_GL = 0x4304, + OneMinusDestAlpha_GL = 0x4305, + DestColor_GL = 0x4306, + OneMinusDestColor_GL = 0x4307, + SourceAlphaSaturate_GL = 0x4308, + ConstantColor_GL = 0xC001, + OneMinusConstantColor_GL = 0xC002, + ConstantAlpha_GL = 0xC003, + OneMinusConstantAlpha_GL = 0xC004, + Source1Color_GL = 0xC900, + OneMinusSource1Color_GL = 0xC901, + Source1Alpha_GL = 0xC902, + OneMinusSource1Alpha_GL = 0xC903, }; u32 separate_alpha; - Equation equation_rgb; - Factor factor_source_rgb; - Factor factor_dest_rgb; - Equation equation_a; - Factor factor_source_a; - Factor factor_dest_a; - INSERT_PADDING_WORDS_NOINIT(1); - }; - - enum class TessellationPrimitive : u32 { - Isolines = 0, - Triangles = 1, - Quads = 2, + 
Equation color_op; + Factor color_source; + Factor color_dest; + Equation alpha_op; + Factor alpha_source; + u32 enable_global_color_key; + Factor alpha_dest; + + u32 single_rop_control_enable; + u32 enable[NumRenderTargets]; }; - enum class TessellationSpacing : u32 { - Equal = 0, - FractionalOdd = 1, - FractionalEven = 2, + struct BlendPerTarget { + u32 separate_alpha; + Blend::Equation color_op; + Blend::Factor color_source; + Blend::Factor color_dest; + Blend::Equation alpha_op; + Blend::Factor alpha_source; + Blend::Factor alpha_dest; + INSERT_PADDING_BYTES_NOINIT(0x4); }; + static_assert(sizeof(BlendPerTarget) == 0x20); enum class PolygonMode : u32 { - Point = 0x1b00, - Line = 0x1b01, - Fill = 0x1b02, + Point = 0x1B00, + Line = 0x1B01, + Fill = 0x1B02, }; enum class ShadowRamControl : u32 { @@ -589,18 +1101,22 @@ public: NegativeW = 7, }; - enum class SamplerIndex : u32 { + enum class SamplerBinding : u32 { Independently = 0, - ViaHeaderIndex = 1, + ViaHeaderBinding = 1, }; struct TileMode { + enum class DimensionControl : u32 { + DepthDefinesArray = 0, + DepthDefinesDepth = 1, + }; union { BitField<0, 4, u32> block_width; BitField<4, 4, u32> block_height; BitField<8, 4, u32> block_depth; BitField<12, 1, u32> is_pitch_linear; - BitField<16, 1, u32> is_3d; + BitField<16, 1, DimensionControl> dim_control; }; }; static_assert(sizeof(TileMode) == 4); @@ -616,23 +1132,25 @@ public: BitField<0, 16, u32> depth; BitField<16, 1, u32> volume; }; - u32 layer_stride; + u32 array_pitch; u32 base_layer; - INSERT_PADDING_WORDS_NOINIT(7); + u32 mark_ieee_clean; + INSERT_PADDING_BYTES_NOINIT(0x18); GPUVAddr Address() const { return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | address_low); } }; + static_assert(sizeof(RenderTargetConfig) == 0x40); struct ColorMask { union { u32 raw; - BitField<0, 4, u32> R; - BitField<4, 4, u32> G; - BitField<8, 4, u32> B; - BitField<12, 4, u32> A; + BitField<0, 1, u32> R; + BitField<4, 1, u32> G; + BitField<8, 1, u32> B; + BitField<12, 1, u32> A; }; }; @@ -643,6 +1161,7 @@ public: f32 translate_x; f32 translate_y; f32 translate_z; + union { u32 raw; BitField<0, 3, ViewportSwizzle> x; @@ -650,7 +1169,11 @@ public: BitField<8, 3, ViewportSwizzle> z; BitField<12, 3, ViewportSwizzle> w; } swizzle; - INSERT_PADDING_WORDS_NOINIT(1); + + union { + BitField<0, 5, u32> x; + BitField<8, 5, u32> y; + } snap_grid_precision; Common::Rectangle<f32> GetRect() const { return { @@ -677,21 +1200,14 @@ public: return translate_y + std::fabs(scale_y) - GetY(); } }; + static_assert(sizeof(ViewportTransform) == 0x20); - struct ScissorTest { - u32 enable; - union { - BitField<0, 16, u32> min_x; - BitField<16, 16, u32> max_x; + struct Viewport { + enum class PixelCenter : u32 { + HalfIntegers = 0, + Integers = 1, }; - union { - BitField<0, 16, u32> min_y; - BitField<16, 16, u32> max_y; - }; - u32 fill; - }; - struct ViewPort { union { BitField<0, 16, u32> x; BitField<16, 16, u32> width; @@ -703,726 +1219,1820 @@ public: float depth_range_near; float depth_range_far; }; + static_assert(sizeof(Viewport) == 0x10); - struct TransformFeedbackBinding { - u32 buffer_enable; - u32 address_high; - u32 address_low; - s32 buffer_size; - s32 buffer_offset; - INSERT_PADDING_WORDS_NOINIT(3); - - GPUVAddr Address() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | - address_low); - } + struct Window { + union { + BitField<0, 16, u32> x_min; + BitField<16, 16, u32> x_max; + }; + union { + BitField<0, 16, u32> y_min; + BitField<16, 16, u32> y_max; + 
}; }; - static_assert(sizeof(TransformFeedbackBinding) == 32); + static_assert(sizeof(Window) == 0x8); - struct TransformFeedbackLayout { - u32 stream; - u32 varying_count; - u32 stride; - INSERT_PADDING_WORDS_NOINIT(1); + struct ClipIdExtent { + union { + BitField<0, 16, u32> x; + BitField<16, 16, u32> width; + }; + union { + BitField<0, 16, u32> y; + BitField<16, 16, u32> height; + }; + }; + static_assert(sizeof(ClipIdExtent) == 0x8); + + enum class VisibleCallLimit : u32 { + Limit0 = 0, + Limit1 = 1, + Limit2 = 2, + Limit4 = 3, + Limit8 = 4, + Limit16 = 5, + Limit32 = 6, + Limit64 = 7, + Limit128 = 8, + None = 15, }; - static_assert(sizeof(TransformFeedbackLayout) == 16); - - bool IsShaderConfigEnabled(std::size_t index) const { - // The VertexB is always enabled. - if (index == static_cast<std::size_t>(Regs::ShaderProgram::VertexB)) { - return true; - } - return shader_config[index].enable != 0; - } - - bool IsShaderConfigEnabled(Regs::ShaderProgram type) const { - return IsShaderConfigEnabled(static_cast<std::size_t>(type)); - } - - union { - struct { - INSERT_PADDING_WORDS_NOINIT(0x44); - u32 wait_for_idle; + struct StatisticsCounter { + union { + BitField<0, 1, u32> da_vertices; + BitField<1, 1, u32> da_primitives; + BitField<2, 1, u32> vs_invocations; + BitField<3, 1, u32> gs_invocations; + BitField<4, 1, u32> gs_primitives; + BitField<5, 1, u32> streaming_primitives_succeeded; + BitField<6, 1, u32> streaming_primitives_needed; + BitField<7, 1, u32> clipper_invocations; + BitField<8, 1, u32> clipper_primitives; + BitField<9, 1, u32> ps_invocations; + BitField<11, 1, u32> ti_invocations; + BitField<12, 1, u32> ts_invocations; + BitField<13, 1, u32> ts_primitives; + BitField<14, 1, u32> total_streaming_primitives_needed_succeeded; + BitField<10, 1, u32> vtg_primitives_out; + BitField<15, 1, u32> alpha_beta_clocks; + }; + }; - struct { - u32 upload_address; - u32 data; - u32 entry; - u32 bind; - } macros; + struct ClearRect { + union { + BitField<0, 16, u32> x_min; + BitField<16, 16, u32> x_max; + }; + union { + BitField<0, 16, u32> y_min; + BitField<16, 16, u32> y_max; + }; + }; - ShadowRamControl shadow_ram_control; + struct VertexBuffer { + u32 first; + u32 count; + }; - INSERT_PADDING_WORDS_NOINIT(0x16); + struct InvalidateShaderCacheNoWFI { + union { + BitField<0, 1, u32> instruction; + BitField<4, 1, u32> global_data; + BitField<12, 1, u32> constant; + }; + }; - Upload::Registers upload; - struct { - union { - BitField<0, 1, u32> linear; - }; - } exec_upload; + struct ZCullSerialization { + enum class Applied : u32 { + Always = 0, + LateZ = 1, + OutOfGamutZ = 2, + LateZOrOutOfGamutZ = 3, + }; + union { + BitField<0, 1, u32> enable; + BitField<4, 2, Applied> applied; + }; + }; - u32 data_upload; + struct ZCullDirFormat { + enum class Zdir : u32 { + Less = 0, + Greater = 1, + }; + enum class Zformat : u32 { + MSB = 0, + FP = 1, + Ztrick = 2, + Zf32 = 3, + }; - INSERT_PADDING_WORDS_NOINIT(0x16); + union { + BitField<0, 16, Zdir> dir; + BitField<16, 16, Zformat> format; + }; + }; - u32 force_early_fragment_tests; + struct IteratedBlend { + union { + BitField<0, 1, u32> enable; + BitField<1, 1, u32> enable_alpha; + }; + u32 pass_count; + }; - INSERT_PADDING_WORDS_NOINIT(0x2D); + struct ZCullCriterion { + enum class Sfunc : u32 { + Never = 0, + Less = 1, + Equal = 2, + LessOrEqual = 3, + Greater = 4, + NotEqual = 5, + GreaterOrEqual = 6, + Always = 7, + }; - struct { - union { - BitField<0, 16, u32> sync_point; - BitField<16, 1, u32> unknown; - BitField<20, 1, u32> increment; - }; - } 
sync_info; + union { + BitField<0, 8, Sfunc> sfunc; + BitField<8, 1, u32> no_invalidate; + BitField<9, 1, u32> force_match; + BitField<16, 8, u32> sref; + BitField<24, 8, u32> smask; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x15); + struct LoadIteratedBlend { + enum class Test : u32 { + False = 0, + True = 1, + Equal = 2, + NotEqual = 3, + LessThan = 4, + LessOrEqual = 5, + Greater = 6, + GreaterOrEqual = 7, + }; + enum class Operation : u32 { + AddProducts = 0, + SubProducts = 1, + Min = 2, + Max = 3, + Reciprocal = 4, + Add = 5, + Sub = 6, + }; + enum class OperandA : u32 { + SrcRGB = 0, + DstRGB = 1, + SrcAAA = 2, + DstAAA = 3, + Temp0_RGB = 4, + Temp1_RGB = 5, + Temp2_RGB = 6, + PBR_RGB = 7, + }; + enum class OperandB : u32 { + Zero = 0, + One = 1, + SrcRGB = 2, + SrcAAA = 3, + OneMinusSrcAAA = 4, + DstRGB = 5, + DstAAA = 6, + OneMinusDstAAA = 7, + Temp0_RGB = 9, + Temp1_RGB = 10, + Temp2_RGB = 11, + PBR_RGB = 12, + ConstRGB = 13, + ZeroATimesB = 14, + }; + enum class Swizzle : u32 { + RGB = 0, + GBR = 1, + RRR = 2, + GGG = 3, + BBB = 4, + RToA = 5, + }; + enum class WriteMask : u32 { + RGB = 0, + ROnly = 1, + GOnly = 2, + BOnly = 3, + }; + enum class Pass : u32 { + Temp0 = 0, + Temp1 = 1, + Temp2 = 2, + None = 3, + }; - union { - BitField<0, 2, TessellationPrimitive> prim; - BitField<4, 2, TessellationSpacing> spacing; - BitField<8, 1, u32> cw; - BitField<9, 1, u32> connected; - } tess_mode; + u32 instruction_ptr; + union { + BitField<0, 3, Test> test; + BitField<3, 3, Operation> operation; + BitField<6, 3, u32> const_input; + BitField<9, 3, OperandA> operand_a; + BitField<12, 4, OperandB> operand_b; + BitField<16, 3, OperandA> operand_c; + BitField<19, 4, OperandB> operand_d; + BitField<23, 3, Swizzle> output_swizzle; + BitField<26, 2, WriteMask> output_mask; + BitField<28, 2, Pass> output_pass; + BitField<31, 1, u32> test_enabled; + }; + }; - std::array<f32, 4> tess_level_outer; - std::array<f32, 2> tess_level_inner; + struct ScissorTest { + u32 enable; + union { + BitField<0, 16, u32> min_x; + BitField<16, 16, u32> max_x; + }; + union { + BitField<0, 16, u32> min_y; + BitField<16, 16, u32> max_y; + }; + INSERT_PADDING_BYTES_NOINIT(0x4); + }; + static_assert(sizeof(ScissorTest) == 0x10); - INSERT_PADDING_WORDS_NOINIT(0x10); + struct VPCPerf { + union { + BitField<0, 8, u32> culled_small_lines; + BitField<8, 8, u32> culled_small_triangles; + BitField<16, 8, u32> nonculled_lines_and_points; + BitField<24, 8, u32> nonculled_triangles; + }; + }; - u32 rasterize_enable; + struct ConstantColorRendering { + u32 enabled; + u32 red; + u32 green; + u32 blue; + u32 alpha; + }; - std::array<TransformFeedbackBinding, NumTransformFeedbackBuffers> tfb_bindings; + struct VertexStreamSubstitute { + u32 address_high; + u32 address_low; - INSERT_PADDING_WORDS_NOINIT(0xC0); + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - std::array<TransformFeedbackLayout, NumTransformFeedbackBuffers> tfb_layouts; + struct VTGWarpWatermarks { + union { + BitField<0, 16, u32> low; + BitField<16, 16, u32> high; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x1); + struct SampleMask { + struct Target { + union { + BitField<0, 1, u32> raster_out; + BitField<4, 1, u32> color_target; + }; + u32 target; + }; + struct Pos { + u32 x0_y0; + u32 x1_y0; + u32 x0_y1; + u32 x1_y1; + }; + }; - u32 tfb_enabled; + enum class NonMultisampledZ : u32 { + PerSample = 0, + PixelCenter = 1, + }; - INSERT_PADDING_WORDS_NOINIT(0x2E); + enum class TIRMode : u32 { + 
Disabled = 0, + RasterNTargetM = 1, + }; - std::array<RenderTargetConfig, NumRenderTargets> rt; + enum class AntiAliasRaster : u32 { + Mode1x1 = 0, + Mode2x2 = 2, + Mode4x2_D3D = 4, + Mode2x1_D3D = 5, + Mode4x4 = 6, + }; - std::array<ViewportTransform, NumViewports> viewport_transform; + struct SurfaceClipIDMemory { + u32 address_high; + u32 address_low; - std::array<ViewPort, NumViewports> viewports; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - INSERT_PADDING_WORDS_NOINIT(0x1D); + struct TIRModulation { + enum class Component : u32 { + None = 0, + RGB = 1, + AlphaOnly = 2, + RGBA = 3, + }; + enum class Function : u32 { + Linear = 0, + Table = 1, + }; + Component component; + Function function; + }; - struct { - u32 first; - u32 count; - } vertex_buffer; + struct Zeta { + u32 address_high; + u32 address_low; + Tegra::DepthFormat format; + TileMode tile_mode; + u32 array_pitch; - DepthMode depth_mode; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - float clear_color[4]; - float clear_depth; + struct SurfaceClip { + union { + BitField<0, 16, u32> x; + BitField<16, 16, u32> width; + }; + union { + BitField<0, 16, u32> y; + BitField<16, 16, u32> height; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x3); + enum class L2CacheControlPolicy : u32 { + First = 0, + Normal = 1, + Last = 2, + }; - s32 clear_stencil; + struct L2CacheVAFRequests { + union { + BitField<0, 1, u32> system_memory_volatile; + BitField<4, 2, L2CacheControlPolicy> policy; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x2); + enum class ViewportMulticast : u32 { + ViewportOrder = 0, + PrimitiveOrder = 1, + }; - PolygonMode polygon_mode_front; - PolygonMode polygon_mode_back; + struct TIRModulationCoeff { + union { + BitField<0, 8, u32> table_v0; + BitField<8, 8, u32> table_v1; + BitField<16, 8, u32> table_v2; + BitField<24, 8, u32> table_v3; + }; + }; + static_assert(sizeof(TIRModulationCoeff) == 0x4); - INSERT_PADDING_WORDS_NOINIT(0x3); + struct ReduceColorThreshold { + union { + BitField<0, 8, u32> all_hit_once; + BitField<16, 8, u32> all_covered; + }; + }; - u32 polygon_offset_point_enable; - u32 polygon_offset_line_enable; - u32 polygon_offset_fill_enable; + struct ClearControl { + union { + BitField<0, 1, u32> respect_stencil_mask; + BitField<4, 1, u32> use_clear_rect; + BitField<8, 1, u32> use_scissor; + BitField<12, 1, u32> use_viewport_clip0; + }; + }; - u32 patch_vertices; + struct L2CacheRopNonInterlockedReads { + union { + BitField<4, 2, L2CacheControlPolicy> policy; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x4); + struct VertexOutputAttributeSkipMasks { + struct Attributes { + union { + BitField<0, 1, u32> attribute0_comp0; + BitField<1, 1, u32> attribute0_comp1; + BitField<2, 1, u32> attribute0_comp2; + BitField<3, 1, u32> attribute0_comp3; + BitField<4, 1, u32> attribute1_comp0; + BitField<5, 1, u32> attribute1_comp1; + BitField<6, 1, u32> attribute1_comp2; + BitField<7, 1, u32> attribute1_comp3; + BitField<8, 1, u32> attribute2_comp0; + BitField<9, 1, u32> attribute2_comp1; + BitField<10, 1, u32> attribute2_comp2; + BitField<11, 1, u32> attribute2_comp3; + BitField<12, 1, u32> attribute3_comp0; + BitField<13, 1, u32> attribute3_comp1; + BitField<14, 1, u32> attribute3_comp2; + BitField<15, 1, u32> attribute3_comp3; + BitField<16, 1, u32> attribute4_comp0; + BitField<17, 1, u32> attribute4_comp1; + BitField<18, 1, u32> attribute4_comp2; + BitField<19, 1, 
u32> attribute4_comp3; + BitField<20, 1, u32> attribute5_comp0; + BitField<21, 1, u32> attribute5_comp1; + BitField<22, 1, u32> attribute5_comp2; + BitField<23, 1, u32> attribute5_comp3; + BitField<24, 1, u32> attribute6_comp0; + BitField<25, 1, u32> attribute6_comp1; + BitField<26, 1, u32> attribute6_comp2; + BitField<27, 1, u32> attribute6_comp3; + BitField<28, 1, u32> attribute7_comp0; + BitField<29, 1, u32> attribute7_comp1; + BitField<30, 1, u32> attribute7_comp2; + BitField<31, 1, u32> attribute7_comp3; + }; + }; - u32 fragment_barrier; + std::array<Attributes, 2> a; + std::array<Attributes, 2> b; + }; - INSERT_PADDING_WORDS_NOINIT(0x7); + struct TIRControl { + union { + BitField<0, 1, u32> z_pass_pixel_count_use_raster_samples; + BitField<4, 1, u32> alpha_coverage_use_raster_samples; + BitField<1, 1, u32> reduce_coverage; + }; + }; - std::array<ScissorTest, NumViewports> scissor_test; + enum class FillViaTriangleMode : u32 { + Disabled = 0, + FillAll = 1, + FillBoundingBox = 2, + }; - INSERT_PADDING_WORDS_NOINIT(0x15); + struct PsTicketDispenserValue { + union { + BitField<0, 8, u32> index; + BitField<8, 16, u32> value; + }; + }; - s32 stencil_back_func_ref; - u32 stencil_back_mask; - u32 stencil_back_func_mask; + struct RegisterWatermarks { + union { + BitField<0, 16, u32> low; + BitField<16, 16, u32> high; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x5); + enum class InvalidateCacheLines : u32 { + All = 0, + One = 1, + }; - u32 invalidate_texture_data_cache; + struct InvalidateTextureDataCacheNoWfi { + union { + BitField<0, 1, InvalidateCacheLines> lines; + BitField<4, 22, u32> tag; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x1); + struct ZCullRegionEnable { + union { + BitField<0, 1, u32> enable_z; + BitField<4, 1, u32> enable_stencil; + BitField<1, 1, u32> rect_clear; + BitField<2, 1, u32> use_rt_array_index; + BitField<5, 16, u32> rt_array_index; + BitField<3, 1, u32> make_conservative; + }; + }; - u32 tiled_cache_barrier; + enum class FillMode : u32 { + Point = 1, + Wireframe = 2, + Solid = 3, + }; - INSERT_PADDING_WORDS_NOINIT(0x4); + enum class ShadeMode : u32 { + Flat = 0x1, + Gouraud = 0x2, + GL_Flat = 0x1D00, + GL_Smooth = 0x1D01, + }; - u32 color_mask_common; + enum class AlphaToCoverageDither : u32 { + Footprint_1x1 = 0, + Footprint_2x2 = 1, + Footprint_1x1_Virtual = 2, + }; - INSERT_PADDING_WORDS_NOINIT(0x2); + struct InlineIndex4x8Align { + union { + BitField<0, 30, u32> count; + BitField<30, 2, u32> start; + }; + }; - f32 depth_bounds[2]; + struct InlineIndex4x8Index { + union { + BitField<0, 8, u32> index0; + BitField<8, 8, u32> index1; + BitField<16, 8, u32> index2; + BitField<24, 8, u32> index3; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x2); + enum class D3DCullMode : u32 { + None = 0, + CW = 1, + CCW = 2, + }; - u32 rt_separate_frag_data; + struct BlendColor { + f32 r; + f32 g; + f32 b; + f32 a; + }; - INSERT_PADDING_WORDS_NOINIT(0x1); + struct StencilOp { + enum class Op : u32 { + Keep_D3D = 1, + Zero_D3D = 2, + Replace_D3D = 3, + IncrSaturate_D3D = 4, + DecrSaturate_D3D = 5, + Invert_D3D = 6, + Incr_D3D = 7, + Decr_D3D = 8, + + Keep_GL = 0x1E00, + Zero_GL = 0, + Replace_GL = 0x1E01, + IncrSaturate_GL = 0x1E02, + DecrSaturate_GL = 0x1E03, + Invert_GL = 0x150A, + Incr_GL = 0x8507, + Decr_GL = 0x8508, + }; - u32 multisample_raster_enable; - u32 multisample_raster_samples; - std::array<u32, 4> multisample_sample_mask; + Op fail; + Op zfail; + Op zpass; + ComparisonOp func; + }; - INSERT_PADDING_WORDS_NOINIT(0x5); + struct PsSaturate { + // Opposite of DepthMode + enum 
class Depth : u32 { + ZeroToOne = 0, + MinusOneToOne = 1, + }; - struct { - u32 address_high; - u32 address_low; - Tegra::DepthFormat format; - TileMode tile_mode; - u32 layer_stride; + union { + BitField<0, 1, u32> output0_enable; + BitField<1, 1, Depth> output0_range; + BitField<4, 1, u32> output1_enable; + BitField<5, 1, Depth> output1_range; + BitField<8, 1, u32> output2_enable; + BitField<9, 1, Depth> output2_range; + BitField<12, 1, u32> output3_enable; + BitField<13, 1, Depth> output3_range; + BitField<16, 1, u32> output4_enable; + BitField<17, 1, Depth> output4_range; + BitField<20, 1, u32> output5_enable; + BitField<21, 1, Depth> output5_range; + BitField<24, 1, u32> output6_enable; + BitField<25, 1, Depth> output6_range; + BitField<28, 1, u32> output7_enable; + BitField<29, 1, Depth> output7_range; + }; - GPUVAddr Address() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | - address_low); - } - } zeta; + bool AnyEnabled() const { + return output0_enable || output1_enable || output2_enable || output3_enable || + output4_enable || output5_enable || output6_enable || output7_enable; + } + }; - struct { - union { - BitField<0, 16, u32> x; - BitField<16, 16, u32> width; - }; - union { - BitField<0, 16, u32> y; - BitField<16, 16, u32> height; - }; - } render_area; + struct WindowOrigin { + enum class Mode : u32 { + UpperLeft = 0, + LowerLeft = 1, + }; + union { + BitField<0, 1, Mode> mode; + BitField<4, 1, u32> flip_y; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x3F); + struct IteratedBlendConstants { + u32 r; + u32 g; + u32 b; + INSERT_PADDING_BYTES_NOINIT(0x4); + }; + static_assert(sizeof(IteratedBlendConstants) == 0x10); + struct UserClip { + struct Enable { union { - BitField<0, 4, u32> stencil; - BitField<4, 4, u32> unknown; - BitField<8, 4, u32> scissor; - BitField<12, 4, u32> viewport; - } clear_flags; - - INSERT_PADDING_WORDS_NOINIT(0x10); - - u32 fill_rectangle; - - INSERT_PADDING_WORDS_NOINIT(0x2); - - u32 conservative_raster_enable; - - INSERT_PADDING_WORDS_NOINIT(0x5); - - std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format; + u32 raw; + BitField<0, 1, u32> plane0; + BitField<1, 1, u32> plane1; + BitField<2, 1, u32> plane2; + BitField<3, 1, u32> plane3; + BitField<4, 1, u32> plane4; + BitField<5, 1, u32> plane5; + BitField<6, 1, u32> plane6; + BitField<7, 1, u32> plane7; + }; - std::array<MsaaSampleLocation, 4> multisample_sample_locations; + bool AnyEnabled() const { + return plane0 || plane1 || plane2 || plane3 || plane4 || plane5 || plane6 || + plane7; + } + }; - INSERT_PADDING_WORDS_NOINIT(0x2); + struct Op { + enum class ClipOrCull : u32 { + Clip = 0, + Cull = 1, + }; union { - BitField<0, 1, u32> enable; - BitField<4, 3, u32> target; - } multisample_coverage_to_color; - - INSERT_PADDING_WORDS_NOINIT(0x8); - - struct { - union { - BitField<0, 4, u32> count; - BitField<4, 3, u32> map_0; - BitField<7, 3, u32> map_1; - BitField<10, 3, u32> map_2; - BitField<13, 3, u32> map_3; - BitField<16, 3, u32> map_4; - BitField<19, 3, u32> map_5; - BitField<22, 3, u32> map_6; - BitField<25, 3, u32> map_7; - }; - - u32 Map(std::size_t index) const { - const std::array<u32, NumRenderTargets> maps{map_0, map_1, map_2, map_3, - map_4, map_5, map_6, map_7}; - ASSERT(index < maps.size()); - return maps[index]; - } - } rt_control; - - INSERT_PADDING_WORDS_NOINIT(0x2); - - u32 zeta_width; - u32 zeta_height; - union { - BitField<0, 16, u32> zeta_depth; - BitField<16, 1, u32> zeta_volume; + u32 raw; + BitField<0, 1, ClipOrCull> plane0; + 
BitField<4, 1, ClipOrCull> plane1; + BitField<8, 1, ClipOrCull> plane2; + BitField<12, 1, ClipOrCull> plane3; + BitField<16, 1, ClipOrCull> plane4; + BitField<20, 1, ClipOrCull> plane5; + BitField<24, 1, ClipOrCull> plane6; + BitField<28, 1, ClipOrCull> plane7; }; + }; + }; - SamplerIndex sampler_index; + struct AntiAliasAlphaControl { + union { + BitField<0, 1, u32> alpha_to_coverage; + BitField<4, 1, u32> alpha_to_one; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x2); + struct RenderEnable { + enum class Override : u32 { + UseRenderEnable = 0, + AlwaysRender = 1, + NeverRender = 2, + }; - std::array<u32, 8> gp_passthrough_mask; + enum class Mode : u32 { + False = 0, + True = 1, + Conditional = 2, + IfEqual = 3, + IfNotEqual = 4, + }; - INSERT_PADDING_WORDS_NOINIT(0x1B); + u32 address_high; + u32 address_low; + Mode mode; - u32 depth_test_enable; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - INSERT_PADDING_WORDS_NOINIT(0x5); + struct TexSampler { + u32 address_high; + u32 address_low; + u32 limit; - u32 independent_blend_enable; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - u32 depth_write_enabled; + struct TexHeader { + u32 address_high; + u32 address_low; + u32 limit; - u32 alpha_test_enabled; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - INSERT_PADDING_WORDS_NOINIT(0x6); + enum class ZCullRegionFormat : u32 { + Z_4x4 = 0, + ZS_4x4 = 1, + Z_4x2 = 2, + Z_2x4 = 3, + Z_16x8_4x4 = 4, + Z_8x8_4x2 = 5, + Z_8x8_2x4 = 6, + Z_16x16_4x8 = 7, + Z_4x8_2x2 = 8, + ZS_16x8_4x2 = 9, + ZS_16x8_2x4 = 10, + ZS_8x8_2x2 = 11, + Z_4x8_1x1 = 12, + }; - u32 d3d_cull_mode; + struct RtLayer { + enum class Control { + LayerSelectsLayer = 0, + GeometryShaderSelectsLayer = 1, + }; - ComparisonOp depth_test_func; - float alpha_test_ref; - ComparisonOp alpha_test_func; - u32 draw_tfb_stride; - struct { - float r; - float g; - float b; - float a; - } blend_color; + union { + BitField<0, 16, u32> layer; + BitField<16, 1, u32> control; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x4); + struct InlineIndex2x16 { + union { + BitField<0, 31, u32> count; + BitField<31, 1, u32> start_odd; + }; + union { + BitField<0, 16, u32> even; + BitField<16, 16, u32> odd; + }; + }; - struct { - u32 separate_alpha; - Blend::Equation equation_rgb; - Blend::Factor factor_source_rgb; - Blend::Factor factor_dest_rgb; - Blend::Equation equation_a; - Blend::Factor factor_source_a; - INSERT_PADDING_WORDS_NOINIT(1); - Blend::Factor factor_dest_a; + struct VertexGlobalBaseOffset { + u32 address_high; + u32 address_low; - u32 enable_common; - u32 enable[NumRenderTargets]; - } blend; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - u32 stencil_enable; - StencilOp stencil_front_op_fail; - StencilOp stencil_front_op_zfail; - StencilOp stencil_front_op_zpass; - ComparisonOp stencil_front_func_func; - s32 stencil_front_func_ref; - u32 stencil_front_func_mask; - u32 stencil_front_mask; + struct ZCullRegionPixelOffset { + u32 width; + u32 height; + }; - INSERT_PADDING_WORDS_NOINIT(0x2); + struct PointSprite { + enum class RMode : u32 { + Zero = 0, + FromR = 1, + FromS = 2, + }; + enum class Origin : u32 { + Bottom = 0, + Top = 1, + }; + enum class Texture : u32 { + Passthrough = 0, + Generate = 1, + }; + + union { + 
BitField<0, 2, RMode> rmode; + BitField<2, 1, Origin> origin; + BitField<3, 1, Texture> texture0; + BitField<4, 1, Texture> texture1; + BitField<5, 1, Texture> texture2; + BitField<6, 1, Texture> texture3; + BitField<7, 1, Texture> texture4; + BitField<8, 1, Texture> texture5; + BitField<9, 1, Texture> texture6; + BitField<10, 1, Texture> texture7; + BitField<11, 1, Texture> texture8; + BitField<12, 1, Texture> texture9; + }; + }; - u32 frag_color_clamp; + struct ProgramRegion { + u32 address_high; + u32 address_low; - union { - BitField<0, 1, u32> y_negate; - BitField<4, 1, u32> triangle_rast_flip; - } screen_y_control; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - float line_width_smooth; - float line_width_aliased; + struct DefaultAttributes { + enum class Diffuse : u32 { + Vector_0001 = 0, + Vector_1111 = 1, + }; + enum class Specular : u32 { + Vector_0000 = 0, + Vector_0001 = 1, + }; + enum class Vector : u32 { + Vector_0000 = 0, + Vector_0001 = 1, + }; + enum class FixedFncTexture : u32 { + Vector_0000 = 0, + Vector_0001 = 1, + }; + enum class DX9Color0 : u32 { + Vector_0000 = 0, + Vector_1111 = 1, + }; + enum class DX9Color1To15 : u32 { + Vector_0000 = 0, + Vector_0001 = 1, + }; - INSERT_PADDING_WORDS_NOINIT(0x1B); + union { + BitField<0, 1, Diffuse> color_front_diffuse; + BitField<1, 1, Specular> color_front_specular; + BitField<2, 1, Vector> generic_vector; + BitField<3, 1, FixedFncTexture> fixed_fnc_texture; + BitField<4, 1, DX9Color0> dx9_color0; + BitField<5, 1, DX9Color1To15> dx9_color1_to_15; + }; + }; - u32 invalidate_sampler_cache_no_wfi; - u32 invalidate_texture_header_cache_no_wfi; + struct Draw { + enum class PrimitiveId : u32 { + First = 0, + Unchanged = 1, + }; + enum class InstanceId : u32 { + First = 0, + Subsequent = 1, + Unchanged = 2, + }; + enum class SplitMode : u32 { + NormalBeginNormal = 0, + NormalBeginOpen = 1, + OpenBeginOpen = 2, + OpenBeginNormal = 3, + }; - INSERT_PADDING_WORDS_NOINIT(0x2); + u32 end; + union { + u32 begin; + BitField<0, 16, PrimitiveTopology> topology; + BitField<24, 1, PrimitiveId> primitive_id; + BitField<26, 2, InstanceId> instance_id; + BitField<29, 2, SplitMode> split_mode; + }; + }; - u32 vb_element_base; - u32 vb_base_instance; + struct VertexIdCopy { + union { + BitField<0, 1, u32> enable; + BitField<4, 8, u32> attribute_slot; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x35); + struct ShaderBasedCull { + union { + BitField<1, 1, u32> batch_cull_enable; + BitField<0, 1, u32> before_fetch_enable; + }; + }; - u32 clip_distance_enabled; + struct ClassVersion { + union { + BitField<0, 16, u32> current; + BitField<16, 16, u32> oldest_supported; + }; + }; - u32 samplecnt_enable; + struct PrimitiveRestart { + u32 enabled; + u32 index; + }; - float point_size; + struct OutputVertexId { + union { + BitField<12, 1, u32> uses_array_start; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x1); + enum class PointCenterMode : u32 { + GL = 0, + D3D = 1, + }; - u32 point_sprite_enable; + enum class LineSmoothParams : u32 { + Falloff_1_00 = 0, + Falloff_1_33 = 1, + Falloff_1_66 = 2, + }; - INSERT_PADDING_WORDS_NOINIT(0x3); + struct LineSmoothEdgeTable { + union { + BitField<0, 8, u32> v0; + BitField<8, 8, u32> v1; + BitField<16, 8, u32> v2; + BitField<24, 8, u32> v3; + }; + }; - CounterReset counter_reset; + struct LineStippleParams { + union { + BitField<0, 8, u32> factor; + BitField<8, 16, u32> pattern; + }; + }; - u32 multisample_enable; + enum class ProvokingVertex : 
u32 { + First = 0, + Last = 1, + }; - u32 zeta_enable; + struct ShaderControl { + enum class Partial : u32 { + Zero = 0, + Infinity = 1, + }; + enum class FP32NanBehavior : u32 { + Legacy = 0, + FP64Compatible = 1, + }; + enum class FP32F2INanBehavior : u32 { + PassZero = 0, + PassIndefinite = 1, + }; - union { - BitField<0, 1, u32> alpha_to_coverage; - BitField<4, 1, u32> alpha_to_one; - } multisample_control; + union { + BitField<0, 1, Partial> default_partial; + BitField<1, 1, FP32NanBehavior> fp32_nan_behavior; + BitField<2, 1, FP32F2INanBehavior> fp32_f2i_nan_behavior; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x4); + struct SphVersion { + union { + BitField<0, 16, u32> current; + BitField<16, 16, u32> oldest_supported; + }; + }; - struct { - u32 address_high; - u32 address_low; - ConditionMode mode; + struct AlphaToCoverageOverride { + union { + BitField<0, 1, u32> qualify_by_anti_alias_enable; + BitField<1, 1, u32> qualify_by_ps_sample_mask_enable; + }; + }; - GPUVAddr Address() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | - address_low); - } - } condition; + struct AamVersion { + union { + BitField<0, 16, u32> current; + BitField<16, 16, u32> oldest_supported; + }; + }; - struct { - u32 address_high; - u32 address_low; - u32 limit; + struct IndexBuffer { + u32 start_addr_high; + u32 start_addr_low; + u32 limit_addr_high; + u32 limit_addr_low; + IndexFormat format; + u32 first; + u32 count; + + unsigned FormatSizeInBytes() const { + switch (format) { + case IndexFormat::UnsignedByte: + return 1; + case IndexFormat::UnsignedShort: + return 2; + case IndexFormat::UnsignedInt: + return 4; + } + ASSERT(false); + return 1; + } - GPUVAddr Address() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | - address_low); - } - } tsc; + GPUVAddr StartAddress() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(start_addr_high) << 32) | + start_addr_low); + } - INSERT_PADDING_WORDS_NOINIT(0x1); + GPUVAddr EndAddress() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_addr_high) << 32) | + limit_addr_low); + } - float polygon_offset_factor; + /// Adjust the index buffer offset so it points to the first desired index. + GPUVAddr IndexStart() const { + return StartAddress() + + static_cast<size_t>(first) * static_cast<size_t>(FormatSizeInBytes()); + } + }; - u32 line_smooth_enable; + struct IndexBufferSmall { + union { + BitField<0, 16, u32> first; + BitField<16, 12, u32> count; + BitField<28, 4, PrimitiveTopology> topology; + }; + }; - struct { - u32 address_high; - u32 address_low; - u32 limit; + struct VertexStreamInstances { + std::array<u32, NumVertexArrays> is_instanced; - GPUVAddr Address() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | - address_low); - } - } tic; + /// Returns whether the vertex array specified by index is supposed to be + /// accessed per instance or not. 
+ bool IsInstancingEnabled(std::size_t index) const { + return is_instanced[index]; + } + }; - INSERT_PADDING_WORDS_NOINIT(0x5); + struct AttributePointSize { + union { + BitField<0, 1, u32> enabled; + BitField<4, 8, u32> slot; + }; + }; - u32 stencil_two_side_enable; - StencilOp stencil_back_op_fail; - StencilOp stencil_back_op_zfail; - StencilOp stencil_back_op_zpass; - ComparisonOp stencil_back_func_func; + struct ViewportClipControl { + enum class GeometryGuardband : u32 { + Scale256 = 0, + Scale1 = 1, + }; + enum class GeometryClip : u32 { + WZero = 0, + Passthrough = 1, + FrustumXY = 2, + FrustumXYZ = 3, + WZeroNoZCull = 4, + FrustumZ = 5, + WZeroTriFillOrClip = 6, + }; + enum class GeometryGuardbandZ : u32 { + SameAsXY = 0, + Scale256 = 1, + Scale1 = 2, + }; - INSERT_PADDING_WORDS_NOINIT(0x4); + union { + BitField<0, 1, u32> depth_0_to_1; + BitField<3, 1, u32> pixel_min_z; + BitField<4, 1, u32> pixel_max_z; + BitField<7, 1, GeometryGuardband> geometry_guardband; + BitField<11, 3, GeometryClip> geometry_clip; + BitField<1, 2, GeometryGuardbandZ> geometry_guardband_z; + }; + }; - u32 framebuffer_srgb; + enum class PrimitiveTopologyControl : u32 { + UseInBeginMethods = 0, + UseSeparateState = 1, + }; - float polygon_offset_units; + struct WindowClip { + enum class Type : u32 { + Inclusive = 0, + Exclusive = 1, + ClipAll = 2, + }; - INSERT_PADDING_WORDS_NOINIT(0x4); + u32 enable; + Type type; + }; - Tegra::Texture::MsaaMode multisample_mode; + enum class InvalidateZCull : u32 { + Invalidate = 0, + }; - INSERT_PADDING_WORDS_NOINIT(0xC); + struct ZCull { + union { + BitField<0, 1, u32> z_enable; + BitField<1, 1, u32> stencil_enable; + }; + union { + BitField<0, 1, u32> z_min_enbounded; + BitField<1, 1, u32> z_max_unbounded; + }; + }; - union { - BitField<2, 1, u32> coord_origin; - BitField<3, 10, u32> enable; - } point_coord_replace; - - struct { - u32 code_address_high; - u32 code_address_low; - - GPUVAddr CodeAddress() const { - return static_cast<GPUVAddr>( - (static_cast<GPUVAddr>(code_address_high) << 32) | code_address_low); - } - } code_address; - INSERT_PADDING_WORDS_NOINIT(1); - - struct { - u32 vertex_end_gl; - union { - u32 vertex_begin_gl; - BitField<0, 16, PrimitiveTopology> topology; - BitField<26, 1, u32> instance_next; - BitField<27, 1, u32> instance_cont; - }; - } draw; - - INSERT_PADDING_WORDS_NOINIT(0xA); - - struct { - u32 enabled; - u32 index; - } primitive_restart; - - INSERT_PADDING_WORDS_NOINIT(0xE); - - u32 provoking_vertex_last; - - INSERT_PADDING_WORDS_NOINIT(0x50); - - struct { - u32 start_addr_high; - u32 start_addr_low; - u32 end_addr_high; - u32 end_addr_low; - IndexFormat format; - u32 first; - u32 count; - - unsigned FormatSizeInBytes() const { - switch (format) { - case IndexFormat::UnsignedByte: - return 1; - case IndexFormat::UnsignedShort: - return 2; - case IndexFormat::UnsignedInt: - return 4; - } - ASSERT(false); - return 1; - } - - GPUVAddr StartAddress() const { - return static_cast<GPUVAddr>( - (static_cast<GPUVAddr>(start_addr_high) << 32) | start_addr_low); - } - - GPUVAddr EndAddress() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(end_addr_high) << 32) | - end_addr_low); - } - - /// Adjust the index buffer offset so it points to the first desired index. 
- GPUVAddr IndexStart() const { - return StartAddress() + static_cast<size_t>(first) * - static_cast<size_t>(FormatSizeInBytes()); - } - } index_array; + struct LogicOp { + enum class Op : u32 { + Clear = 0x1500, + And = 0x1501, + AndReverse = 0x1502, + Copy = 0x1503, + AndInverted = 0x1504, + NoOp = 0x1505, + Xor = 0x1506, + Or = 0x1507, + Nor = 0x1508, + Equiv = 0x1509, + Invert = 0x150A, + OrReverse = 0x150B, + CopyInverted = 0x150C, + OrInverted = 0x150D, + Nand = 0x150E, + Set = 0x150F, + }; - union { - BitField<0, 16, u32> first; - BitField<16, 16, u32> count; - } small_index; + u32 enable; + Op op; + }; - union { - BitField<0, 16, u32> first; - BitField<16, 16, u32> count; - } small_index_2; + struct ClearSurface { + union { + u32 raw; + BitField<0, 1, u32> Z; + BitField<1, 1, u32> S; + BitField<2, 1, u32> R; + BitField<3, 1, u32> G; + BitField<4, 1, u32> B; + BitField<5, 1, u32> A; + BitField<6, 4, u32> RT; + BitField<10, 16, u32> layer; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x5); + struct ReportSemaphore { + struct Compare { + u32 initial_sequence; + u32 initial_mode; + u32 unknown1; + u32 unknown2; + u32 current_sequence; + u32 current_mode; + }; - INSERT_PADDING_WORDS_NOINIT(0x1F); + enum class Operation : u32 { + Release = 0, + Acquire = 1, + ReportOnly = 2, + Trap = 3, + }; - float polygon_offset_clamp; + enum class Release : u32 { + AfterAllPreceedingReads = 0, + AfterAllPreceedingWrites = 1, + }; - struct { - u32 is_instanced[NumVertexArrays]; + enum class Acquire : u32 { + BeforeAnyFollowingWrites = 0, + BeforeAnyFollowingReads = 1, + }; - /// Returns whether the vertex array specified by index is supposed to be - /// accessed per instance or not. - bool IsInstancingEnabled(std::size_t index) const { - return is_instanced[index]; - } - } instanced_arrays; + enum class Location : u32 { + None = 0, + VertexFetch = 1, + VertexShader = 2, + VPC = 4, + StreamingOutput = 5, + GeometryShader = 6, + ZCull = 7, + TessellationInit = 8, + TessellationShader = 9, + PixelShader = 10, + DepthTest = 12, + All = 15, + }; - INSERT_PADDING_WORDS_NOINIT(0x4); + enum class Comparison : u32 { + NotEqual = 0, + GreaterOrEqual = 1, + }; - union { - BitField<0, 1, u32> enable; - BitField<4, 8, u32> unk4; - } vp_point_size; + enum class Report : u32 { + Payload = 0, // "None" in docs, but confirmed via hardware to return the payload + VerticesGenerated = 1, + ZPassPixelCount = 2, + PrimitivesGenerated = 3, + AlphaBetaClocks = 4, + VertexShaderInvocations = 5, + StreamingPrimitivesNeededMinusSucceeded = 6, + GeometryShaderInvocations = 7, + GeometryShaderPrimitivesGenerated = 9, + ZCullStats0 = 10, + StreamingPrimitivesSucceeded = 11, + ZCullStats1 = 12, + StreamingPrimitivesNeeded = 13, + ZCullStats2 = 14, + ClipperInvocations = 15, + ZCullStats3 = 16, + ClipperPrimitivesGenerated = 17, + VtgPrimitivesOut = 18, + PixelShaderInvocations = 19, + ZPassPixelCount64 = 21, + IEEECleanColorTarget = 24, + IEEECleanZetaTarget = 25, + StreamingByteCount = 26, + TessellationInitInvocations = 27, + BoundingRectangle = 28, + TessellationShaderInvocations = 29, + TotalStreamingPrimitivesNeededMinusSucceeded = 30, + TessellationShaderPrimitivesGenerated = 31, + }; - INSERT_PADDING_WORDS_NOINIT(1); + u32 address_high; + u32 address_low; + u32 payload; + union { + u32 raw; + BitField<0, 2, Operation> operation; + BitField<4, 1, Release> release; + BitField<8, 1, Acquire> acquire; + BitField<12, 4, Location> location; + BitField<16, 1, Comparison> comparison; + BitField<20, 1, u32> awaken_enable; + BitField<23, 5, 
Report> report; + BitField<28, 1, u32> short_query; + BitField<5, 3, u32> sub_report; + BitField<21, 1, u32> dword_number; + BitField<2, 1, u32> disable_flush; + BitField<3, 1, u32> reduction_enable; + BitField<9, 3, ReductionOp> reduction_op; + BitField<17, 2, u32> format_signed; + } query; - u32 cull_test_enabled; - FrontFace front_face; - CullFace cull_face; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - u32 pixel_center_integer; + struct VertexStream { + union { + BitField<0, 12, u32> stride; + BitField<12, 1, u32> enable; + }; + u32 address_high; + u32 address_low; + u32 frequency; - INSERT_PADDING_WORDS_NOINIT(0x1); + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } - u32 viewport_transform_enabled; + bool IsEnabled() const { + return enable != 0 && Address() != 0; + } + }; + static_assert(sizeof(VertexStream) == 0x10); - INSERT_PADDING_WORDS_NOINIT(0x3); + struct VertexStreamLimit { + u32 address_high; + u32 address_low; - union { - BitField<0, 1, u32> depth_range_0_1; - BitField<3, 1, u32> depth_clamp_near; - BitField<4, 1, u32> depth_clamp_far; - BitField<11, 1, u32> depth_clamp_disabled; - } view_volume_clip_control; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; + static_assert(sizeof(VertexStreamLimit) == 0x8); - INSERT_PADDING_WORDS_NOINIT(0xC); + enum class ShaderType : u32 { + VertexA = 0, + VertexB = 1, + TessellationInit = 2, + Tessellation = 3, + Geometry = 4, + Pixel = 5, + }; - PrimitiveTopologyOverride topology_override; + struct Pipeline { + union { + BitField<0, 1, u32> enable; + BitField<4, 4, ShaderType> program; + }; + u32 offset; + u32 reservedA; + u32 register_count; + u32 binding_group; + std::array<u32, 4> reserved; + INSERT_PADDING_BYTES_NOINIT(0x1C); + }; + static_assert(sizeof(Pipeline) == 0x40); - INSERT_PADDING_WORDS_NOINIT(0x12); + bool IsShaderConfigEnabled(std::size_t index) const { + // The VertexB is always enabled. 
+ if (index == static_cast<std::size_t>(ShaderType::VertexB)) { + return true; + } + return pipelines[index].enable != 0; + } - u32 depth_bounds_enable; + bool IsShaderConfigEnabled(ShaderType type) const { + return IsShaderConfigEnabled(static_cast<std::size_t>(type)); + } - INSERT_PADDING_WORDS_NOINIT(1); + struct ConstantBuffer { + u32 size; + u32 address_high; + u32 address_low; + u32 offset; + std::array<u32, NumCBData> buffer; - struct { - u32 enable; - LogicOperation operation; - } logic_op; + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + }; - INSERT_PADDING_WORDS_NOINIT(0x1); + struct BindGroup { + std::array<u32, 4> reserved; + union { + u32 raw_config; + BitField<0, 1, u32> valid; + BitField<4, 5, u32> shader_slot; + }; + INSERT_PADDING_BYTES_NOINIT(0xC); + }; + static_assert(sizeof(BindGroup) == 0x20); + struct StreamOutLayout { + union { + BitField<0, 8, u32> attribute0; + BitField<8, 8, u32> attribute1; + BitField<16, 8, u32> attribute2; + BitField<24, 8, u32> attribute3; + }; + }; + + struct ShaderPerformance { + struct ControlA { union { - u32 raw; - BitField<0, 1, u32> Z; - BitField<1, 1, u32> S; - BitField<2, 1, u32> R; - BitField<3, 1, u32> G; - BitField<4, 1, u32> B; - BitField<5, 1, u32> A; - BitField<6, 4, u32> RT; - BitField<10, 11, u32> layer; - } clear_buffers; - INSERT_PADDING_WORDS_NOINIT(0xB); - std::array<ColorMask, NumRenderTargets> color_mask; - INSERT_PADDING_WORDS_NOINIT(0x38); - - struct { - u32 query_address_high; - u32 query_address_low; - u32 query_sequence; - union { - u32 raw; - BitField<0, 2, QueryOperation> operation; - BitField<4, 1, u32> fence; - BitField<12, 4, QueryUnit> unit; - BitField<16, 1, QuerySyncCondition> sync_cond; - BitField<23, 5, QuerySelect> select; - BitField<28, 1, u32> short_query; - } query_get; - - GPUVAddr QueryAddress() const { - return static_cast<GPUVAddr>( - (static_cast<GPUVAddr>(query_address_high) << 32) | query_address_low); - } - } query; - - INSERT_PADDING_WORDS_NOINIT(0x3C); - - struct { - union { - BitField<0, 12, u32> stride; - BitField<12, 1, u32> enable; - }; - u32 start_high; - u32 start_low; - u32 divisor; - - GPUVAddr StartAddress() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(start_high) << 32) | - start_low); - } - - bool IsEnabled() const { - return enable != 0 && StartAddress() != 0; - } - - } vertex_array[NumVertexArrays]; - - Blend independent_blend[NumRenderTargets]; - - struct { - u32 limit_high; - u32 limit_low; - - GPUVAddr LimitAddress() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) | - limit_low); - } - } vertex_array_limit[NumVertexArrays]; - - struct { - union { - BitField<0, 1, u32> enable; - BitField<4, 4, ShaderProgram> program; - }; - u32 offset; - INSERT_PADDING_WORDS_NOINIT(14); - } shader_config[MaxShaderProgram]; - - INSERT_PADDING_WORDS_NOINIT(0x60); - - u32 firmware[0x20]; - - struct { - u32 cb_size; - u32 cb_address_high; - u32 cb_address_low; - u32 cb_pos; - std::array<u32, NumCBData> cb_data; - - GPUVAddr BufferAddress() const { - return static_cast<GPUVAddr>( - (static_cast<GPUVAddr>(cb_address_high) << 32) | cb_address_low); - } - } const_buffer; - - INSERT_PADDING_WORDS_NOINIT(0x10); - - struct { - union { - u32 raw_config; - BitField<0, 1, u32> valid; - BitField<4, 5, u32> index; - }; - INSERT_PADDING_WORDS_NOINIT(7); - } cb_bind[MaxShaderStage]; - - INSERT_PADDING_WORDS_NOINIT(0x56); - - u32 tex_cb_index; - - INSERT_PADDING_WORDS_NOINIT(0x7D); - - 
std::array<std::array<u8, 128>, NumTransformFeedbackBuffers> tfb_varying_locs; - - INSERT_PADDING_WORDS_NOINIT(0x298); - - struct { - /// Compressed address of a buffer that holds information about bound SSBOs. - /// This address is usually bound to c0 in the shaders. - u32 buffer_address; - - GPUVAddr BufferAddress() const { - return static_cast<GPUVAddr>(buffer_address) << 8; - } - } ssbo_info; + BitField<0, 2, u32> event0; + BitField<2, 3, u32> bit0; + BitField<5, 2, u32> event1; + BitField<7, 3, u32> bit1; + BitField<10, 2, u32> event2; + BitField<12, 3, u32> bit2; + BitField<15, 2, u32> event3; + BitField<17, 3, u32> bit3; + BitField<20, 2, u32> event4; + BitField<22, 3, u32> bit4; + BitField<25, 2, u32> event5; + BitField<27, 3, u32> bit5; + BitField<30, 2, u32> spare; + }; + }; - INSERT_PADDING_WORDS_NOINIT(0x11); + struct ControlB { + union { + BitField<0, 1, u32> edge; + BitField<1, 2, u32> mode; + BitField<3, 1, u32> windowed; + BitField<4, 16, u32> func; + }; + }; - struct { - u32 address[MaxShaderStage]; - u32 size[MaxShaderStage]; - } tex_info_buffers; + std::array<u32, 8> values_upper; + std::array<u32, 8> values; + std::array<u32, 8> events; + std::array<ControlA, 8> control_a; + std::array<ControlB, 8> control_b; + u32 trap_control_mask; + u32 start_shader_mask; + u32 stop_shader_mask; + }; - INSERT_PADDING_WORDS_NOINIT(0xCC); + // clang-format off + union { + struct { + ID object_id; ///< 0x0000 + INSERT_PADDING_BYTES_NOINIT(0xFC); + u32 nop; ///< 0x0100 + Notify notify; ///< 0x0104 + u32 wait_for_idle; ///< 0x0110 + LoadMME load_mme; ///< 0x0114 + ShadowRamControl shadow_ram_control; ///< 0x0124 + PeerSemaphore peer; ///< 0x0128 + GlobalRender global_render; ///< 0x0130 + u32 go_idle; ///< 0x013C + u32 trigger; ///< 0x0140 + u32 trigger_wfi; ///< 0x0144 + INSERT_PADDING_BYTES_NOINIT(0x8); + u32 instrumentation_method_header; ///< 0x0150 + u32 instrumentation_method_data; ///< 0x0154 + INSERT_PADDING_BYTES_NOINIT(0x28); + Upload::Registers upload; ///< 0x0180 + LaunchDMA launch_dma; ///< 0x01B0 + u32 inline_data; ///< 0x01B4 + INSERT_PADDING_BYTES_NOINIT(0x24); + I2M i2m; ///< 0x01DC + u32 run_ds_now; ///< 0x0200 + OpportunisticEarlyZ opportunistic_early_z; ///< 0x0204 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 aliased_line_width_enabled; ///< 0x020C + u32 mandated_early_z; ///< 0x0210 + GeometryShaderDmFifo gs_dm_fifo; ///< 0x0214 + L2CacheControl l2_cache_control; ///< 0x0218 + InvalidateShaderCache invalidate_shader_cache; ///< 0x021C + INSERT_PADDING_BYTES_NOINIT(0xA8); + SyncInfo sync_info; ///< 0x02C8 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 prim_circular_buffer_throttle; ///< 0x02D0 + u32 flush_invalidate_rop_mini_cache; ///< 0x02D4 + SurfaceClipBlockId surface_clip_block_id; ///< 0x02D8 + u32 alpha_circular_buffer_size; ///< 0x02DC + DecompressSurface decompress_surface; ///< 0x02E0 + ZCullRopBypass zcull_rop_bypass; ///< 0x02E4 + ZCullSubregion zcull_subregion; ///< 0x02E8 + RasterBoundingBox raster_bounding_box; ///< 0x02EC + u32 peer_semaphore_release; ///< 0x02F0 + u32 iterated_blend_optimization; ///< 0x02F4 + ZCullSubregionAllocation zcull_subregion_allocation; ///< 0x02F8 + ZCullSubregionAlgorithm zcull_subregion_algorithm; ///< 0x02FC + PixelShaderOutputSampleMaskUsage ps_output_sample_mask_usage; ///< 0x0300 + u32 draw_zero_index; ///< 0x0304 + L1Configuration l1_configuration; ///< 0x0308 + u32 render_enable_control_load_const_buffer; ///< 0x030C + SPAVersion spa_version; ///< 0x0310 + u32 ieee_clean_update; ///< 0x0314 + SnapGrid snap_grid; ///< 0x0318 + 
Tessellation tessellation; ///< 0x0320 + SubTilingPerf sub_tiling_perf; ///< 0x0360 + ZCullSubregionReport zcull_subregion_report; ///< 0x036C + BalancedPrimitiveWorkload balanced_primitive_workload; ///< 0x0374 + u32 max_patches_per_batch; ///< 0x0378 + u32 rasterize_enable; ///< 0x037C + TransformFeedback transform_feedback; ///< 0x0380 + u32 raster_input; ///< 0x0740 + u32 transform_feedback_enabled; ///< 0x0744 + u32 primitive_restart_topology_change_enable; ///< 0x0748 + u32 alpha_fraction; ///< 0x074C + INSERT_PADDING_BYTES_NOINIT(0x4); + HybridAntiAliasControl hybrid_aa_control; ///< 0x0754 + INSERT_PADDING_BYTES_NOINIT(0x24); + ShaderLocalMemory shader_local_memory; ///< 0x077C + u32 color_zero_bandwidth_clear; ///< 0x07A4 + u32 z_zero_bandwidth_clear; ///< 0x07A8 + u32 isbe_save_restore_program_offset; ///< 0x07AC + INSERT_PADDING_BYTES_NOINIT(0x10); + ZCullRegion zcull_region; ///< 0x07C0 + ZetaReadOnly zeta_read_only; ///< 0x07F8 + INSERT_PADDING_BYTES_NOINIT(0x4); + std::array<RenderTargetConfig, NumRenderTargets> rt; ///< 0x0800 + std::array<ViewportTransform, NumViewports> viewport_transform; ///< 0x0A00 + std::array<Viewport, NumViewports> viewports; ///< 0x0C00 + std::array<Window, 8> windows; ///< 0x0D00 + std::array<ClipIdExtent, 4> clip_id_extent; ///< 0x0D40 + u32 max_geometry_instances_per_task; ///< 0x0D60 + VisibleCallLimit visible_call_limit; ///< 0x0D64 + StatisticsCounter statistics_count; ///< 0x0D68 + ClearRect clear_rect; ///< 0x0D6C + VertexBuffer vertex_buffer; ///< 0x0D74 + DepthMode depth_mode; ///< 0x0D7C + std::array<f32, 4> clear_color; ///< 0x0D80 + f32 clear_depth; ///< 0x0D90 + u32 shader_cache_icache_prefetch; ///< 0x0D94 + u32 force_transition_to_beta; ///< 0x0D98 + u32 reduce_colour_thresholds; ///< 0x0D9C + s32 clear_stencil; ///< 0x0DA0 + InvalidateShaderCacheNoWFI invalidate_shader_cache_no_wfi; ///< 0x0DA4 + ZCullSerialization zcull_serialization; ///< 0x0DA8 + PolygonMode polygon_mode_front; ///< 0x0DAC + PolygonMode polygon_mode_back; ///< 0x0DB0 + u32 polygon_smooth; ///< 0x0DB4 + u32 zeta_mark_clean_ieee; ///< 0x0DB8 + ZCullDirFormat zcull_dir_format; ///< 0x0DBC + u32 polygon_offset_point_enable; ///< 0x0DC0 + u32 polygon_offset_line_enable; ///< 0x0DC4 + u32 polygon_offset_fill_enable; ///< 0x0DC8 + u32 patch_vertices; ///< 0x0DCC + IteratedBlend iterated_blend; ///< 0x0DD0 + ZCullCriterion zcull_criteria; ///< 0x0DD8 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 fragment_barrier; ///< 0x0DE0 + u32 sm_timeout; ///< 0x0DE4 + u32 primitive_restart_array; ///< 0x0DE8 + INSERT_PADDING_BYTES_NOINIT(0x4); + LoadIteratedBlend load_iterated_blend; ///< 0x0DF0 + u32 window_offset_x; ///< 0x0DF8 + u32 window_offset_y; ///< 0x0DFC + std::array<ScissorTest, NumViewports> scissor_test; ///< 0x0E00 + INSERT_PADDING_BYTES_NOINIT(0x10); + u32 select_texture_headers; ///< 0x0F10 + VPCPerf vpc_perf; ///< 0x0F14 + u32 pm_local_trigger; ///< 0x0F18 + u32 post_z_pixel_imask; ///< 0x0F1C + INSERT_PADDING_BYTES_NOINIT(0x20); + ConstantColorRendering const_color_rendering; ///< 0x0F40 + s32 stencil_back_ref; ///< 0x0F54 + u32 stencil_back_mask; ///< 0x0F58 + u32 stencil_back_func_mask; ///< 0x0F5C + INSERT_PADDING_BYTES_NOINIT(0x24); + VertexStreamSubstitute vertex_stream_substitute; ///< 0x0F84 + u32 line_mode_clip_generated_edge_do_not_draw; ///< 0x0F8C + u32 color_mask_common; ///< 0x0F90 + INSERT_PADDING_BYTES_NOINIT(0x4); + VTGWarpWatermarks vtg_warp_watermarks; ///< 0x0F98 + f32 depth_bounds[2]; ///< 0x0F9C + SampleMask::Target sample_mask_target; ///< 0x0FA4 
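Throughout this register map, the trailing ///< comments are byte offsets into the union, so consecutive annotations must differ by exactly the size of the intervening field (or by an explicit INSERT_PADDING_BYTES_NOINIT). A small illustration of how that invariant can be pinned down with offsetof; ExampleRegs and its members are hypothetical stand-ins, not the real struct:

    #include <cstddef>
    #include <cstdint>

    // Fields declared back to back, with static_asserts documenting each
    // byte offset in the style of the ///< annotations above.
    struct ExampleRegs {
        std::uint32_t depth_bounds[2]; // offset 0x00, 8 bytes
        std::uint32_t sample_mask;     // offset 0x08
        std::uint32_t mrt_enable;      // offset 0x0C
    };
    static_assert(offsetof(ExampleRegs, sample_mask) == 0x08);
    static_assert(offsetof(ExampleRegs, mrt_enable) == 0x0C);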
+            u32 color_target_mrt_enable; ///< 0x0FAC
+            NonMultisampledZ non_multisampled_z; ///< 0x0FB0
+            TIRMode tir_mode; ///< 0x0FB4
+            AntiAliasRaster anti_alias_raster; ///< 0x0FB8
+            SampleMask::Pos sample_mask_pos; ///< 0x0FBC
+            SurfaceClipIDMemory surface_clip_id_memory; ///< 0x0FCC
+            TIRModulation tir_modulation; ///< 0x0FD4
+            u32 blend_control_allow_float_pixel_kills; ///< 0x0FDC
+            Zeta zeta; ///< 0x0FE0
+            SurfaceClip surface_clip; ///< 0x0FF4
+            u32 tiled_cache_treat_heavy_as_light; ///< 0x0FFC
+            L2CacheVAFRequests l2_cache_vaf; ///< 0x1000
+            ViewportMulticast viewport_multicast; ///< 0x1004
+            u32 tessellation_cut_height; ///< 0x1008
+            u32 max_gs_instances_per_task; ///< 0x100C
+            u32 max_gs_output_vertices_per_task; ///< 0x1010
+            u32 reserved_sw_method0; ///< 0x1014
+            u32 gs_output_cb_storage_multiplier; ///< 0x1018
+            u32 beta_cb_storage_constant; ///< 0x101C
+            u32 ti_output_cb_storage_multiplier; ///< 0x1020
+            u32 alpha_cb_storage_constraint; ///< 0x1024
+            u32 reserved_sw_method1; ///< 0x1028
+            u32 reserved_sw_method2; ///< 0x102C
+            std::array<TIRModulationCoeff, 5> tir_modulation_coeff; ///< 0x1030
+            std::array<u32, 15> spare_nop; ///< 0x1044
+            INSERT_PADDING_BYTES_NOINIT(0x30);
+            std::array<u32, 7> reserved_sw_method3_to_7; ///< 0x10B0
+            ReduceColorThreshold reduce_color_thresholds_unorm8; ///< 0x10CC
+            std::array<u32, 4> reserved_sw_method10_to_13; ///< 0x10D0
+            ReduceColorThreshold reduce_color_thresholds_unorm10; ///< 0x10E0
+            ReduceColorThreshold reduce_color_thresholds_unorm16; ///< 0x10E4
+            ReduceColorThreshold reduce_color_thresholds_fp11; ///< 0x10E8
+            ReduceColorThreshold reduce_color_thresholds_fp16; ///< 0x10EC
+            ReduceColorThreshold reduce_color_thresholds_srgb8; ///< 0x10F0
+            u32 unbind_all_constant_buffers; ///< 0x10F4
+            ClearControl clear_control; ///< 0x10F8
+            L2CacheRopNonInterlockedReads l2_cache_rop_non_interlocked_reads; ///< 0x10FC
+            u32 reserved_sw_method14; ///< 0x1100
+            u32 reserved_sw_method15; ///< 0x1104
+            INSERT_PADDING_BYTES_NOINIT(0x4);
+            u32 no_operation_data_high; ///< 0x110C
+            u32 depth_bias_control; ///< 0x1110
+            u32 pm_trigger_end; ///< 0x1114
+            u32 vertex_id_base; ///< 0x1118
+            u32 stencil_compression_enabled; ///< 0x111C
+            VertexOutputAttributeSkipMasks vertex_output_attribute_skip_masks; ///< 0x1120
+            TIRControl tir_control; ///< 0x1130
+            u32 mutable_method_treat_mutable_as_heavy; ///< 0x1134
+            u32 post_ps_use_pre_ps_coverage; ///< 0x1138
+            FillViaTriangleMode fill_via_triangle_mode; ///< 0x113C
+            u32 blend_per_format_snorm8_unorm16_snorm16_enabled; ///< 0x1140
+            u32 flush_pending_writes_sm_global_store; ///< 0x1144
+            INSERT_PADDING_BYTES_NOINIT(0x18);
+            std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format; ///< 0x1160
+            std::array<MsaaSampleLocation, 4> multisample_sample_locations; ///< 0x11E0
+            u32 offset_render_target_index_by_viewport_index; ///< 0x11F0
+            u32 force_heavy_method_sync; ///< 0x11F4
+            MultisampleCoverageToColor multisample_coverage_to_color; ///< 0x11F8
+            DecompressZetaSurface decompress_zeta_surface; ///< 0x11FC
+            INSERT_PADDING_BYTES_NOINIT(0x8);
+            ZetaSparse zeta_sparse; ///< 0x1208
+            u32 invalidate_sampler_cache; ///< 0x120C
+            u32 invalidate_texture_header_cache; ///< 0x1210
+            VertexArray vertex_array_instance_first; ///< 0x1214
+            VertexArray vertex_array_instance_subsequent; ///< 0x1218
+            RtControl rt_control; ///< 0x121C
+            CompressionThresholdSamples compression_threshold_samples; ///< 0x1220
+            PixelShaderInterlockControl ps_interlock_control; ///< 0x1224
+            ZetaSize zeta_size; ///< 0x1228
+            SamplerBinding sampler_binding; ///< 0x1234
sampler_binding; ///< 0x1234 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 draw_auto_byte_count; ///< 0x123C + std::array<u32, 8> post_vtg_shader_attrib_skip_mask; ///< 0x1240 + PsTicketDispenserValue ps_ticket_dispenser_value; ///< 0x1260 + INSERT_PADDING_BYTES_NOINIT(0x1C); + u32 circular_buffer_size; ///< 0x1280 + RegisterWatermarks vtg_register_watermarks; ///< 0x1284 + InvalidateTextureDataCacheNoWfi invalidate_texture_cache_no_wfi; ///< 0x1288 + INSERT_PADDING_BYTES_NOINIT(0x4); + L2CacheRopNonInterlockedReads l2_cache_rop_interlocked_reads; ///< 0x1290 + INSERT_PADDING_BYTES_NOINIT(0x10); + u32 primitive_restart_topology_change_index; ///< 0x12A4 + INSERT_PADDING_BYTES_NOINIT(0x20); + ZCullRegionEnable zcull_region_enable; ///< 0x12C8 + u32 depth_test_enable; ///< 0x12CC + FillMode fill_mode; ///< 0x12D0 + ShadeMode shade_mode; ///< 0x12D4 + L2CacheRopNonInterlockedReads l2_cache_rop_non_interlocked_writes; ///< 0x12D8 + L2CacheRopNonInterlockedReads l2_cache_rop_interlocked_writes; ///< 0x12DC + AlphaToCoverageDither alpha_to_coverage_dither; ///< 0x12E0 + u32 blend_per_target_enabled; ///< 0x12E4 + u32 depth_write_enabled; ///< 0x12E8 + u32 alpha_test_enabled; ///< 0x12EC + INSERT_PADDING_BYTES_NOINIT(0x10); + InlineIndex4x8Align inline_index_4x8_align; ///< 0x1300 + InlineIndex4x8Index inline_index_4x8_index; ///< 0x1304 + D3DCullMode d3d_cull_mode; ///< 0x1308 + ComparisonOp depth_test_func; ///< 0x130C + f32 alpha_test_ref; ///< 0x1310 + ComparisonOp alpha_test_func; ///< 0x1314 + u32 draw_auto_stride; ///< 0x1318 + BlendColor blend_color; ///< 0x131C + INSERT_PADDING_BYTES_NOINIT(0x4); + InvalidateCacheLines invalidate_sampler_cache_lines; ///< 0x1330 + InvalidateCacheLines invalidate_texture_header_cache_lines; ///< 0x1334 + InvalidateCacheLines invalidate_texture_data_cache_lines; ///< 0x1338 + Blend blend; ///< 0x133C + u32 stencil_enable; ///< 0x1380 + StencilOp stencil_front_op; ///< 0x1384 + s32 stencil_front_ref; ///< 0x1394 + s32 stencil_front_func_mask; ///< 0x1398 + s32 stencil_front_mask; ///< 0x139C + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 draw_auto_start_byte_count; ///< 0x13A4 + PsSaturate frag_color_clamp; ///< 0x13A8 + WindowOrigin window_origin; ///< 0x13AC + f32 line_width_smooth; ///< 0x13B0 + f32 line_width_aliased; ///< 0x13B4 + INSERT_PADDING_BYTES_NOINIT(0x60); + u32 line_override_multisample; ///< 0x1418 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 alpha_hysteresis_rounds; ///< 0x1420 + InvalidateCacheLines invalidate_sampler_cache_no_wfi; ///< 0x1424 + InvalidateCacheLines invalidate_texture_header_cache_no_wfi; ///< 0x1428 + INSERT_PADDING_BYTES_NOINIT(0x8); + u32 global_base_vertex_index; ///< 0x1434 + u32 global_base_instance_index; ///< 0x1438 + INSERT_PADDING_BYTES_NOINIT(0x14); + RegisterWatermarks ps_warp_watermarks; ///< 0x1450 + RegisterWatermarks ps_regster_watermarks; ///< 0x1454 + INSERT_PADDING_BYTES_NOINIT(0xC); + u32 store_zcull; ///< 0x1464 + INSERT_PADDING_BYTES_NOINIT(0x18); + std::array<IteratedBlendConstants, NumRenderTargets> + iterated_blend_constants; ///< 0x1480 + u32 load_zcull; ///< 0x1500 + u32 surface_clip_id_height; ///< 0x1504 + Window surface_clip_id_clear_rect; ///< 0x1508 + UserClip::Enable user_clip_enable; ///< 0x1510 + u32 zpass_pixel_count_enable; ///< 0x1514 + f32 point_size; ///< 0x1518 + u32 zcull_stats_enable; ///< 0x151C + u32 point_sprite_enable; ///< 0x1520 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 shader_exceptions_enable; ///< 0x1528 + INSERT_PADDING_BYTES_NOINIT(0x4); + ClearReport clear_report_value; ///< 
0x1530 + u32 anti_alias_enable; ///< 0x1534 + u32 zeta_enable; ///< 0x1538 + AntiAliasAlphaControl anti_alias_alpha_control; ///< 0x153C + INSERT_PADDING_BYTES_NOINIT(0x10); + RenderEnable render_enable; ///< 0x1550 + TexSampler tex_sampler; ///< 0x155C + INSERT_PADDING_BYTES_NOINIT(0x4); + f32 slope_scale_depth_bias; ///< 0x156C + u32 line_anti_alias_enable; ///< 0x1570 + TexHeader tex_header; ///< 0x1574 + INSERT_PADDING_BYTES_NOINIT(0x10); + u32 active_zcull_region_id; ///< 0x1590 + u32 stencil_two_side_enable; ///< 0x1594 + StencilOp stencil_back_op; ///< 0x1598 + INSERT_PADDING_BYTES_NOINIT(0x10); + u32 framebuffer_srgb; ///< 0x15B8 + f32 depth_bias; ///< 0x15BC + INSERT_PADDING_BYTES_NOINIT(0x8); + ZCullRegionFormat zcull_region_format; ///< 0x15C8 + RtLayer rt_layer; ///< 0x15CC + Tegra::Texture::MsaaMode anti_alias_samples_mode; ///< 0x15D0 + INSERT_PADDING_BYTES_NOINIT(0x10); + u32 edge_flag; ///< 0x15E4 + u32 draw_inline_index; ///< 0x15E8 + InlineIndex2x16 inline_index_2x16; ///< 0x15EC + VertexGlobalBaseOffset vertex_global_base_offset; ///< 0x15F4 + ZCullRegionPixelOffset zcull_region_pixel_offset; ///< 0x15FC + PointSprite point_sprite; ///< 0x1604 + ProgramRegion program_region; ///< 0x1608 + DefaultAttributes default_attributes; ///< 0x1610 + Draw draw; ///< 0x1614 + VertexIdCopy vertex_id_copy; ///< 0x161C + u32 add_to_primitive_id; ///< 0x1620 + u32 load_to_primitive_id; ///< 0x1624 + INSERT_PADDING_BYTES_NOINIT(0x4); + ShaderBasedCull shader_based_cull; ///< 0x162C + INSERT_PADDING_BYTES_NOINIT(0x8); + ClassVersion class_version; ///< 0x1638 + INSERT_PADDING_BYTES_NOINIT(0x8); + PrimitiveRestart primitive_restart; ///< 0x1644 + OutputVertexId output_vertex_id; ///< 0x164C + INSERT_PADDING_BYTES_NOINIT(0x8); + u32 anti_alias_point_enable; ///< 0x1658 + PointCenterMode point_center_mode; ///< 0x165C + INSERT_PADDING_BYTES_NOINIT(0x8); + LineSmoothParams line_smooth_params; ///< 0x1668 + u32 line_stipple_enable; ///< 0x166C + std::array<LineSmoothEdgeTable, 4> line_smooth_edge_table; ///< 0x1670 + LineStippleParams line_stipple_params; ///< 0x1680 + ProvokingVertex provoking_vertex; ///< 0x1684 + u32 two_sided_light_enabled; ///< 0x1688 + u32 polygon_stipple_enabled; ///< 0x168C + ShaderControl shader_control; ///< 0x1690 + INSERT_PADDING_BYTES_NOINIT(0xC); + ClassVersion class_version_check; ///< 0x16A0 + SphVersion sph_version; ///< 0x16A4 + SphVersion sph_version_check; ///< 0x16A8 + INSERT_PADDING_BYTES_NOINIT(0x8); + AlphaToCoverageOverride alpha_to_coverage_override; ///< 0x16B4 + INSERT_PADDING_BYTES_NOINIT(0x48); + std::array<u32, 32> polygon_stipple_pattern; ///< 0x1700 + INSERT_PADDING_BYTES_NOINIT(0x10); + AamVersion aam_version; ///< 0x1790 + AamVersion aam_version_check; ///< 0x1794 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 zeta_layer_offset; ///< 0x179C + INSERT_PADDING_BYTES_NOINIT(0x28); + IndexBuffer index_buffer; ///< 0x17C8 + IndexBufferSmall index_buffer32_first; ///< 0x17E4 + IndexBufferSmall index_buffer16_first; ///< 0x17E8 + IndexBufferSmall index_buffer8_first; ///< 0x17EC + IndexBufferSmall index_buffer32_subsequent; ///< 0x17F0 + IndexBufferSmall index_buffer16_subsequent; ///< 0x17F4 + IndexBufferSmall index_buffer8_subsequent; ///< 0x17F8 + INSERT_PADDING_BYTES_NOINIT(0x80); + f32 depth_bias_clamp; ///< 0x187C + VertexStreamInstances vertex_stream_instances; ///< 0x1880 + INSERT_PADDING_BYTES_NOINIT(0x10); + AttributePointSize point_size_attribute; ///< 0x1910 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 gl_cull_test_enabled; ///< 0x1918 + 
FrontFace gl_front_face; ///< 0x191C + CullFace gl_cull_face; ///< 0x1920 + Viewport::PixelCenter viewport_pixel_center; ///< 0x1924 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 viewport_scale_offset_enbled; ///< 0x192C + INSERT_PADDING_BYTES_NOINIT(0xC); + ViewportClipControl viewport_clip_control; ///< 0x193C + UserClip::Op user_clip_op; ///< 0x1940 + RenderEnable::Override render_enable_override; ///< 0x1944 + PrimitiveTopologyControl primitive_topology_control; ///< 0x1948 + WindowClip window_clip_enable; ///< 0x194C + INSERT_PADDING_BYTES_NOINIT(0x4); + InvalidateZCull invalidate_zcull; ///< 0x1958 + INSERT_PADDING_BYTES_NOINIT(0xC); + ZCull zcull; ///< 0x1968 + PrimitiveTopologyOverride topology_override; ///< 0x1970 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 zcull_sync; ///< 0x1978 + u32 clip_id_test_enable; ///< 0x197C + u32 surface_clip_id_width; ///< 0x1980 + u32 clip_id; ///< 0x1984 + INSERT_PADDING_BYTES_NOINIT(0x34); + u32 depth_bounds_enable; ///< 0x19BC + u32 blend_float_zero_times_anything_is_zero; ///< 0x19C0 + LogicOp logic_op; ///< 0x19C4 + u32 z_compression_enable; ///< 0x19CC + ClearSurface clear_surface; ///< 0x19D0 + u32 clear_clip_id_surface; ///< 0x19D4 + INSERT_PADDING_BYTES_NOINIT(0x8); + std::array<u32, NumRenderTargets> color_compression_enable; ///< 0x19E0 + std::array<ColorMask, NumRenderTargets> color_mask; ///< 0x1A00 + INSERT_PADDING_BYTES_NOINIT(0xC); + u32 pipe_nop; ///< 0x1A2C + std::array<u32, 4> spare; ///< 0x1A30 + INSERT_PADDING_BYTES_NOINIT(0xC0); + ReportSemaphore report_semaphore; ///< 0x1B00 + INSERT_PADDING_BYTES_NOINIT(0xF0); + std::array<VertexStream, NumVertexArrays> vertex_streams; ///< 0x1C00 + BlendPerTarget blend_per_target[NumRenderTargets]; ///< 0x1E00 + std::array<VertexStreamLimit, NumVertexArrays> vertex_stream_limits; ///< 0x1F00 + std::array<Pipeline, MaxShaderProgram> pipelines; ///< 0x2000 + INSERT_PADDING_BYTES_NOINIT(0x180); + u32 falcon[32]; ///< 0x2300 + ConstantBuffer const_buffer; ///< 0x2380 + INSERT_PADDING_BYTES_NOINIT(0x30); + BindGroup bind_groups[MaxShaderStage]; ///< 0x2400 + INSERT_PADDING_BYTES_NOINIT(0x160); + u32 color_clamp_enable; ///< 0x2600 + INSERT_PADDING_BYTES_NOINIT(0x4); + u32 bindless_texture_const_buffer_slot; ///< 0x2608 + u32 trap_handler; ///< 0x260C + INSERT_PADDING_BYTES_NOINIT(0x1F0); + std::array<std::array<StreamOutLayout, 32>, NumTransformFeedbackBuffers> + stream_out_layout; ///< 0x2800 + INSERT_PADDING_BYTES_NOINIT(0x93C); + ShaderPerformance shader_performance; ///< 0x333C + INSERT_PADDING_BYTES_NOINIT(0x18); + std::array<u32, 0x100> shadow_scratch; ///< 0x3400 }; std::array<u32, NUM_REGS> reg_array; }; }; + // clang-format on Regs regs{}; @@ -1591,147 +3201,350 @@ private: }; #define ASSERT_REG_POSITION(field_name, position) \ - static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4, \ + static_assert(offsetof(Maxwell3D::Regs, field_name) == position, \ "Field " #field_name " has invalid position") -ASSERT_REG_POSITION(wait_for_idle, 0x44); -ASSERT_REG_POSITION(macros, 0x45); -ASSERT_REG_POSITION(shadow_ram_control, 0x49); -ASSERT_REG_POSITION(upload, 0x60); -ASSERT_REG_POSITION(exec_upload, 0x6C); -ASSERT_REG_POSITION(data_upload, 0x6D); -ASSERT_REG_POSITION(force_early_fragment_tests, 0x84); -ASSERT_REG_POSITION(sync_info, 0xB2); -ASSERT_REG_POSITION(tess_mode, 0xC8); -ASSERT_REG_POSITION(tess_level_outer, 0xC9); -ASSERT_REG_POSITION(tess_level_inner, 0xCD); -ASSERT_REG_POSITION(rasterize_enable, 0xDF); -ASSERT_REG_POSITION(tfb_bindings, 0xE0); -ASSERT_REG_POSITION(tfb_layouts, 
0x1C0); -ASSERT_REG_POSITION(tfb_enabled, 0x1D1); -ASSERT_REG_POSITION(rt, 0x200); -ASSERT_REG_POSITION(viewport_transform, 0x280); -ASSERT_REG_POSITION(viewports, 0x300); -ASSERT_REG_POSITION(vertex_buffer, 0x35D); -ASSERT_REG_POSITION(depth_mode, 0x35F); -ASSERT_REG_POSITION(clear_color[0], 0x360); -ASSERT_REG_POSITION(clear_depth, 0x364); -ASSERT_REG_POSITION(clear_stencil, 0x368); -ASSERT_REG_POSITION(polygon_mode_front, 0x36B); -ASSERT_REG_POSITION(polygon_mode_back, 0x36C); -ASSERT_REG_POSITION(polygon_offset_point_enable, 0x370); -ASSERT_REG_POSITION(polygon_offset_line_enable, 0x371); -ASSERT_REG_POSITION(polygon_offset_fill_enable, 0x372); -ASSERT_REG_POSITION(patch_vertices, 0x373); -ASSERT_REG_POSITION(fragment_barrier, 0x378); -ASSERT_REG_POSITION(scissor_test, 0x380); -ASSERT_REG_POSITION(stencil_back_func_ref, 0x3D5); -ASSERT_REG_POSITION(stencil_back_mask, 0x3D6); -ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D7); -ASSERT_REG_POSITION(invalidate_texture_data_cache, 0x3DD); -ASSERT_REG_POSITION(tiled_cache_barrier, 0x3DF); -ASSERT_REG_POSITION(color_mask_common, 0x3E4); -ASSERT_REG_POSITION(depth_bounds, 0x3E7); -ASSERT_REG_POSITION(rt_separate_frag_data, 0x3EB); -ASSERT_REG_POSITION(multisample_raster_enable, 0x3ED); -ASSERT_REG_POSITION(multisample_raster_samples, 0x3EE); -ASSERT_REG_POSITION(multisample_sample_mask, 0x3EF); -ASSERT_REG_POSITION(zeta, 0x3F8); -ASSERT_REG_POSITION(render_area, 0x3FD); -ASSERT_REG_POSITION(clear_flags, 0x43E); -ASSERT_REG_POSITION(fill_rectangle, 0x44F); -ASSERT_REG_POSITION(conservative_raster_enable, 0x452); -ASSERT_REG_POSITION(vertex_attrib_format, 0x458); -ASSERT_REG_POSITION(multisample_sample_locations, 0x478); -ASSERT_REG_POSITION(multisample_coverage_to_color, 0x47E); -ASSERT_REG_POSITION(rt_control, 0x487); -ASSERT_REG_POSITION(zeta_width, 0x48a); -ASSERT_REG_POSITION(zeta_height, 0x48b); -ASSERT_REG_POSITION(zeta_depth, 0x48c); -ASSERT_REG_POSITION(sampler_index, 0x48D); -ASSERT_REG_POSITION(gp_passthrough_mask, 0x490); -ASSERT_REG_POSITION(depth_test_enable, 0x4B3); -ASSERT_REG_POSITION(independent_blend_enable, 0x4B9); -ASSERT_REG_POSITION(depth_write_enabled, 0x4BA); -ASSERT_REG_POSITION(alpha_test_enabled, 0x4BB); -ASSERT_REG_POSITION(d3d_cull_mode, 0x4C2); -ASSERT_REG_POSITION(depth_test_func, 0x4C3); -ASSERT_REG_POSITION(alpha_test_ref, 0x4C4); -ASSERT_REG_POSITION(alpha_test_func, 0x4C5); -ASSERT_REG_POSITION(draw_tfb_stride, 0x4C6); -ASSERT_REG_POSITION(blend_color, 0x4C7); -ASSERT_REG_POSITION(blend, 0x4CF); -ASSERT_REG_POSITION(stencil_enable, 0x4E0); -ASSERT_REG_POSITION(stencil_front_op_fail, 0x4E1); -ASSERT_REG_POSITION(stencil_front_op_zfail, 0x4E2); -ASSERT_REG_POSITION(stencil_front_op_zpass, 0x4E3); -ASSERT_REG_POSITION(stencil_front_func_func, 0x4E4); -ASSERT_REG_POSITION(stencil_front_func_ref, 0x4E5); -ASSERT_REG_POSITION(stencil_front_func_mask, 0x4E6); -ASSERT_REG_POSITION(stencil_front_mask, 0x4E7); -ASSERT_REG_POSITION(frag_color_clamp, 0x4EA); -ASSERT_REG_POSITION(screen_y_control, 0x4EB); -ASSERT_REG_POSITION(line_width_smooth, 0x4EC); -ASSERT_REG_POSITION(line_width_aliased, 0x4ED); -ASSERT_REG_POSITION(invalidate_sampler_cache_no_wfi, 0x509); -ASSERT_REG_POSITION(invalidate_texture_header_cache_no_wfi, 0x50A); -ASSERT_REG_POSITION(vb_element_base, 0x50D); -ASSERT_REG_POSITION(vb_base_instance, 0x50E); -ASSERT_REG_POSITION(clip_distance_enabled, 0x544); -ASSERT_REG_POSITION(samplecnt_enable, 0x545); -ASSERT_REG_POSITION(point_size, 0x546); -ASSERT_REG_POSITION(point_sprite_enable, 0x548); 
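Note that the macro's second argument changed meaning in this hunk: the removed asserts use u32 word indices (the old macro multiplied by 4 inside offsetof's comparison), while the replacement list uses byte offsets directly, matching the ///< comments in the struct. The two conventions describe the same layout, which can be checked against entries that survive the rename:

    // old convention: word index, scaled by sizeof(u32) inside the macro
    static_assert(0x200 * 4 == 0x0800); // rt
    static_assert(0x280 * 4 == 0x0A00); // viewport_transform
    static_assert(0x300 * 4 == 0x0C00); // viewports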
-ASSERT_REG_POSITION(counter_reset, 0x54C); -ASSERT_REG_POSITION(multisample_enable, 0x54D); -ASSERT_REG_POSITION(zeta_enable, 0x54E); -ASSERT_REG_POSITION(multisample_control, 0x54F); -ASSERT_REG_POSITION(condition, 0x554); -ASSERT_REG_POSITION(tsc, 0x557); -ASSERT_REG_POSITION(polygon_offset_factor, 0x55B); -ASSERT_REG_POSITION(line_smooth_enable, 0x55C); -ASSERT_REG_POSITION(tic, 0x55D); -ASSERT_REG_POSITION(stencil_two_side_enable, 0x565); -ASSERT_REG_POSITION(stencil_back_op_fail, 0x566); -ASSERT_REG_POSITION(stencil_back_op_zfail, 0x567); -ASSERT_REG_POSITION(stencil_back_op_zpass, 0x568); -ASSERT_REG_POSITION(stencil_back_func_func, 0x569); -ASSERT_REG_POSITION(framebuffer_srgb, 0x56E); -ASSERT_REG_POSITION(polygon_offset_units, 0x56F); -ASSERT_REG_POSITION(multisample_mode, 0x574); -ASSERT_REG_POSITION(point_coord_replace, 0x581); -ASSERT_REG_POSITION(code_address, 0x582); -ASSERT_REG_POSITION(draw, 0x585); -ASSERT_REG_POSITION(primitive_restart, 0x591); -ASSERT_REG_POSITION(provoking_vertex_last, 0x5A1); -ASSERT_REG_POSITION(index_array, 0x5F2); -ASSERT_REG_POSITION(small_index, 0x5F9); -ASSERT_REG_POSITION(polygon_offset_clamp, 0x61F); -ASSERT_REG_POSITION(instanced_arrays, 0x620); -ASSERT_REG_POSITION(vp_point_size, 0x644); -ASSERT_REG_POSITION(cull_test_enabled, 0x646); -ASSERT_REG_POSITION(front_face, 0x647); -ASSERT_REG_POSITION(cull_face, 0x648); -ASSERT_REG_POSITION(pixel_center_integer, 0x649); -ASSERT_REG_POSITION(viewport_transform_enabled, 0x64B); -ASSERT_REG_POSITION(view_volume_clip_control, 0x64F); -ASSERT_REG_POSITION(topology_override, 0x65C); -ASSERT_REG_POSITION(depth_bounds_enable, 0x66F); -ASSERT_REG_POSITION(logic_op, 0x671); -ASSERT_REG_POSITION(clear_buffers, 0x674); -ASSERT_REG_POSITION(color_mask, 0x680); -ASSERT_REG_POSITION(query, 0x6C0); -ASSERT_REG_POSITION(vertex_array[0], 0x700); -ASSERT_REG_POSITION(independent_blend, 0x780); -ASSERT_REG_POSITION(vertex_array_limit[0], 0x7C0); -ASSERT_REG_POSITION(shader_config[0], 0x800); -ASSERT_REG_POSITION(firmware, 0x8C0); -ASSERT_REG_POSITION(const_buffer, 0x8E0); -ASSERT_REG_POSITION(cb_bind[0], 0x904); -ASSERT_REG_POSITION(tex_cb_index, 0x982); -ASSERT_REG_POSITION(tfb_varying_locs, 0xA00); -ASSERT_REG_POSITION(ssbo_info, 0xD18); -ASSERT_REG_POSITION(tex_info_buffers.address[0], 0xD2A); -ASSERT_REG_POSITION(tex_info_buffers.size[0], 0xD2F); +ASSERT_REG_POSITION(object_id, 0x0000); +ASSERT_REG_POSITION(nop, 0x0100); +ASSERT_REG_POSITION(notify, 0x0104); +ASSERT_REG_POSITION(wait_for_idle, 0x0110); +ASSERT_REG_POSITION(load_mme, 0x0114); +ASSERT_REG_POSITION(shadow_ram_control, 0x0124); +ASSERT_REG_POSITION(peer, 0x0128); +ASSERT_REG_POSITION(global_render, 0x0130); +ASSERT_REG_POSITION(go_idle, 0x013C); +ASSERT_REG_POSITION(trigger, 0x0140); +ASSERT_REG_POSITION(trigger_wfi, 0x0144); +ASSERT_REG_POSITION(instrumentation_method_header, 0x0150); +ASSERT_REG_POSITION(instrumentation_method_data, 0x0154); +ASSERT_REG_POSITION(upload, 0x0180); +ASSERT_REG_POSITION(launch_dma, 0x01B0); +ASSERT_REG_POSITION(inline_data, 0x01B4); +ASSERT_REG_POSITION(i2m, 0x01DC); +ASSERT_REG_POSITION(run_ds_now, 0x0200); +ASSERT_REG_POSITION(opportunistic_early_z, 0x0204); +ASSERT_REG_POSITION(aliased_line_width_enabled, 0x020C); +ASSERT_REG_POSITION(mandated_early_z, 0x0210); +ASSERT_REG_POSITION(gs_dm_fifo, 0x0214); +ASSERT_REG_POSITION(l2_cache_control, 0x0218); +ASSERT_REG_POSITION(invalidate_shader_cache, 0x021C); +ASSERT_REG_POSITION(sync_info, 0x02C8); +ASSERT_REG_POSITION(prim_circular_buffer_throttle, 0x02D0); 
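These asserts check the Regs declaration that closes just above them, which is a union of the named struct with a flat std::array<u32, NUM_REGS>. That lets the engine write incoming methods by index while the rest of the code reads state by field name. A minimal sketch (anonymous-struct-in-union is a compiler extension, but one the codebase itself relies on):

    #include <array>
    #include <cstdint>

    union MiniRegs {
        struct {
            uint32_t nop;               // method index 0
            uint32_t depth_test_enable; // method index 1
        };
        std::array<uint32_t, 2> reg_array;
    };

    void WriteMethod(MiniRegs& r, uint32_t method, uint32_t value) {
        r.reg_array[method] = value; // the engine writes by index...
    }
    // ...and state queries read by name, e.g. r.depth_test_enable.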
+ASSERT_REG_POSITION(flush_invalidate_rop_mini_cache, 0x02D4); +ASSERT_REG_POSITION(surface_clip_block_id, 0x02D8); +ASSERT_REG_POSITION(alpha_circular_buffer_size, 0x02DC); +ASSERT_REG_POSITION(decompress_surface, 0x02E0); +ASSERT_REG_POSITION(zcull_rop_bypass, 0x02E4); +ASSERT_REG_POSITION(zcull_subregion, 0x02E8); +ASSERT_REG_POSITION(raster_bounding_box, 0x02EC); +ASSERT_REG_POSITION(peer_semaphore_release, 0x02F0); +ASSERT_REG_POSITION(iterated_blend_optimization, 0x02F4); +ASSERT_REG_POSITION(zcull_subregion_allocation, 0x02F8); +ASSERT_REG_POSITION(zcull_subregion_algorithm, 0x02FC); +ASSERT_REG_POSITION(ps_output_sample_mask_usage, 0x0300); +ASSERT_REG_POSITION(draw_zero_index, 0x0304); +ASSERT_REG_POSITION(l1_configuration, 0x0308); +ASSERT_REG_POSITION(render_enable_control_load_const_buffer, 0x030C); +ASSERT_REG_POSITION(spa_version, 0x0310); +ASSERT_REG_POSITION(ieee_clean_update, 0x0314); +ASSERT_REG_POSITION(snap_grid, 0x0318); +ASSERT_REG_POSITION(tessellation, 0x0320); +ASSERT_REG_POSITION(sub_tiling_perf, 0x0360); +ASSERT_REG_POSITION(zcull_subregion_report, 0x036C); +ASSERT_REG_POSITION(balanced_primitive_workload, 0x0374); +ASSERT_REG_POSITION(max_patches_per_batch, 0x0378); +ASSERT_REG_POSITION(rasterize_enable, 0x037C); +ASSERT_REG_POSITION(transform_feedback, 0x0380); +ASSERT_REG_POSITION(transform_feedback.controls, 0x0700); +ASSERT_REG_POSITION(raster_input, 0x0740); +ASSERT_REG_POSITION(transform_feedback_enabled, 0x0744); +ASSERT_REG_POSITION(primitive_restart_topology_change_enable, 0x0748); +ASSERT_REG_POSITION(alpha_fraction, 0x074C); +ASSERT_REG_POSITION(hybrid_aa_control, 0x0754); +ASSERT_REG_POSITION(shader_local_memory, 0x077C); +ASSERT_REG_POSITION(color_zero_bandwidth_clear, 0x07A4); +ASSERT_REG_POSITION(z_zero_bandwidth_clear, 0x07A8); +ASSERT_REG_POSITION(isbe_save_restore_program_offset, 0x07AC); +ASSERT_REG_POSITION(zcull_region, 0x07C0); +ASSERT_REG_POSITION(zeta_read_only, 0x07F8); +ASSERT_REG_POSITION(rt, 0x0800); +ASSERT_REG_POSITION(viewport_transform, 0x0A00); +ASSERT_REG_POSITION(viewports, 0x0C00); +ASSERT_REG_POSITION(windows, 0x0D00); +ASSERT_REG_POSITION(clip_id_extent, 0x0D40); +ASSERT_REG_POSITION(max_geometry_instances_per_task, 0x0D60); +ASSERT_REG_POSITION(visible_call_limit, 0x0D64); +ASSERT_REG_POSITION(statistics_count, 0x0D68); +ASSERT_REG_POSITION(clear_rect, 0x0D6C); +ASSERT_REG_POSITION(vertex_buffer, 0x0D74); +ASSERT_REG_POSITION(depth_mode, 0x0D7C); +ASSERT_REG_POSITION(clear_color, 0x0D80); +ASSERT_REG_POSITION(clear_depth, 0x0D90); +ASSERT_REG_POSITION(shader_cache_icache_prefetch, 0x0D94); +ASSERT_REG_POSITION(force_transition_to_beta, 0x0D98); +ASSERT_REG_POSITION(reduce_colour_thresholds, 0x0D9C); +ASSERT_REG_POSITION(clear_stencil, 0x0DA0); +ASSERT_REG_POSITION(invalidate_shader_cache_no_wfi, 0x0DA4); +ASSERT_REG_POSITION(zcull_serialization, 0x0DA8); +ASSERT_REG_POSITION(polygon_mode_front, 0x0DAC); +ASSERT_REG_POSITION(polygon_mode_back, 0x0DB0); +ASSERT_REG_POSITION(polygon_smooth, 0x0DB4); +ASSERT_REG_POSITION(zeta_mark_clean_ieee, 0x0DB8); +ASSERT_REG_POSITION(zcull_dir_format, 0x0DBC); +ASSERT_REG_POSITION(polygon_offset_point_enable, 0x0DC0); +ASSERT_REG_POSITION(polygon_offset_line_enable, 0x0DC4); +ASSERT_REG_POSITION(polygon_offset_fill_enable, 0x0DC8); +ASSERT_REG_POSITION(patch_vertices, 0x0DCC); +ASSERT_REG_POSITION(iterated_blend, 0x0DD0); +ASSERT_REG_POSITION(zcull_criteria, 0x0DD8); +ASSERT_REG_POSITION(fragment_barrier, 0x0DE0); +ASSERT_REG_POSITION(sm_timeout, 0x0DE4); 
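ASSERT_REG_POSITION also accepts nested members, as transform_feedback.controls above shows: offsetof with a dotted member designator is only conditionally supported by the standard, but the major compilers take it. A sketch with illustrative stand-in types, sized so the numbers match the asserts above (controls begins 0x0700 - 0x0380 = 0x380 bytes into the struct):

    #include <cstddef>
    #include <cstdint>

    struct TransformFeedback {
        uint8_t buffers[0x380]; // stand-in for the per-buffer state
        uint32_t controls[16];  // 0x380 bytes into the struct
    };
    struct MiniRegs {
        uint8_t _before[0x0380];
        TransformFeedback transform_feedback; // 0x0380
    };
    static_assert(offsetof(MiniRegs, transform_feedback.controls) == 0x0700);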
+ASSERT_REG_POSITION(primitive_restart_array, 0x0DE8); +ASSERT_REG_POSITION(load_iterated_blend, 0x0DF0); +ASSERT_REG_POSITION(window_offset_x, 0x0DF8); +ASSERT_REG_POSITION(window_offset_y, 0x0DFC); +ASSERT_REG_POSITION(scissor_test, 0x0E00); +ASSERT_REG_POSITION(select_texture_headers, 0x0F10); +ASSERT_REG_POSITION(vpc_perf, 0x0F14); +ASSERT_REG_POSITION(pm_local_trigger, 0x0F18); +ASSERT_REG_POSITION(post_z_pixel_imask, 0x0F1C); +ASSERT_REG_POSITION(const_color_rendering, 0x0F40); +ASSERT_REG_POSITION(stencil_back_ref, 0x0F54); +ASSERT_REG_POSITION(stencil_back_mask, 0x0F58); +ASSERT_REG_POSITION(stencil_back_func_mask, 0x0F5C); +ASSERT_REG_POSITION(vertex_stream_substitute, 0x0F84); +ASSERT_REG_POSITION(line_mode_clip_generated_edge_do_not_draw, 0x0F8C); +ASSERT_REG_POSITION(color_mask_common, 0x0F90); +ASSERT_REG_POSITION(vtg_warp_watermarks, 0x0F98); +ASSERT_REG_POSITION(depth_bounds, 0x0F9C); +ASSERT_REG_POSITION(sample_mask_target, 0x0FA4); +ASSERT_REG_POSITION(color_target_mrt_enable, 0x0FAC); +ASSERT_REG_POSITION(non_multisampled_z, 0x0FB0); +ASSERT_REG_POSITION(tir_mode, 0x0FB4); +ASSERT_REG_POSITION(anti_alias_raster, 0x0FB8); +ASSERT_REG_POSITION(sample_mask_pos, 0x0FBC); +ASSERT_REG_POSITION(surface_clip_id_memory, 0x0FCC); +ASSERT_REG_POSITION(tir_modulation, 0x0FD4); +ASSERT_REG_POSITION(blend_control_allow_float_pixel_kills, 0x0FDC); +ASSERT_REG_POSITION(zeta, 0x0FE0); +ASSERT_REG_POSITION(surface_clip, 0x0FF4); +ASSERT_REG_POSITION(tiled_cache_treat_heavy_as_light, 0x0FFC); +ASSERT_REG_POSITION(l2_cache_vaf, 0x1000); +ASSERT_REG_POSITION(viewport_multicast, 0x1004); +ASSERT_REG_POSITION(tessellation_cut_height, 0x1008); +ASSERT_REG_POSITION(max_gs_instances_per_task, 0x100C); +ASSERT_REG_POSITION(max_gs_output_vertices_per_task, 0x1010); +ASSERT_REG_POSITION(reserved_sw_method0, 0x1014); +ASSERT_REG_POSITION(gs_output_cb_storage_multiplier, 0x1018); +ASSERT_REG_POSITION(beta_cb_storage_constant, 0x101C); +ASSERT_REG_POSITION(ti_output_cb_storage_multiplier, 0x1020); +ASSERT_REG_POSITION(alpha_cb_storage_constraint, 0x1024); +ASSERT_REG_POSITION(reserved_sw_method1, 0x1028); +ASSERT_REG_POSITION(reserved_sw_method2, 0x102C); +ASSERT_REG_POSITION(tir_modulation_coeff, 0x1030); +ASSERT_REG_POSITION(spare_nop, 0x1044); +ASSERT_REG_POSITION(reserved_sw_method3_to_7, 0x10B0); +ASSERT_REG_POSITION(reduce_color_thresholds_unorm8, 0x10CC); +ASSERT_REG_POSITION(reserved_sw_method10_to_13, 0x10D0); +ASSERT_REG_POSITION(reduce_color_thresholds_unorm10, 0x10E0); +ASSERT_REG_POSITION(reduce_color_thresholds_unorm16, 0x10E4); +ASSERT_REG_POSITION(reduce_color_thresholds_fp11, 0x10E8); +ASSERT_REG_POSITION(reduce_color_thresholds_fp16, 0x10EC); +ASSERT_REG_POSITION(reduce_color_thresholds_srgb8, 0x10F0); +ASSERT_REG_POSITION(unbind_all_constant_buffers, 0x10F4); +ASSERT_REG_POSITION(clear_control, 0x10F8); +ASSERT_REG_POSITION(l2_cache_rop_non_interlocked_reads, 0x10FC); +ASSERT_REG_POSITION(reserved_sw_method14, 0x1100); +ASSERT_REG_POSITION(reserved_sw_method15, 0x1104); +ASSERT_REG_POSITION(no_operation_data_high, 0x110C); +ASSERT_REG_POSITION(depth_bias_control, 0x1110); +ASSERT_REG_POSITION(pm_trigger_end, 0x1114); +ASSERT_REG_POSITION(vertex_id_base, 0x1118); +ASSERT_REG_POSITION(stencil_compression_enabled, 0x111C); +ASSERT_REG_POSITION(vertex_output_attribute_skip_masks, 0x1120); +ASSERT_REG_POSITION(tir_control, 0x1130); +ASSERT_REG_POSITION(mutable_method_treat_mutable_as_heavy, 0x1134); +ASSERT_REG_POSITION(post_ps_use_pre_ps_coverage, 0x1138); 
+ASSERT_REG_POSITION(fill_via_triangle_mode, 0x113C); +ASSERT_REG_POSITION(blend_per_format_snorm8_unorm16_snorm16_enabled, 0x1140); +ASSERT_REG_POSITION(flush_pending_writes_sm_gloal_store, 0x1144); +ASSERT_REG_POSITION(vertex_attrib_format, 0x1160); +ASSERT_REG_POSITION(multisample_sample_locations, 0x11E0); +ASSERT_REG_POSITION(offset_render_target_index_by_viewport_index, 0x11F0); +ASSERT_REG_POSITION(force_heavy_method_sync, 0x11F4); +ASSERT_REG_POSITION(multisample_coverage_to_color, 0x11F8); +ASSERT_REG_POSITION(decompress_zeta_surface, 0x11FC); +ASSERT_REG_POSITION(zeta_sparse, 0x1208); +ASSERT_REG_POSITION(invalidate_sampler_cache, 0x120C); +ASSERT_REG_POSITION(invalidate_texture_header_cache, 0x1210); +ASSERT_REG_POSITION(vertex_array_instance_first, 0x1214); +ASSERT_REG_POSITION(vertex_array_instance_subsequent, 0x1218); +ASSERT_REG_POSITION(rt_control, 0x121C); +ASSERT_REG_POSITION(compression_threshold_samples, 0x1220); +ASSERT_REG_POSITION(ps_interlock_control, 0x1224); +ASSERT_REG_POSITION(zeta_size, 0x1228); +ASSERT_REG_POSITION(sampler_binding, 0x1234); +ASSERT_REG_POSITION(draw_auto_byte_count, 0x123C); +ASSERT_REG_POSITION(post_vtg_shader_attrib_skip_mask, 0x1240); +ASSERT_REG_POSITION(ps_ticket_dispenser_value, 0x1260); +ASSERT_REG_POSITION(circular_buffer_size, 0x1280); +ASSERT_REG_POSITION(vtg_register_watermarks, 0x1284); +ASSERT_REG_POSITION(invalidate_texture_cache_no_wfi, 0x1288); +ASSERT_REG_POSITION(l2_cache_rop_interlocked_reads, 0x1290); +ASSERT_REG_POSITION(primitive_restart_topology_change_index, 0x12A4); +ASSERT_REG_POSITION(zcull_region_enable, 0x12C8); +ASSERT_REG_POSITION(depth_test_enable, 0x12CC); +ASSERT_REG_POSITION(fill_mode, 0x12D0); +ASSERT_REG_POSITION(shade_mode, 0x12D4); +ASSERT_REG_POSITION(l2_cache_rop_non_interlocked_writes, 0x12D8); +ASSERT_REG_POSITION(l2_cache_rop_interlocked_writes, 0x12DC); +ASSERT_REG_POSITION(alpha_to_coverage_dither, 0x12E0); +ASSERT_REG_POSITION(blend_per_target_enabled, 0x12E4); +ASSERT_REG_POSITION(depth_write_enabled, 0x12E8); +ASSERT_REG_POSITION(alpha_test_enabled, 0x12EC); +ASSERT_REG_POSITION(inline_index_4x8_align, 0x1300); +ASSERT_REG_POSITION(inline_index_4x8_index, 0x1304); +ASSERT_REG_POSITION(d3d_cull_mode, 0x1308); +ASSERT_REG_POSITION(depth_test_func, 0x130C); +ASSERT_REG_POSITION(alpha_test_ref, 0x1310); +ASSERT_REG_POSITION(alpha_test_func, 0x1314); +ASSERT_REG_POSITION(draw_auto_stride, 0x1318); +ASSERT_REG_POSITION(blend_color, 0x131C); +ASSERT_REG_POSITION(invalidate_sampler_cache_lines, 0x1330); +ASSERT_REG_POSITION(invalidate_texture_header_cache_lines, 0x1334); +ASSERT_REG_POSITION(invalidate_texture_data_cache_lines, 0x1338); +ASSERT_REG_POSITION(blend, 0x133C); +ASSERT_REG_POSITION(stencil_enable, 0x1380); +ASSERT_REG_POSITION(stencil_front_op, 0x1384); +ASSERT_REG_POSITION(stencil_front_ref, 0x1394); +ASSERT_REG_POSITION(stencil_front_func_mask, 0x1398); +ASSERT_REG_POSITION(stencil_front_mask, 0x139C); +ASSERT_REG_POSITION(draw_auto_start_byte_count, 0x13A4); +ASSERT_REG_POSITION(frag_color_clamp, 0x13A8); +ASSERT_REG_POSITION(window_origin, 0x13AC); +ASSERT_REG_POSITION(line_width_smooth, 0x13B0); +ASSERT_REG_POSITION(line_width_aliased, 0x13B4); +ASSERT_REG_POSITION(line_override_multisample, 0x1418); +ASSERT_REG_POSITION(alpha_hysteresis_rounds, 0x1420); +ASSERT_REG_POSITION(invalidate_sampler_cache_no_wfi, 0x1424); +ASSERT_REG_POSITION(invalidate_texture_header_cache_no_wfi, 0x1428); +ASSERT_REG_POSITION(global_base_vertex_index, 0x1434); 
+ASSERT_REG_POSITION(global_base_instance_index, 0x1438); +ASSERT_REG_POSITION(ps_warp_watermarks, 0x1450); +ASSERT_REG_POSITION(ps_regster_watermarks, 0x1454); +ASSERT_REG_POSITION(store_zcull, 0x1464); +ASSERT_REG_POSITION(iterated_blend_constants, 0x1480); +ASSERT_REG_POSITION(load_zcull, 0x1500); +ASSERT_REG_POSITION(surface_clip_id_height, 0x1504); +ASSERT_REG_POSITION(surface_clip_id_clear_rect, 0x1508); +ASSERT_REG_POSITION(user_clip_enable, 0x1510); +ASSERT_REG_POSITION(zpass_pixel_count_enable, 0x1514); +ASSERT_REG_POSITION(point_size, 0x1518); +ASSERT_REG_POSITION(zcull_stats_enable, 0x151C); +ASSERT_REG_POSITION(point_sprite_enable, 0x1520); +ASSERT_REG_POSITION(shader_exceptions_enable, 0x1528); +ASSERT_REG_POSITION(clear_report_value, 0x1530); +ASSERT_REG_POSITION(anti_alias_enable, 0x1534); +ASSERT_REG_POSITION(zeta_enable, 0x1538); +ASSERT_REG_POSITION(anti_alias_alpha_control, 0x153C); +ASSERT_REG_POSITION(render_enable, 0x1550); +ASSERT_REG_POSITION(tex_sampler, 0x155C); +ASSERT_REG_POSITION(slope_scale_depth_bias, 0x156C); +ASSERT_REG_POSITION(line_anti_alias_enable, 0x1570); +ASSERT_REG_POSITION(tex_header, 0x1574); +ASSERT_REG_POSITION(active_zcull_region_id, 0x1590); +ASSERT_REG_POSITION(stencil_two_side_enable, 0x1594); +ASSERT_REG_POSITION(stencil_back_op, 0x1598); +ASSERT_REG_POSITION(framebuffer_srgb, 0x15B8); +ASSERT_REG_POSITION(depth_bias, 0x15BC); +ASSERT_REG_POSITION(zcull_region_format, 0x15C8); +ASSERT_REG_POSITION(rt_layer, 0x15CC); +ASSERT_REG_POSITION(anti_alias_samples_mode, 0x15D0); +ASSERT_REG_POSITION(edge_flag, 0x15E4); +ASSERT_REG_POSITION(draw_inline_index, 0x15E8); +ASSERT_REG_POSITION(inline_index_2x16, 0x15EC); +ASSERT_REG_POSITION(vertex_global_base_offset, 0x15F4); +ASSERT_REG_POSITION(zcull_region_pixel_offset, 0x15FC); +ASSERT_REG_POSITION(point_sprite, 0x1604); +ASSERT_REG_POSITION(program_region, 0x1608); +ASSERT_REG_POSITION(default_attributes, 0x1610); +ASSERT_REG_POSITION(draw, 0x1614); +ASSERT_REG_POSITION(vertex_id_copy, 0x161C); +ASSERT_REG_POSITION(add_to_primitive_id, 0x1620); +ASSERT_REG_POSITION(load_to_primitive_id, 0x1624); +ASSERT_REG_POSITION(shader_based_cull, 0x162C); +ASSERT_REG_POSITION(class_version, 0x1638); +ASSERT_REG_POSITION(primitive_restart, 0x1644); +ASSERT_REG_POSITION(output_vertex_id, 0x164C); +ASSERT_REG_POSITION(anti_alias_point_enable, 0x1658); +ASSERT_REG_POSITION(point_center_mode, 0x165C); +ASSERT_REG_POSITION(line_smooth_params, 0x1668); +ASSERT_REG_POSITION(line_stipple_enable, 0x166C); +ASSERT_REG_POSITION(line_smooth_edge_table, 0x1670); +ASSERT_REG_POSITION(line_stipple_params, 0x1680); +ASSERT_REG_POSITION(provoking_vertex, 0x1684); +ASSERT_REG_POSITION(two_sided_light_enabled, 0x1688); +ASSERT_REG_POSITION(polygon_stipple_enabled, 0x168C); +ASSERT_REG_POSITION(shader_control, 0x1690); +ASSERT_REG_POSITION(class_version_check, 0x16A0); +ASSERT_REG_POSITION(sph_version, 0x16A4); +ASSERT_REG_POSITION(sph_version_check, 0x16A8); +ASSERT_REG_POSITION(alpha_to_coverage_override, 0x16B4); +ASSERT_REG_POSITION(polygon_stipple_pattern, 0x1700); +ASSERT_REG_POSITION(aam_version, 0x1790); +ASSERT_REG_POSITION(aam_version_check, 0x1794); +ASSERT_REG_POSITION(zeta_layer_offset, 0x179C); +ASSERT_REG_POSITION(index_buffer, 0x17C8); +ASSERT_REG_POSITION(index_buffer32_first, 0x17E4); +ASSERT_REG_POSITION(index_buffer16_first, 0x17E8); +ASSERT_REG_POSITION(index_buffer8_first, 0x17EC); +ASSERT_REG_POSITION(index_buffer32_subsequent, 0x17F0); +ASSERT_REG_POSITION(index_buffer16_subsequent, 0x17F4); 
+ASSERT_REG_POSITION(index_buffer8_subsequent, 0x17F8); +ASSERT_REG_POSITION(depth_bias_clamp, 0x187C); +ASSERT_REG_POSITION(vertex_stream_instances, 0x1880); +ASSERT_REG_POSITION(point_size_attribute, 0x1910); +ASSERT_REG_POSITION(gl_cull_test_enabled, 0x1918); +ASSERT_REG_POSITION(gl_front_face, 0x191C); +ASSERT_REG_POSITION(gl_cull_face, 0x1920); +ASSERT_REG_POSITION(viewport_pixel_center, 0x1924); +ASSERT_REG_POSITION(viewport_scale_offset_enbled, 0x192C); +ASSERT_REG_POSITION(viewport_clip_control, 0x193C); +ASSERT_REG_POSITION(user_clip_op, 0x1940); +ASSERT_REG_POSITION(render_enable_override, 0x1944); +ASSERT_REG_POSITION(primitive_topology_control, 0x1948); +ASSERT_REG_POSITION(window_clip_enable, 0x194C); +ASSERT_REG_POSITION(invalidate_zcull, 0x1958); +ASSERT_REG_POSITION(zcull, 0x1968); +ASSERT_REG_POSITION(topology_override, 0x1970); +ASSERT_REG_POSITION(zcull_sync, 0x1978); +ASSERT_REG_POSITION(clip_id_test_enable, 0x197C); +ASSERT_REG_POSITION(surface_clip_id_width, 0x1980); +ASSERT_REG_POSITION(clip_id, 0x1984); +ASSERT_REG_POSITION(depth_bounds_enable, 0x19BC); +ASSERT_REG_POSITION(blend_float_zero_times_anything_is_zero, 0x19C0); +ASSERT_REG_POSITION(logic_op, 0x19C4); +ASSERT_REG_POSITION(z_compression_enable, 0x19CC); +ASSERT_REG_POSITION(clear_surface, 0x19D0); +ASSERT_REG_POSITION(clear_clip_id_surface, 0x19D4); +ASSERT_REG_POSITION(color_compression_enable, 0x19E0); +ASSERT_REG_POSITION(color_mask, 0x1A00); +ASSERT_REG_POSITION(pipe_nop, 0x1A2C); +ASSERT_REG_POSITION(spare, 0x1A30); +ASSERT_REG_POSITION(report_semaphore, 0x1B00); +ASSERT_REG_POSITION(vertex_streams, 0x1C00); +ASSERT_REG_POSITION(blend_per_target, 0x1E00); +ASSERT_REG_POSITION(vertex_stream_limits, 0x1F00); +ASSERT_REG_POSITION(pipelines, 0x2000); +ASSERT_REG_POSITION(falcon, 0x2300); +ASSERT_REG_POSITION(const_buffer, 0x2380); +ASSERT_REG_POSITION(bind_groups, 0x2400); +ASSERT_REG_POSITION(color_clamp_enable, 0x2600); +ASSERT_REG_POSITION(bindless_texture_const_buffer_slot, 0x2608); +ASSERT_REG_POSITION(trap_handler, 0x260C); +ASSERT_REG_POSITION(stream_out_layout, 0x2800); +ASSERT_REG_POSITION(shader_performance, 0x333C); +ASSERT_REG_POSITION(shadow_scratch, 0x3400); #undef ASSERT_REG_POSITION diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 0efe58282..4eb7a100d 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include "common/algorithm.h" #include "common/assert.h" #include "common/logging/log.h" #include "common/microprofile.h" @@ -54,90 +55,124 @@ void MaxwellDMA::Launch() { const LaunchDMA& launch = regs.launch_dma; ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); - ASSERT(regs.dst_params.origin.x == 0); - ASSERT(regs.dst_params.origin.y == 0); - const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; - const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; + if (launch.multi_line_enable) { + const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; + const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; - if (!is_src_pitch && !is_dst_pitch) { - // If both the source and the destination are in block layout, assert. 
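The maxwell_dma Launch() hunk here splits on multi_line_enable: multi-line pitch-to-pitch copies become one CopyBlock per line, and the single-line path gains a remap/BufferClear fast path plus a bit-permutation fallback for mixed pitch/block-linear pages. The deswizzle paths further down also widen the effective bytes-per-pixel via Common::FoldRight: they take the largest power of two (capped at 16) that divides every x-axis quantity and the base address, then shift the x-dimensions down so the swizzler moves wider units per step. A rough sketch of that reduction under assumed names (BppWideningShift is not the real helper):

    #include <algorithm>
    #include <bit>
    #include <cstdint>

    // Largest shift s (<= 4) such that (1 << s) divides width, x_elements,
    // x_offset and the base address. Zero inputs do not constrain the result,
    // since std::countr_zero(0u) is the full bit width (32).
    uint32_t BppWideningShift(uint32_t width, uint32_t x_elements,
                              uint32_t x_offset, uint32_t base_address) {
        uint32_t shift = 4U; // cap at 16-byte "pixels"
        for (const uint32_t v : {width, x_elements, x_offset, base_address}) {
            shift = std::min(shift, static_cast<uint32_t>(std::countr_zero(v)));
        }
        return shift; // caller then does width >>= shift, bpp = 1 << shift
    }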
- UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented"); - return; - } + if (!is_src_pitch && !is_dst_pitch) { + // If both the source and the destination are in block layout, assert. + UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented"); + return; + } - if (is_src_pitch && is_dst_pitch) { - CopyPitchToPitch(); + if (is_src_pitch && is_dst_pitch) { + for (u32 line = 0; line < regs.line_count; ++line) { + const GPUVAddr source_line = + regs.offset_in + static_cast<size_t>(line) * regs.pitch_in; + const GPUVAddr dest_line = + regs.offset_out + static_cast<size_t>(line) * regs.pitch_out; + memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in); + } + } else { + if (!is_src_pitch && is_dst_pitch) { + CopyBlockLinearToPitch(); + } else { + CopyPitchToBlockLinear(); + } + } } else { - ASSERT(launch.multi_line_enable == 1); - - if (!is_src_pitch && is_dst_pitch) { - CopyBlockLinearToPitch(); + // TODO: allow multisized components. + auto& accelerate = rasterizer->AccessAccelerateDMA(); + const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A; + if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) { + ASSERT(regs.remap_const.component_size_minus_one == 3); + accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value); + std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value); + memory_manager.WriteBlockUnsafe(regs.offset_out, + reinterpret_cast<u8*>(tmp_buffer.data()), + regs.line_length_in * sizeof(u32)); } else { - CopyPitchToBlockLinear(); + auto convert_linear_2_blocklinear_addr = [](u64 address) { + return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) | + ((address & 0x180) >> 1) | ((address & 0x20) << 3); + }; + auto src_kind = memory_manager.GetPageKind(regs.offset_in); + auto dst_kind = memory_manager.GetPageKind(regs.offset_out); + const bool is_src_pitch = IsPitchKind(static_cast<PTEKind>(src_kind)); + const bool is_dst_pitch = IsPitchKind(static_cast<PTEKind>(dst_kind)); + if (!is_src_pitch && is_dst_pitch) { + std::vector<u8> tmp_buffer(regs.line_length_in); + std::vector<u8> dst_buffer(regs.line_length_in); + memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), + regs.line_length_in); + for (u32 offset = 0; offset < regs.line_length_in; ++offset) { + dst_buffer[offset] = + tmp_buffer[convert_linear_2_blocklinear_addr(regs.offset_in + offset) - + regs.offset_in]; + } + memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in); + } else if (is_src_pitch && !is_dst_pitch) { + std::vector<u8> tmp_buffer(regs.line_length_in); + std::vector<u8> dst_buffer(regs.line_length_in); + memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), + regs.line_length_in); + for (u32 offset = 0; offset < regs.line_length_in; ++offset) { + dst_buffer[convert_linear_2_blocklinear_addr(regs.offset_out + offset) - + regs.offset_out] = tmp_buffer[offset]; + } + memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in); + } else { + if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) { + std::vector<u8> tmp_buffer(regs.line_length_in); + memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), + regs.line_length_in); + memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), + regs.line_length_in); + } + } } } - ReleaseSemaphore(); -} -void MaxwellDMA::CopyPitchToPitch() { - // When `multi_line_enable` bit is enabled we copy a 2D image of 
dimensions - // (line_length_in, line_count). - // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in. - const bool remap_enabled = regs.launch_dma.remap_enable != 0; - if (regs.launch_dma.multi_line_enable) { - UNIMPLEMENTED_IF(remap_enabled); - - // Perform a line-by-line copy. - // We're going to take a subrect of size (line_length_in, line_count) from the source - // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock - // does that for us. - for (u32 line = 0; line < regs.line_count; ++line) { - const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in; - const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out; - memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in); - } - return; - } - // TODO: allow multisized components. - auto& accelerate = rasterizer->AccessAccelerateDMA(); - const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A; - const bool is_buffer_clear = remap_enabled && is_const_a_dst; - if (is_buffer_clear) { - ASSERT(regs.remap_const.component_size_minus_one == 3); - accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value); - std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value); - memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()), - regs.line_length_in * sizeof(u32)); - return; - } - UNIMPLEMENTED_IF(remap_enabled); - if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) { - std::vector<u8> tmp_buffer(regs.line_length_in); - memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in); - memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in); - } + ReleaseSemaphore(); } void MaxwellDMA::CopyBlockLinearToPitch() { UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0); - UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0); UNIMPLEMENTED_IF(regs.src_params.layer != 0); + const bool is_remapping = regs.launch_dma.remap_enable != 0; + // Optimized path for micro copies. const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; - if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X && + if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X && regs.src_params.height > GOB_SIZE_Y) { FastCopyBlockLinearToPitch(); return; } // Deswizzle the input and copy it over. - UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0); - const u32 bytes_per_pixel = - regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1; const Parameters& src_params = regs.src_params; - const u32 width = src_params.width; + + const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1; + const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1; + + const u32 base_bpp = !is_remapping ? 
1U : num_remap_components * remap_components_size; + + u32 width = src_params.width; + u32 x_elements = regs.line_length_in; + u32 x_offset = src_params.origin.x; + u32 bpp_shift = 0U; + if (!is_remapping) { + bpp_shift = Common::FoldRight( + 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); }, + width, x_elements, x_offset, static_cast<u32>(regs.offset_in)); + width >>= bpp_shift; + x_elements >>= bpp_shift; + x_offset >>= bpp_shift; + } + + const u32 bytes_per_pixel = base_bpp << bpp_shift; const u32 height = src_params.height; const u32 depth = src_params.depth; const u32 block_height = src_params.block_size.height; @@ -155,30 +190,45 @@ void MaxwellDMA::CopyBlockLinearToPitch() { memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); - UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel, - block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(), - read_buffer.data()); + UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset, + src_params.origin.y, x_elements, regs.line_count, block_height, block_depth, + regs.pitch_out); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } void MaxwellDMA::CopyPitchToBlockLinear() { UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one"); - UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0); + UNIMPLEMENTED_IF(regs.dst_params.layer != 0); + + const bool is_remapping = regs.launch_dma.remap_enable != 0; + const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1; + const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1; const auto& dst_params = regs.dst_params; - const u32 bytes_per_pixel = - regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1; - const u32 width = dst_params.width; + + const u32 base_bpp = !is_remapping ? 
1U : num_remap_components * remap_components_size; + + u32 width = dst_params.width; + u32 x_elements = regs.line_length_in; + u32 x_offset = dst_params.origin.x; + u32 bpp_shift = 0U; + if (!is_remapping) { + bpp_shift = Common::FoldRight( + 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); }, + width, x_elements, x_offset, static_cast<u32>(regs.offset_out)); + width >>= bpp_shift; + x_elements >>= bpp_shift; + x_offset >>= bpp_shift; + } + + const u32 bytes_per_pixel = base_bpp << bpp_shift; const u32 height = dst_params.height; const u32 depth = dst_params.depth; const u32 block_height = dst_params.block_size.height; const u32 block_depth = dst_params.block_size.depth; const size_t dst_size = CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth); - const size_t dst_layer_size = - CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth); - const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count; if (read_buffer.size() < src_size) { @@ -188,32 +238,23 @@ void MaxwellDMA::CopyPitchToBlockLinear() { write_buffer.resize(dst_size); } + memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); if (Settings::IsGPULevelExtreme()) { - memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); } else { - memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size); memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); } // If the input is linear and the output is tiled, swizzle the input and copy it over. - if (regs.dst_params.block_size.depth > 0) { - ASSERT(dst_params.layer == 0); - SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height, - bytes_per_pixel, block_height, block_depth, dst_params.origin.x, - dst_params.origin.y, write_buffer.data(), read_buffer.data()); - } else { - SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel, - write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(), - block_height, dst_params.origin.x, dst_params.origin.y); - } + SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset, + dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth, + regs.pitch_in); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } void MaxwellDMA::FastCopyBlockLinearToPitch() { - const u32 bytes_per_pixel = - regs.launch_dma.remap_enable ? 
regs.pitch_out / regs.line_length_in : 1; + const u32 bytes_per_pixel = 1U; const size_t src_size = GOB_SIZE; const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; u32 pos_x = regs.src_params.origin.x; @@ -239,9 +280,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); } - UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width, - bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y, - write_buffer.data(), read_buffer.data()); + UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width, + regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count, + regs.src_params.block_size.height, regs.src_params.block_size.depth, + regs.pitch_out); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } @@ -249,16 +291,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { void MaxwellDMA::ReleaseSemaphore() { const auto type = regs.launch_dma.semaphore_type; const GPUVAddr address = regs.semaphore.address; + const u32 payload = regs.semaphore.payload; switch (type) { case LaunchDMA::SemaphoreType::NONE: break; - case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: - memory_manager.Write<u32>(address, regs.semaphore.payload); + case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: { + std::function<void()> operation( + [this, address, payload] { memory_manager.Write<u32>(address, payload); }); + rasterizer->SignalFence(std::move(operation)); break; - case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: - memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload)); - memory_manager.Write<u64>(address + 8, system.GPU().GetTicks()); + } + case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: { + std::function<void()> operation([this, address, payload] { + memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks()); + memory_manager.Write<u64>(address, payload); + }); + rasterizer->SignalFence(std::move(operation)); break; + } default: ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value())); } diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 074bac92c..953e34adc 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -189,10 +189,16 @@ public: BitField<4, 3, Swizzle> dst_y; BitField<8, 3, Swizzle> dst_z; BitField<12, 3, Swizzle> dst_w; + BitField<0, 12, u32> dst_components_raw; BitField<16, 2, u32> component_size_minus_one; BitField<20, 2, u32> num_src_components_minus_one; BitField<24, 2, u32> num_dst_components_minus_one; }; + + Swizzle GetComponent(size_t i) const { + const u32 raw = dst_components_raw; + return static_cast<Swizzle>((raw >> (i * 3)) & 0x7); + } }; static_assert(sizeof(RemapConst) == 12); @@ -213,8 +219,6 @@ private: /// registers. 
void Launch(); - void CopyPitchToPitch(); - void CopyBlockLinearToPitch(); void CopyPitchToBlockLinear(); diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp new file mode 100644 index 000000000..3977bb0fb --- /dev/null +++ b/src/video_core/engines/puller.cpp @@ -0,0 +1,305 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common/assert.h" +#include "common/logging/log.h" +#include "common/settings.h" +#include "core/core.h" +#include "video_core/control/channel_state.h" +#include "video_core/dma_pusher.h" +#include "video_core/engines/fermi_2d.h" +#include "video_core/engines/kepler_compute.h" +#include "video_core/engines/kepler_memory.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/maxwell_dma.h" +#include "video_core/engines/puller.h" +#include "video_core/gpu.h" +#include "video_core/memory_manager.h" +#include "video_core/rasterizer_interface.h" + +namespace Tegra::Engines { + +Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_, + Control::ChannelState& channel_state_) + : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{ + channel_state_} {} + +Puller::~Puller() = default; + +void Puller::ProcessBindMethod(const MethodCall& method_call) { + // Bind the current subchannel to the desired engine id. + LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, + method_call.argument); + const auto engine_id = static_cast<EngineID>(method_call.argument); + bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id); + switch (engine_id) { + case EngineID::FERMI_TWOD_A: + dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel); + break; + case EngineID::MAXWELL_B: + dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel); + break; + case EngineID::KEPLER_COMPUTE_B: + dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel); + break; + case EngineID::MAXWELL_DMA_COPY_A: + dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel); + break; + case EngineID::KEPLER_INLINE_TO_MEMORY_B: + dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id); + } +} + +void Puller::ProcessFenceActionMethod() { + switch (regs.fence_action.op) { + case Puller::FenceOperation::Acquire: + // UNIMPLEMENTED_MSG("Channel Scheduling pending."); + // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value); + rasterizer->ReleaseFences(); + break; + case Puller::FenceOperation::Increment: + rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value()); + } +} + +void Puller::ProcessSemaphoreTriggerMethod() { + const auto semaphoreOperationMask = 0xF; + const auto op = + static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask); + if (op == GpuSemaphoreOperation::WriteLong) { + const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; + const u32 payload = regs.semaphore_sequence; + [this, sequence_address, payload] { + memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks()); + memory_manager.Write<u64>(sequence_address, payload); + }(); + } else { + do { + const u32 
word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())}; + regs.acquire_source = true; + regs.acquire_value = regs.semaphore_sequence; + if (op == GpuSemaphoreOperation::AcquireEqual) { + regs.acquire_active = true; + regs.acquire_mode = false; + if (word != regs.acquire_value) { + rasterizer->ReleaseFences(); + continue; + } + } else if (op == GpuSemaphoreOperation::AcquireGequal) { + regs.acquire_active = true; + regs.acquire_mode = true; + if (word < regs.acquire_value) { + rasterizer->ReleaseFences(); + continue; + } + } else if (op == GpuSemaphoreOperation::AcquireMask) { + if (word && regs.semaphore_sequence == 0) { + rasterizer->ReleaseFences(); + continue; + } + } else { + LOG_ERROR(HW_GPU, "Invalid semaphore operation"); + } + } while (false); + } +} + +void Puller::ProcessSemaphoreRelease() { + const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; + const u32 payload = regs.semaphore_release; + std::function<void()> operation([this, sequence_address, payload] { + memory_manager.Write<u32>(sequence_address, payload); + }); + rasterizer->SyncOperation(std::move(operation)); +} + +void Puller::ProcessSemaphoreAcquire() { + u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress()); + const auto value = regs.semaphore_acquire; + while (word != value) { + regs.acquire_active = true; + regs.acquire_value = value; + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + rasterizer->ReleaseFences(); + word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress()); + // TODO(kemathe73) figure out how to do the acquire_timeout + regs.acquire_mode = false; + regs.acquire_source = false; + } +} + +/// Calls a GPU puller method. +void Puller::CallPullerMethod(const MethodCall& method_call) { + regs.reg_array[method_call.method] = method_call.argument; + const auto method = static_cast<BufferMethods>(method_call.method); + + switch (method) { + case BufferMethods::BindObject: { + ProcessBindMethod(method_call); + break; + } + case BufferMethods::Nop: + case BufferMethods::SemaphoreAddressHigh: + case BufferMethods::SemaphoreAddressLow: + case BufferMethods::SemaphoreSequencePayload: + case BufferMethods::SyncpointPayload: + break; + case BufferMethods::WrcacheFlush: + case BufferMethods::RefCnt: + rasterizer->SignalReference(); + break; + case BufferMethods::SyncpointOperation: + ProcessFenceActionMethod(); + break; + case BufferMethods::WaitForIdle: + rasterizer->WaitForIdle(); + break; + case BufferMethods::SemaphoreOperation: { + ProcessSemaphoreTriggerMethod(); + break; + } + case BufferMethods::NonStallInterrupt: { + LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented"); + break; + } + case BufferMethods::MemOpA: { + LOG_ERROR(HW_GPU, "Memory Operation A"); + break; + } + case BufferMethods::MemOpB: { + // Implement this better. + rasterizer->InvalidateGPUCache(); + break; + } + case BufferMethods::MemOpC: + case BufferMethods::MemOpD: { + LOG_ERROR(HW_GPU, "Memory Operation C,D"); + break; + } + case BufferMethods::SemaphoreAcquire: { + ProcessSemaphoreAcquire(); + break; + } + case BufferMethods::SemaphoreRelease: { + ProcessSemaphoreRelease(); + break; + } + case BufferMethods::Yield: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented"); + break; + } + default: + LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method); + break; + } +} + +/// Calls a GPU engine method. 
+void Puller::CallEngineMethod(const MethodCall& method_call) { + const EngineID engine = bound_engines[method_call.subchannel]; + + switch (engine) { + case EngineID::FERMI_TWOD_A: + channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::MAXWELL_B: + channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::KEPLER_COMPUTE_B: + channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::MAXWELL_DMA_COPY_A: + channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + case EngineID::KEPLER_INLINE_TO_MEMORY_B: + channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument, + method_call.IsLastCall()); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented engine"); + } +} + +/// Calls a GPU engine multivalue method. +void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending) { + const EngineID engine = bound_engines[subchannel]; + + switch (engine) { + case EngineID::FERMI_TWOD_A: + channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::MAXWELL_B: + channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::KEPLER_COMPUTE_B: + channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::MAXWELL_DMA_COPY_A: + channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::KEPLER_INLINE_TO_MEMORY_B: + channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented engine"); + } +} + +/// Calls a GPU method. +void Puller::CallMethod(const MethodCall& method_call) { + LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, + method_call.subchannel); + + ASSERT(method_call.subchannel < bound_engines.size()); + + if (ExecuteMethodOnEngine(method_call.method)) { + CallEngineMethod(method_call); + } else { + CallPullerMethod(method_call); + } +} + +/// Calls a GPU multivalue method. +void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending) { + LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); + + ASSERT(subchannel < bound_engines.size()); + + if (ExecuteMethodOnEngine(method)) { + CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); + } else { + for (std::size_t i = 0; i < amount; i++) { + CallPullerMethod(MethodCall{ + method, + base_start[i], + subchannel, + methods_pending - static_cast<u32>(i), + }); + } + } +} + +void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { + rasterizer = rasterizer_; +} + +/// Determines where the method should be executed. 
+[[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) { + const auto buffer_method = static_cast<BufferMethods>(method); + return buffer_method >= BufferMethods::NonPullerMethods; +} + +} // namespace Tegra::Engines diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h new file mode 100644 index 000000000..d4175ee94 --- /dev/null +++ b/src/video_core/engines/puller.h @@ -0,0 +1,177 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <array> +#include <cstddef> +#include <vector> +#include "common/bit_field.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "video_core/engines/engine_interface.h" + +namespace Core { +class System; +} + +namespace Tegra { +class MemoryManager; +class DmaPusher; + +enum class EngineID { + FERMI_TWOD_A = 0x902D, // 2D Engine + MAXWELL_B = 0xB197, // 3D Engine + KEPLER_COMPUTE_B = 0xB1C0, + KEPLER_INLINE_TO_MEMORY_B = 0xA140, + MAXWELL_DMA_COPY_A = 0xB0B5, +}; + +namespace Control { +struct ChannelState; +} +} // namespace Tegra + +namespace VideoCore { +class RasterizerInterface; +} + +namespace Tegra::Engines { + +class Puller final { +public: + struct MethodCall { + u32 method{}; + u32 argument{}; + u32 subchannel{}; + u32 method_count{}; + + explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0) + : method(method_), argument(argument_), subchannel(subchannel_), + method_count(method_count_) {} + + [[nodiscard]] bool IsLastCall() const { + return method_count <= 1; + } + }; + + enum class FenceOperation : u32 { + Acquire = 0, + Increment = 1, + }; + + union FenceAction { + u32 raw; + BitField<0, 1, FenceOperation> op; + BitField<8, 24, u32> syncpoint_id; + }; + + explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher, + Control::ChannelState& channel_state); + ~Puller(); + + void CallMethod(const MethodCall& method_call); + + void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending); + + void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); + + void CallPullerMethod(const MethodCall& method_call); + + void CallEngineMethod(const MethodCall& method_call); + + void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending); + +private: + Tegra::GPU& gpu; + + MemoryManager& memory_manager; + DmaPusher& dma_pusher; + Control::ChannelState& channel_state; + VideoCore::RasterizerInterface* rasterizer = nullptr; + + static constexpr std::size_t NUM_REGS = 0x800; + struct Regs { + static constexpr size_t NUM_REGS = 0x40; + + union { + struct { + INSERT_PADDING_WORDS_NOINIT(0x4); + struct { + u32 address_high; + u32 address_low; + + [[nodiscard]] GPUVAddr SemaphoreAddress() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + } semaphore_address; + + u32 semaphore_sequence; + u32 semaphore_trigger; + INSERT_PADDING_WORDS_NOINIT(0xC); + + // The pusher and the puller share the reference counter, the pusher only has read + // access + u32 reference_count; + INSERT_PADDING_WORDS_NOINIT(0x5); + + u32 semaphore_acquire; + u32 semaphore_release; + u32 fence_value; + FenceAction fence_action; + INSERT_PADDING_WORDS_NOINIT(0xE2); + + // Puller state + u32 acquire_mode; + u32 acquire_source; + u32 acquire_active; + u32 acquire_timeout; + u32 acquire_value; + }; + std::array<u32, NUM_REGS> reg_array; + }; + } 
regs{}; + + void ProcessBindMethod(const MethodCall& method_call); + void ProcessFenceActionMethod(); + void ProcessSemaphoreAcquire(); + void ProcessSemaphoreRelease(); + void ProcessSemaphoreTriggerMethod(); + [[nodiscard]] bool ExecuteMethodOnEngine(u32 method); + + /// Mapping of command subchannels to their bound engine ids + std::array<EngineID, 8> bound_engines{}; + + enum class GpuSemaphoreOperation { + AcquireEqual = 0x1, + WriteLong = 0x2, + AcquireGequal = 0x4, + AcquireMask = 0x8, + }; + +#define ASSERT_REG_POSITION(field_name, position) \ + static_assert(offsetof(Regs, field_name) == position * 4, \ + "Field " #field_name " has invalid position") + + ASSERT_REG_POSITION(semaphore_address, 0x4); + ASSERT_REG_POSITION(semaphore_sequence, 0x6); + ASSERT_REG_POSITION(semaphore_trigger, 0x7); + ASSERT_REG_POSITION(reference_count, 0x14); + ASSERT_REG_POSITION(semaphore_acquire, 0x1A); + ASSERT_REG_POSITION(semaphore_release, 0x1B); + ASSERT_REG_POSITION(fence_value, 0x1C); + ASSERT_REG_POSITION(fence_action, 0x1D); + + ASSERT_REG_POSITION(acquire_mode, 0x100); + ASSERT_REG_POSITION(acquire_source, 0x101); + ASSERT_REG_POSITION(acquire_active, 0x102); + ASSERT_REG_POSITION(acquire_timeout, 0x103); + ASSERT_REG_POSITION(acquire_value, 0x104); + +#undef ASSERT_REG_POSITION +}; + +} // namespace Tegra::Engines diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 1e9832ddd..c390ac91b 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h @@ -4,40 +4,24 @@ #pragma once #include <algorithm> +#include <cstring> +#include <deque> +#include <functional> +#include <memory> #include <queue> #include "common/common_types.h" #include "video_core/delayed_destruction_ring.h" #include "video_core/gpu.h" -#include "video_core/memory_manager.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" #include "video_core/rasterizer_interface.h" namespace VideoCommon { class FenceBase { public: - explicit FenceBase(u32 payload_, bool is_stubbed_) - : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {} - - explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_) - : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {} - - GPUVAddr GetAddress() const { - return address; - } - - u32 GetPayload() const { - return payload; - } - - bool IsSemaphore() const { - return is_semaphore; - } - -private: - GPUVAddr address; - u32 payload; - bool is_semaphore; + explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {} protected: bool is_stubbed; @@ -57,30 +41,28 @@ public: buffer_cache.AccumulateFlushes(); } - void SignalSemaphore(GPUVAddr addr, u32 value) { + void SyncOperation(std::function<void()>&& func) { + uncommitted_operations.emplace_back(std::move(func)); + } + + void SignalFence(std::function<void()>&& func) { TryReleasePendingFences(); const bool should_flush = ShouldFlush(); CommitAsyncFlushes(); - TFence new_fence = CreateFence(addr, value, !should_flush); + uncommitted_operations.emplace_back(std::move(func)); + CommitOperations(); + TFence new_fence = CreateFence(!should_flush); fences.push(new_fence); QueueFence(new_fence); if (should_flush) { rasterizer.FlushCommands(); } - rasterizer.SyncGuestHost(); } void SignalSyncPoint(u32 value) { - TryReleasePendingFences(); - const bool should_flush = ShouldFlush(); - CommitAsyncFlushes(); - TFence new_fence = CreateFence(value, !should_flush); - fences.push(new_fence); - 
QueueFence(new_fence); - if (should_flush) { - rasterizer.FlushCommands(); - } - rasterizer.SyncGuestHost(); + syncpoint_manager.IncrementGuest(value); + std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); }); + SignalFence(std::move(func)); } void WaitPendingFences() { @@ -90,11 +72,10 @@ public: WaitFence(current_fence); } PopAsyncFlushes(); - if (current_fence->IsSemaphore()) { - gpu_memory.template Write<u32>(current_fence->GetAddress(), - current_fence->GetPayload()); - } else { - gpu.IncrementSyncPoint(current_fence->GetPayload()); + auto operations = std::move(pending_operations.front()); + pending_operations.pop_front(); + for (auto& operation : operations) { + operation(); } PopFence(); } @@ -104,16 +85,14 @@ protected: explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, TQueryCache& query_cache_) - : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()}, + : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()}, texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} virtual ~FenceManager() = default; - /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is + /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is /// true - virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; - /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true - virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0; + virtual TFence CreateFence(bool is_stubbed) = 0; /// Queues a fence into the backend if the fence isn't stubbed. virtual void QueueFence(TFence& fence) = 0; /// Notifies that the backend fence has been signaled/reached in host GPU. 
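// The interface above replaces the old address/payload fence pair with batches of
// std::function operations: SyncOperation() accumulates work, SignalFence() commits the
// batch next to a backend fence, and reaching that fence drains the whole batch in
// submission order. A self-contained model of the two-stage queue (types reduced;
// OperationBatcher is illustrative rather than the yuzu class):
#include <deque>
#include <functional>
#include <iostream>

class OperationBatcher {
public:
    void SyncOperation(std::function<void()>&& op) {
        uncommitted.push_back(std::move(op));
    }
    void SignalFence() { // the real code also queues a backend fence here
        pending.push_back(std::move(uncommitted));
        uncommitted.clear();
    }
    void OnFenceReached() { // backend fence signaled: run the oldest batch
        auto batch = std::move(pending.front());
        pending.pop_front();
        for (auto& op : batch) {
            op();
        }
    }

private:
    std::deque<std::function<void()>> uncommitted;
    std::deque<std::deque<std::function<void()>>> pending;
};

int main() {
    OperationBatcher batcher;
    batcher.SyncOperation([] { std::cout << "write semaphore payload\n"; });
    batcher.SyncOperation([] { std::cout << "increment host syncpoint\n"; });
    batcher.SignalFence();    // both operations now ride on a single fence
    batcher.OnFenceReached(); // ...and run together when it is reached
}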
@@ -123,7 +102,7 @@ protected: VideoCore::RasterizerInterface& rasterizer; Tegra::GPU& gpu; - Tegra::MemoryManager& gpu_memory; + Tegra::Host1x::SyncpointManager& syncpoint_manager; TTextureCache& texture_cache; TTBufferCache& buffer_cache; TQueryCache& query_cache; @@ -136,11 +115,10 @@ private: return; } PopAsyncFlushes(); - if (current_fence->IsSemaphore()) { - gpu_memory.template Write<u32>(current_fence->GetAddress(), - current_fence->GetPayload()); - } else { - gpu.IncrementSyncPoint(current_fence->GetPayload()); + auto operations = std::move(pending_operations.front()); + pending_operations.pop_front(); + for (auto& operation : operations) { + operation(); } PopFence(); } @@ -159,16 +137,20 @@ private: } void PopAsyncFlushes() { - std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; - texture_cache.PopAsyncFlushes(); - buffer_cache.PopAsyncFlushes(); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.PopAsyncFlushes(); + buffer_cache.PopAsyncFlushes(); + } query_cache.PopAsyncFlushes(); } void CommitAsyncFlushes() { - std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; - texture_cache.CommitAsyncFlushes(); - buffer_cache.CommitAsyncFlushes(); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.CommitAsyncFlushes(); + buffer_cache.CommitAsyncFlushes(); + } query_cache.CommitAsyncFlushes(); } @@ -177,7 +159,13 @@ private: fences.pop(); } + void CommitOperations() { + pending_operations.emplace_back(std::move(uncommitted_operations)); + } + std::queue<TFence> fences; + std::deque<std::function<void()>> uncommitted_operations; + std::deque<std::deque<std::function<void()>>> pending_operations; DelayedDestructionRing<TFence, 6> delayed_destruction_ring; }; diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 33431f2a0..28b38273e 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -14,10 +14,11 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/frontend/emu_window.h" -#include "core/hardware_interrupt_manager.h" #include "core/hle/service/nvdrv/nvdata.h" #include "core/perf_stats.h" #include "video_core/cdma_pusher.h" +#include "video_core/control/channel_state.h" +#include "video_core/control/scheduler.h" #include "video_core/dma_pusher.h" #include "video_core/engines/fermi_2d.h" #include "video_core/engines/kepler_compute.h" @@ -26,75 +27,64 @@ #include "video_core/engines/maxwell_dma.h" #include "video_core/gpu.h" #include "video_core/gpu_thread.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" #include "video_core/memory_manager.h" #include "video_core/renderer_base.h" #include "video_core/shader_notify.h" namespace Tegra { -MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); - struct GPU::Impl { explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) - : gpu{gpu_}, system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>( - system)}, - dma_pusher{std::make_unique<Tegra::DmaPusher>(system, gpu)}, use_nvdec{use_nvdec_}, - maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)}, - fermi_2d{std::make_unique<Engines::Fermi2D>()}, - kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)}, - maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)}, - kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)}, + : gpu{gpu_}, system{system_}, 
host1x{system.Host1x()}, use_nvdec{use_nvdec_}, shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_}, - gpu_thread{system_, is_async_} {} + gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {} ~Impl() = default; - /// Binds a renderer to the GPU. - void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) { - renderer = std::move(renderer_); - rasterizer = renderer->ReadRasterizer(); - - memory_manager->BindRasterizer(rasterizer); - maxwell_3d->BindRasterizer(rasterizer); - fermi_2d->BindRasterizer(rasterizer); - kepler_compute->BindRasterizer(rasterizer); - kepler_memory->BindRasterizer(rasterizer); - maxwell_dma->BindRasterizer(rasterizer); + std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) { + auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id); + channels.emplace(channel_id, channel_state); + scheduler->DeclareChannel(channel_state); + return channel_state; } - /// Calls a GPU method. - void CallMethod(const GPU::MethodCall& method_call) { - LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, - method_call.subchannel); + void BindChannel(s32 channel_id) { + if (bound_channel == channel_id) { + return; + } + auto it = channels.find(channel_id); + ASSERT(it != channels.end()); + bound_channel = channel_id; + current_channel = it->second.get(); - ASSERT(method_call.subchannel < bound_engines.size()); + rasterizer->BindChannel(*current_channel); + } - if (ExecuteMethodOnEngine(method_call.method)) { - CallEngineMethod(method_call); - } else { - CallPullerMethod(method_call); - } + std::shared_ptr<Control::ChannelState> AllocateChannel() { + return CreateChannel(new_channel_id++); } - /// Calls a GPU multivalue method. - void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending) { - LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); + void InitChannel(Control::ChannelState& to_init) { + to_init.Init(system, gpu); + to_init.BindRasterizer(rasterizer); + rasterizer->InitializeChannel(to_init); + } - ASSERT(subchannel < bound_engines.size()); + void InitAddressSpace(Tegra::MemoryManager& memory_manager) { + memory_manager.BindRasterizer(rasterizer); + } - if (ExecuteMethodOnEngine(method)) { - CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); - } else { - for (std::size_t i = 0; i < amount; i++) { - CallPullerMethod(GPU::MethodCall{ - method, - base_start[i], - subchannel, - methods_pending - static_cast<u32>(i), - }); - } - } + void ReleaseChannel(Control::ChannelState& to_release) { + UNIMPLEMENTED(); + } + + /// Binds a renderer to the GPU. + void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) { + renderer = std::move(renderer_); + rasterizer = renderer->ReadRasterizer(); + host1x.MemoryManager().BindRasterizer(rasterizer); } /// Flush all current written commands into the host GPU for execution. @@ -103,85 +93,82 @@ struct GPU::Impl { } /// Synchronizes CPU writes with Host GPU memory. - void SyncGuestHost() { - rasterizer->SyncGuestHost(); + void InvalidateGPUCache() { + rasterizer->InvalidateGPUCache(); } /// Signal the ending of command list. void OnCommandListEnd() { - if (is_async) { - // This command only applies to asynchronous GPU mode - gpu_thread.OnCommandListEnd(); - } + gpu_thread.OnCommandListEnd(); } /// Request a host GPU memory flush from the CPU. 
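// CreateChannel/BindChannel above give every guest channel its own engine set and only
// rebind the rasterizer when the active channel actually changes. A small standalone
// model of the id -> state table and the bind-once check (ChannelState reduced to a stub;
// the real one carries the engines and the DMA pusher):
#include <cassert>
#include <iostream>
#include <memory>
#include <unordered_map>

struct ChannelState {
    int id{};
};

class ChannelTable {
public:
    std::shared_ptr<ChannelState> Allocate() { // mirrors AllocateChannel()
        auto state = std::make_shared<ChannelState>();
        state->id = next_id;
        channels.emplace(next_id, state);
        ++next_id;
        return state;
    }
    void Bind(int id) { // mirrors BindChannel()
        if (bound == id) {
            return; // already current: skip the expensive rebind
        }
        const auto it = channels.find(id);
        assert(it != channels.end());
        bound = id;
        std::cout << "rebound rasterizer state to channel " << it->second->id << '\n';
    }

private:
    std::unordered_map<int, std::shared_ptr<ChannelState>> channels;
    int next_id = 1; // mirrors new_channel_id{1}
    int bound = -1;  // mirrors bound_channel{-1}
};

int main() {
    ChannelTable table;
    const auto a = table.Allocate();
    const auto b = table.Allocate();
    table.Bind(a->id); // rebinds
    table.Bind(a->id); // no-op
    table.Bind(b->id); // rebinds
}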
- [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) { - std::unique_lock lck{flush_request_mutex}; - const u64 fence = ++last_flush_fence; - flush_requests.emplace_back(fence, addr, size); + template <typename Func> + [[nodiscard]] u64 RequestSyncOperation(Func&& action) { + std::unique_lock lck{sync_request_mutex}; + const u64 fence = ++last_sync_fence; + sync_requests.emplace_back(action); return fence; } /// Obtains the current sync request fence id. - [[nodiscard]] u64 CurrentFlushRequestFence() const { - return current_flush_fence.load(std::memory_order_relaxed); + [[nodiscard]] u64 CurrentSyncRequestFence() const { + return current_sync_fence.load(std::memory_order_relaxed); + } + + void WaitForSyncOperation(const u64 fence) { + std::unique_lock lck{sync_request_mutex}; + sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; }); } /// Tick pending requests within the GPU. void TickWork() { - std::unique_lock lck{flush_request_mutex}; - while (!flush_requests.empty()) { - auto& request = flush_requests.front(); - const u64 fence = request.fence; - const VAddr addr = request.addr; - const std::size_t size = request.size; - flush_requests.pop_front(); - flush_request_mutex.unlock(); - rasterizer->FlushRegion(addr, size); - current_flush_fence.store(fence); - flush_request_mutex.lock(); + std::unique_lock lck{sync_request_mutex}; + while (!sync_requests.empty()) { + auto request = std::move(sync_requests.front()); + sync_requests.pop_front(); + sync_request_mutex.unlock(); + request(); + current_sync_fence.fetch_add(1, std::memory_order_release); + sync_request_mutex.lock(); + sync_request_cv.notify_all(); } } /// Returns a reference to the Maxwell3D GPU engine. [[nodiscard]] Engines::Maxwell3D& Maxwell3D() { - return *maxwell_3d; + ASSERT(current_channel); + return *current_channel->maxwell_3d; } /// Returns a const reference to the Maxwell3D GPU engine. [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const { - return *maxwell_3d; + ASSERT(current_channel); + return *current_channel->maxwell_3d; } /// Returns a reference to the KeplerCompute GPU engine. [[nodiscard]] Engines::KeplerCompute& KeplerCompute() { - return *kepler_compute; + ASSERT(current_channel); + return *current_channel->kepler_compute; } /// Returns a const reference to the KeplerCompute GPU engine. [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const { - return *kepler_compute; - } - - /// Returns a reference to the GPU memory manager. - [[nodiscard]] Tegra::MemoryManager& MemoryManager() { - return *memory_manager; - } - - /// Returns a const reference to the GPU memory manager. - [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const { - return *memory_manager; + ASSERT(current_channel); + return *current_channel->kepler_compute; } /// Returns a reference to the GPU DMA pusher. [[nodiscard]] Tegra::DmaPusher& DmaPusher() { - return *dma_pusher; + ASSERT(current_channel); + return *current_channel->dma_pusher; } /// Returns a const reference to the GPU DMA pusher. [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const { - return *dma_pusher; + ASSERT(current_channel); + return *current_channel->dma_pusher; } /// Returns a reference to the underlying renderer. @@ -204,77 +191,6 @@ struct GPU::Impl { return *shader_notify; } - /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
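// RequestSyncOperation/TickWork above generalize the old flush queue: any std::function
// can now be queued under a monotonically increasing fence, and TickWork releases the
// mutex around each callback so producers can keep queueing while the GPU thread drains.
// A compact runnable model of that drain loop and fence handshake (names simplified):
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <iostream>
#include <list>
#include <mutex>

std::list<std::function<void()>> sync_requests;
std::atomic<uint64_t> current_sync_fence{0};
uint64_t last_sync_fence = 0;
std::mutex sync_request_mutex;
std::condition_variable sync_request_cv;

uint64_t RequestSyncOperation(std::function<void()> action) { // producer side
    std::unique_lock lck{sync_request_mutex};
    sync_requests.push_back(std::move(action));
    return ++last_sync_fence;
}

void TickWork() { // consumer side, the GPU thread
    std::unique_lock lck{sync_request_mutex};
    while (!sync_requests.empty()) {
        auto request = std::move(sync_requests.front());
        sync_requests.pop_front();
        lck.unlock(); // let producers queue more work while this one runs
        request();
        current_sync_fence.fetch_add(1, std::memory_order_release);
        lck.lock();
        sync_request_cv.notify_all(); // wakes WaitForSyncOperation-style waiters
    }
}

int main() {
    const uint64_t fence = RequestSyncOperation([] { std::cout << "flush region\n"; });
    TickWork();
    std::cout << (current_sync_fence.load() >= fence ? "fence reached\n" : "still pending\n");
}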
- void WaitFence(u32 syncpoint_id, u32 value) { - // Synced GPU, is always in sync - if (!is_async) { - return; - } - if (syncpoint_id == UINT32_MAX) { - // TODO: Research what this does. - LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented"); - return; - } - MICROPROFILE_SCOPE(GPU_wait); - std::unique_lock lock{sync_mutex}; - sync_cv.wait(lock, [=, this] { - if (shutting_down.load(std::memory_order_relaxed)) { - // We're shutting down, ensure no threads continue to wait for the next syncpoint - return true; - } - return syncpoints.at(syncpoint_id).load() >= value; - }); - } - - void IncrementSyncPoint(u32 syncpoint_id) { - auto& syncpoint = syncpoints.at(syncpoint_id); - syncpoint++; - std::scoped_lock lock{sync_mutex}; - sync_cv.notify_all(); - auto& interrupt = syncpt_interrupts.at(syncpoint_id); - if (!interrupt.empty()) { - u32 value = syncpoint.load(); - auto it = interrupt.begin(); - while (it != interrupt.end()) { - if (value >= *it) { - TriggerCpuInterrupt(syncpoint_id, *it); - it = interrupt.erase(it); - continue; - } - it++; - } - } - } - - [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const { - return syncpoints.at(syncpoint_id).load(); - } - - void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) { - std::scoped_lock lock{sync_mutex}; - auto& interrupt = syncpt_interrupts.at(syncpoint_id); - bool contains = std::any_of(interrupt.begin(), interrupt.end(), - [value](u32 in_value) { return in_value == value; }); - if (contains) { - return; - } - interrupt.emplace_back(value); - } - - [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) { - std::scoped_lock lock{sync_mutex}; - auto& interrupt = syncpt_interrupts.at(syncpoint_id); - const auto iter = - std::find_if(interrupt.begin(), interrupt.end(), - [value](u32 interrupt_value) { return value == interrupt_value; }); - - if (iter == interrupt.end()) { - return false; - } - interrupt.erase(iter); - return true; - } - [[nodiscard]] u64 GetTicks() const { // These values were reverse engineered by fincs from NVN // The gpu clock is reported in units of 385/625 nanoseconds
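// All of the syncpoint machinery deleted above (WaitFence, IncrementSyncPoint and the
// per-syncpoint interrupt lists) is taken over by Tegra::Host1x::SyncpointManager, which
// this patch drives through IncrementGuest/IncrementHost and RegisterGuestAction. A rough
// standalone model of the guest/host split those names suggest (semantics assumed from
// the call sites in this diff; the real manager also keys everything by syncpoint id):
#include <cstdint>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

class SyncpointModel {
public:
    void RegisterGuestAction(uint32_t expected_value, std::function<void()> action) {
        actions.emplace_back(expected_value, std::move(action));
    }
    void IncrementGuest() { // the CPU promises this value will eventually be reached
        ++guest;
    }
    void IncrementHost() { // the host GPU actually reached it: fire due actions
        ++host;
        for (auto it = actions.begin(); it != actions.end();) {
            if (host >= it->first) {
                it->second();
                it = actions.erase(it);
            } else {
                ++it;
            }
        }
    }

private:
    uint32_t guest = 0;
    uint32_t host = 0;
    std::vector<std::pair<uint32_t, std::function<void()>>> actions;
};

int main() {
    SyncpointModel syncpoint;
    syncpoint.RegisterGuestAction(1, [] { std::cout << "present frame\n"; });
    syncpoint.IncrementGuest(); // SignalSyncPoint bumps the guest count first
    syncpoint.IncrementHost();  // the deferred host increment fires the action
}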
void Start() { - gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher); + gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler); cpu_context = renderer->GetRenderWindow().CreateSharedContext(); cpu_context->MakeCurrent(); } @@ -328,8 +244,8 @@ struct GPU::Impl { } /// Push GPU command entries to be processed - void PushGPUEntries(Tegra::CommandList&& entries) { - gpu_thread.SubmitList(std::move(entries)); + void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) { + gpu_thread.SubmitList(channel, std::move(entries)); } /// Push GPU command buffer entries to be processed @@ -339,7 +255,7 @@ struct GPU::Impl { } if (!cdma_pushers.contains(id)) { - cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(gpu)); + cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x)); } // SubmitCommandBuffer would make the nvdec operations async, this is not currently working @@ -376,308 +292,55 @@ struct GPU::Impl { gpu_thread.FlushAndInvalidateRegion(addr, size); } - void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const { - auto& interrupt_manager = system.InterruptManager(); - interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); - } - - void ProcessBindMethod(const GPU::MethodCall& method_call) { - // Bind the current subchannel to the desired engine id. - LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, - method_call.argument); - const auto engine_id = static_cast<EngineID>(method_call.argument); - bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id); - switch (engine_id) { - case EngineID::FERMI_TWOD_A: - dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel); - break; - case EngineID::MAXWELL_B: - dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel); - break; - case EngineID::KEPLER_COMPUTE_B: - dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel); - break; - case EngineID::MAXWELL_DMA_COPY_A: - dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel); - break; - case EngineID::KEPLER_INLINE_TO_MEMORY_B: - dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id); - } - } - - void ProcessFenceActionMethod() { - switch (regs.fence_action.op) { - case GPU::FenceOperation::Acquire: - WaitFence(regs.fence_action.syncpoint_id, regs.fence_value); - break; - case GPU::FenceOperation::Increment: - IncrementSyncPoint(regs.fence_action.syncpoint_id); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value()); - } - } - - void ProcessWaitForInterruptMethod() { - // TODO(bunnei) ImplementMe - LOG_WARNING(HW_GPU, "(STUBBED) called"); - } - - void ProcessSemaphoreTriggerMethod() { - const auto semaphoreOperationMask = 0xF; - const auto op = - static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask); - if (op == GpuSemaphoreOperation::WriteLong) { - struct Block { - u32 sequence; - u32 zeros = 0; - u64 timestamp; - }; - - Block block{}; - block.sequence = regs.semaphore_sequence; - // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of - // CoreTiming - block.timestamp = GetTicks(); - memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, - sizeof(block)); - } else { - const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())}; - if ((op == GpuSemaphoreOperation::AcquireEqual && word == 
regs.semaphore_sequence) || - (op == GpuSemaphoreOperation::AcquireGequal && - static_cast<s32>(word - regs.semaphore_sequence) > 0) || - (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) { - // Nothing to do in this case + void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, + std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) { + size_t current_request_counter{}; + { + std::unique_lock<std::mutex> lk(request_swap_mutex); + if (free_swap_counters.empty()) { + current_request_counter = request_swap_counters.size(); + request_swap_counters.emplace_back(num_fences); } else { - regs.acquire_source = true; - regs.acquire_value = regs.semaphore_sequence; - if (op == GpuSemaphoreOperation::AcquireEqual) { - regs.acquire_active = true; - regs.acquire_mode = false; - } else if (op == GpuSemaphoreOperation::AcquireGequal) { - regs.acquire_active = true; - regs.acquire_mode = true; - } else if (op == GpuSemaphoreOperation::AcquireMask) { - // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with - // semaphore_sequence, gives a non-0 result - LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented"); - } else { - LOG_ERROR(HW_GPU, "Invalid semaphore operation"); - } + current_request_counter = free_swap_counters.front(); + request_swap_counters[current_request_counter] = num_fences; + free_swap_counters.pop_front(); } } - } - - void ProcessSemaphoreRelease() { - memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), - regs.semaphore_release); - } - - void ProcessSemaphoreAcquire() { - const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress()); - const auto value = regs.semaphore_acquire; - if (word != value) { - regs.acquire_active = true; - regs.acquire_value = value; - // TODO(kemathe73) figure out how to do the acquire_timeout - regs.acquire_mode = false; - regs.acquire_source = false; - } - } - - /// Calls a GPU puller method. - void CallPullerMethod(const GPU::MethodCall& method_call) { - regs.reg_array[method_call.method] = method_call.argument; - const auto method = static_cast<BufferMethods>(method_call.method); - - switch (method) { - case BufferMethods::BindObject: { - ProcessBindMethod(method_call); - break; - } - case BufferMethods::Nop: - case BufferMethods::SemaphoreAddressHigh: - case BufferMethods::SemaphoreAddressLow: - case BufferMethods::SemaphoreSequence: - break; - case BufferMethods::UnkCacheFlush: - rasterizer->SyncGuestHost(); - break; - case BufferMethods::WrcacheFlush: - rasterizer->SignalReference(); - break; - case BufferMethods::FenceValue: - break; - case BufferMethods::RefCnt: - rasterizer->SignalReference(); - break; - case BufferMethods::FenceAction: - ProcessFenceActionMethod(); - break; - case BufferMethods::WaitForInterrupt: - rasterizer->WaitForIdle(); - break; - case BufferMethods::SemaphoreTrigger: { - ProcessSemaphoreTriggerMethod(); - break; - } - case BufferMethods::NotifyIntr: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented"); - break; - } - case BufferMethods::Unk28: { - // TODO(Kmather73): Research and implement this method. 
- LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented"); - break; - } - case BufferMethods::SemaphoreAcquire: { - ProcessSemaphoreAcquire(); - break; - } - case BufferMethods::SemaphoreRelease: { - ProcessSemaphoreRelease(); - break; - } - case BufferMethods::Yield: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented"); - break; - } - default: - LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method); - break; - } - } - - /// Calls a GPU engine method. - void CallEngineMethod(const GPU::MethodCall& method_call) { - const EngineID engine = bound_engines[method_call.subchannel]; - - switch (engine) { - case EngineID::FERMI_TWOD_A: - fermi_2d->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::MAXWELL_B: - maxwell_3d->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::KEPLER_COMPUTE_B: - kepler_compute->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::MAXWELL_DMA_COPY_A: - maxwell_dma->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - case EngineID::KEPLER_INLINE_TO_MEMORY_B: - kepler_memory->CallMethod(method_call.method, method_call.argument, - method_call.IsLastCall()); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented engine"); - } - } - - /// Calls a GPU engine multivalue method. - void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending) { - const EngineID engine = bound_engines[subchannel]; - - switch (engine) { - case EngineID::FERMI_TWOD_A: - fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::MAXWELL_B: - maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::KEPLER_COMPUTE_B: - kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::MAXWELL_DMA_COPY_A: - maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending); - break; - case EngineID::KEPLER_INLINE_TO_MEMORY_B: - kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented engine"); - } - } - - /// Determines where the method should be executed. 
- [[nodiscard]] bool ExecuteMethodOnEngine(u32 method) { - const auto buffer_method = static_cast<BufferMethods>(method); - return buffer_method >= BufferMethods::NonPullerMethods; - } - - struct Regs { - static constexpr size_t NUM_REGS = 0x40; - - union { - struct { - INSERT_PADDING_WORDS_NOINIT(0x4); - struct { - u32 address_high; - u32 address_low; - - [[nodiscard]] GPUVAddr SemaphoreAddress() const { - return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | - address_low); + const auto wait_fence = + RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] { + auto& syncpoint_manager = host1x.GetSyncpointManager(); + if (num_fences == 0) { + renderer->SwapBuffers(framebuffer); + } + const auto executer = [this, current_request_counter, + framebuffer_copy = *framebuffer]() { + { + std::unique_lock<std::mutex> lk(request_swap_mutex); + if (--request_swap_counters[current_request_counter] != 0) { + return; + } + free_swap_counters.push_back(current_request_counter); } - } semaphore_address; - - u32 semaphore_sequence; - u32 semaphore_trigger; - INSERT_PADDING_WORDS_NOINIT(0xC); - - // The pusher and the puller share the reference counter, the pusher only has read - // access - u32 reference_count; - INSERT_PADDING_WORDS_NOINIT(0x5); - - u32 semaphore_acquire; - u32 semaphore_release; - u32 fence_value; - GPU::FenceAction fence_action; - INSERT_PADDING_WORDS_NOINIT(0xE2); - - // Puller state - u32 acquire_mode; - u32 acquire_source; - u32 acquire_active; - u32 acquire_timeout; - u32 acquire_value; - }; - std::array<u32, NUM_REGS> reg_array; - }; - } regs{}; + renderer->SwapBuffers(&framebuffer_copy); + }; + for (size_t i = 0; i < num_fences; i++) { + syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer); + } + }); + gpu_thread.TickGPU(); + WaitForSyncOperation(wait_fence); + } GPU& gpu; Core::System& system; - std::unique_ptr<Tegra::MemoryManager> memory_manager; - std::unique_ptr<Tegra::DmaPusher> dma_pusher; + Host1x::Host1x& host1x; + std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers; std::unique_ptr<VideoCore::RendererBase> renderer; VideoCore::RasterizerInterface* rasterizer = nullptr; const bool use_nvdec; - /// Mapping of command subchannels to their bound engine ids - std::array<EngineID, 8> bound_engines{}; - /// 3D engine - std::unique_ptr<Engines::Maxwell3D> maxwell_3d; - /// 2D engine - std::unique_ptr<Engines::Fermi2D> fermi_2d; - /// Compute engine - std::unique_ptr<Engines::KeplerCompute> kepler_compute; - /// DMA engine - std::unique_ptr<Engines::MaxwellDMA> maxwell_dma; - /// Inline memory engine - std::unique_ptr<Engines::KeplerMemory> kepler_memory; + s32 new_channel_id{1}; /// Shader build notifier std::unique_ptr<VideoCore::ShaderNotify> shader_notify; /// When true, we are about to shut down emulation session, so terminate outstanding tasks @@ -692,51 +355,25 @@ struct GPU::Impl { std::condition_variable sync_cv; - struct FlushRequest { - explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_) - : fence{fence_}, addr{addr_}, size{size_} {} - u64 fence; - VAddr addr; - std::size_t size; - }; - - std::list<FlushRequest> flush_requests; - std::atomic<u64> current_flush_fence{}; - u64 last_flush_fence{}; - std::mutex flush_request_mutex; + std::list<std::function<void()>> sync_requests; + std::atomic<u64> current_sync_fence{}; + u64 last_sync_fence{}; + std::mutex sync_request_mutex; + std::condition_variable sync_request_cv; const bool is_async; 
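// RequestSwapBuffers above hands one shared countdown to every fence guarding a frame:
// each fence's registered action decrements it, only the action that reaches zero calls
// SwapBuffers, and the finished slot is recycled through free_swap_counters. A minimal
// single-threaded model of that countdown (the request_swap_mutex is therefore omitted):
#include <cstddef>
#include <deque>
#include <iostream>
#include <vector>

int main() {
    std::vector<std::size_t> request_swap_counters;
    std::deque<std::size_t> free_swap_counters;
    const std::size_t num_fences = 3;

    // Acquire a counter slot, reusing a finished one when available.
    std::size_t slot{};
    if (free_swap_counters.empty()) {
        slot = request_swap_counters.size();
        request_swap_counters.push_back(num_fences);
    } else {
        slot = free_swap_counters.front();
        free_swap_counters.pop_front();
        request_swap_counters[slot] = num_fences;
    }

    // Every signaled fence runs the same action; only the last one swaps.
    for (std::size_t fence = 0; fence < num_fences; ++fence) {
        if (--request_swap_counters[slot] == 0) {
            free_swap_counters.push_back(slot);
            std::cout << "SwapBuffers after fence " << fence << '\n';
        }
    }
}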
VideoCommon::GPUThread::ThreadManager gpu_thread; std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context; -#define ASSERT_REG_POSITION(field_name, position) \ - static_assert(offsetof(Regs, field_name) == position * 4, \ - "Field " #field_name " has invalid position") - - ASSERT_REG_POSITION(semaphore_address, 0x4); - ASSERT_REG_POSITION(semaphore_sequence, 0x6); - ASSERT_REG_POSITION(semaphore_trigger, 0x7); - ASSERT_REG_POSITION(reference_count, 0x14); - ASSERT_REG_POSITION(semaphore_acquire, 0x1A); - ASSERT_REG_POSITION(semaphore_release, 0x1B); - ASSERT_REG_POSITION(fence_value, 0x1C); - ASSERT_REG_POSITION(fence_action, 0x1D); - - ASSERT_REG_POSITION(acquire_mode, 0x100); - ASSERT_REG_POSITION(acquire_source, 0x101); - ASSERT_REG_POSITION(acquire_active, 0x102); - ASSERT_REG_POSITION(acquire_timeout, 0x103); - ASSERT_REG_POSITION(acquire_value, 0x104); - -#undef ASSERT_REG_POSITION - - enum class GpuSemaphoreOperation { - AcquireEqual = 0x1, - WriteLong = 0x2, - AcquireGequal = 0x4, - AcquireMask = 0x8, - }; + std::unique_ptr<Tegra::Control::Scheduler> scheduler; + std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels; + Tegra::Control::ChannelState* current_channel; + s32 bound_channel{-1}; + + std::deque<size_t> free_swap_counters; + std::deque<size_t> request_swap_counters; + std::mutex request_swap_mutex; }; GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) @@ -744,25 +381,36 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) GPU::~GPU() = default; -void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) { - impl->BindRenderer(std::move(renderer)); +std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() { + return impl->AllocateChannel(); +} + +void GPU::InitChannel(Control::ChannelState& to_init) { + impl->InitChannel(to_init); +} + +void GPU::BindChannel(s32 channel_id) { + impl->BindChannel(channel_id); } -void GPU::CallMethod(const MethodCall& method_call) { - impl->CallMethod(method_call); +void GPU::ReleaseChannel(Control::ChannelState& to_release) { + impl->ReleaseChannel(to_release); } -void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending) { - impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending); +void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) { + impl->InitAddressSpace(memory_manager); +} + +void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) { + impl->BindRenderer(std::move(renderer)); } void GPU::FlushCommands() { impl->FlushCommands(); } -void GPU::SyncGuestHost() { - impl->SyncGuestHost(); +void GPU::InvalidateGPUCache() { + impl->InvalidateGPUCache(); } void GPU::OnCommandListEnd() { @@ -770,17 +418,32 @@ void GPU::OnCommandListEnd() { } u64 GPU::RequestFlush(VAddr addr, std::size_t size) { - return impl->RequestFlush(addr, size); + return impl->RequestSyncOperation( + [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); }); } -u64 GPU::CurrentFlushRequestFence() const { - return impl->CurrentFlushRequestFence(); +u64 GPU::CurrentSyncRequestFence() const { + return impl->CurrentSyncRequestFence(); +} + +void GPU::WaitForSyncOperation(u64 fence) { + return impl->WaitForSyncOperation(fence); } void GPU::TickWork() { impl->TickWork(); } +/// Gets a mutable reference to the Host1x interface +Host1x::Host1x& GPU::Host1x() { + return impl->host1x; +} + +/// Gets an immutable reference to the Host1x interface. 
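// With the syncpoint wrappers removed further down, callers that used to go through
// gpu.WaitFence()/gpu.IncrementSyncPoint() are expected to reach the same state via these
// Host1x accessors; judging from the call sites elsewhere in this patch, the shape is
// roughly the following (hedged sketch of a call sequence, not a verbatim call site):
//
//     auto& syncpoint_manager = gpu.Host1x().GetSyncpointManager();
//     syncpoint_manager.IncrementGuest(syncpoint_id);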
+const Host1x::Host1x& GPU::Host1x() const { + return impl->host1x; +} + Engines::Maxwell3D& GPU::Maxwell3D() { return impl->Maxwell3D(); } @@ -797,14 +460,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const { return impl->KeplerCompute(); } -Tegra::MemoryManager& GPU::MemoryManager() { - return impl->MemoryManager(); -} - -const Tegra::MemoryManager& GPU::MemoryManager() const { - return impl->MemoryManager(); -} - Tegra::DmaPusher& GPU::DmaPusher() { return impl->DmaPusher(); } @@ -829,24 +484,9 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const { return impl->ShaderNotify(); } -void GPU::WaitFence(u32 syncpoint_id, u32 value) { - impl->WaitFence(syncpoint_id, value); -} - -void GPU::IncrementSyncPoint(u32 syncpoint_id) { - impl->IncrementSyncPoint(syncpoint_id); -} - -u32 GPU::GetSyncpointValue(u32 syncpoint_id) const { - return impl->GetSyncpointValue(syncpoint_id); -} - -void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) { - impl->RegisterSyncptInterrupt(syncpoint_id, value); -} - -bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) { - return impl->CancelSyncptInterrupt(syncpoint_id, value); +void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, + std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) { + impl->RequestSwapBuffers(framebuffer, fences, num_fences); } u64 GPU::GetTicks() const { @@ -881,8 +521,8 @@ void GPU::ReleaseContext() { impl->ReleaseContext(); } -void GPU::PushGPUEntries(Tegra::CommandList&& entries) { - impl->PushGPUEntries(std::move(entries)); +void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) { + impl->PushGPUEntries(channel, std::move(entries)); } void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) { diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index b939ba315..d0709dc69 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -24,11 +24,15 @@ namespace Tegra { class DmaPusher; struct CommandList; +// TODO: Implement the commented ones enum class RenderTargetFormat : u32 { NONE = 0x0, R32B32G32A32_FLOAT = 0xC0, R32G32B32A32_SINT = 0xC1, R32G32B32A32_UINT = 0xC2, + // R32G32B32X32_FLOAT = 0xC3, + // R32G32B32X32_SINT = 0xC4, + // R32G32B32X32_UINT = 0xC5, R16G16B16A16_UNORM = 0xC6, R16G16B16A16_SNORM = 0xC7, R16G16B16A16_SINT = 0xC8, @@ -38,8 +42,8 @@ enum class RenderTargetFormat : u32 { R32G32_SINT = 0xCC, R32G32_UINT = 0xCD, R16G16B16X16_FLOAT = 0xCE, - B8G8R8A8_UNORM = 0xCF, - B8G8R8A8_SRGB = 0xD0, + A8R8G8B8_UNORM = 0xCF, + A8R8G8B8_SRGB = 0xD0, A2B10G10R10_UNORM = 0xD1, A2B10G10R10_UINT = 0xD2, A8B8G8R8_UNORM = 0xD5, @@ -52,10 +56,13 @@ enum class RenderTargetFormat : u32 { R16G16_SINT = 0xDC, R16G16_UINT = 0xDD, R16G16_FLOAT = 0xDE, + // A2R10G10B10_UNORM = 0xDF, B10G11R11_FLOAT = 0xE0, R32_SINT = 0xE3, R32_UINT = 0xE4, R32_FLOAT = 0xE5, + // X8R8G8B8_UNORM = 0xE6, + // X8R8G8B8_SRGB = 0xE7, R5G6B5_UNORM = 0xE8, A1R5G5B5_UNORM = 0xE9, R8G8_UNORM = 0xEA, @@ -71,17 +78,42 @@ enum class RenderTargetFormat : u32 { R8_SNORM = 0xF4, R8_SINT = 0xF5, R8_UINT = 0xF6, + + /* + A8_UNORM = 0xF7, + X1R5G5B5_UNORM = 0xF8, + X8B8G8R8_UNORM = 0xF9, + X8B8G8R8_SRGB = 0xFA, + Z1R5G5B5_UNORM = 0xFB, + O1R5G5B5_UNORM = 0xFC, + Z8R8G8B8_UNORM = 0xFD, + O8R8G8B8_UNORM = 0xFE, + R32_UNORM = 0xFF, + A16_UNORM = 0x40, + A16_FLOAT = 0x41, + A32_FLOAT = 0x42, + A8R8_UNORM = 0x43, + R16A16_UNORM = 0x44, + R16A16_FLOAT = 0x45, + R32A32_FLOAT = 0x46, + B8G8R8A8_UNORM = 0x47, + */ }; enum class DepthFormat : u32 { - D32_FLOAT = 0xA, - D16_UNORM = 0x13, - 
S8_UINT_Z24_UNORM = 0x14, - D24X8_UNORM = 0x15, - D24S8_UNORM = 0x16, + Z32_FLOAT = 0xA, + Z16_UNORM = 0x13, + Z24_UNORM_S8_UINT = 0x14, + X8Z24_UNORM = 0x15, + S8Z24_UNORM = 0x16, S8_UINT = 0x17, - D24C8_UNORM = 0x18, - D32_FLOAT_S8X24_UINT = 0x19, + V8Z24_UNORM = 0x18, + Z32_FLOAT_X24S8_UINT = 0x19, + /* + X8Z24_UNORM_X16V8S8_UINT = 0x1D, + Z32_FLOAT_X16V8X8_UINT = 0x1E, + Z32_FLOAT_X16V8S8_UINT = 0x1F, + */ }; namespace Engines { @@ -89,73 +121,58 @@ class Maxwell3D; class KeplerCompute; } // namespace Engines -enum class EngineID { - FERMI_TWOD_A = 0x902D, // 2D Engine - MAXWELL_B = 0xB197, // 3D Engine - KEPLER_COMPUTE_B = 0xB1C0, - KEPLER_INLINE_TO_MEMORY_B = 0xA140, - MAXWELL_DMA_COPY_A = 0xB0B5, -}; +namespace Control { +struct ChannelState; +} + +namespace Host1x { +class Host1x; +} // namespace Host1x class MemoryManager; class GPU final { public: - struct MethodCall { - u32 method{}; - u32 argument{}; - u32 subchannel{}; - u32 method_count{}; - - explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0) - : method(method_), argument(argument_), subchannel(subchannel_), - method_count(method_count_) {} - - [[nodiscard]] bool IsLastCall() const { - return method_count <= 1; - } - }; - - enum class FenceOperation : u32 { - Acquire = 0, - Increment = 1, - }; - - union FenceAction { - u32 raw; - BitField<0, 1, FenceOperation> op; - BitField<8, 24, u32> syncpoint_id; - }; - explicit GPU(Core::System& system, bool is_async, bool use_nvdec); ~GPU(); /// Binds a renderer to the GPU. void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer); - /// Calls a GPU method. - void CallMethod(const MethodCall& method_call); - - /// Calls a GPU multivalue method. - void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, - u32 methods_pending); - /// Flush all current written commands into the host GPU for execution. void FlushCommands(); /// Synchronizes CPU writes with Host GPU memory. - void SyncGuestHost(); + void InvalidateGPUCache(); /// Signal the ending of command list. void OnCommandListEnd(); + std::shared_ptr<Control::ChannelState> AllocateChannel(); + + void InitChannel(Control::ChannelState& to_init); + + void BindChannel(s32 channel_id); + + void ReleaseChannel(Control::ChannelState& to_release); + + void InitAddressSpace(Tegra::MemoryManager& memory_manager); + /// Request a host GPU memory flush from the CPU. [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); /// Obtains current flush request fence id. - [[nodiscard]] u64 CurrentFlushRequestFence() const; + [[nodiscard]] u64 CurrentSyncRequestFence() const; + + void WaitForSyncOperation(u64 fence); /// Tick pending requests within the GPU. void TickWork(); + /// Gets a mutable reference to the Host1x interface + [[nodiscard]] Host1x::Host1x& Host1x(); + + /// Gets an immutable reference to the Host1x interface. + [[nodiscard]] const Host1x::Host1x& Host1x() const; + /// Returns a reference to the Maxwell3D GPU engine. [[nodiscard]] Engines::Maxwell3D& Maxwell3D(); @@ -168,12 +185,6 @@ public: /// Returns a reference to the KeplerCompute GPU engine. [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const; - /// Returns a reference to the GPU memory manager. - [[nodiscard]] Tegra::MemoryManager& MemoryManager(); - - /// Returns a const reference to the GPU memory manager. - [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const; - /// Returns a reference to the GPU DMA pusher. 
[[nodiscard]] Tegra::DmaPusher& DmaPusher(); @@ -192,17 +203,6 @@ public: /// Returns a const reference to the shader notifier. [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const; - /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame. - void WaitFence(u32 syncpoint_id, u32 value); - - void IncrementSyncPoint(u32 syncpoint_id); - - [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const; - - void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value); - - [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value); - [[nodiscard]] u64 GetTicks() const; [[nodiscard]] bool IsAsync() const; @@ -211,6 +211,9 @@ public: void RendererFrameEndNotify(); + void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, + std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences); + /// Performs any additional setup necessary in order to begin GPU emulation. /// This can be used to launch any necessary threads and register any necessary /// core timing events. @@ -226,7 +229,7 @@ public: void ReleaseContext(); /// Push GPU command entries to be processed - void PushGPUEntries(Tegra::CommandList&& entries); + void PushGPUEntries(s32 channel, Tegra::CommandList&& entries); /// Push GPU command buffer entries to be processed void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries); @@ -248,7 +251,7 @@ public: private: struct Impl; - std::unique_ptr<Impl> impl; + mutable std::unique_ptr<Impl> impl; }; } // namespace Tegra diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index d43f7175a..1bd477011 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -8,6 +8,7 @@ #include "common/thread.h" #include "core/core.h" #include "core/frontend/emu_window.h" +#include "video_core/control/scheduler.h" #include "video_core/dma_pusher.h" #include "video_core/gpu.h" #include "video_core/gpu_thread.h" @@ -18,8 +19,8 @@ namespace VideoCommon::GPUThread { /// Runs the GPU thread static void RunThread(std::stop_token stop_token, Core::System& system, VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, - Tegra::DmaPusher& dma_pusher, SynchState& state) { - std::string name = "yuzu:GPU"; + Tegra::Control::Scheduler& scheduler, SynchState& state) { + std::string name = "GPU"; MicroProfileOnThreadCreate(name.c_str()); SCOPE_EXIT({ MicroProfileOnThreadExit(); }); @@ -36,8 +37,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system, break; } if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) { - dma_pusher.Push(std::move(submit_list->entries)); - dma_pusher.DispatchCalls(); + scheduler.Push(submit_list->channel, std::move(submit_list->entries)); } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) { renderer.SwapBuffers(data->framebuffer ? 
&*data->framebuffer : nullptr); } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) { @@ -68,14 +68,14 @@ ThreadManager::~ThreadManager() = default; void ThreadManager::StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, - Tegra::DmaPusher& dma_pusher) { + Tegra::Control::Scheduler& scheduler) { rasterizer = renderer.ReadRasterizer(); thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context), - std::ref(dma_pusher), std::ref(state)); + std::ref(scheduler), std::ref(state)); } -void ThreadManager::SubmitList(Tegra::CommandList&& entries) { - PushCommand(SubmitListCommand(std::move(entries))); +void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) { + PushCommand(SubmitListCommand(channel, std::move(entries))); } void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { @@ -93,8 +93,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) { } auto& gpu = system.GPU(); u64 fence = gpu.RequestFlush(addr, size); - PushCommand(GPUTickCommand(), true); - ASSERT(fence <= gpu.CurrentFlushRequestFence()); + TickGPU(); + gpu.WaitForSyncOperation(fence); +} + +void ThreadManager::TickGPU() { + PushCommand(GPUTickCommand()); } void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 2f8210cb9..64628d3e3 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h @@ -15,7 +15,9 @@ namespace Tegra { struct FramebufferConfig; -class DmaPusher; +namespace Control { +class Scheduler; +} } // namespace Tegra namespace Core { @@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread { /// Command to signal to the GPU thread that a command list is ready for processing struct SubmitListCommand final { - explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {} + explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_) + : channel{channel_}, entries{std::move(entries_)} {} + s32 channel; Tegra::CommandList entries; }; @@ -112,10 +116,10 @@ public: /// Creates and starts the GPU thread. 
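// The thread-side changes above tag every submitted command list with its channel and let
// Control::Scheduler pick which channel's pushbuffer runs, instead of feeding a single
// DmaPusher. A reduced model of the channel-tagged command queue consumed by RunThread
// (std::variant and strings stand in for the real CommandDataContainer):
#include <iostream>
#include <queue>
#include <string>
#include <variant>

struct SubmitListCommand {
    int channel;
    std::string entries;
};
struct GPUTickCommand {};
using CommandData = std::variant<SubmitListCommand, GPUTickCommand>;

int main() {
    std::queue<CommandData> state; // stands in for SynchState's queue
    state.push(SubmitListCommand{3, "pushbuffer A"}); // ThreadManager::SubmitList(3, ...)
    state.push(GPUTickCommand{});                     // ThreadManager::TickGPU()

    while (!state.empty()) { // RunThread's consume loop, minus the stop_token handling
        CommandData next = std::move(state.front());
        state.pop();
        if (const auto* submit = std::get_if<SubmitListCommand>(&next)) {
            std::cout << "scheduler.Push(" << submit->channel << ", " << submit->entries
                      << ")\n";
        } else {
            std::cout << "system.GPU().TickWork()\n";
        }
    }
}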
void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, - Tegra::DmaPusher& dma_pusher); + Tegra::Control::Scheduler& scheduler); /// Push GPU command entries to be processed - void SubmitList(Tegra::CommandList&& entries); + void SubmitList(s32 channel, Tegra::CommandList&& entries); /// Swap buffers (render frame) void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); @@ -131,6 +135,8 @@ public: void OnCommandListEnd(); + void TickGPU(); + private: /// Pushes a command to be executed by the GPU thread u64 PushCommand(CommandData&& command_data, bool block = false); diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp index a5eb97b7f..42e7d6e4f 100644 --- a/src/video_core/command_classes/codecs/codec.cpp +++ b/src/video_core/host1x/codecs/codec.cpp @@ -6,11 +6,11 @@ #include <vector> #include "common/assert.h" #include "common/settings.h" -#include "video_core/command_classes/codecs/codec.h" -#include "video_core/command_classes/codecs/h264.h" -#include "video_core/command_classes/codecs/vp8.h" -#include "video_core/command_classes/codecs/vp9.h" -#include "video_core/gpu.h" +#include "video_core/host1x/codecs/codec.h" +#include "video_core/host1x/codecs/h264.h" +#include "video_core/host1x/codecs/vp8.h" +#include "video_core/host1x/codecs/vp9.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" extern "C" { @@ -73,10 +73,10 @@ void AVFrameDeleter(AVFrame* ptr) { av_frame_free(&ptr); } -Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs) - : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)), - vp8_decoder(std::make_unique<Decoder::VP8>(gpu)), - vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {} +Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs) + : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)), + vp8_decoder(std::make_unique<Decoder::VP8>(host1x)), + vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {} Codec::~Codec() { if (!initialized) { @@ -168,11 +168,11 @@ void Codec::InitializeGpuDecoder() { void Codec::Initialize() { const AVCodecID codec = [&] { switch (current_codec) { - case NvdecCommon::VideoCodec::H264: + case Host1x::NvdecCommon::VideoCodec::H264: return AV_CODEC_ID_H264; - case NvdecCommon::VideoCodec::VP8: + case Host1x::NvdecCommon::VideoCodec::VP8: return AV_CODEC_ID_VP8; - case NvdecCommon::VideoCodec::VP9: + case Host1x::NvdecCommon::VideoCodec::VP9: return AV_CODEC_ID_VP9; default: UNIMPLEMENTED_MSG("Unknown codec {}", current_codec); @@ -197,7 +197,7 @@ void Codec::Initialize() { initialized = true; } -void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) { +void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) { if (current_codec != codec) { current_codec = codec; LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName()); @@ -215,11 +215,11 @@ void Codec::Decode() { bool vp9_hidden_frame = false; const auto& frame_data = [&]() { switch (current_codec) { - case Tegra::NvdecCommon::VideoCodec::H264: + case Tegra::Host1x::NvdecCommon::VideoCodec::H264: return h264_decoder->ComposeFrame(state, is_first_frame); - case Tegra::NvdecCommon::VideoCodec::VP8: + case Tegra::Host1x::NvdecCommon::VideoCodec::VP8: return vp8_decoder->ComposeFrame(state); - case Tegra::NvdecCommon::VideoCodec::VP9: + case Tegra::Host1x::NvdecCommon::VideoCodec::VP9: vp9_decoder->ComposeFrame(state); vp9_hidden_frame = 
vp9_decoder->WasFrameHidden(); return vp9_decoder->GetFrameBytes(); @@ -287,21 +287,21 @@ AVFramePtr Codec::GetCurrentFrame() { return frame; } -NvdecCommon::VideoCodec Codec::GetCurrentCodec() const { +Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const { return current_codec; } std::string_view Codec::GetCurrentCodecName() const { switch (current_codec) { - case NvdecCommon::VideoCodec::None: + case Host1x::NvdecCommon::VideoCodec::None: return "None"; - case NvdecCommon::VideoCodec::H264: + case Host1x::NvdecCommon::VideoCodec::H264: return "H264"; - case NvdecCommon::VideoCodec::VP8: + case Host1x::NvdecCommon::VideoCodec::VP8: return "VP8"; - case NvdecCommon::VideoCodec::H265: + case Host1x::NvdecCommon::VideoCodec::H265: return "H265"; - case NvdecCommon::VideoCodec::VP9: + case Host1x::NvdecCommon::VideoCodec::VP9: return "VP9"; default: return "Unknown"; diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/host1x/codecs/codec.h index 0c2405465..0d45fb7fe 100644 --- a/src/video_core/command_classes/codecs/codec.h +++ b/src/video_core/host1x/codecs/codec.h @@ -6,8 +6,8 @@ #include <memory> #include <string_view> #include <queue> - -#include "video_core/command_classes/nvdec_common.h" +#include "common/common_types.h" +#include "video_core/host1x/nvdec_common.h" extern "C" { #if defined(__GNUC__) || defined(__clang__) @@ -21,7 +21,6 @@ extern "C" { } namespace Tegra { -class GPU; void AVFrameDeleter(AVFrame* ptr); using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>; @@ -32,16 +31,20 @@ class VP8; class VP9; } // namespace Decoder +namespace Host1x { +class Host1x; +} // namespace Host1x + class Codec { public: - explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs); + explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs); ~Codec(); /// Initialize the codec, returning success or failure void Initialize(); /// Sets NVDEC video stream codec - void SetTargetCodec(NvdecCommon::VideoCodec codec); + void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec); /// Call decoders to construct headers, decode AVFrame with ffmpeg void Decode(); @@ -50,7 +53,7 @@ public: [[nodiscard]] AVFramePtr GetCurrentFrame(); /// Returns the value of current_codec - [[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const; + [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const; /// Return name of the current codec [[nodiscard]] std::string_view GetCurrentCodecName() const; @@ -63,14 +66,14 @@ private: bool CreateGpuAvDevice(); bool initialized{}; - NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None}; + Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None}; const AVCodec* av_codec{nullptr}; AVCodecContext* av_codec_ctx{nullptr}; AVBufferRef* av_gpu_decoder{nullptr}; - GPU& gpu; - const NvdecCommon::NvdecRegisters& state; + Host1x::Host1x& host1x; + const Host1x::NvdecCommon::NvdecRegisters& state; std::unique_ptr<Decoder::H264> h264_decoder; std::unique_ptr<Decoder::VP8> vp8_decoder; std::unique_ptr<Decoder::VP9> vp9_decoder; diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp index e2acd54d4..e87bd65fa 100644 --- a/src/video_core/command_classes/codecs/h264.cpp +++ b/src/video_core/host1x/codecs/h264.cpp @@ -5,8 +5,8 @@ #include <bit> #include "common/settings.h" -#include "video_core/command_classes/codecs/h264.h" -#include "video_core/gpu.h" +#include "video_core/host1x/codecs/h264.h" +#include 
"video_core/host1x/host1x.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { @@ -24,19 +24,20 @@ constexpr std::array<u8, 16> zig_zag_scan{ }; } // Anonymous namespace -H264::H264(GPU& gpu_) : gpu(gpu_) {} +H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {} H264::~H264() = default; -const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state, +const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame) { H264DecoderContext context; - gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext)); + host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context, + sizeof(H264DecoderContext)); const s64 frame_number = context.h264_parameter_set.frame_number.Value(); if (!is_first_frame && frame_number != 0) { frame.resize(context.stream_len); - gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); + host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); return frame; } @@ -155,8 +156,8 @@ const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& sta frame.resize(encoded_header.size() + context.stream_len); std::memcpy(frame.data(), encoded_header.data(), encoded_header.size()); - gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, - frame.data() + encoded_header.size(), context.stream_len); + host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, + frame.data() + encoded_header.size(), context.stream_len); return frame; } diff --git a/src/video_core/command_classes/codecs/h264.h b/src/video_core/host1x/codecs/h264.h index 261574364..5cc86454e 100644 --- a/src/video_core/command_classes/codecs/h264.h +++ b/src/video_core/host1x/codecs/h264.h @@ -8,10 +8,14 @@ #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" -#include "video_core/command_classes/nvdec_common.h" +#include "video_core/host1x/nvdec_common.h" namespace Tegra { -class GPU; + +namespace Host1x { +class Host1x; +} // namespace Host1x + namespace Decoder { class H264BitWriter { @@ -55,16 +59,16 @@ private: class H264 { public: - explicit H264(GPU& gpu); + explicit H264(Host1x::Host1x& host1x); ~H264(); /// Compose the H264 frame for FFmpeg decoding - [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state, - bool is_first_frame = false); + [[nodiscard]] const std::vector<u8>& ComposeFrame( + const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false); private: std::vector<u8> frame; - GPU& gpu; + Host1x::Host1x& host1x; struct H264ParameterSet { s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00 diff --git a/src/video_core/command_classes/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp index c83b9bbc2..28fb12cb8 100644 --- a/src/video_core/command_classes/codecs/vp8.cpp +++ b/src/video_core/host1x/codecs/vp8.cpp @@ -3,18 +3,18 @@ #include <vector> -#include "video_core/command_classes/codecs/vp8.h" -#include "video_core/gpu.h" +#include "video_core/host1x/codecs/vp8.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { -VP8::VP8(GPU& gpu_) : gpu(gpu_) {} +VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {} VP8::~VP8() = default; -const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { +const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { VP8PictureInfo info; - 
gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); + host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); const bool is_key_frame = info.key_frame == 1u; const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size); @@ -45,7 +45,7 @@ const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& stat frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f)); } const u64 bitstream_offset = state.frame_bitstream_offset; - gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); + host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); return frame; } diff --git a/src/video_core/command_classes/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h index 3357667b0..5bf07ecab 100644 --- a/src/video_core/command_classes/codecs/vp8.h +++ b/src/video_core/host1x/codecs/vp8.h @@ -8,23 +8,28 @@ #include "common/common_funcs.h" #include "common/common_types.h" -#include "video_core/command_classes/nvdec_common.h" +#include "video_core/host1x/nvdec_common.h" namespace Tegra { -class GPU; + +namespace Host1x { +class Host1x; +} // namespace Host1x + namespace Decoder { class VP8 { public: - explicit VP8(GPU& gpu); + explicit VP8(Host1x::Host1x& host1x); ~VP8(); /// Compose the VP8 frame for FFmpeg decoding - [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state); + [[nodiscard]] const std::vector<u8>& ComposeFrame( + const Host1x::NvdecCommon::NvdecRegisters& state); private: std::vector<u8> frame; - GPU& gpu; + Host1x::Host1x& host1x; struct VP8PictureInfo { INSERT_PADDING_WORDS_NOINIT(14); diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp index c01431441..cf40c9012 100644 --- a/src/video_core/command_classes/codecs/vp9.cpp +++ b/src/video_core/host1x/codecs/vp9.cpp @@ -4,8 +4,8 @@ #include <algorithm> // for std::copy #include <numeric> #include "common/assert.h" -#include "video_core/command_classes/codecs/vp9.h" -#include "video_core/gpu.h" +#include "video_core/host1x/codecs/vp9.h" +#include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" namespace Tegra::Decoder { @@ -236,7 +236,7 @@ constexpr std::array<u8, 254> map_lut{ } } // Anonymous namespace -VP9::VP9(GPU& gpu_) : gpu{gpu_} {} +VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {} VP9::~VP9() = default; @@ -355,9 +355,9 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_ } } -Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) { +Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) { PictureInfo picture_info; - gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); + host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); Vp9PictureInfo vp9_info = picture_info.Convert(); InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy); @@ -372,18 +372,19 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) { EntropyProbs entropy; - gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); + host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); entropy.Convert(dst); } -Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) { +Vp9FrameContainer 
VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { Vp9FrameContainer current_frame{}; { - gpu.SyncGuestHost(); + // gpu.SyncGuestHost(); TODO: determine why this sync was needed. current_frame.info = GetVp9PictureInfo(state); current_frame.bit_stream.resize(current_frame.info.bitstream_size); - gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), - current_frame.info.bitstream_size); + host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, + current_frame.bit_stream.data(), + current_frame.info.bitstream_size); } if (!next_frame.bit_stream.empty()) { Vp9FrameContainer temp{ @@ -769,7 +770,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() { return uncomp_writer; } -void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { +void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { std::vector<u8> bitstream; { Vp9FrameContainer curr_frame = GetCurrentFrame(state); diff --git a/src/video_core/command_classes/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h index ecc40e8b1..d4083e8d3 100644 --- a/src/video_core/command_classes/codecs/vp9.h +++ b/src/video_core/host1x/codecs/vp9.h @@ -8,11 +8,15 @@ #include "common/common_types.h" #include "common/stream.h" -#include "video_core/command_classes/codecs/vp9_types.h" -#include "video_core/command_classes/nvdec_common.h" +#include "video_core/host1x/codecs/vp9_types.h" +#include "video_core/host1x/nvdec_common.h" namespace Tegra { -class GPU; + +namespace Host1x { +class Host1x; +} // namespace Host1x + namespace Decoder { /// The VpxRangeEncoder and VpxBitStreamWriter classes are used to compose the @@ -106,7 +110,7 @@ private: class VP9 { public: - explicit VP9(GPU& gpu_); + explicit VP9(Host1x::Host1x& host1x); ~VP9(); VP9(const VP9&) = delete; @@ -117,7 +121,7 @@ public: /// Composes the VP9 frame from the GPU state information. /// Based on the official VP9 spec documentation - void ComposeFrame(const NvdecCommon::NvdecRegisters& state); + void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state); /// Returns true if the most recent frame was a hidden frame. 
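A note on the buffering visible in VP9::GetCurrentFrame above: VP9 streams may contain hidden (never-displayed) frames that later frames reference, so the decoder stores each incoming frame in next_frame and only emits the one buffered on the previous call. A minimal sketch of that one-frame delay, using hypothetical stand-in types rather than the actual yuzu classes:

#include <utility>
#include <vector>

struct FrameContainer {
    std::vector<unsigned char> bit_stream;
};

class OneFrameDelay {
public:
    // Stores the incoming frame and returns the one buffered on the
    // previous call, giving the decoder one frame of lookahead.
    FrameContainer Exchange(FrameContainer incoming) {
        std::swap(buffered, incoming);
        return incoming; // Empty on the very first call.
    }

private:
    FrameContainer buffered;
};
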
[[nodiscard]] bool WasFrameHidden() const { @@ -162,19 +166,21 @@ private: void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob); /// Returns VP9 information from NVDEC provided offset and size - [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state); + [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo( + const Host1x::NvdecCommon::NvdecRegisters& state); /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct void InsertEntropy(u64 offset, Vp9EntropyProbs& dst); /// Returns frame to be decoded after buffering - [[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state); + [[nodiscard]] Vp9FrameContainer GetCurrentFrame( + const Host1x::NvdecCommon::NvdecRegisters& state); /// Use NVDEC provided information to compose the headers for the current frame [[nodiscard]] std::vector<u8> ComposeCompressedHeader(); [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader(); - GPU& gpu; + Host1x::Host1x& host1x; std::vector<u8> frame; std::array<s8, 4> loop_filter_ref_deltas{}; diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h index bb3d8df6e..adad8ed7e 100644 --- a/src/video_core/command_classes/codecs/vp9_types.h +++ b/src/video_core/host1x/codecs/vp9_types.h @@ -9,7 +9,6 @@ #include "common/common_types.h" namespace Tegra { -class GPU; namespace Decoder { struct Vp9FrameDimensions { diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp new file mode 100644 index 000000000..dceefdb7f --- /dev/null +++ b/src/video_core/host1x/control.cpp @@ -0,0 +1,33 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common/assert.h" +#include "video_core/host1x/control.h" +#include "video_core/host1x/host1x.h" + +namespace Tegra::Host1x { + +Control::Control(Host1x& host1x_) : host1x(host1x_) {} + +Control::~Control() = default; + +void Control::ProcessMethod(Method method, u32 argument) { + switch (method) { + case Method::LoadSyncptPayload32: + syncpoint_value = argument; + break; + case Method::WaitSyncpt: + case Method::WaitSyncpt32: + Execute(argument); + break; + default: + UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method)); + break; + } +} + +void Control::Execute(u32 data) { + host1x.GetSyncpointManager().WaitHost(data, syncpoint_value); +} + +} // namespace Tegra::Host1x diff --git a/src/video_core/command_classes/host1x.h b/src/video_core/host1x/control.h index bb48a4381..e117888a3 100644 --- a/src/video_core/command_classes/host1x.h +++ b/src/video_core/host1x/control.h @@ -1,15 +1,19 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once #include "common/common_types.h" namespace Tegra { -class GPU; + +namespace Host1x { + +class Host1x; class Nvdec; -class Host1x { +class Control { public: enum class Method : u32 { WaitSyncpt = 0x8, @@ -17,8 +21,8 @@ public: WaitSyncpt32 = 0x50, }; - explicit Host1x(GPU& gpu); - ~Host1x(); + explicit Control(Host1x& host1x); + ~Control(); /// Writes the method into the state, Invoke Execute() if encountered void ProcessMethod(Method method, u32 argument); @@ -28,7 +32,9 @@ private: void Execute(u32 data); u32 syncpoint_value{}; - GPU& gpu; + Host1x& host1x; 
}; +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp new file mode 100644 index 000000000..7c317a85d --- /dev/null +++ b/src/video_core/host1x/host1x.cpp @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "core/core.h" +#include "video_core/host1x/host1x.h" + +namespace Tegra { + +namespace Host1x { + +Host1x::Host1x(Core::System& system_) + : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12}, + allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {} + +} // namespace Host1x + +} // namespace Tegra diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h new file mode 100644 index 000000000..57082ae54 --- /dev/null +++ b/src/video_core/host1x/host1x.h @@ -0,0 +1,57 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include "common/common_types.h" + +#include "common/address_space.h" +#include "video_core/host1x/syncpoint_manager.h" +#include "video_core/memory_manager.h" + +namespace Core { +class System; +} // namespace Core + +namespace Tegra { + +namespace Host1x { + +class Host1x { +public: + explicit Host1x(Core::System& system); + + SyncpointManager& GetSyncpointManager() { + return syncpoint_manager; + } + + const SyncpointManager& GetSyncpointManager() const { + return syncpoint_manager; + } + + Tegra::MemoryManager& MemoryManager() { + return memory_manager; + } + + const Tegra::MemoryManager& MemoryManager() const { + return memory_manager; + } + + Common::FlatAllocator<u32, 0, 32>& Allocator() { + return *allocator; + } + + const Common::FlatAllocator<u32, 0, 32>& Allocator() const { + return *allocator; + } + +private: + Core::System& system; + SyncpointManager syncpoint_manager; + Tegra::MemoryManager memory_manager; + std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator; +}; + +} // namespace Host1x + +} // namespace Tegra diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/host1x/nvdec.cpp index 4fbbe3da6..a4bd5b79f 100644 --- a/src/video_core/command_classes/nvdec.cpp +++ b/src/video_core/host1x/nvdec.cpp @@ -2,15 +2,16 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "common/assert.h" -#include "video_core/command_classes/nvdec.h" -#include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/nvdec.h" -namespace Tegra { +namespace Tegra::Host1x { #define NVDEC_REG_INDEX(field_name) \ (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64)) -Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {} +Nvdec::Nvdec(Host1x& host1x_) + : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {} Nvdec::~Nvdec() = default; @@ -44,4 +45,4 @@ void Nvdec::Execute() { } } -} // namespace Tegra +} // namespace Tegra::Host1x diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/host1x/nvdec.h index 488531fc6..3949d5181 100644 --- a/src/video_core/command_classes/nvdec.h +++ b/src/video_core/host1x/nvdec.h @@ -6,14 +6,17 @@ #include <memory> #include <vector> #include "common/common_types.h" -#include "video_core/command_classes/codecs/codec.h" +#include "video_core/host1x/codecs/codec.h" namespace Tegra { -class GPU; + +namespace Host1x { + +class Host1x; class Nvdec { public: - explicit Nvdec(GPU& gpu); + explicit Nvdec(Host1x& host1x); ~Nvdec(); /// Writes the 
method into the state, Invoke Execute() if encountered @@ -26,8 +29,11 @@ private: /// Invoke codec to decode a frame void Execute(); - GPU& gpu; + Host1x& host1x; NvdecCommon::NvdecRegisters state; std::unique_ptr<Codec> codec; }; + +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/command_classes/nvdec_common.h b/src/video_core/host1x/nvdec_common.h index 521e5b52b..49d67ebbe 100644 --- a/src/video_core/command_classes/nvdec_common.h +++ b/src/video_core/host1x/nvdec_common.h @@ -7,7 +7,7 @@ #include "common/common_funcs.h" #include "common/common_types.h" -namespace Tegra::NvdecCommon { +namespace Tegra::Host1x::NvdecCommon { enum class VideoCodec : u64 { None = 0x0, @@ -94,4 +94,4 @@ ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176); #undef ASSERT_REG_POSITION -} // namespace Tegra::NvdecCommon +} // namespace Tegra::Host1x::NvdecCommon diff --git a/src/video_core/command_classes/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp index 67e58046f..5ef9ea217 100644 --- a/src/video_core/command_classes/sync_manager.cpp +++ b/src/video_core/host1x/sync_manager.cpp @@ -3,10 +3,13 @@ #include <algorithm> #include "sync_manager.h" -#include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/syncpoint_manager.h" namespace Tegra { -SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {} +namespace Host1x { + +SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {} SyncptIncrManager::~SyncptIncrManager() = default; void SyncptIncrManager::Increment(u32 id) { @@ -36,8 +39,12 @@ void SyncptIncrManager::IncrementAllDone() { if (!increments[done_count].complete) { break; } - gpu.IncrementSyncPoint(increments[done_count].syncpt_id); + auto& syncpoint_manager = host1x.GetSyncpointManager(); + syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id); + syncpoint_manager.IncrementHost(increments[done_count].syncpt_id); } increments.erase(increments.begin(), increments.begin() + done_count); } + +} // namespace Host1x } // namespace Tegra diff --git a/src/video_core/command_classes/sync_manager.h b/src/video_core/host1x/sync_manager.h index 6dfaae080..7bb77fa27 100644 --- a/src/video_core/command_classes/sync_manager.h +++ b/src/video_core/host1x/sync_manager.h @@ -8,7 +8,11 @@ #include "common/common_types.h" namespace Tegra { -class GPU; + +namespace Host1x { + +class Host1x; + struct SyncptIncr { u32 id; u32 class_id; @@ -21,7 +25,7 @@ struct SyncptIncr { class SyncptIncrManager { public: - explicit SyncptIncrManager(GPU& gpu); + explicit SyncptIncrManager(Host1x& host1x); ~SyncptIncrManager(); /// Add syncpoint id and increment all @@ -41,7 +45,9 @@ private: std::mutex increment_lock; u32 current_id{}; - GPU& gpu; + Host1x& host1x; }; +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp new file mode 100644 index 000000000..a44fc83d3 --- /dev/null +++ b/src/video_core/host1x/syncpoint_manager.cpp @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common/microprofile.h" +#include "video_core/host1x/syncpoint_manager.h" + +namespace Tegra { + +namespace Host1x { + +MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); + +SyncpointManager::ActionHandle SyncpointManager::RegisterAction( + std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value, + 
std::function<void()>&& action) { + if (syncpoint.load(std::memory_order_acquire) >= expected_value) { + action(); + return {}; + } + + std::unique_lock lk(guard); + if (syncpoint.load(std::memory_order_relaxed) >= expected_value) { + action(); + return {}; + } + auto it = action_storage.begin(); + while (it != action_storage.end()) { + if (it->expected_value >= expected_value) { + break; + } + ++it; + } + return action_storage.emplace(it, expected_value, std::move(action)); +} + +void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage, + ActionHandle& handle) { + std::unique_lock lk(guard); + + // We want to ensure the iterator still exists prior to erasing it + // Otherwise, if an invalid iterator was passed in then it could lead to UB + // It is important to avoid UB in that case since the deregister isn't called from a locked + // context + for (auto it = action_storage.begin(); it != action_storage.end(); it++) { + if (it == handle) { + action_storage.erase(it); + return; + } + } +} + +void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) { + DeregisterAction(guest_action_storage[syncpoint_id], handle); +} + +void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) { + DeregisterAction(host_action_storage[syncpoint_id], handle); +} + +void SyncpointManager::IncrementGuest(u32 syncpoint_id) { + Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]); +} + +void SyncpointManager::IncrementHost(u32 syncpoint_id) { + Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]); +} + +void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) { + Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value); +} + +void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) { + MICROPROFILE_SCOPE(GPU_wait); + Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value); +} + +void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, + std::list<RegisteredAction>& action_storage) { + auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1}; + + std::unique_lock lk(guard); + auto it = action_storage.begin(); + while (it != action_storage.end()) { + if (it->expected_value > new_value) { + break; + } + it->action(); + it = action_storage.erase(it); + } + wait_cv.notify_all(); +} + +void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, + u32 expected_value) { + const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; }; + if (pred()) { + return; + } + + std::unique_lock lk(guard); + wait_cv.wait(lk, pred); +} + +} // namespace Host1x + +} // namespace Tegra diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h new file mode 100644 index 000000000..50a264e23 --- /dev/null +++ b/src/video_core/host1x/syncpoint_manager.h @@ -0,0 +1,98 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <array> +#include <atomic> +#include <condition_variable> +#include <functional> +#include <list> +#include <mutex> + +#include "common/common_types.h" + +namespace Tegra { + +namespace Host1x { + +class SyncpointManager { +public: + u32 GetGuestSyncpointValue(u32 id) const { + return syncpoints_guest[id].load(std::memory_order_acquire); + } + + u32 GetHostSyncpointValue(u32 id) const { + 
return syncpoints_host[id].load(std::memory_order_acquire); + } + + struct RegisteredAction { + explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_) + : expected_value{expected_value_}, action{std::move(action_)} {} + u32 expected_value; + std::function<void()> action; + }; + using ActionHandle = std::list<RegisteredAction>::iterator; + + template <typename Func> + ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) { + std::function<void()> func(action); + return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id], + expected_value, std::move(func)); + } + + template <typename Func> + ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) { + std::function<void()> func(action); + return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id], + expected_value, std::move(func)); + } + + void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle); + + void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle); + + void IncrementGuest(u32 syncpoint_id); + + void IncrementHost(u32 syncpoint_id); + + void WaitGuest(u32 syncpoint_id, u32 expected_value); + + void WaitHost(u32 syncpoint_id, u32 expected_value); + + bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const { + return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value; + } + + bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const { + return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value; + } + +private: + void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, + std::list<RegisteredAction>& action_storage); + + ActionHandle RegisterAction(std::atomic<u32>& syncpoint, + std::list<RegisteredAction>& action_storage, u32 expected_value, + std::function<void()>&& action); + + void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle); + + void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value); + + static constexpr size_t NUM_MAX_SYNCPOINTS = 192; + + std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{}; + std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{}; + + std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage; + std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage; + + std::mutex guard; + std::condition_variable wait_guest_cv; + std::condition_variable wait_host_cv; +}; + +} // namespace Host1x + +} // namespace Tegra diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/host1x/vic.cpp index 7c17df353..ac0b7d20e 100644 --- a/src/video_core/command_classes/vic.cpp +++ b/src/video_core/host1x/vic.cpp @@ -18,14 +18,17 @@ extern "C" { #include "common/bit_field.h" #include "common/logging/log.h" -#include "video_core/command_classes/nvdec.h" -#include "video_core/command_classes/vic.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/gpu.h" +#include "video_core/host1x/host1x.h" +#include "video_core/host1x/nvdec.h" +#include "video_core/host1x/vic.h" #include "video_core/memory_manager.h" #include "video_core/textures/decoders.h" namespace Tegra { + +namespace Host1x { + namespace { enum class VideoPixelFormat : u64_le { RGBA8 = 0x1f, @@ -46,8 +49,8 @@ union VicConfig { BitField<46, 14, u64_le> surface_height_minus1; }; -Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_) - : gpu(gpu_), 
+Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_) + : host1x(host1x_), nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {} Vic::~Vic() = default; @@ -78,7 +81,7 @@ void Vic::Execute() { LOG_ERROR(Service_NVDRV, "VIC Luma address not set."); return; } - const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)}; + const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)}; const AVFramePtr frame_ptr = nvdec_processor->GetFrame(); const auto* frame = frame_ptr.get(); if (!frame) { @@ -153,15 +156,16 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) { const u32 block_height = static_cast<u32>(config.block_linear_height_log2); const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0); luma_buffer.resize(size); - Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(), - converted_frame_buf_addr, block_height, 0, 0); + std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height); + Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height, + block_height, 0, width * 4); - gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); + host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); } else { // send pitch linear frame const size_t linear_size = width * height * 4; - gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, - linear_size); + host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, + linear_size); } } @@ -189,8 +193,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) { luma_buffer[dst + x] = luma_src[src + x]; } } - gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), - luma_buffer.size()); + host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), + luma_buffer.size()); // Chroma const std::size_t half_height = frame_height / 2; @@ -231,8 +235,10 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) { ASSERT(false); break; } - gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), - chroma_buffer.size()); + host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), + chroma_buffer.size()); } +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/command_classes/vic.h b/src/video_core/host1x/vic.h index 010daa6b6..2b78786e8 100644 --- a/src/video_core/command_classes/vic.h +++ b/src/video_core/host1x/vic.h @@ -10,7 +10,10 @@ struct SwsContext; namespace Tegra { -class GPU; + +namespace Host1x { + +class Host1x; class Nvdec; union VicConfig; @@ -25,7 +28,7 @@ public: SetOutputSurfaceChromaUnusedOffset = 0x1ca }; - explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor); + explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor); ~Vic(); @@ -39,8 +42,8 @@ private: void WriteYUVFrame(const AVFrame* frame, const VicConfig& config); - GPU& gpu; - std::shared_ptr<Tegra::Nvdec> nvdec_processor; + Host1x& host1x; + std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor; /// Avoid reallocation of the following buffers every frame, as their /// size does not change during a stream @@ -58,4 +61,6 @@ private: s32 scaler_height{}; }; +} // namespace Host1x + } // namespace Tegra diff --git a/src/video_core/host_shaders/astc_decoder.comp 
b/src/video_core/host_shaders/astc_decoder.comp index 3441a5fe5..d608678a3 100644 --- a/src/video_core/host_shaders/astc_decoder.comp +++ b/src/video_core/host_shaders/astc_decoder.comp @@ -1065,7 +1065,7 @@ TexelWeightParams DecodeBlockInfo() { void FillError(ivec3 coord) { for (uint j = 0; j < block_dims.y; j++) { for (uint i = 0; i < block_dims.x; i++) { - imageStore(dest_image, coord + ivec3(i, j, 0), vec4(1.0, 1.0, 0.0, 1.0)); + imageStore(dest_image, coord + ivec3(i, j, 0), vec4(0.0, 0.0, 0.0, 0.0)); } } } diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp index 43f8b5904..f61d5998e 100644 --- a/src/video_core/macro/macro.cpp +++ b/src/video_core/macro/macro.cpp @@ -8,6 +8,7 @@ #include <boost/container_hash/hash.hpp> +#include <fstream> #include "common/assert.h" #include "common/fs/fs.h" #include "common/fs/path_util.h" diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp index 58382755b..8a8adbb42 100644 --- a/src/video_core/macro/macro_hle.cpp +++ b/src/video_core/macro/macro_hle.cpp @@ -3,6 +3,8 @@ #include <array> #include <vector> +#include "common/scope_exit.h" +#include "video_core/dirty_flags.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/macro/macro.h" #include "video_core/macro/macro_hle.h" @@ -19,16 +21,16 @@ void HLE_771BB18C62444DA0(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& maxwell3d.regs.draw.topology.Assign( static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0x3ffffff)); - maxwell3d.regs.vb_base_instance = parameters[5]; + maxwell3d.regs.global_base_instance_index = parameters[5]; maxwell3d.mme_draw.instance_count = instance_count; - maxwell3d.regs.vb_element_base = parameters[3]; - maxwell3d.regs.index_array.count = parameters[1]; - maxwell3d.regs.index_array.first = parameters[4]; + maxwell3d.regs.global_base_vertex_index = parameters[3]; + maxwell3d.regs.index_buffer.count = parameters[1]; + maxwell3d.regs.index_buffer.first = parameters[4]; if (maxwell3d.ShouldExecute()) { maxwell3d.Rasterizer().Draw(true, true); } - maxwell3d.regs.index_array.count = 0; + maxwell3d.regs.index_buffer.count = 0; maxwell3d.mme_draw.instance_count = 0; maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; } @@ -38,7 +40,7 @@ void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& maxwell3d.regs.vertex_buffer.first = parameters[3]; maxwell3d.regs.vertex_buffer.count = parameters[1]; - maxwell3d.regs.vb_base_instance = parameters[4]; + maxwell3d.regs.global_base_instance_index = parameters[4]; maxwell3d.regs.draw.topology.Assign( static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0])); maxwell3d.mme_draw.instance_count = count; @@ -55,11 +57,12 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]); const u32 element_base = parameters[4]; const u32 base_instance = parameters[5]; - maxwell3d.regs.index_array.first = parameters[3]; - maxwell3d.regs.reg_array[0x446] = element_base; // vertex id base? 
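Background for these macro_hle changes (this hunk and the next): each GPU macro program is identified by a 64-bit hash of its bytecode, and the hle_funcs table later in this diff maps known hashes to hand-written C++ replacements so that hot draw macros bypass the macro interpreter/JIT entirely. A condensed sketch of that dispatch shape, where Engine stands in for Engines::Maxwell3D and the hash is one that appears in this diff:

#include <array>
#include <cstdint>
#include <utility>
#include <vector>

struct Engine; // Stand-in for Engines::Maxwell3D.
using HLEFunction = void (*)(Engine&, const std::vector<std::uint32_t>&);

void HLE_DrawIndexedInstanced(Engine&, const std::vector<std::uint32_t>&) {
    // A real entry pokes the draw registers directly, as in the hunks above.
}

constexpr std::array<std::pair<std::uint64_t, HLEFunction>, 1> hle_table{{
    {0x0217920100488FF7ULL, &HLE_DrawIndexedInstanced},
}};

// Returns nullptr for unknown hashes; the caller then compiles the macro
// with the JIT instead.
HLEFunction Lookup(std::uint64_t code_hash) {
    for (const auto& [hash, function] : hle_table) {
        if (hash == code_hash) {
            return function;
        }
    }
    return nullptr;
}
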
- maxwell3d.regs.index_array.count = parameters[1]; - maxwell3d.regs.vb_element_base = element_base; - maxwell3d.regs.vb_base_instance = base_instance; + maxwell3d.regs.index_buffer.first = parameters[3]; + maxwell3d.regs.vertex_id_base = element_base; + maxwell3d.regs.index_buffer.count = parameters[1]; + maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; + maxwell3d.regs.global_base_vertex_index = element_base; + maxwell3d.regs.global_base_instance_index = base_instance; maxwell3d.mme_draw.instance_count = instance_count; maxwell3d.CallMethodFromMME(0x8e3, 0x640); maxwell3d.CallMethodFromMME(0x8e4, element_base); @@ -69,10 +72,10 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& if (maxwell3d.ShouldExecute()) { maxwell3d.Rasterizer().Draw(true, true); } - maxwell3d.regs.reg_array[0x446] = 0x0; // vertex id base? - maxwell3d.regs.index_array.count = 0; - maxwell3d.regs.vb_element_base = 0x0; - maxwell3d.regs.vb_base_instance = 0x0; + maxwell3d.regs.vertex_id_base = 0x0; + maxwell3d.regs.index_buffer.count = 0; + maxwell3d.regs.global_base_vertex_index = 0x0; + maxwell3d.regs.global_base_instance_index = 0x0; maxwell3d.mme_draw.instance_count = 0; maxwell3d.CallMethodFromMME(0x8e3, 0x640); maxwell3d.CallMethodFromMME(0x8e4, 0x0); @@ -80,10 +83,67 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; } -constexpr std::array<std::pair<u64, HLEFunction>, 3> hle_funcs{{ +// Multidraw Indirect +void HLE_3F5E74B9C9A50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) { + SCOPE_EXIT({ + // Clean everything. + maxwell3d.regs.vertex_id_base = 0x0; + maxwell3d.regs.index_buffer.count = 0; + maxwell3d.regs.global_base_vertex_index = 0x0; + maxwell3d.regs.global_base_instance_index = 0x0; + maxwell3d.mme_draw.instance_count = 0; + maxwell3d.CallMethodFromMME(0x8e3, 0x640); + maxwell3d.CallMethodFromMME(0x8e4, 0x0); + maxwell3d.CallMethodFromMME(0x8e5, 0x0); + maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; + maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; + }); + const u32 start_indirect = parameters[0]; + const u32 end_indirect = parameters[1]; + if (start_indirect >= end_indirect) { + // Nothing to do. 
+ return; + } + const auto topology = + static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[2]); + maxwell3d.regs.draw.topology.Assign(topology); + const u32 padding = parameters[3]; + const std::size_t max_draws = parameters[4]; + + const u32 indirect_words = 5 + padding; + const std::size_t first_draw = start_indirect; + const std::size_t effective_draws = end_indirect - start_indirect; + const std::size_t last_draw = start_indirect + std::min(effective_draws, max_draws); + + for (std::size_t index = first_draw; index < last_draw; index++) { + const std::size_t base = index * indirect_words + 5; + const u32 num_vertices = parameters[base]; + const u32 instance_count = parameters[base + 1]; + const u32 first_index = parameters[base + 2]; + const u32 base_vertex = parameters[base + 3]; + const u32 base_instance = parameters[base + 4]; + maxwell3d.regs.index_buffer.first = first_index; + maxwell3d.regs.vertex_id_base = base_vertex; + maxwell3d.regs.index_buffer.count = num_vertices; + maxwell3d.regs.global_base_vertex_index = base_vertex; + maxwell3d.regs.global_base_instance_index = base_instance; + maxwell3d.mme_draw.instance_count = instance_count; + maxwell3d.CallMethodFromMME(0x8e3, 0x640); + maxwell3d.CallMethodFromMME(0x8e4, base_vertex); + maxwell3d.CallMethodFromMME(0x8e5, base_instance); + maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; + if (maxwell3d.ShouldExecute()) { + maxwell3d.Rasterizer().Draw(true, true); + } + maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; + } +} + +constexpr std::array<std::pair<u64, HLEFunction>, 4> hle_funcs{{ {0x771BB18C62444DA0, &HLE_771BB18C62444DA0}, {0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD}, {0x0217920100488FF7, &HLE_0217920100488FF7}, + {0x3F5E74B9C9A50164, &HLE_3F5E74B9C9A50164}, }}; class HLEMacroImpl final : public CachedMacro { @@ -99,6 +159,7 @@ private: Engines::Maxwell3D& maxwell3d; HLEFunction func; }; + } // Anonymous namespace HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {} diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp index aca25d902..a302a9603 100644 --- a/src/video_core/macro/macro_jit_x64.cpp +++ b/src/video_core/macro/macro_jit_x64.cpp @@ -279,28 +279,13 @@ void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) { auto dst = Compile_GetRegister(opcode.src_a, RESULT); auto src = Compile_GetRegister(opcode.src_b, eax); - if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) { - shr(src, opcode.bf_src_bit); - } else if (opcode.bf_src_bit == 31) { - xor_(src, src); - } - // Don't bother masking the whole register since we're using a 32 bit register - if (opcode.bf_size != 31 && opcode.bf_size != 0) { - and_(src, opcode.GetBitfieldMask()); - } else if (opcode.bf_size == 0) { - xor_(src, src); - } - if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) { - shl(src, opcode.bf_dst_bit); - } else if (opcode.bf_dst_bit == 31) { - xor_(src, src); - } - const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit); - if (mask != 0xffffffff) { - and_(dst, mask); - } + and_(dst, mask); + shr(src, opcode.bf_src_bit); + and_(src, opcode.GetBitfieldMask()); + shl(src, opcode.bf_dst_bit); or_(dst, src); + Compile_ProcessResult(opcode.result_operation, opcode.dst); } @@ -309,17 +294,9 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) { const auto src = Compile_GetRegister(opcode.src_b, RESULT); shr(src, dst.cvt8()); - if (opcode.bf_size != 0 && opcode.bf_size 
!= 31) { - and_(src, opcode.GetBitfieldMask()); - } else if (opcode.bf_size == 0) { - xor_(src, src); - } + and_(src, opcode.GetBitfieldMask()); + shl(src, opcode.bf_dst_bit); - if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) { - shl(src, opcode.bf_dst_bit); - } else if (opcode.bf_dst_bit == 31) { - xor_(src, src); - } Compile_ProcessResult(opcode.result_operation, opcode.dst); } @@ -327,13 +304,8 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) { const auto dst = Compile_GetRegister(opcode.src_a, ecx); const auto src = Compile_GetRegister(opcode.src_b, RESULT); - if (opcode.bf_src_bit != 0) { - shr(src, opcode.bf_src_bit); - } - - if (opcode.bf_size != 31) { - and_(src, opcode.GetBitfieldMask()); - } + shr(src, opcode.bf_src_bit); + and_(src, opcode.GetBitfieldMask()); shl(src, dst.cvt8()); Compile_ProcessResult(opcode.result_operation, opcode.dst); @@ -429,17 +401,11 @@ void MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) { Xbyak::Label handle_post_exit{}; Xbyak::Label skip{}; jmp(skip, T_NEAR); - if (opcode.is_exit) { - L(handle_post_exit); - // Execute 1 instruction - mov(BRANCH_HOLDER, end_of_code); - // Jump to next instruction to skip delay slot check - jmp(labels[jump_address], T_NEAR); - } else { - L(handle_post_exit); - xor_(BRANCH_HOLDER, BRANCH_HOLDER); - jmp(labels[jump_address], T_NEAR); - } + + L(handle_post_exit); + xor_(BRANCH_HOLDER, BRANCH_HOLDER); + jmp(labels[jump_address], T_NEAR); + L(skip); mov(BRANCH_HOLDER, handle_post_exit); jmp(delay_skip[pc], T_NEAR); diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index bf9eb735d..384350dbd 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -7,6 +7,7 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/device_memory.h" #include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_process.h" #include "core/memory.h" @@ -16,172 +17,239 @@ namespace Tegra { -MemoryManager::MemoryManager(Core::System& system_) - : system{system_}, page_table(page_table_size) {} +std::atomic<size_t> MemoryManager::unique_identifier_generator{}; + +MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_, + u64 page_bits_) + : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()}, + address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_}, + entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38, + page_bits != big_page_bits ? 
page_bits : 0}, + unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} { + address_space_size = 1ULL << address_space_bits; + page_size = 1ULL << page_bits; + page_mask = page_size - 1ULL; + big_page_size = 1ULL << big_page_bits; + big_page_mask = big_page_size - 1ULL; + const u64 page_table_bits = address_space_bits - page_bits; + const u64 big_page_table_bits = address_space_bits - big_page_bits; + const u64 page_table_size = 1ULL << page_table_bits; + const u64 big_page_table_size = 1ULL << big_page_table_bits; + page_table_mask = page_table_size - 1; + big_page_table_mask = big_page_table_size - 1; + + big_entries.resize(big_page_table_size / 32, 0); + big_page_table_cpu.resize(big_page_table_size); + big_page_continous.resize(big_page_table_size / continous_bits, 0); + std::array<PTEKind, 32> kind_values; + kind_values.fill(PTEKind::INVALID); + big_kinds.resize(big_page_table_size / 32, kind_values); + entries.resize(page_table_size / 32, 0); + kinds.resize(big_page_table_size / 32, kind_values); +} MemoryManager::~MemoryManager() = default; -void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { - rasterizer = rasterizer_; -} - -GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { - u64 remaining_size{size}; - for (u64 offset{}; offset < size; offset += page_size) { - if (remaining_size < page_size) { - SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size); - } else { - SetPageEntry(gpu_addr + offset, page_entry + offset); - } - remaining_size -= page_size; +template <bool is_big_page> +MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const { + if constexpr (is_big_page) { + position = position >> big_page_bits; + const u64 entry_mask = big_entries[position / 32]; + const size_t sub_index = position % 32; + return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL); + } else { + position = position >> page_bits; + const u64 entry_mask = entries[position / 32]; + const size_t sub_index = position % 32; + return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL); } - return gpu_addr; } -GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { - const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); - if (it != map_ranges.end() && it->first == gpu_addr) { - it->second = size; +template <bool is_big_page> +void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) { + if constexpr (is_big_page) { + position = position >> big_page_bits; + const u64 entry_mask = big_entries[position / 32]; + const size_t sub_index = position % 32; + big_entries[position / 32] = + (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2); } else { - map_ranges.insert(it, MapRange{gpu_addr, size}); + position = position >> page_bits; + const u64 entry_mask = entries[position / 32]; + const size_t sub_index = position % 32; + entries[position / 32] = + (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2); } - return UpdateRange(gpu_addr, cpu_addr, size); } -GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) { - return Map(cpu_addr, *FindFreeRange(size, align), size); +PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const { + auto entry = GetEntry<true>(gpu_addr); + if (entry == EntryType::Mapped || entry == EntryType::Reserved) [[likely]] { + return GetKind<true>(gpu_addr); 
+ } else { + return GetKind<false>(gpu_addr); + } } -GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) { - const std::optional<GPUVAddr> gpu_addr = FindFreeRange(size, 1, true); - ASSERT(gpu_addr); - return Map(cpu_addr, *gpu_addr, size); +template <bool is_big_page> +PTEKind MemoryManager::GetKind(size_t position) const { + if constexpr (is_big_page) { + position = position >> big_page_bits; + const size_t sub_index = position % 32; + return big_kinds[position / 32][sub_index]; + } else { + position = position >> page_bits; + const size_t sub_index = position % 32; + return kinds[position / 32][sub_index]; + } } -void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { - if (size == 0) { - return; - } - const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); - if (it != map_ranges.end()) { - ASSERT(it->first == gpu_addr); - map_ranges.erase(it); +template <bool is_big_page> +void MemoryManager::SetKind(size_t position, PTEKind kind) { + if constexpr (is_big_page) { + position = position >> big_page_bits; + const size_t sub_index = position % 32; + big_kinds[position / 32][sub_index] = kind; } else { - ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr); + position = position >> page_bits; + const size_t sub_index = position % 32; + kinds[position / 32][sub_index] = kind; } - const auto submapped_ranges = GetSubmappedRange(gpu_addr, size); - - for (const auto& [map_addr, map_size] : submapped_ranges) { - // Flush and invalidate through the GPU interface, to be asynchronous if possible. - const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr); - ASSERT(cpu_addr); +} - rasterizer->UnmapMemory(*cpu_addr, map_size); - } +inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const { + const u64 entry_mask = big_page_continous[big_page_index / continous_bits]; + const size_t sub_index = big_page_index % continous_bits; + return ((entry_mask >> sub_index) & 0x1ULL) != 0; +} - UpdateRange(gpu_addr, PageEntry::State::Unmapped, size); +inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) { + const u64 continous_mask = big_page_continous[big_page_index / continous_bits]; + const size_t sub_index = big_page_index % continous_bits; + big_page_continous[big_page_index / continous_bits] = + (~(1ULL << sub_index) & continous_mask) | (value ? 
1ULL << sub_index : 0); } -std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) { +template <MemoryManager::EntryType entry_type> +GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, + PTEKind kind) { + [[maybe_unused]] u64 remaining_size{size}; + if constexpr (entry_type == EntryType::Mapped) { + page_table.ReserveRange(gpu_addr, size); + } for (u64 offset{}; offset < size; offset += page_size) { - if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) { - return std::nullopt; + const GPUVAddr current_gpu_addr = gpu_addr + offset; + [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr); + SetEntry<false>(current_gpu_addr, entry_type); + SetKind<false>(current_gpu_addr, kind); + if (current_entry_type != entry_type) { + rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size); + } + if constexpr (entry_type == EntryType::Mapped) { + const VAddr current_cpu_addr = cpu_addr + offset; + const auto index = PageEntryIndex<false>(current_gpu_addr); + const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits); + page_table[index] = sub_value; } + remaining_size -= page_size; } - - return UpdateRange(gpu_addr, PageEntry::State::Allocated, size); -} - -GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) { - return *AllocateFixed(*FindFreeRange(size, align), size); + return gpu_addr; } -void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) { - if (!page_entry.IsValid()) { - return; +template <MemoryManager::EntryType entry_type> +GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, + size_t size, PTEKind kind) { + [[maybe_unused]] u64 remaining_size{size}; + for (u64 offset{}; offset < size; offset += big_page_size) { + const GPUVAddr current_gpu_addr = gpu_addr + offset; + [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr); + SetEntry<true>(current_gpu_addr, entry_type); + SetKind<true>(current_gpu_addr, kind); + if (current_entry_type != entry_type) { + rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size); + } + if constexpr (entry_type == EntryType::Mapped) { + const VAddr current_cpu_addr = cpu_addr + offset; + const auto index = PageEntryIndex<true>(current_gpu_addr); + const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits); + big_page_table_cpu[index] = sub_value; + const bool is_continous = ([&] { + uintptr_t base_ptr{ + reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))}; + if (base_ptr == 0) { + return false; + } + for (VAddr start_cpu = current_cpu_addr + page_size; + start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) { + base_ptr += page_size; + auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointerSilent(start_cpu)); + if (next_ptr == 0 || base_ptr != next_ptr) { + return false; + } + } + return true; + })(); + SetBigPageContinous(index, is_continous); + } + remaining_size -= big_page_size; } - - ASSERT(system.CurrentProcess() - ->PageTable() - .LockForDeviceAddressSpace(page_entry.ToAddress(), size) - .IsSuccess()); + return gpu_addr; } -void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) { - if (!page_entry.IsValid()) { - return; - } - - ASSERT(system.CurrentProcess() - ->PageTable() - .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size) - .IsSuccess()); +void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { + 
rasterizer = rasterizer_; } -PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const { - return page_table[PageEntryIndex(gpu_addr)]; +GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind, + bool is_big_pages) { + if (is_big_pages) [[likely]] { + return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind); + } + return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind); } -void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { - // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to - // improper tracking, but should be fixed in the future. - - //// Unlock the old page - // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size); - - //// Lock the new page - // TryLockPage(page_entry, size); - auto& current_page = page_table[PageEntryIndex(gpu_addr)]; - - if ((!current_page.IsValid() && page_entry.IsValid()) || - current_page.ToAddress() != page_entry.ToAddress()) { - rasterizer->ModifyGPUMemory(gpu_addr, size); +GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) { + if (is_big_pages) [[likely]] { + return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID); } - - current_page = page_entry; + return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID); } -std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align, - bool start_32bit_address) const { - if (!align) { - align = page_size; - } else { - align = Common::AlignUp(align, page_size); +void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { + if (size == 0) { + return; } + const auto submapped_ranges = GetSubmappedRange(gpu_addr, size); - u64 available_size{}; - GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start}; - while (gpu_addr + available_size < address_space_size) { - if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) { - available_size += page_size; - - if (available_size >= size) { - return gpu_addr; - } - } else { - gpu_addr += available_size + page_size; - available_size = 0; + for (const auto& [map_addr, map_size] : submapped_ranges) { + // Flush and invalidate through the GPU interface, to be asynchronous if possible. 
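Worth spelling out for the GetEntry/SetEntry helpers above: each page's state (Free/Reserved/Mapped) is a 2-bit value packed 32 entries to a u64 word, which keeps the state arrays small and cache-friendly. A self-contained sketch of that packing scheme (simplified; the real code first shifts the address right by page_bits to get the index):

#include <cstddef>
#include <cstdint>
#include <vector>

enum class EntryType : std::uint64_t { Free = 0, Reserved = 1, Mapped = 2 };

class PackedEntryTable {
public:
    explicit PackedEntryTable(std::size_t num_entries) : words((num_entries + 31) / 32, 0) {}

    EntryType Get(std::size_t index) const {
        const std::size_t sub = index % 32;
        return static_cast<EntryType>((words[index / 32] >> (2 * sub)) & 0x3ULL);
    }

    void Set(std::size_t index, EntryType entry) {
        const std::size_t sub = index % 32;
        // Clear the entry's two bits, then OR in the new value.
        words[index / 32] = (words[index / 32] & ~(3ULL << (sub * 2))) |
                            (static_cast<std::uint64_t>(entry) << (sub * 2));
    }

private:
    std::vector<std::uint64_t> words;
};
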
+ const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr); + ASSERT(cpu_addr); - const auto remainder{gpu_addr % align}; - if (remainder) { - gpu_addr = (gpu_addr - remainder) + align; - } - } + rasterizer->UnmapMemory(*cpu_addr, map_size); } - return std::nullopt; + BigPageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID); + PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID); } std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { - if (gpu_addr == 0) { + if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] { return std::nullopt; } - const auto page_entry{GetPageEntry(gpu_addr)}; - if (!page_entry.IsValid()) { - return std::nullopt; + if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] { + if (GetEntry<false>(gpu_addr) != EntryType::Mapped) { + return std::nullopt; + } + + const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)]) + << cpu_page_bits; + return cpu_addr_base + (gpu_addr & page_mask); } - return page_entry.ToAddress() + (gpu_addr & page_mask); + const VAddr cpu_addr_base = + static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits; + return cpu_addr_base + (gpu_addr & big_page_mask); } std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { @@ -189,7 +257,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t s const size_t page_last{(addr + size + page_size - 1) >> page_bits}; while (page_index < page_last) { const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (page_addr && *page_addr != 0) { + if (page_addr) { return page_addr; } ++page_index; @@ -232,126 +300,298 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data); template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { - if (!GetPageEntry(gpu_addr).IsValid()) { - return {}; - } - const auto address{GpuToCpuAddress(gpu_addr)}; if (!address) { return {}; } - return system.Memory().GetPointer(*address); + return memory.GetPointer(*address); } const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { - if (!GetPageEntry(gpu_addr).IsValid()) { - return {}; - } - const auto address{GpuToCpuAddress(gpu_addr)}; if (!address) { return {}; } - return system.Memory().GetPointer(*address); -} - -size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept { - auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first); - --it; - return it->second - (gpu_addr - it->first); -} - -void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, - bool is_safe) const { + return memory.GetPointer(*address); +} + +#ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining. 
+#pragma inline_recursion(on) +#endif + +template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> +inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, + FuncMapped&& func_mapped, FuncReserved&& func_reserved, + FuncUnmapped&& func_unmapped) const { + static constexpr bool BOOL_BREAK_MAPPED = std::is_same_v<FuncMapped, bool>; + static constexpr bool BOOL_BREAK_RESERVED = std::is_same_v<FuncReserved, bool>; + static constexpr bool BOOL_BREAK_UNMAPPED = std::is_same_v<FuncUnmapped, bool>; + u64 used_page_size; + u64 used_page_mask; + u64 used_page_bits; + if constexpr (is_big_pages) { + used_page_size = big_page_size; + used_page_mask = big_page_mask; + used_page_bits = big_page_bits; + } else { + used_page_size = page_size; + used_page_mask = page_mask; + used_page_bits = page_bits; + } std::size_t remaining_size{size}; - std::size_t page_index{gpu_src_addr >> page_bits}; - std::size_t page_offset{gpu_src_addr & page_mask}; + std::size_t page_index{gpu_src_addr >> used_page_bits}; + std::size_t page_offset{gpu_src_addr & used_page_mask}; + GPUVAddr current_address = gpu_src_addr; while (remaining_size > 0) { const std::size_t copy_amount{ - std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; - const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (page_addr && *page_addr != 0) { - const auto src_addr{*page_addr + page_offset}; - if (is_safe) { - // Flush must happen on the rasterizer interface, such that memory is always - // synchronous when it is read (even when in asynchronous GPU mode). - // Fixes Dead Cells title menu. - rasterizer->FlushRegion(src_addr, copy_amount); + std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)}; + auto entry = GetEntry<is_big_pages>(current_address); + if (entry == EntryType::Mapped) [[likely]] { + if constexpr (BOOL_BREAK_MAPPED) { + if (func_mapped(page_index, page_offset, copy_amount)) { + return; + } + } else { + func_mapped(page_index, page_offset, copy_amount); } - system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); - } else { - std::memset(dest_buffer, 0, copy_amount); - } + } else if (entry == EntryType::Reserved) { + if constexpr (BOOL_BREAK_RESERVED) { + if (func_reserved(page_index, page_offset, copy_amount)) { + return; + } + } else { + func_reserved(page_index, page_offset, copy_amount); + } + + } else [[unlikely]] { + if constexpr (BOOL_BREAK_UNMAPPED) { + if (func_unmapped(page_index, page_offset, copy_amount)) { + return; + } + } else { + func_unmapped(page_index, page_offset, copy_amount); + } + } page_index++; page_offset = 0; - dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; remaining_size -= copy_amount; + current_address += copy_amount; } } +template <bool is_safe> +void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, + std::size_t size) const { + auto set_to_zero = [&]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, std::size_t copy_amount) { + std::memset(dest_buffer, 0, copy_amount); + dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; + }; + auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + if constexpr (is_safe) { + rasterizer->FlushRegion(cpu_addr_base, copy_amount); + } + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(dest_buffer, 
physical, copy_amount); + dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; + }; + auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if constexpr (is_safe) { + rasterizer->FlushRegion(cpu_addr_base, copy_amount); + } + if (!IsBigPageContinous(page_index)) [[unlikely]] { + memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount); + } else { + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(dest_buffer, physical, copy_amount); + } + dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; + }; + auto read_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, mapped_normal, set_to_zero, set_to_zero); + }; + MemoryOperation<true>(gpu_src_addr, size, mapped_big, set_to_zero, read_short_pages); +} + void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const { - ReadBlockImpl(gpu_src_addr, dest_buffer, size, true); + ReadBlockImpl<true>(gpu_src_addr, dest_buffer, size); } void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, const std::size_t size) const { - ReadBlockImpl(gpu_src_addr, dest_buffer, size, false); + ReadBlockImpl<false>(gpu_src_addr, dest_buffer, size); } -void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, - bool is_safe) { - std::size_t remaining_size{size}; - std::size_t page_index{gpu_dest_addr >> page_bits}; - std::size_t page_offset{gpu_dest_addr & page_mask}; - - while (remaining_size > 0) { - const std::size_t copy_amount{ - std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; - const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (page_addr && *page_addr != 0) { - const auto dest_addr{*page_addr + page_offset}; - - if (is_safe) { - // Invalidate must happen on the rasterizer interface, such that memory is always - // synchronous when it is written (even when in asynchronous GPU mode). 
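
[editor's note] Both ReadBlockImpl and WriteBlockImpl now funnel through the templated MemoryOperation walker introduced above, which clamps each step to the current page boundary and statically decides whether a callback may break the walk early. A simplified, self-contained rendition of that pattern — ForEachPage and its signature are assumptions for illustration; the real walker dispatches separate mapped/reserved/unmapped callbacks keyed on the PTE entry type:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <type_traits>

    // Simplified page walker: clamp each step to the end of the current page
    // and let the callback opt into early exit by returning bool.
    template <std::size_t PageBits, typename Func>
    void ForEachPage(std::uint64_t addr, std::size_t size, Func&& func) {
        constexpr std::size_t page_size = std::size_t{1} << PageBits;
        constexpr std::size_t page_mask = page_size - 1;
        std::size_t page_index = addr >> PageBits;
        std::size_t page_offset = addr & page_mask;
        while (size > 0) {
            const std::size_t amount = std::min(page_size - page_offset, size);
            if constexpr (std::is_invocable_r_v<bool, Func, std::size_t, std::size_t,
                                                std::size_t>) {
                if (func(page_index, page_offset, amount)) {
                    return; // bool-returning callbacks can stop the walk early
                }
            } else {
                func(page_index, page_offset, amount);
            }
            ++page_index;
            page_offset = 0; // every page after the first starts at offset 0
            size -= amount;
        }
    }

With PageBits = 16 this walks the 64 KiB big pages, with 12 the small ones — mirroring the is_big_pages template switch, where unmapped big-page entries fall back to a nested small-page walk.
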
- rasterizer->InvalidateRegion(dest_addr, copy_amount); - } - system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); +template <bool is_safe> +void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, + std::size_t size) { + auto just_advance = [&]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, std::size_t copy_amount) { + src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; + }; + auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + if constexpr (is_safe) { + rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); } - - page_index++; - page_offset = 0; + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(physical, src_buffer, copy_amount); src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; - remaining_size -= copy_amount; - } + }; + auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if constexpr (is_safe) { + rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); + } + if (!IsBigPageContinous(page_index)) [[unlikely]] { + memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount); + } else { + u8* physical = memory.GetPointer(cpu_addr_base); + std::memcpy(physical, src_buffer, copy_amount); + } + src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; + }; + auto write_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, mapped_normal, just_advance, just_advance); + }; + MemoryOperation<true>(gpu_dest_addr, size, mapped_big, just_advance, write_short_pages); } void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) { - WriteBlockImpl(gpu_dest_addr, src_buffer, size, true); + WriteBlockImpl<true>(gpu_dest_addr, src_buffer, size); } void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) { - WriteBlockImpl(gpu_dest_addr, src_buffer, size, false); + WriteBlockImpl<false>(gpu_dest_addr, src_buffer, size); } void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const { - size_t remaining_size{size}; - size_t page_index{gpu_addr >> page_bits}; - size_t page_offset{gpu_addr & page_mask}; - while (remaining_size > 0) { - const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; - if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { - rasterizer->FlushRegion(*page_addr + page_offset, num_bytes); + auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) {}; + + auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + rasterizer->FlushRegion(cpu_addr_base, copy_amount); + }; + auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + rasterizer->FlushRegion(cpu_addr_base, copy_amount); + }; + auto flush_short_pages = 
[&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing); + }; + MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, flush_short_pages); +} + +bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const { + bool result = false; + auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { return false; }; + + auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount); + return result; + }; + auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount); + return result; + }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing); + return result; + }; + MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, check_short_pages); + return result; +} + +size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const { + std::optional<VAddr> old_page_addr{}; + size_t range_so_far = 0; + bool result{false}; + auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + std::size_t copy_amount) { + result = true; + return true; + }; + auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + result = true; + return true; } - ++page_index; - page_offset = 0; - remaining_size -= num_bytes; - } + range_so_far += copy_amount; + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + return true; + } + range_so_far += copy_amount; + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, short_check, fail, fail); + return result; + }; + MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages); + return range_so_far; +} + +void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size) const { + auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) {}; + + auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + 
rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); + }; + auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); + }; + auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing); + }; + MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, invalidate_short_pages); } void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { @@ -365,87 +605,134 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std } bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { - const auto cpu_addr{GpuToCpuAddress(gpu_addr)}; - if (!cpu_addr) { + if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] { + size_t page_index = gpu_addr >> big_page_bits; + if (IsBigPageContinous(page_index)) [[likely]] { + const std::size_t page{(page_index & big_page_mask) + size}; + return page <= big_page_size; + } + const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; + return page <= Core::Memory::YUZU_PAGESIZE; + } + if (GetEntry<false>(gpu_addr) != EntryType::Mapped) { return false; } - const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; + const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; return page <= Core::Memory::YUZU_PAGESIZE; } bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { - size_t page_index{gpu_addr >> page_bits}; - const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; std::optional<VAddr> old_page_addr{}; - while (page_index != page_last) { - const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (!page_addr || *page_addr == 0) { - return false; + bool result{true}; + auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + std::size_t copy_amount) { + result = false; + return true; + }; + auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + result = false; + return true; } - if (old_page_addr) { - if (*old_page_addr + page_size != *page_addr) { - return false; - } + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != cpu_addr_base) { + result = false; + return true; } - old_page_addr = page_addr; - ++page_index; - } - return true; + old_page_addr = {cpu_addr_base + copy_amount}; + return false; + }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, short_check, fail, fail); + return !result; + }; + MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages); + return result; } bool 
MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const { - size_t page_index{gpu_addr >> page_bits}; - const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; - while (page_index < page_last) { - if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) { - return false; - } - ++page_index; - } - return true; + bool result{true}; + auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { + result = false; + return true; + }; + auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { return false; }; + auto check_short_pages = [&](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, pass, pass, fail); + return !result; + }; + MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages); + return result; } std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange( GPUVAddr gpu_addr, std::size_t size) const { std::vector<std::pair<GPUVAddr, std::size_t>> result{}; - size_t page_index{gpu_addr >> page_bits}; - size_t remaining_size{size}; - size_t page_offset{gpu_addr & page_mask}; std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{}; std::optional<VAddr> old_page_addr{}; - const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) { - if (!last_segment) { - const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset; - last_segment = {new_base_addr, bytes}; - } else { - last_segment->second += bytes; - } - }; - const auto split = [&last_segment, &result] { + const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index, + [[maybe_unused]] std::size_t offset, + [[maybe_unused]] std::size_t copy_amount) { if (last_segment) { result.push_back(*last_segment); last_segment = std::nullopt; } }; - while (remaining_size > 0) { - const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; - const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; - if (!page_addr || *page_addr == 0) { - split(); - } else if (old_page_addr) { - if (*old_page_addr + page_size != *page_addr) { - split(); + const auto extend_size_big = [this, &split, &old_page_addr, + &last_segment](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + if (old_page_addr) { + if (*old_page_addr != cpu_addr_base) { + split(0, 0, 0); + } + } + old_page_addr = {cpu_addr_base + copy_amount}; + if (!last_segment) { + const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset; + last_segment = {new_base_addr, copy_amount}; + } else { + last_segment->second += copy_amount; + } + }; + const auto extend_size_short = [this, &split, &old_page_addr, + &last_segment](std::size_t page_index, std::size_t offset, + std::size_t copy_amount) { + const VAddr cpu_addr_base = + (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr) { + if (*old_page_addr != cpu_addr_base) { + split(0, 0, 0); } - extend_size(num_bytes); + } + old_page_addr = {cpu_addr_base + copy_amount}; + if (!last_segment) { + const GPUVAddr new_base_addr = (page_index << page_bits) + offset; + last_segment = {new_base_addr, copy_amount}; } else { - 
extend_size(num_bytes); + last_segment->second += copy_amount; } - ++page_index; - page_offset = 0; - remaining_size -= num_bytes; - old_page_addr = page_addr; - } - split(); + }; + auto do_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { + GPUVAddr base = (page_index << big_page_bits) + offset; + MemoryOperation<false>(base, copy_amount, extend_size_short, split, split); + }; + MemoryOperation<true>(gpu_addr, size, extend_size_big, split, do_short_pages); + split(0, 0, 0); return result; } diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 74f9ce175..ab4bc9ec6 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -3,73 +3,40 @@ #pragma once +#include <atomic> #include <map> #include <optional> #include <vector> #include "common/common_types.h" +#include "common/multi_level_page_table.h" +#include "common/virtual_buffer.h" +#include "video_core/pte_kind.h" namespace VideoCore { class RasterizerInterface; } namespace Core { +class DeviceMemory; +namespace Memory { +class Memory; +} // namespace Memory class System; -} +} // namespace Core namespace Tegra { -class PageEntry final { -public: - enum class State : u32 { - Unmapped = static_cast<u32>(-1), - Allocated = static_cast<u32>(-2), - }; - - constexpr PageEntry() = default; - constexpr PageEntry(State state_) : state{state_} {} - constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {} - - [[nodiscard]] constexpr bool IsUnmapped() const { - return state == State::Unmapped; - } - - [[nodiscard]] constexpr bool IsAllocated() const { - return state == State::Allocated; - } - - [[nodiscard]] constexpr bool IsValid() const { - return !IsUnmapped() && !IsAllocated(); - } - - [[nodiscard]] constexpr VAddr ToAddress() const { - if (!IsValid()) { - return {}; - } - - return static_cast<VAddr>(state) << ShiftBits; - } - - [[nodiscard]] constexpr PageEntry operator+(u64 offset) const { - // If this is a reserved value, offsets do not apply - if (!IsValid()) { - return *this; - } - return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset}; - } - -private: - static constexpr std::size_t ShiftBits{12}; - - State state{State::Unmapped}; -}; -static_assert(sizeof(PageEntry) == 4, "PageEntry is too large"); - class MemoryManager final { public: - explicit MemoryManager(Core::System& system_); + explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40, + u64 big_page_bits_ = 16, u64 page_bits_ = 12); ~MemoryManager(); + size_t GetID() const { + return unique_identifier; + } + /// Binds a renderer to the memory manager. void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); @@ -86,9 +53,6 @@ public: [[nodiscard]] u8* GetPointer(GPUVAddr addr); [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const; - /// Returns the number of bytes until the end of the memory map containing the given GPU address - [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept; - /** * ReadBlock and WriteBlock are full read and write operations over virtual * GPU Memory. 
It's important to use these when GPU memory may not be continuous @@ -135,54 +99,109 @@ public: std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, std::size_t size) const; - [[nodiscard]] GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size); - [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align); - [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size); - [[nodiscard]] std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size); - [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align); + GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, + PTEKind kind = PTEKind::INVALID, bool is_big_pages = true); + GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); void Unmap(GPUVAddr gpu_addr, std::size_t size); void FlushRegion(GPUVAddr gpu_addr, size_t size) const; + void InvalidateRegion(GPUVAddr gpu_addr, size_t size) const; + + bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const; + + size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const; + + bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const { + return gpu_addr < address_space_size; + } + + PTEKind GetPageKind(GPUVAddr gpu_addr) const; + private: - [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const; - void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size); - GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size); - [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align, - bool start_32bit_address = false) const; - - void TryLockPage(PageEntry page_entry, std::size_t size); - void TryUnlockPage(PageEntry page_entry, std::size_t size); - - void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, - bool is_safe) const; - void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, - bool is_safe); - - [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) { - return (gpu_addr >> page_bits) & page_table_mask; + template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> + inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, + FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const; + + template <bool is_safe> + void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const; + + template <bool is_safe> + void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size); + + template <bool is_big_page> + [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const { + if constexpr (is_big_page) { + return (gpu_addr >> big_page_bits) & big_page_table_mask; + } else { + return (gpu_addr >> page_bits) & page_table_mask; + } } - static constexpr u64 address_space_size = 1ULL << 40; - static constexpr u64 address_space_start = 1ULL << 32; - static constexpr u64 address_space_start_low = 1ULL << 16; - static constexpr u64 page_bits{16}; - static constexpr u64 page_size{1 << page_bits}; - static constexpr u64 page_mask{page_size - 1}; - static constexpr u64 page_table_bits{24}; - static constexpr u64 page_table_size{1 << page_table_bits}; - static constexpr u64 page_table_mask{page_table_size - 1}; + inline bool IsBigPageContinous(size_t big_page_index) const; + inline void SetBigPageContinous(size_t big_page_index, bool value); Core::System& system; + Core::Memory::Memory& 
memory; + Core::DeviceMemory& device_memory; + + const u64 address_space_bits; + const u64 page_bits; + u64 address_space_size; + u64 page_size; + u64 page_mask; + u64 page_table_mask; + static constexpr u64 cpu_page_bits{12}; + + const u64 big_page_bits; + u64 big_page_size; + u64 big_page_mask; + u64 big_page_table_mask; VideoCore::RasterizerInterface* rasterizer = nullptr; - std::vector<PageEntry> page_table; + enum class EntryType : u64 { + Free = 0, + Reserved = 1, + Mapped = 2, + }; + + std::vector<u64> entries; + std::vector<u64> big_entries; + + template <EntryType entry_type> + GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, + PTEKind kind); + + template <EntryType entry_type> + GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, + PTEKind kind); + + template <bool is_big_page> + inline EntryType GetEntry(size_t position) const; + + template <bool is_big_page> + inline void SetEntry(size_t position, EntryType entry); + + std::vector<std::array<PTEKind, 32>> kinds; + std::vector<std::array<PTEKind, 32>> big_kinds; + + template <bool is_big_page> + inline PTEKind GetKind(size_t position) const; + + template <bool is_big_page> + inline void SetKind(size_t position, PTEKind kind); + + Common::MultiLevelPageTable<u32> page_table; + Common::VirtualBuffer<u32> big_page_table_cpu; + + std::vector<u64> big_page_continous; + + constexpr static size_t continous_bits = 64; - using MapRange = std::pair<GPUVAddr, size_t>; - std::vector<MapRange> map_ranges; + const size_t unique_identifier; - std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue; + static std::atomic<size_t> unique_identifier_generator; }; } // namespace Tegra diff --git a/src/video_core/pte_kind.h b/src/video_core/pte_kind.h new file mode 100644 index 000000000..591d7214b --- /dev/null +++ b/src/video_core/pte_kind.h @@ -0,0 +1,264 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/common_types.h" + +namespace Tegra { + +// https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt +enum class PTEKind : u8 { + INVALID = 0xff, + PITCH = 0x00, + Z16 = 0x01, + Z16_2C = 0x02, + Z16_MS2_2C = 0x03, + Z16_MS4_2C = 0x04, + Z16_MS8_2C = 0x05, + Z16_MS16_2C = 0x06, + Z16_2Z = 0x07, + Z16_MS2_2Z = 0x08, + Z16_MS4_2Z = 0x09, + Z16_MS8_2Z = 0x0a, + Z16_MS16_2Z = 0x0b, + Z16_2CZ = 0x36, + Z16_MS2_2CZ = 0x37, + Z16_MS4_2CZ = 0x38, + Z16_MS8_2CZ = 0x39, + Z16_MS16_2CZ = 0x5f, + Z16_4CZ = 0x0c, + Z16_MS2_4CZ = 0x0d, + Z16_MS4_4CZ = 0x0e, + Z16_MS8_4CZ = 0x0f, + Z16_MS16_4CZ = 0x10, + S8Z24 = 0x11, + S8Z24_1Z = 0x12, + S8Z24_MS2_1Z = 0x13, + S8Z24_MS4_1Z = 0x14, + S8Z24_MS8_1Z = 0x15, + S8Z24_MS16_1Z = 0x16, + S8Z24_2CZ = 0x17, + S8Z24_MS2_2CZ = 0x18, + S8Z24_MS4_2CZ = 0x19, + S8Z24_MS8_2CZ = 0x1a, + S8Z24_MS16_2CZ = 0x1b, + S8Z24_2CS = 0x1c, + S8Z24_MS2_2CS = 0x1d, + S8Z24_MS4_2CS = 0x1e, + S8Z24_MS8_2CS = 0x1f, + S8Z24_MS16_2CS = 0x20, + S8Z24_4CSZV = 0x21, + S8Z24_MS2_4CSZV = 0x22, + S8Z24_MS4_4CSZV = 0x23, + S8Z24_MS8_4CSZV = 0x24, + S8Z24_MS16_4CSZV = 0x25, + V8Z24_MS4_VC12 = 0x26, + V8Z24_MS4_VC4 = 0x27, + V8Z24_MS8_VC8 = 0x28, + V8Z24_MS8_VC24 = 0x29, + V8Z24_MS4_VC12_1ZV = 0x2e, + V8Z24_MS4_VC4_1ZV = 0x2f, + V8Z24_MS8_VC8_1ZV = 0x30, + V8Z24_MS8_VC24_1ZV = 0x31, + V8Z24_MS4_VC12_2CS = 0x32, + V8Z24_MS4_VC4_2CS = 0x33, + V8Z24_MS8_VC8_2CS = 0x34, + V8Z24_MS8_VC24_2CS = 0x35, + V8Z24_MS4_VC12_2CZV = 0x3a, + V8Z24_MS4_VC4_2CZV = 
0x3b, + V8Z24_MS8_VC8_2CZV = 0x3c, + V8Z24_MS8_VC24_2CZV = 0x3d, + V8Z24_MS4_VC12_2ZV = 0x3e, + V8Z24_MS4_VC4_2ZV = 0x3f, + V8Z24_MS8_VC8_2ZV = 0x40, + V8Z24_MS8_VC24_2ZV = 0x41, + V8Z24_MS4_VC12_4CSZV = 0x42, + V8Z24_MS4_VC4_4CSZV = 0x43, + V8Z24_MS8_VC8_4CSZV = 0x44, + V8Z24_MS8_VC24_4CSZV = 0x45, + Z24S8 = 0x46, + Z24S8_1Z = 0x47, + Z24S8_MS2_1Z = 0x48, + Z24S8_MS4_1Z = 0x49, + Z24S8_MS8_1Z = 0x4a, + Z24S8_MS16_1Z = 0x4b, + Z24S8_2CS = 0x4c, + Z24S8_MS2_2CS = 0x4d, + Z24S8_MS4_2CS = 0x4e, + Z24S8_MS8_2CS = 0x4f, + Z24S8_MS16_2CS = 0x50, + Z24S8_2CZ = 0x51, + Z24S8_MS2_2CZ = 0x52, + Z24S8_MS4_2CZ = 0x53, + Z24S8_MS8_2CZ = 0x54, + Z24S8_MS16_2CZ = 0x55, + Z24S8_4CSZV = 0x56, + Z24S8_MS2_4CSZV = 0x57, + Z24S8_MS4_4CSZV = 0x58, + Z24S8_MS8_4CSZV = 0x59, + Z24S8_MS16_4CSZV = 0x5a, + Z24V8_MS4_VC12 = 0x5b, + Z24V8_MS4_VC4 = 0x5c, + Z24V8_MS8_VC8 = 0x5d, + Z24V8_MS8_VC24 = 0x5e, + YUV_B8C1_2Y = 0x60, + YUV_B8C2_2Y = 0x61, + YUV_B10C1_2Y = 0x62, + YUV_B10C2_2Y = 0x6b, + YUV_B12C1_2Y = 0x6c, + YUV_B12C2_2Y = 0x6d, + Z24V8_MS4_VC12_1ZV = 0x63, + Z24V8_MS4_VC4_1ZV = 0x64, + Z24V8_MS8_VC8_1ZV = 0x65, + Z24V8_MS8_VC24_1ZV = 0x66, + Z24V8_MS4_VC12_2CS = 0x67, + Z24V8_MS4_VC4_2CS = 0x68, + Z24V8_MS8_VC8_2CS = 0x69, + Z24V8_MS8_VC24_2CS = 0x6a, + Z24V8_MS4_VC12_2CZV = 0x6f, + Z24V8_MS4_VC4_2CZV = 0x70, + Z24V8_MS8_VC8_2CZV = 0x71, + Z24V8_MS8_VC24_2CZV = 0x72, + Z24V8_MS4_VC12_2ZV = 0x73, + Z24V8_MS4_VC4_2ZV = 0x74, + Z24V8_MS8_VC8_2ZV = 0x75, + Z24V8_MS8_VC24_2ZV = 0x76, + Z24V8_MS4_VC12_4CSZV = 0x77, + Z24V8_MS4_VC4_4CSZV = 0x78, + Z24V8_MS8_VC8_4CSZV = 0x79, + Z24V8_MS8_VC24_4CSZV = 0x7a, + ZF32 = 0x7b, + ZF32_1Z = 0x7c, + ZF32_MS2_1Z = 0x7d, + ZF32_MS4_1Z = 0x7e, + ZF32_MS8_1Z = 0x7f, + ZF32_MS16_1Z = 0x80, + ZF32_2CS = 0x81, + ZF32_MS2_2CS = 0x82, + ZF32_MS4_2CS = 0x83, + ZF32_MS8_2CS = 0x84, + ZF32_MS16_2CS = 0x85, + ZF32_2CZ = 0x86, + ZF32_MS2_2CZ = 0x87, + ZF32_MS4_2CZ = 0x88, + ZF32_MS8_2CZ = 0x89, + ZF32_MS16_2CZ = 0x8a, + X8Z24_X16V8S8_MS4_VC12 = 0x8b, + X8Z24_X16V8S8_MS4_VC4 = 0x8c, + X8Z24_X16V8S8_MS8_VC8 = 0x8d, + X8Z24_X16V8S8_MS8_VC24 = 0x8e, + X8Z24_X16V8S8_MS4_VC12_1CS = 0x8f, + X8Z24_X16V8S8_MS4_VC4_1CS = 0x90, + X8Z24_X16V8S8_MS8_VC8_1CS = 0x91, + X8Z24_X16V8S8_MS8_VC24_1CS = 0x92, + X8Z24_X16V8S8_MS4_VC12_1ZV = 0x97, + X8Z24_X16V8S8_MS4_VC4_1ZV = 0x98, + X8Z24_X16V8S8_MS8_VC8_1ZV = 0x99, + X8Z24_X16V8S8_MS8_VC24_1ZV = 0x9a, + X8Z24_X16V8S8_MS4_VC12_1CZV = 0x9b, + X8Z24_X16V8S8_MS4_VC4_1CZV = 0x9c, + X8Z24_X16V8S8_MS8_VC8_1CZV = 0x9d, + X8Z24_X16V8S8_MS8_VC24_1CZV = 0x9e, + X8Z24_X16V8S8_MS4_VC12_2CS = 0x9f, + X8Z24_X16V8S8_MS4_VC4_2CS = 0xa0, + X8Z24_X16V8S8_MS8_VC8_2CS = 0xa1, + X8Z24_X16V8S8_MS8_VC24_2CS = 0xa2, + X8Z24_X16V8S8_MS4_VC12_2CSZV = 0xa3, + X8Z24_X16V8S8_MS4_VC4_2CSZV = 0xa4, + X8Z24_X16V8S8_MS8_VC8_2CSZV = 0xa5, + X8Z24_X16V8S8_MS8_VC24_2CSZV = 0xa6, + ZF32_X16V8S8_MS4_VC12 = 0xa7, + ZF32_X16V8S8_MS4_VC4 = 0xa8, + ZF32_X16V8S8_MS8_VC8 = 0xa9, + ZF32_X16V8S8_MS8_VC24 = 0xaa, + ZF32_X16V8S8_MS4_VC12_1CS = 0xab, + ZF32_X16V8S8_MS4_VC4_1CS = 0xac, + ZF32_X16V8S8_MS8_VC8_1CS = 0xad, + ZF32_X16V8S8_MS8_VC24_1CS = 0xae, + ZF32_X16V8S8_MS4_VC12_1ZV = 0xb3, + ZF32_X16V8S8_MS4_VC4_1ZV = 0xb4, + ZF32_X16V8S8_MS8_VC8_1ZV = 0xb5, + ZF32_X16V8S8_MS8_VC24_1ZV = 0xb6, + ZF32_X16V8S8_MS4_VC12_1CZV = 0xb7, + ZF32_X16V8S8_MS4_VC4_1CZV = 0xb8, + ZF32_X16V8S8_MS8_VC8_1CZV = 0xb9, + ZF32_X16V8S8_MS8_VC24_1CZV = 0xba, + ZF32_X16V8S8_MS4_VC12_2CS = 0xbb, + ZF32_X16V8S8_MS4_VC4_2CS = 0xbc, + ZF32_X16V8S8_MS8_VC8_2CS = 0xbd, + ZF32_X16V8S8_MS8_VC24_2CS = 0xbe, + ZF32_X16V8S8_MS4_VC12_2CSZV = 
0xbf, + ZF32_X16V8S8_MS4_VC4_2CSZV = 0xc0, + ZF32_X16V8S8_MS8_VC8_2CSZV = 0xc1, + ZF32_X16V8S8_MS8_VC24_2CSZV = 0xc2, + ZF32_X24S8 = 0xc3, + ZF32_X24S8_1CS = 0xc4, + ZF32_X24S8_MS2_1CS = 0xc5, + ZF32_X24S8_MS4_1CS = 0xc6, + ZF32_X24S8_MS8_1CS = 0xc7, + ZF32_X24S8_MS16_1CS = 0xc8, + ZF32_X24S8_2CSZV = 0xce, + ZF32_X24S8_MS2_2CSZV = 0xcf, + ZF32_X24S8_MS4_2CSZV = 0xd0, + ZF32_X24S8_MS8_2CSZV = 0xd1, + ZF32_X24S8_MS16_2CSZV = 0xd2, + ZF32_X24S8_2CS = 0xd3, + ZF32_X24S8_MS2_2CS = 0xd4, + ZF32_X24S8_MS4_2CS = 0xd5, + ZF32_X24S8_MS8_2CS = 0xd6, + ZF32_X24S8_MS16_2CS = 0xd7, + S8 = 0x2a, + S8_2S = 0x2b, + GENERIC_16BX2 = 0xfe, + C32_2C = 0xd8, + C32_2CBR = 0xd9, + C32_2CBA = 0xda, + C32_2CRA = 0xdb, + C32_2BRA = 0xdc, + C32_MS2_2C = 0xdd, + C32_MS2_2CBR = 0xde, + C32_MS2_4CBRA = 0xcc, + C32_MS4_2C = 0xdf, + C32_MS4_2CBR = 0xe0, + C32_MS4_2CBA = 0xe1, + C32_MS4_2CRA = 0xe2, + C32_MS4_2BRA = 0xe3, + C32_MS4_4CBRA = 0x2c, + C32_MS8_MS16_2C = 0xe4, + C32_MS8_MS16_2CRA = 0xe5, + C64_2C = 0xe6, + C64_2CBR = 0xe7, + C64_2CBA = 0xe8, + C64_2CRA = 0xe9, + C64_2BRA = 0xea, + C64_MS2_2C = 0xeb, + C64_MS2_2CBR = 0xec, + C64_MS2_4CBRA = 0xcd, + C64_MS4_2C = 0xed, + C64_MS4_2CBR = 0xee, + C64_MS4_2CBA = 0xef, + C64_MS4_2CRA = 0xf0, + C64_MS4_2BRA = 0xf1, + C64_MS4_4CBRA = 0x2d, + C64_MS8_MS16_2C = 0xf2, + C64_MS8_MS16_2CRA = 0xf3, + C128_2C = 0xf4, + C128_2CR = 0xf5, + C128_MS2_2C = 0xf6, + C128_MS2_2CR = 0xf7, + C128_MS4_2C = 0xf8, + C128_MS4_2CR = 0xf9, + C128_MS8_MS16_2C = 0xfa, + C128_MS8_MS16_2CR = 0xfb, + X8C24 = 0xfc, + PITCH_NO_SWIZZLE = 0xfd, + SMSKED_MESSAGE = 0xca, + SMHOST_MESSAGE = 0xcb, +}; + +constexpr bool IsPitchKind(PTEKind kind) { + return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE; +} + +} // namespace Tegra diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 889b606b3..00ce53e3e 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -17,6 +17,7 @@ #include "common/assert.h" #include "common/settings.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" @@ -90,13 +91,10 @@ private: }; template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter> -class QueryCacheBase { +class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { public: - explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::MemoryManager& gpu_memory_) - : rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, - gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this), + explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_) + : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this), VideoCore::QueryType::SamplesPassed}}} {} void InvalidateRegion(VAddr addr, std::size_t size) { @@ -117,13 +115,13 @@ public: */ void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) { std::unique_lock lock{mutex}; - const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); ASSERT(cpu_addr); CachedQuery* query = TryGet(*cpu_addr); if (!query) { ASSERT_OR_EXECUTE(cpu_addr, return;); - u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); + u8* const host_ptr = gpu_memory->GetPointer(gpu_addr); query = Register(type, *cpu_addr, host_ptr, 
timestamp.has_value()); } @@ -137,8 +135,10 @@ public: /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. void UpdateCounters() { std::unique_lock lock{mutex}; - const auto& regs = maxwell3d.regs; - Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); + if (maxwell3d) { + const auto& regs = maxwell3d->regs; + Stream(VideoCore::QueryType::SamplesPassed).Update(regs.zpass_pixel_count_enable); + } } /// Resets a counter to zero. It doesn't disable the query after resetting. @@ -264,8 +264,6 @@ private: static constexpr unsigned YUZU_PAGEBITS = 12; VideoCore::RasterizerInterface& rasterizer; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::MemoryManager& gpu_memory; std::recursive_mutex mutex; diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index a04a76481..d2d40884c 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -16,6 +16,9 @@ class MemoryManager; namespace Engines { class AccelerateDMAInterface; } +namespace Control { +struct ChannelState; +} } // namespace Tegra namespace VideoCore { @@ -59,7 +62,10 @@ public: virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0; /// Signal a GPU based semaphore as a fence - virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0; + virtual void SignalFence(std::function<void()>&& func) = 0; + + /// Send an operation to be done after a certain amount of flushes. + virtual void SyncOperation(std::function<void()>&& func) = 0; /// Signal a GPU based syncpoint as a fence virtual void SignalSyncPoint(u32 value) = 0; @@ -86,13 +92,13 @@ public: virtual void OnCPUWrite(VAddr addr, u64 size) = 0; /// Sync memory between guest and host. - virtual void SyncGuestHost() = 0; + virtual void InvalidateGPUCache() = 0; /// Unmap memory range virtual void UnmapMemory(VAddr addr, u64 size) = 0; /// Remap GPU memory range. 
This means underneath backing memory changed - virtual void ModifyGPUMemory(GPUVAddr addr, u64 size) = 0; + virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0; /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory /// and invalidated @@ -123,7 +129,7 @@ public: [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0; virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span<u8> memory) = 0; + std::span<const u8> memory) = 0; /// Attempt to use a faster method to display the framebuffer to screen [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, @@ -137,5 +143,11 @@ public: /// Initialize disk cached resources for the game being emulated virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const DiskResourceLoadCallback& callback) {} + + virtual void InitializeChannel(Tegra::Control::ChannelState& channel) {} + + virtual void BindChannel(Tegra::Control::ChannelState& channel) {} + + virtual void ReleaseChannel(s32 channel_id) {} }; } // namespace VideoCore diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp index 45791aa75..e8761a747 100644 --- a/src/video_core/renderer_base.cpp +++ b/src/video_core/renderer_base.cpp @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2015 Citra Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include <thread> + #include "common/logging/log.h" #include "core/frontend/emu_window.h" #include "video_core/renderer_base.h" @@ -35,8 +37,12 @@ void RendererBase::RequestScreenshot(void* data, std::function<void(bool)> callb LOG_ERROR(Render, "A screenshot is already requested or in progress, ignoring the request"); return; } + auto async_callback{[callback = std::move(callback)](bool invert_y) { + std::thread t{callback, invert_y}; + t.detach(); + }}; renderer_settings.screenshot_bits = data; - renderer_settings.screenshot_complete_callback = std::move(callback); + renderer_settings.screenshot_complete_callback = async_callback; renderer_settings.screenshot_framebuffer_layout = layout; renderer_settings.screenshot_requested = true; } diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index 32450ee1d..08f4d69ab 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -168,7 +168,7 @@ void BufferCacheRuntime::BindIndexBuffer(Buffer& buffer, u32 offset, u32 size) { if (has_unified_vertex_buffers) { buffer.MakeResident(GL_READ_ONLY); glBufferAddressRangeNV(GL_ELEMENT_ARRAY_ADDRESS_NV, 0, buffer.HostGpuAddr() + offset, - static_cast<GLsizeiptr>(size)); + static_cast<GLsizeiptr>(Common::AlignUp(size, 4))); } else { glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle()); index_buffer_offset = offset; diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp index 1f0f156ed..26d066004 100644 --- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp @@ -28,12 +28,11 @@ bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcep } ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_, - BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, - Tegra::Engines::KeplerCompute& kepler_compute_, - ProgramManager& program_manager_, const Shader::Info& 
info_, - std::string code, std::vector<u32> code_v) - : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_}, - kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} { + BufferCache& buffer_cache_, ProgramManager& program_manager_, + const Shader::Info& info_, std::string code, + std::vector<u32> code_v) + : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, + program_manager{program_manager_}, info{info_} { switch (device.GetShaderBackend()) { case Settings::ShaderBackend::GLSL: source_program = CreateProgram(code, GL_COMPUTE_SHADER); @@ -86,7 +85,7 @@ void ComputePipeline::Configure() { GLsizei texture_binding{}; GLsizei image_binding{}; - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const auto& cbufs{qmd.const_buffer_config}; const bool via_header_index{qmd.linked_tsc != 0}; const auto read_handle{[&](const auto& desc, u32 index) { @@ -101,12 +100,13 @@ void ComputePipeline::Configure() { const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + secondary_offset}; - const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; - const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr) + << desc.secondary_shift_left}; return TexturePair(lhs_raw | rhs_raw, via_header_index); } } - return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); + return TexturePair(gpu_memory->Read<u32>(addr), via_header_index); }}; const auto add_image{[&](const auto& desc, bool blacklist) { for (u32 index = 0; index < desc.count; ++index) { diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.h b/src/video_core/renderer_opengl/gl_compute_pipeline.h index 723f27f11..6534dec32 100644 --- a/src/video_core/renderer_opengl/gl_compute_pipeline.h +++ b/src/video_core/renderer_opengl/gl_compute_pipeline.h @@ -49,10 +49,8 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineKey>); class ComputePipeline { public: explicit ComputePipeline(const Device& device, TextureCache& texture_cache_, - BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, - Tegra::Engines::KeplerCompute& kepler_compute_, - ProgramManager& program_manager_, const Shader::Info& info_, - std::string code, std::vector<u32> code_v); + BufferCache& buffer_cache_, ProgramManager& program_manager_, + const Shader::Info& info_, std::string code, std::vector<u32> code_v); void Configure(); @@ -60,11 +58,17 @@ public: return writes_global_memory; } + void SetEngine(Tegra::Engines::KeplerCompute* kepler_compute_, + Tegra::MemoryManager* gpu_memory_) { + kepler_compute = kepler_compute_; + gpu_memory = gpu_memory_; + } + private: TextureCache& texture_cache; BufferCache& buffer_cache; - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::KeplerCompute& kepler_compute; + Tegra::MemoryManager* gpu_memory; + Tegra::Engines::KeplerCompute* kepler_compute; ProgramManager& program_manager; Shader::Info info; diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp index 6e82c2e28..91463f854 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.cpp +++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp @@ -10,10 +10,7 @@ namespace OpenGL { -GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {} - 
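
[editor's note] The GLInnerFence constructor change below reflects the broader fence rework visible in rasterizer_interface.h earlier: SignalSemaphore(addr, value) is replaced by SignalFence/SyncOperation taking std::function callbacks, so a fence no longer needs to carry an address/payload pair of its own. A rough sketch of that shape — PendingFence and FenceQueue are hypothetical names, not yuzu's FenceManager:

    #include <functional>
    #include <queue>
    #include <utility>
    #include <vector>

    // A fence is reduced to a host sync point plus deferred operations that
    // run once it is observed signaled.
    struct PendingFence {
        bool is_stubbed = false; // no host fence object was actually queued
        std::vector<std::function<void()>> on_signal;
    };

    class FenceQueue {
    public:
        void Queue(PendingFence&& fence) {
            fences.push(std::move(fence));
        }

        // Called when the oldest host fence is known to be signaled.
        void PopAndRun() {
            if (fences.empty()) {
                return;
            }
            for (auto& func : fences.front().on_signal) {
                func(); // e.g. write back a semaphore or syncpoint value
            }
            fences.pop();
        }

    private:
        std::queue<PendingFence> fences;
    };
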
-GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_) - : FenceBase{address_, payload_, is_stubbed_} {} +GLInnerFence::GLInnerFence(bool is_stubbed_) : FenceBase{is_stubbed_} {} GLInnerFence::~GLInnerFence() = default; @@ -48,12 +45,8 @@ FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterize BufferCache& buffer_cache_, QueryCache& query_cache_) : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {} -Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { - return std::make_shared<GLInnerFence>(value, is_stubbed); -} - -Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { - return std::make_shared<GLInnerFence>(addr, value, is_stubbed); +Fence FenceManagerOpenGL::CreateFence(bool is_stubbed) { + return std::make_shared<GLInnerFence>(is_stubbed); } void FenceManagerOpenGL::QueueFence(Fence& fence) { diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h index 14ff00db2..f1446e732 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.h +++ b/src/video_core/renderer_opengl/gl_fence_manager.h @@ -16,8 +16,7 @@ namespace OpenGL { class GLInnerFence : public VideoCommon::FenceBase { public: - explicit GLInnerFence(u32 payload_, bool is_stubbed_); - explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_); + explicit GLInnerFence(bool is_stubbed_); ~GLInnerFence(); void Queue(); @@ -40,8 +39,7 @@ public: QueryCache& query_cache); protected: - Fence CreateFence(u32 value, bool is_stubbed) override; - Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; + Fence CreateFence(bool is_stubbed) override; void QueueFence(Fence& fence) override; bool IsFenceSignaled(Fence& fence) const override; void WaitFence(Fence& fence) override; diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp index 67eae369d..1d20a79ec 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp @@ -73,8 +73,8 @@ GLenum AssemblyStage(size_t stage_index) { /// @param location Hardware location /// @return Pair of ARB_transform_feedback3 token stream first and third arguments /// @note Read https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_transform_feedback3.txt -std::pair<GLint, GLint> TransformFeedbackEnum(u8 location) { - const u8 index = location / 4; +std::pair<GLint, GLint> TransformFeedbackEnum(u32 location) { + const auto index = location / 4; if (index >= 8 && index <= 39) { return {GL_GENERIC_ATTRIB_NV, index - 8}; } @@ -169,15 +169,15 @@ ConfigureFuncPtr ConfigureFunc(const std::array<Shader::Info, 5>& infos, u32 ena } } // Anonymous namespace -GraphicsPipeline::GraphicsPipeline( - const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_, - Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, - ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker, - VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources, - std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos, - const GraphicsPipelineKey& key_) - : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, - gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_}, +GraphicsPipeline::GraphicsPipeline(const Device& device, 
TextureCache& texture_cache_, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, ShaderWorker* thread_worker, + VideoCore::ShaderNotify* shader_notify, + std::array<std::string, 5> sources, + std::array<std::vector<u32>, 5> sources_spirv, + const std::array<const Shader::Info*, 5>& infos, + const GraphicsPipelineKey& key_) + : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_}, key{key_} { if (shader_notify) { shader_notify->MarkShaderBuilding(); @@ -285,8 +285,8 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings); buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers); - const auto& regs{maxwell3d.regs}; - const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; + const auto& regs{maxwell3d->regs}; + const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding}; const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { const Shader::Info& info{stage_infos[stage]}; buffer_cache.UnbindGraphicsStorageBuffers(stage); @@ -299,7 +299,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { ++ssbo_index; } } - const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; + const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers}; const auto read_handle{[&](const auto& desc, u32 index) { ASSERT(cbufs[desc.cbuf_index].enabled); const u32 index_offset{index << desc.size_shift}; @@ -312,13 +312,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + second_offset}; - const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; - const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr) + << desc.secondary_shift_left}; const u32 raw{lhs_raw | rhs_raw}; return TexturePair(raw, via_header_index); } } - return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); + return TexturePair(gpu_memory->Read<u32>(addr), via_header_index); }}; const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { for (u32 index = 0; index < desc.count; ++index) { @@ -556,10 +557,25 @@ void GraphicsPipeline::GenerateTransformFeedbackState() { ++current_stream; const auto& locations = key.xfb_state.varyings[feedback]; - std::optional<u8> current_index; + std::optional<u32> current_index; for (u32 offset = 0; offset < layout.varying_count; ++offset) { - const u8 location = locations[offset]; - const u8 index = location / 4; + const auto get_attribute = [&locations](u32 index) -> u32 { + switch (index % 4) { + case 0: + return locations[index / 4].attribute0.Value(); + case 1: + return locations[index / 4].attribute1.Value(); + case 2: + return locations[index / 4].attribute2.Value(); + case 3: + return locations[index / 4].attribute3.Value(); + } + UNREACHABLE(); + return 0; + }; + + const auto attribute{get_attribute(offset)}; + const auto index = attribute / 4U; if (current_index == index) { // Increase number of components of the previous attachment @@ -568,7 +584,7 @@ void GraphicsPipeline::GenerateTransformFeedbackState() { } current_index = index; - std::tie(cursor[0], cursor[2]) = TransformFeedbackEnum(location); + std::tie(cursor[0], cursor[2]) = 
TransformFeedbackEnum(attribute); cursor[1] = 1; cursor += XFB_ENTRY_STRIDE; } diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h index 4ec15b966..ea53ddb46 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h @@ -37,8 +37,8 @@ struct GraphicsPipelineKey { BitField<0, 1, u32> xfb_enabled; BitField<1, 1, u32> early_z; BitField<2, 4, Maxwell::PrimitiveTopology> gs_input_topology; - BitField<6, 2, Maxwell::TessellationPrimitive> tessellation_primitive; - BitField<8, 2, Maxwell::TessellationSpacing> tessellation_spacing; + BitField<6, 2, Maxwell::Tessellation::DomainType> tessellation_primitive; + BitField<8, 2, Maxwell::Tessellation::Spacing> tessellation_spacing; BitField<10, 1, u32> tessellation_clockwise; }; std::array<u32, 3> padding; @@ -71,10 +71,9 @@ static_assert(std::is_trivially_constructible_v<GraphicsPipelineKey>); class GraphicsPipeline { public: explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_, - BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, - Tegra::Engines::Maxwell3D& maxwell3d_, - ProgramManager& program_manager_, StateTracker& state_tracker_, - ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, ShaderWorker* thread_worker, + VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources, std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos, @@ -107,6 +106,11 @@ public: }; } + void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) { + maxwell3d = maxwell3d_; + gpu_memory = gpu_memory_; + } + private: template <typename Spec> void ConfigureImpl(bool is_indexed); @@ -119,8 +123,8 @@ private: TextureCache& texture_cache; BufferCache& buffer_cache; - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::MemoryManager* gpu_memory; + Tegra::Engines::Maxwell3D* maxwell3d; ProgramManager& program_manager; StateTracker& state_tracker; const GraphicsPipelineKey key; diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index ed40f5791..5070db441 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -26,9 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { } // Anonymous namespace -QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::MemoryManager& gpu_memory_) - : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {} +QueryCache::QueryCache(RasterizerOpenGL& rasterizer_) + : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {} QueryCache::~QueryCache() = default; diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h index 8a49f1ef0..14ce59990 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.h +++ b/src/video_core/renderer_opengl/gl_query_cache.h @@ -28,8 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>; class QueryCache final : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { public: - explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::MemoryManager& 
gpu_memory_); + explicit QueryCache(RasterizerOpenGL& rasterizer_); ~QueryCache(); OGLQuery AllocateQuery(VideoCore::QueryType type); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index a0d048b0b..e5c09a969 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -16,7 +16,7 @@ #include "common/microprofile.h" #include "common/scope_exit.h" #include "common/settings.h" - +#include "video_core/control/channel_state.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/memory_manager.h" @@ -56,22 +56,20 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra Core::Memory::Memory& cpu_memory_, const Device& device_, ScreenInfo& screen_info_, ProgramManager& program_manager_, StateTracker& state_tracker_) - : RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()), - kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), - screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_), + : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_), + program_manager(program_manager_), state_tracker(state_tracker_), texture_cache_runtime(device, program_manager, state_tracker), - texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), - buffer_cache_runtime(device), - buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), - shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache, - buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()), - query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache), + texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device), + buffer_cache(*this, cpu_memory_, buffer_cache_runtime), + shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager, + state_tracker, gpu.ShaderNotify()), + query_cache(*this), accelerate_dma(buffer_cache), fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {} RasterizerOpenGL::~RasterizerOpenGL() = default; void RasterizerOpenGL::SyncVertexFormats() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::VertexFormats]) { return; } @@ -89,7 +87,7 @@ void RasterizerOpenGL::SyncVertexFormats() { } flags[Dirty::VertexFormat0 + index] = false; - const auto attrib = maxwell3d.regs.vertex_attrib_format[index]; + const auto& attrib = maxwell3d->regs.vertex_attrib_format[index]; const auto gl_index = static_cast<GLuint>(index); // Disable constant attributes. 
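The constructor and member hunks in this diff replace lifetime references to Tegra::Engines::Maxwell3D and Tegra::MemoryManager with raw pointers that are rebound whenever a GPU channel is bound. A minimal C++ sketch of that pattern follows, with a hypothetical ChannelAwareConsumer type standing in for the real pipeline and cache classes (SetEngine itself is copied from the gl_graphics_pipeline.h hunk above; everything else here is illustrative only):

namespace Tegra {
class MemoryManager;
namespace Engines {
class Maxwell3D;
}
} // namespace Tegra

class ChannelAwareConsumer {
public:
    // Rebind the engine pointers right before use (the rasterizer does this
    // per draw/dispatch), instead of capturing references at construction.
    void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
        maxwell3d = maxwell3d_;
        gpu_memory = gpu_memory_;
    }

private:
    // Null until a channel is bound; swapped on every channel switch, which is
    // why the hunks below change member access from '.' to '->'.
    Tegra::Engines::Maxwell3D* maxwell3d = nullptr;
    Tegra::MemoryManager* gpu_memory = nullptr;
};

One consumer instance can then serve any number of channels, which is what the InitializeChannel/BindChannel/ReleaseChannel overrides added to RasterizerOpenGL later in this diff rely on.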
@@ -99,8 +97,8 @@ void RasterizerOpenGL::SyncVertexFormats() { } glEnableVertexAttribArray(gl_index); - if (attrib.type == Maxwell::VertexAttribute::Type::SignedInt || - attrib.type == Maxwell::VertexAttribute::Type::UnsignedInt) { + if (attrib.type == Maxwell::VertexAttribute::Type::SInt || + attrib.type == Maxwell::VertexAttribute::Type::UInt) { glVertexAttribIFormat(gl_index, attrib.ComponentCount(), MaxwellToGL::VertexFormat(attrib), attrib.offset); } else { @@ -113,13 +111,13 @@ void RasterizerOpenGL::SyncVertexFormats() { } void RasterizerOpenGL::SyncVertexInstances() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::VertexInstances]) { return; } flags[Dirty::VertexInstances] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) { if (!flags[Dirty::VertexInstance0 + index]) { continue; @@ -127,8 +125,8 @@ void RasterizerOpenGL::SyncVertexInstances() { flags[Dirty::VertexInstance0 + index] = false; const auto gl_index = static_cast<GLuint>(index); - const bool instancing_enabled = regs.instanced_arrays.IsInstancingEnabled(gl_index); - const GLuint divisor = instancing_enabled ? regs.vertex_array[index].divisor : 0; + const bool instancing_enabled = regs.vertex_stream_instances.IsInstancingEnabled(gl_index); + const GLuint divisor = instancing_enabled ? regs.vertex_streams[index].frequency : 0; glVertexBindingDivisor(gl_index, divisor); } } @@ -140,36 +138,36 @@ void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_load void RasterizerOpenGL::Clear() { MICROPROFILE_SCOPE(OpenGL_Clears); - if (!maxwell3d.ShouldExecute()) { + if (!maxwell3d->ShouldExecute()) { return; } - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; bool use_color{}; bool use_depth{}; bool use_stencil{}; - if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || - regs.clear_buffers.A) { + if (regs.clear_surface.R || regs.clear_surface.G || regs.clear_surface.B || + regs.clear_surface.A) { use_color = true; - const GLuint index = regs.clear_buffers.RT; + const GLuint index = regs.clear_surface.RT; state_tracker.NotifyColorMask(index); - glColorMaski(index, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0, - regs.clear_buffers.B != 0, regs.clear_buffers.A != 0); + glColorMaski(index, regs.clear_surface.R != 0, regs.clear_surface.G != 0, + regs.clear_surface.B != 0, regs.clear_surface.A != 0); // TODO(Rodrigo): Determine if clamping is used on clears SyncFragmentColorClampState(); SyncFramebufferSRGB(); } - if (regs.clear_buffers.Z) { + if (regs.clear_surface.Z) { ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear Z but buffer is not enabled!"); use_depth = true; state_tracker.NotifyDepthMask(); glDepthMask(GL_TRUE); } - if (regs.clear_buffers.S) { + if (regs.clear_surface.S) { ASSERT_MSG(regs.zeta_enable, "Tried to clear stencil but buffer is not enabled!"); use_stencil = true; } @@ -186,16 +184,16 @@ void RasterizerOpenGL::Clear() { texture_cache.UpdateRenderTargets(true); state_tracker.BindFramebuffer(texture_cache.GetFramebuffer()->Handle()); SyncViewport(); - if (regs.clear_flags.scissor) { + if (regs.clear_control.use_scissor) { SyncScissorTest(); } else { state_tracker.NotifyScissor0(); glDisablei(GL_SCISSOR_TEST, 0); } - UNIMPLEMENTED_IF(regs.clear_flags.viewport); + UNIMPLEMENTED_IF(regs.clear_control.use_viewport_clip0); if (use_color) { - glClearBufferfv(GL_COLOR, regs.clear_buffers.RT, 
regs.clear_color); + glClearBufferfv(GL_COLOR, regs.clear_surface.RT, regs.clear_color.data()); } if (use_depth && use_stencil) { glClearBufferfi(GL_DEPTH_STENCIL, 0, regs.clear_depth, regs.clear_stencil); @@ -217,22 +215,26 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { if (!pipeline) { return; } + + gpu.TickWork(); + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + pipeline->SetEngine(maxwell3d, gpu_memory); pipeline->Configure(is_indexed); SyncState(); - const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology); + const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology); BeginTransformFeedback(pipeline, primitive_mode); - const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance); + const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.global_base_instance_index); const GLsizei num_instances = - static_cast<GLsizei>(is_instanced ? maxwell3d.mme_draw.instance_count : 1); + static_cast<GLsizei>(is_instanced ? maxwell3d->mme_draw.instance_count : 1); if (is_indexed) { - const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vb_element_base); - const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.index_array.count); + const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.global_base_vertex_index); + const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_buffer.count); const GLvoid* const offset = buffer_cache_runtime.IndexOffset(); - const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format); + const GLenum format = MaxwellToGL::IndexFormat(maxwell3d->regs.index_buffer.format); if (num_instances == 1 && base_instance == 0 && base_vertex == 0) { glDrawElements(primitive_mode, num_vertices, format, offset); } else if (num_instances == 1 && base_instance == 0) { @@ -251,8 +253,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { base_instance); } } else { - const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vertex_buffer.first); - const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.vertex_buffer.count); + const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vertex_buffer.first); + const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.vertex_buffer.count); if (num_instances == 1 && base_instance == 0) { glDrawArrays(primitive_mode, base_vertex, num_vertices); } else if (base_instance == 0) { @@ -273,8 +275,9 @@ void RasterizerOpenGL::DispatchCompute() { if (!pipeline) { return; } + pipeline->SetEngine(kepler_compute, gpu_memory); pipeline->Configure(); - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z); ++num_queued_commands; has_written_global_memory |= pipeline->WritesGlobalMemory(); @@ -359,7 +362,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { } } -void RasterizerOpenGL::SyncGuestHost() { +void RasterizerOpenGL::InvalidateGPUCache() { MICROPROFILE_SCOPE(OpenGL_CacheManagement); shader_cache.SyncGuestHost(); { @@ -380,40 +383,30 @@ void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) { shader_cache.OnCPUWrite(addr, size); } -void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) { +void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { { std::scoped_lock lock{texture_cache.mutex}; - texture_cache.UnmapGPUMemory(addr, size); + 
texture_cache.UnmapGPUMemory(as_id, addr, size); } } -void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { - if (!gpu.IsAsync()) { - gpu_memory.Write<u32>(addr, value); - return; - } - fence_manager.SignalSemaphore(addr, value); +void RasterizerOpenGL::SignalFence(std::function<void()>&& func) { + fence_manager.SignalFence(std::move(func)); +} + +void RasterizerOpenGL::SyncOperation(std::function<void()>&& func) { + fence_manager.SyncOperation(std::move(func)); } void RasterizerOpenGL::SignalSyncPoint(u32 value) { - if (!gpu.IsAsync()) { - gpu.IncrementSyncPoint(value); - return; - } fence_manager.SignalSyncPoint(value); } void RasterizerOpenGL::SignalReference() { - if (!gpu.IsAsync()) { - return; - } fence_manager.SignalOrdering(); } void RasterizerOpenGL::ReleaseFences() { - if (!gpu.IsAsync()) { - return; - } fence_manager.WaitPendingFences(); } @@ -430,6 +423,7 @@ void RasterizerOpenGL::WaitForIdle() { } void RasterizerOpenGL::FragmentBarrier() { + glTextureBarrier(); glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT); } @@ -482,13 +476,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA() } void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span<u8> memory) { - auto cpu_addr = gpu_memory.GpuToCpuAddress(address); + std::span<const u8> memory) { + auto cpu_addr = gpu_memory->GpuToCpuAddress(address); if (!cpu_addr) [[unlikely]] { - gpu_memory.WriteBlock(address, memory.data(), copy_size); + gpu_memory->WriteBlock(address, memory.data(), copy_size); return; } - gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); + gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size); { std::unique_lock<std::mutex> lock{buffer_cache.mutex}; if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { @@ -551,8 +545,8 @@ void RasterizerOpenGL::SyncState() { } void RasterizerOpenGL::SyncViewport() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports]; const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports; @@ -561,9 +555,9 @@ void RasterizerOpenGL::SyncViewport() { if (dirty_viewport || dirty_clip_control || flags[Dirty::FrontFace]) { flags[Dirty::FrontFace] = false; - GLenum mode = MaxwellToGL::FrontFace(regs.front_face); + GLenum mode = MaxwellToGL::FrontFace(regs.gl_front_face); bool flip_faces = true; - if (regs.screen_y_control.triangle_rast_flip != 0) { + if (regs.window_origin.flip_y != 0) { flip_faces = !flip_faces; } if (regs.viewport_transform[0].scale_y < 0.0f) { @@ -588,14 +582,15 @@ void RasterizerOpenGL::SyncViewport() { if (regs.viewport_transform[0].scale_y < 0.0f) { flip_y = !flip_y; } - if (regs.screen_y_control.y_negate != 0) { + const bool lower_left{regs.window_origin.mode != Maxwell::WindowOrigin::Mode::UpperLeft}; + if (lower_left) { flip_y = !flip_y; } const bool is_zero_to_one = regs.depth_mode == Maxwell::DepthMode::ZeroToOne; const GLenum origin = flip_y ? GL_UPPER_LEFT : GL_LOWER_LEFT; const GLenum depth = is_zero_to_one ? GL_ZERO_TO_ONE : GL_NEGATIVE_ONE_TO_ONE; state_tracker.ClipControl(origin, depth); - state_tracker.SetYNegate(regs.screen_y_control.y_negate != 0); + state_tracker.SetYNegate(lower_left); } const bool is_rescaling{texture_cache.IsRescaling()}; const float scale = is_rescaling ? 
Settings::values.resolution_info.up_factor : 1.0f; @@ -657,23 +652,29 @@ void RasterizerOpenGL::SyncViewport() { } void RasterizerOpenGL::SyncDepthClamp() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::DepthClampEnabled]) { return; } flags[Dirty::DepthClampEnabled] = false; - oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0); + bool depth_clamp_disabled{maxwell3d->regs.viewport_clip_control.geometry_clip == + Maxwell::ViewportClipControl::GeometryClip::Passthrough || + maxwell3d->regs.viewport_clip_control.geometry_clip == + Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ || + maxwell3d->regs.viewport_clip_control.geometry_clip == + Maxwell::ViewportClipControl::GeometryClip::FrustumZ}; + oglEnable(GL_DEPTH_CLAMP, !depth_clamp_disabled); } void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) { return; } flags[Dirty::ClipDistances] = false; - clip_mask &= maxwell3d.regs.clip_distance_enabled; + clip_mask &= maxwell3d->regs.user_clip_enable.raw; if (clip_mask == last_clip_distance_mask) { return; } @@ -689,15 +690,15 @@ void RasterizerOpenGL::SyncClipCoef() { } void RasterizerOpenGL::SyncCullMode() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; if (flags[Dirty::CullTest]) { flags[Dirty::CullTest] = false; - if (regs.cull_test_enabled) { + if (regs.gl_cull_test_enabled) { glEnable(GL_CULL_FACE); - glCullFace(MaxwellToGL::CullFace(regs.cull_face)); + glCullFace(MaxwellToGL::CullFace(regs.gl_cull_face)); } else { glDisable(GL_CULL_FACE); } @@ -705,23 +706,23 @@ void RasterizerOpenGL::SyncCullMode() { } void RasterizerOpenGL::SyncPrimitiveRestart() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PrimitiveRestart]) { return; } flags[Dirty::PrimitiveRestart] = false; - if (maxwell3d.regs.primitive_restart.enabled) { + if (maxwell3d->regs.primitive_restart.enabled) { glEnable(GL_PRIMITIVE_RESTART); - glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index); + glPrimitiveRestartIndex(maxwell3d->regs.primitive_restart.index); } else { glDisable(GL_PRIMITIVE_RESTART); } } void RasterizerOpenGL::SyncDepthTestState() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; if (flags[Dirty::DepthMask]) { flags[Dirty::DepthMask] = false; @@ -740,28 +741,28 @@ void RasterizerOpenGL::SyncDepthTestState() { } void RasterizerOpenGL::SyncStencilTestState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::StencilTest]) { return; } flags[Dirty::StencilTest] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; oglEnable(GL_STENCIL_TEST, regs.stencil_enable); - glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func), - regs.stencil_front_func_ref, regs.stencil_front_func_mask); - glStencilOpSeparate(GL_FRONT, MaxwellToGL::StencilOp(regs.stencil_front_op_fail), - MaxwellToGL::StencilOp(regs.stencil_front_op_zfail), - MaxwellToGL::StencilOp(regs.stencil_front_op_zpass)); + glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_op.func), + regs.stencil_front_ref, 
regs.stencil_front_func_mask); + glStencilOpSeparate(GL_FRONT, MaxwellToGL::StencilOp(regs.stencil_front_op.fail), + MaxwellToGL::StencilOp(regs.stencil_front_op.zfail), + MaxwellToGL::StencilOp(regs.stencil_front_op.zpass)); glStencilMaskSeparate(GL_FRONT, regs.stencil_front_mask); if (regs.stencil_two_side_enable) { - glStencilFuncSeparate(GL_BACK, MaxwellToGL::ComparisonOp(regs.stencil_back_func_func), - regs.stencil_back_func_ref, regs.stencil_back_func_mask); - glStencilOpSeparate(GL_BACK, MaxwellToGL::StencilOp(regs.stencil_back_op_fail), - MaxwellToGL::StencilOp(regs.stencil_back_op_zfail), - MaxwellToGL::StencilOp(regs.stencil_back_op_zpass)); + glStencilFuncSeparate(GL_BACK, MaxwellToGL::ComparisonOp(regs.stencil_back_op.func), + regs.stencil_back_ref, regs.stencil_back_mask); + glStencilOpSeparate(GL_BACK, MaxwellToGL::StencilOp(regs.stencil_back_op.fail), + MaxwellToGL::StencilOp(regs.stencil_back_op.zfail), + MaxwellToGL::StencilOp(regs.stencil_back_op.zpass)); glStencilMaskSeparate(GL_BACK, regs.stencil_back_mask); } else { glStencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFF); @@ -771,24 +772,24 @@ void RasterizerOpenGL::SyncStencilTestState() { } void RasterizerOpenGL::SyncRasterizeEnable() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::RasterizeEnable]) { return; } flags[Dirty::RasterizeEnable] = false; - oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0); + oglEnable(GL_RASTERIZER_DISCARD, maxwell3d->regs.rasterize_enable == 0); } void RasterizerOpenGL::SyncPolygonModes() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PolygonModes]) { return; } flags[Dirty::PolygonModes] = false; - const auto& regs = maxwell3d.regs; - if (regs.fill_rectangle) { + const auto& regs = maxwell3d->regs; + if (regs.fill_via_triangle_mode != Maxwell::FillViaTriangleMode::Disabled) { if (!GLAD_GL_NV_fill_rectangle) { LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported"); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); @@ -820,7 +821,7 @@ void RasterizerOpenGL::SyncPolygonModes() { } void RasterizerOpenGL::SyncColorMask() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::ColorMasks]) { return; } @@ -829,7 +830,7 @@ void RasterizerOpenGL::SyncColorMask() { const bool force = flags[Dirty::ColorMaskCommon]; flags[Dirty::ColorMaskCommon] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.color_mask_common) { if (!force && !flags[Dirty::ColorMask0]) { return; @@ -854,30 +855,31 @@ void RasterizerOpenGL::SyncColorMask() { } void RasterizerOpenGL::SyncMultiSampleState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::MultisampleControl]) { return; } flags[Dirty::MultisampleControl] = false; - const auto& regs = maxwell3d.regs; - oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage); - oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one); + const auto& regs = maxwell3d->regs; + oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.anti_alias_alpha_control.alpha_to_coverage); + oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.anti_alias_alpha_control.alpha_to_one); } void RasterizerOpenGL::SyncFragmentColorClampState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::FragmentClampColor]) { return; } flags[Dirty::FragmentClampColor] = false; - 
glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE); + glClampColor(GL_CLAMP_FRAGMENT_COLOR, + maxwell3d->regs.frag_color_clamp.AnyEnabled() ? GL_TRUE : GL_FALSE); } void RasterizerOpenGL::SyncBlendState() { - auto& flags = maxwell3d.dirty.flags; - const auto& regs = maxwell3d.regs; + auto& flags = maxwell3d->dirty.flags; + const auto& regs = maxwell3d->regs; if (flags[Dirty::BlendColor]) { flags[Dirty::BlendColor] = false; @@ -892,18 +894,18 @@ void RasterizerOpenGL::SyncBlendState() { } flags[Dirty::BlendStates] = false; - if (!regs.independent_blend_enable) { + if (!regs.blend_per_target_enabled) { if (!regs.blend.enable[0]) { glDisable(GL_BLEND); return; } glEnable(GL_BLEND); - glBlendFuncSeparate(MaxwellToGL::BlendFunc(regs.blend.factor_source_rgb), - MaxwellToGL::BlendFunc(regs.blend.factor_dest_rgb), - MaxwellToGL::BlendFunc(regs.blend.factor_source_a), - MaxwellToGL::BlendFunc(regs.blend.factor_dest_a)); - glBlendEquationSeparate(MaxwellToGL::BlendEquation(regs.blend.equation_rgb), - MaxwellToGL::BlendEquation(regs.blend.equation_a)); + glBlendFuncSeparate(MaxwellToGL::BlendFunc(regs.blend.color_source), + MaxwellToGL::BlendFunc(regs.blend.color_dest), + MaxwellToGL::BlendFunc(regs.blend.alpha_source), + MaxwellToGL::BlendFunc(regs.blend.alpha_dest)); + glBlendEquationSeparate(MaxwellToGL::BlendEquation(regs.blend.color_op), + MaxwellToGL::BlendEquation(regs.blend.alpha_op)); return; } @@ -922,35 +924,34 @@ void RasterizerOpenGL::SyncBlendState() { } glEnablei(GL_BLEND, static_cast<GLuint>(i)); - const auto& src = regs.independent_blend[i]; - glBlendFuncSeparatei(static_cast<GLuint>(i), MaxwellToGL::BlendFunc(src.factor_source_rgb), - MaxwellToGL::BlendFunc(src.factor_dest_rgb), - MaxwellToGL::BlendFunc(src.factor_source_a), - MaxwellToGL::BlendFunc(src.factor_dest_a)); - glBlendEquationSeparatei(static_cast<GLuint>(i), - MaxwellToGL::BlendEquation(src.equation_rgb), - MaxwellToGL::BlendEquation(src.equation_a)); + const auto& src = regs.blend_per_target[i]; + glBlendFuncSeparatei(static_cast<GLuint>(i), MaxwellToGL::BlendFunc(src.color_source), + MaxwellToGL::BlendFunc(src.color_dest), + MaxwellToGL::BlendFunc(src.alpha_source), + MaxwellToGL::BlendFunc(src.alpha_dest)); + glBlendEquationSeparatei(static_cast<GLuint>(i), MaxwellToGL::BlendEquation(src.color_op), + MaxwellToGL::BlendEquation(src.alpha_op)); } } void RasterizerOpenGL::SyncLogicOpState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::LogicOp]) { return; } flags[Dirty::LogicOp] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.logic_op.enable) { glEnable(GL_COLOR_LOGIC_OP); - glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation)); + glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.op)); } else { glDisable(GL_COLOR_LOGIC_OP); } } void RasterizerOpenGL::SyncScissorTest() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) { return; } @@ -959,7 +960,7 @@ void RasterizerOpenGL::SyncScissorTest() { const bool force = flags[VideoCommon::Dirty::RescaleScissors]; flags[VideoCommon::Dirty::RescaleScissors] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; const auto& resolution = Settings::values.resolution_info; const bool is_rescaling{texture_cache.IsRescaling()}; @@ -995,39 +996,39 @@ void RasterizerOpenGL::SyncScissorTest() { } void 
RasterizerOpenGL::SyncPointState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PointSize]) { return; } flags[Dirty::PointSize] = false; - oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable); - oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d.regs.vp_point_size.enable); + oglEnable(GL_POINT_SPRITE, maxwell3d->regs.point_sprite_enable); + oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.point_size_attribute.enabled); const bool is_rescaling{texture_cache.IsRescaling()}; const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f; - glPointSize(std::max(1.0f, maxwell3d.regs.point_size * scale)); + glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale)); } void RasterizerOpenGL::SyncLineState() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::LineWidth]) { return; } flags[Dirty::LineWidth] = false; - const auto& regs = maxwell3d.regs; - oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable); - glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased); + const auto& regs = maxwell3d->regs; + oglEnable(GL_LINE_SMOOTH, regs.line_anti_alias_enable); + glLineWidth(regs.line_anti_alias_enable ? regs.line_width_smooth : regs.line_width_aliased); } void RasterizerOpenGL::SyncPolygonOffset() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::PolygonOffset]) { return; } flags[Dirty::PolygonOffset] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable); oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable); oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable); @@ -1035,19 +1036,19 @@ void RasterizerOpenGL::SyncPolygonOffset() { if (regs.polygon_offset_fill_enable || regs.polygon_offset_line_enable || regs.polygon_offset_point_enable) { // Hardware divides polygon offset units by two - glPolygonOffsetClamp(regs.polygon_offset_factor, regs.polygon_offset_units / 2.0f, - regs.polygon_offset_clamp); + glPolygonOffsetClamp(regs.slope_scale_depth_bias, regs.depth_bias / 2.0f, + regs.depth_bias_clamp); } } void RasterizerOpenGL::SyncAlphaTest() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::AlphaTest]) { return; } flags[Dirty::AlphaTest] = false; - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (regs.alpha_test_enabled) { glEnable(GL_ALPHA_TEST); glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref); @@ -1057,25 +1058,25 @@ void RasterizerOpenGL::SyncAlphaTest() { } void RasterizerOpenGL::SyncFramebufferSRGB() { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::FramebufferSRGB]) { return; } flags[Dirty::FramebufferSRGB] = false; - oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb); + oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d->regs.framebuffer_srgb); } void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) { - const auto& regs = maxwell3d.regs; - if (regs.tfb_enabled == 0) { + const auto& regs = maxwell3d->regs; + if (regs.transform_feedback_enabled == 0) { return; } program->ConfigureTransformFeedback(); - UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) || - regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) || - 
regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry)); + UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) || + regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation) || + regs.IsShaderConfigEnabled(Maxwell::ShaderType::Geometry)); UNIMPLEMENTED_IF(primitive_mode != GL_POINTS); // We may have to call BeginTransformFeedbackNV here since they seem to call different @@ -1086,11 +1087,48 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum } void RasterizerOpenGL::EndTransformFeedback() { - if (maxwell3d.regs.tfb_enabled != 0) { + if (maxwell3d->regs.transform_feedback_enabled != 0) { glEndTransformFeedback(); } } +void RasterizerOpenGL::InitializeChannel(Tegra::Control::ChannelState& channel) { + CreateChannel(channel); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.CreateChannel(channel); + buffer_cache.CreateChannel(channel); + } + shader_cache.CreateChannel(channel); + query_cache.CreateChannel(channel); + state_tracker.SetupTables(channel); +} + +void RasterizerOpenGL::BindChannel(Tegra::Control::ChannelState& channel) { + const s32 channel_id = channel.bind_id; + BindToChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.BindToChannel(channel_id); + buffer_cache.BindToChannel(channel_id); + } + shader_cache.BindToChannel(channel_id); + query_cache.BindToChannel(channel_id); + state_tracker.ChangeChannel(channel); + state_tracker.InvalidateState(); +} + +void RasterizerOpenGL::ReleaseChannel(s32 channel_id) { + EraseChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.EraseChannel(channel_id); + buffer_cache.EraseChannel(channel_id); + } + shader_cache.EraseChannel(channel_id); + query_cache.EraseChannel(channel_id); +} + AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 31a16fcba..45131b785 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -12,6 +12,7 @@ #include <glad/glad.h> #include "common/common_types.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/rasterizer_accelerated.h" #include "video_core/rasterizer_interface.h" @@ -58,7 +59,8 @@ private: BufferCache& buffer_cache; }; -class RasterizerOpenGL : public VideoCore::RasterizerAccelerated { +class RasterizerOpenGL : public VideoCore::RasterizerAccelerated, + protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { public: explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, Core::Memory::Memory& cpu_memory_, const Device& device_, @@ -78,10 +80,11 @@ public: bool MustFlushRegion(VAddr addr, u64 size) override; void InvalidateRegion(VAddr addr, u64 size) override; void OnCPUWrite(VAddr addr, u64 size) override; - void SyncGuestHost() override; + void InvalidateGPUCache() override; void UnmapMemory(VAddr addr, u64 size) override; - void ModifyGPUMemory(GPUVAddr addr, u64 size) override; - void SignalSemaphore(GPUVAddr addr, u32 value) override; + void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; + void SignalFence(std::function<void()>&& func) override; + void 
SyncOperation(std::function<void()>&& func) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; void ReleaseFences() override; @@ -96,7 +99,7 @@ public: const Tegra::Engines::Fermi2D::Config& copy_config) override; Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span<u8> memory) override; + std::span<const u8> memory) override; bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) override; void LoadDiskResources(u64 title_id, std::stop_token stop_loading, @@ -107,6 +110,12 @@ public: return num_queued_commands > 0; } + void InitializeChannel(Tegra::Control::ChannelState& channel) override; + + void BindChannel(Tegra::Control::ChannelState& channel) override; + + void ReleaseChannel(s32 channel_id) override; + private: static constexpr size_t MAX_TEXTURES = 192; static constexpr size_t MAX_IMAGES = 48; @@ -191,9 +200,6 @@ private: void EndTransformFeedback(); Tegra::GPU& gpu; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; - Tegra::MemoryManager& gpu_memory; const Device& device; ScreenInfo& screen_info; diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index fa05b47ff..e94cfdb1a 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -79,11 +79,11 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key, info.tess_clockwise = key.tessellation_clockwise != 0; info.tess_primitive = [&key] { switch (key.tessellation_primitive) { - case Maxwell::TessellationPrimitive::Isolines: + case Maxwell::Tessellation::DomainType::Isolines: return Shader::TessPrimitive::Isolines; - case Maxwell::TessellationPrimitive::Triangles: + case Maxwell::Tessellation::DomainType::Triangles: return Shader::TessPrimitive::Triangles; - case Maxwell::TessellationPrimitive::Quads: + case Maxwell::Tessellation::DomainType::Quads: return Shader::TessPrimitive::Quads; } ASSERT(false); @@ -91,11 +91,11 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key, }(); info.tess_spacing = [&] { switch (key.tessellation_spacing) { - case Maxwell::TessellationSpacing::Equal: + case Maxwell::Tessellation::Spacing::Integer: return Shader::TessSpacing::Equal; - case Maxwell::TessellationSpacing::FractionalOdd: + case Maxwell::Tessellation::Spacing::FractionalOdd: return Shader::TessSpacing::FractionalOdd; - case Maxwell::TessellationSpacing::FractionalEven: + case Maxwell::Tessellation::Spacing::FractionalEven: return Shader::TessSpacing::FractionalEven; } ASSERT(false); @@ -140,28 +140,26 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key, } void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs) { - std::ranges::transform(regs.tfb_layouts, state.layouts.begin(), [](const auto& layout) { - return VideoCommon::TransformFeedbackState::Layout{ - .stream = layout.stream, - .varying_count = layout.varying_count, - .stride = layout.stride, - }; - }); - state.varyings = regs.tfb_varying_locs; + std::ranges::transform(regs.transform_feedback.controls, state.layouts.begin(), + [](const auto& layout) { + return VideoCommon::TransformFeedbackState::Layout{ + .stream = layout.stream, + .varying_count = layout.varying_count, + .stride = layout.stride, + }; + }); + state.varyings = regs.stream_out_layout; } } // Anonymous 
namespace ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, const Device& device_, - TextureCache& texture_cache_, BufferCache& buffer_cache_, - ProgramManager& program_manager_, StateTracker& state_tracker_, - VideoCore::ShaderNotify& shader_notify_) - : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, - emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_}, - buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_}, - shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()}, + const Device& device_, TextureCache& texture_cache_, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_) + : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_}, + texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_}, + state_tracker{state_tracker_}, shader_notify{shader_notify_}, + use_asynchronous_shaders{device.UseAsynchronousShaders()}, profile{ .supported_spirv = 0x00010000, @@ -311,16 +309,18 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() { current_pipeline = nullptr; return nullptr; } - const auto& regs{maxwell3d.regs}; + const auto& regs{maxwell3d->regs}; graphics_key.raw = 0; - graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0); + graphics_key.early_z.Assign(regs.mandated_early_z != 0 ? 1 : 0); graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0 ? regs.draw.topology.Value() : Maxwell::PrimitiveTopology{}); - graphics_key.tessellation_primitive.Assign(regs.tess_mode.prim.Value()); - graphics_key.tessellation_spacing.Assign(regs.tess_mode.spacing.Value()); - graphics_key.tessellation_clockwise.Assign(regs.tess_mode.cw.Value()); - graphics_key.xfb_enabled.Assign(regs.tfb_enabled != 0 ? 1 : 0); + graphics_key.tessellation_primitive.Assign(regs.tessellation.params.domain_type.Value()); + graphics_key.tessellation_spacing.Assign(regs.tessellation.params.spacing.Value()); + graphics_key.tessellation_clockwise.Assign( + regs.tessellation.params.output_primitives.Value() == + Maxwell::Tessellation::OutputPrimitives::Triangles_CW); + graphics_key.xfb_enabled.Assign(regs.transform_feedback_enabled != 0 ? 1 : 0); if (graphics_key.xfb_enabled) { SetXfbState(graphics_key.xfb_state, regs); } @@ -352,13 +352,13 @@ GraphicsPipeline* ShaderCache::BuiltPipeline(GraphicsPipeline* pipeline) const n } // If something is using depth, we can assume that games are not rendering anything which // will be used one time. - if (maxwell3d.regs.zeta_enable) { + if (maxwell3d->regs.zeta_enable) { return nullptr; } // If games are using a small index count, we can assume these are full screen quads. 
// Usually these shaders are only used once for building textures so we can assume they // can't be built async - if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { + if (maxwell3d->regs.index_buffer.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) { return pipeline; } return nullptr; @@ -369,7 +369,7 @@ ComputePipeline* ShaderCache::CurrentComputePipeline() { if (!shader) { return nullptr; } - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const ComputePipelineKey key{ .unique_hash = shader->unique_hash, .shared_memory_size = qmd.shared_alloc, @@ -481,9 +481,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline( previous_program = &program; } auto* const thread_worker{build_in_parallel ? workers.get() : nullptr}; - return std::make_unique<GraphicsPipeline>( - device, texture_cache, buffer_cache, gpu_memory, maxwell3d, program_manager, state_tracker, - thread_worker, &shader_notify, sources, sources_spirv, infos, key); + return std::make_unique<GraphicsPipeline>(device, texture_cache, buffer_cache, program_manager, + state_tracker, thread_worker, &shader_notify, sources, + sources_spirv, infos, key); } catch (Shader::Exception& exception) { LOG_ERROR(Render_OpenGL, "{}", exception.what()); @@ -492,9 +492,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline( std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline( const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) { - const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; - const auto& qmd{kepler_compute.launch_description}; - ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; + const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; + const auto& qmd{kepler_compute->launch_description}; + ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; env.SetCachedSize(shader->size_bytes); main_pools.ReleaseContents(); @@ -537,9 +537,8 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline( break; } - return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, gpu_memory, - kepler_compute, program_manager, program.info, code, - code_spirv); + return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, program_manager, + program.info, code, code_spirv); } catch (Shader::Exception& exception) { LOG_ERROR(Render_OpenGL, "{}", exception.what()); return nullptr; @@ -547,7 +546,7 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline( std::unique_ptr<ShaderWorker> ShaderCache::CreateWorkers() const { return std::make_unique<ShaderWorker>(std::max(std::thread::hardware_concurrency(), 2U) - 1, - "yuzu:ShaderBuilder", + "GlShaderBuilder", [this] { return Context{emu_window}; }); } diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index a14269dea..89f181fe3 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -30,12 +30,9 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>; class ShaderCache : public VideoCommon::ShaderCache { public: explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, const Device& device_, 
- TextureCache& texture_cache_, BufferCache& buffer_cache_, - ProgramManager& program_manager_, StateTracker& state_tracker_, - VideoCore::ShaderNotify& shader_notify_); + const Device& device_, TextureCache& texture_cache_, + BufferCache& buffer_cache_, ProgramManager& program_manager_, + StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_); ~ShaderCache(); void LoadDiskResources(u64 title_id, std::stop_token stop_loading, diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp index 912725ef7..a359f96f1 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.cpp +++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp @@ -7,8 +7,8 @@ #include "common/common_types.h" #include "core/core.h" +#include "video_core/control/channel_state.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/gpu.h" #include "video_core/renderer_opengl/gl_state_tracker.h" #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) @@ -38,12 +38,12 @@ void SetupDirtyColorMasks(Tables& tables) { void SetupDirtyVertexInstances(Tables& tables) { static constexpr std::size_t instance_base_offset = 3; for (std::size_t i = 0; i < Regs::NumVertexArrays; ++i) { - const std::size_t array_offset = OFF(vertex_array) + i * NUM(vertex_array[0]); + const std::size_t array_offset = OFF(vertex_streams) + i * NUM(vertex_streams[0]); const std::size_t instance_array_offset = array_offset + instance_base_offset; tables[0][instance_array_offset] = static_cast<u8>(VertexInstance0 + i); tables[1][instance_array_offset] = VertexInstances; - const std::size_t instance_offset = OFF(instanced_arrays) + i; + const std::size_t instance_offset = OFF(vertex_stream_instances) + i; tables[0][instance_offset] = static_cast<u8>(VertexInstance0 + i); tables[1][instance_offset] = VertexInstances; } @@ -70,8 +70,8 @@ void SetupDirtyViewports(Tables& tables) { FillBlock(tables[1], OFF(viewport_transform), NUM(viewport_transform), Viewports); FillBlock(tables[1], OFF(viewports), NUM(viewports), Viewports); - tables[0][OFF(viewport_transform_enabled)] = ViewportTransform; - tables[1][OFF(viewport_transform_enabled)] = Viewports; + tables[0][OFF(viewport_scale_offset_enbled)] = ViewportTransform; + tables[1][OFF(viewport_scale_offset_enbled)] = Viewports; } void SetupDirtyScissors(Tables& tables) { @@ -88,7 +88,7 @@ void SetupDirtyPolygonModes(Tables& tables) { tables[1][OFF(polygon_mode_front)] = PolygonModes; tables[1][OFF(polygon_mode_back)] = PolygonModes; - tables[0][OFF(fill_rectangle)] = PolygonModes; + tables[0][OFF(fill_via_triangle_mode)] = PolygonModes; } void SetupDirtyDepthTest(Tables& tables) { @@ -100,11 +100,11 @@ void SetupDirtyDepthTest(Tables& tables) { void SetupDirtyStencilTest(Tables& tables) { static constexpr std::array offsets = { - OFF(stencil_enable), OFF(stencil_front_func_func), OFF(stencil_front_func_ref), - OFF(stencil_front_func_mask), OFF(stencil_front_op_fail), OFF(stencil_front_op_zfail), - OFF(stencil_front_op_zpass), OFF(stencil_front_mask), OFF(stencil_two_side_enable), - OFF(stencil_back_func_func), OFF(stencil_back_func_ref), OFF(stencil_back_func_mask), - OFF(stencil_back_op_fail), OFF(stencil_back_op_zfail), OFF(stencil_back_op_zpass), + OFF(stencil_enable), OFF(stencil_front_op.func), OFF(stencil_front_ref), + OFF(stencil_front_func_mask), OFF(stencil_front_op.fail), OFF(stencil_front_op.zfail), + OFF(stencil_front_op.zpass), OFF(stencil_front_mask), OFF(stencil_two_side_enable), + OFF(stencil_back_op.func), 
OFF(stencil_back_ref), OFF(stencil_back_func_mask), + OFF(stencil_back_op.fail), OFF(stencil_back_op.zfail), OFF(stencil_back_op.zpass), OFF(stencil_back_mask)}; for (const auto offset : offsets) { tables[0][offset] = StencilTest; @@ -121,15 +121,15 @@ void SetupDirtyAlphaTest(Tables& tables) { void SetupDirtyBlend(Tables& tables) { FillBlock(tables[0], OFF(blend_color), NUM(blend_color), BlendColor); - tables[0][OFF(independent_blend_enable)] = BlendIndependentEnabled; + tables[0][OFF(blend_per_target_enabled)] = BlendIndependentEnabled; for (std::size_t i = 0; i < Regs::NumRenderTargets; ++i) { - const std::size_t offset = OFF(independent_blend) + i * NUM(independent_blend[0]); - FillBlock(tables[0], offset, NUM(independent_blend[0]), BlendState0 + i); + const std::size_t offset = OFF(blend_per_target) + i * NUM(blend_per_target[0]); + FillBlock(tables[0], offset, NUM(blend_per_target[0]), BlendState0 + i); tables[0][OFF(blend.enable) + i] = static_cast<u8>(BlendState0 + i); } - FillBlock(tables[1], OFF(independent_blend), NUM(independent_blend), BlendStates); + FillBlock(tables[1], OFF(blend_per_target), NUM(blend_per_target), BlendStates); FillBlock(tables[1], OFF(blend), NUM(blend), BlendStates); } @@ -142,13 +142,14 @@ void SetupDirtyPolygonOffset(Tables& tables) { table[OFF(polygon_offset_fill_enable)] = PolygonOffset; table[OFF(polygon_offset_line_enable)] = PolygonOffset; table[OFF(polygon_offset_point_enable)] = PolygonOffset; - table[OFF(polygon_offset_factor)] = PolygonOffset; - table[OFF(polygon_offset_units)] = PolygonOffset; - table[OFF(polygon_offset_clamp)] = PolygonOffset; + table[OFF(slope_scale_depth_bias)] = PolygonOffset; + table[OFF(depth_bias)] = PolygonOffset; + table[OFF(depth_bias_clamp)] = PolygonOffset; } void SetupDirtyMultisampleControl(Tables& tables) { - FillBlock(tables[0], OFF(multisample_control), NUM(multisample_control), MultisampleControl); + FillBlock(tables[0], OFF(anti_alias_alpha_control), NUM(anti_alias_alpha_control), + MultisampleControl); } void SetupDirtyRasterizeEnable(Tables& tables) { @@ -168,7 +169,7 @@ void SetupDirtyFragmentClampColor(Tables& tables) { } void SetupDirtyPointSize(Tables& tables) { - tables[0][OFF(vp_point_size)] = PointSize; + tables[0][OFF(point_size_attribute)] = PointSize; tables[0][OFF(point_size)] = PointSize; tables[0][OFF(point_sprite_enable)] = PointSize; } @@ -176,35 +177,34 @@ void SetupDirtyPointSize(Tables& tables) { void SetupDirtyLineWidth(Tables& tables) { tables[0][OFF(line_width_smooth)] = LineWidth; tables[0][OFF(line_width_aliased)] = LineWidth; - tables[0][OFF(line_smooth_enable)] = LineWidth; + tables[0][OFF(line_anti_alias_enable)] = LineWidth; } void SetupDirtyClipControl(Tables& tables) { auto& table = tables[0]; - table[OFF(screen_y_control)] = ClipControl; + table[OFF(window_origin)] = ClipControl; table[OFF(depth_mode)] = ClipControl; } void SetupDirtyDepthClampEnabled(Tables& tables) { - tables[0][OFF(view_volume_clip_control)] = DepthClampEnabled; + tables[0][OFF(viewport_clip_control)] = DepthClampEnabled; } void SetupDirtyMisc(Tables& tables) { auto& table = tables[0]; - table[OFF(clip_distance_enabled)] = ClipDistances; + table[OFF(user_clip_enable)] = ClipDistances; - table[OFF(front_face)] = FrontFace; + table[OFF(gl_front_face)] = FrontFace; - table[OFF(cull_test_enabled)] = CullTest; - table[OFF(cull_face)] = CullTest; + table[OFF(gl_cull_test_enabled)] = CullTest; + table[OFF(gl_cull_face)] = CullTest; } } // Anonymous namespace -StateTracker::StateTracker(Tegra::GPU& gpu) : 
flags{gpu.Maxwell3D().dirty.flags} { - auto& dirty = gpu.Maxwell3D().dirty; - auto& tables = dirty.tables; +void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) { + auto& tables{channel_state.maxwell_3d->dirty.tables}; SetupDirtyFlags(tables); SetupDirtyColorMasks(tables); SetupDirtyViewports(tables); @@ -230,4 +230,14 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} SetupDirtyMisc(tables); } +void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { + flags = &channel_state.maxwell_3d->dirty.flags; +} + +void StateTracker::InvalidateState() { + flags->set(); +} + +StateTracker::StateTracker() : flags{&default_flags} {} + } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h index 04e024f08..19bcf3f35 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.h +++ b/src/video_core/renderer_opengl/gl_state_tracker.h @@ -12,8 +12,10 @@ #include "video_core/engines/maxwell_3d.h" namespace Tegra { -class GPU; +namespace Control { +struct ChannelState; } +} // namespace Tegra namespace OpenGL { @@ -83,7 +85,7 @@ static_assert(Last <= std::numeric_limits<u8>::max()); class StateTracker { public: - explicit StateTracker(Tegra::GPU& gpu); + explicit StateTracker(); void BindIndexBuffer(GLuint new_index_buffer) { if (index_buffer == new_index_buffer) { @@ -121,94 +123,107 @@ public: } void NotifyScreenDrawVertexArray() { - flags[OpenGL::Dirty::VertexFormats] = true; - flags[OpenGL::Dirty::VertexFormat0 + 0] = true; - flags[OpenGL::Dirty::VertexFormat0 + 1] = true; + (*flags)[OpenGL::Dirty::VertexFormats] = true; + (*flags)[OpenGL::Dirty::VertexFormat0 + 0] = true; + (*flags)[OpenGL::Dirty::VertexFormat0 + 1] = true; - flags[VideoCommon::Dirty::VertexBuffers] = true; - flags[VideoCommon::Dirty::VertexBuffer0] = true; + (*flags)[VideoCommon::Dirty::VertexBuffers] = true; + (*flags)[VideoCommon::Dirty::VertexBuffer0] = true; - flags[OpenGL::Dirty::VertexInstances] = true; - flags[OpenGL::Dirty::VertexInstance0 + 0] = true; - flags[OpenGL::Dirty::VertexInstance0 + 1] = true; + (*flags)[OpenGL::Dirty::VertexInstances] = true; + (*flags)[OpenGL::Dirty::VertexInstance0 + 0] = true; + (*flags)[OpenGL::Dirty::VertexInstance0 + 1] = true; } void NotifyPolygonModes() { - flags[OpenGL::Dirty::PolygonModes] = true; - flags[OpenGL::Dirty::PolygonModeFront] = true; - flags[OpenGL::Dirty::PolygonModeBack] = true; + (*flags)[OpenGL::Dirty::PolygonModes] = true; + (*flags)[OpenGL::Dirty::PolygonModeFront] = true; + (*flags)[OpenGL::Dirty::PolygonModeBack] = true; } void NotifyViewport0() { - flags[OpenGL::Dirty::Viewports] = true; - flags[OpenGL::Dirty::Viewport0] = true; + (*flags)[OpenGL::Dirty::Viewports] = true; + (*flags)[OpenGL::Dirty::Viewport0] = true; } void NotifyScissor0() { - flags[OpenGL::Dirty::Scissors] = true; - flags[OpenGL::Dirty::Scissor0] = true; + (*flags)[OpenGL::Dirty::Scissors] = true; + (*flags)[OpenGL::Dirty::Scissor0] = true; } void NotifyColorMask(size_t index) { - flags[OpenGL::Dirty::ColorMasks] = true; - flags[OpenGL::Dirty::ColorMask0 + index] = true; + (*flags)[OpenGL::Dirty::ColorMasks] = true; + (*flags)[OpenGL::Dirty::ColorMask0 + index] = true; } void NotifyBlend0() { - flags[OpenGL::Dirty::BlendStates] = true; - flags[OpenGL::Dirty::BlendState0] = true; + (*flags)[OpenGL::Dirty::BlendStates] = true; + (*flags)[OpenGL::Dirty::BlendState0] = true; } void NotifyFramebuffer() { - flags[VideoCommon::Dirty::RenderTargets] 
= true; + (*flags)[VideoCommon::Dirty::RenderTargets] = true; } void NotifyFrontFace() { - flags[OpenGL::Dirty::FrontFace] = true; + (*flags)[OpenGL::Dirty::FrontFace] = true; } void NotifyCullTest() { - flags[OpenGL::Dirty::CullTest] = true; + (*flags)[OpenGL::Dirty::CullTest] = true; } void NotifyDepthMask() { - flags[OpenGL::Dirty::DepthMask] = true; + (*flags)[OpenGL::Dirty::DepthMask] = true; } void NotifyDepthTest() { - flags[OpenGL::Dirty::DepthTest] = true; + (*flags)[OpenGL::Dirty::DepthTest] = true; } void NotifyStencilTest() { - flags[OpenGL::Dirty::StencilTest] = true; + (*flags)[OpenGL::Dirty::StencilTest] = true; } void NotifyPolygonOffset() { - flags[OpenGL::Dirty::PolygonOffset] = true; + (*flags)[OpenGL::Dirty::PolygonOffset] = true; } void NotifyRasterizeEnable() { - flags[OpenGL::Dirty::RasterizeEnable] = true; + (*flags)[OpenGL::Dirty::RasterizeEnable] = true; } void NotifyFramebufferSRGB() { - flags[OpenGL::Dirty::FramebufferSRGB] = true; + (*flags)[OpenGL::Dirty::FramebufferSRGB] = true; } void NotifyLogicOp() { - flags[OpenGL::Dirty::LogicOp] = true; + (*flags)[OpenGL::Dirty::LogicOp] = true; } void NotifyClipControl() { - flags[OpenGL::Dirty::ClipControl] = true; + (*flags)[OpenGL::Dirty::ClipControl] = true; } void NotifyAlphaTest() { - flags[OpenGL::Dirty::AlphaTest] = true; + (*flags)[OpenGL::Dirty::AlphaTest] = true; } + void NotifyRange(u8 start, u8 end) { + for (auto flag = start; flag <= end; flag++) { + (*flags)[flag] = true; + } + } + + void SetupTables(Tegra::Control::ChannelState& channel_state); + + void ChangeChannel(Tegra::Control::ChannelState& channel_state); + + void InvalidateState(); + private: - Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags{}; GLuint framebuffer = 0; GLuint index_buffer = 0; diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index 9a72d0d6d..e14f9b2db 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -87,7 +87,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM - {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R4G4_UNORM + {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // G4R4_UNORM {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB @@ -99,6 +99,8 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB {GL_COMPRESSED_RGBA_ASTC_6x6_KHR}, // ASTC_2D_6X6_UNORM {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR}, // ASTC_2D_6X6_SRGB {GL_COMPRESSED_RGBA_ASTC_10x6_KHR}, // ASTC_2D_10X6_UNORM + {GL_COMPRESSED_RGBA_ASTC_10x5_KHR}, // ASTC_2D_10X5_UNORM + {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR}, // ASTC_2D_10X5_SRGB {GL_COMPRESSED_RGBA_ASTC_10x10_KHR}, // ASTC_2D_10X10_UNORM {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR}, // ASTC_2D_10X10_SRGB {GL_COMPRESSED_RGBA_ASTC_12x12_KHR}, // ASTC_2D_12X12_UNORM @@ -124,51 +126,60 @@ inline const FormatTuple& GetFormatTuple(VideoCore::Surface::PixelFormat pixel_f inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) { switch (attrib.type) { - case Maxwell::VertexAttribute::Type::UnsignedNorm: - case 
Maxwell::VertexAttribute::Type::UnsignedScaled: - case Maxwell::VertexAttribute::Type::UnsignedInt: + case Maxwell::VertexAttribute::Type::UnusedEnumDoNotUseBecauseItWillGoAway: + ASSERT_MSG(false, "Invalid vertex attribute type!"); + break; + case Maxwell::VertexAttribute::Type::UNorm: + case Maxwell::VertexAttribute::Type::UScaled: + case Maxwell::VertexAttribute::Type::UInt: switch (attrib.size) { - case Maxwell::VertexAttribute::Size::Size_8: - case Maxwell::VertexAttribute::Size::Size_8_8: - case Maxwell::VertexAttribute::Size::Size_8_8_8: - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return GL_UNSIGNED_BYTE; - case Maxwell::VertexAttribute::Size::Size_16: - case Maxwell::VertexAttribute::Size::Size_16_16: - case Maxwell::VertexAttribute::Size::Size_16_16_16: - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return GL_UNSIGNED_SHORT; - case Maxwell::VertexAttribute::Size::Size_32: - case Maxwell::VertexAttribute::Size::Size_32_32: - case Maxwell::VertexAttribute::Size::Size_32_32_32: - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32: + case Maxwell::VertexAttribute::Size::Size_R32_G32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32: return GL_UNSIGNED_INT; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return GL_UNSIGNED_INT_2_10_10_10_REV; default: break; } break; - case Maxwell::VertexAttribute::Type::SignedNorm: - case Maxwell::VertexAttribute::Type::SignedScaled: - case Maxwell::VertexAttribute::Type::SignedInt: + case Maxwell::VertexAttribute::Type::SNorm: + case Maxwell::VertexAttribute::Type::SScaled: + case Maxwell::VertexAttribute::Type::SInt: switch (attrib.size) { - case Maxwell::VertexAttribute::Size::Size_8: - case Maxwell::VertexAttribute::Size::Size_8_8: - case Maxwell::VertexAttribute::Size::Size_8_8_8: - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return GL_BYTE; - case Maxwell::VertexAttribute::Size::Size_16: - case Maxwell::VertexAttribute::Size::Size_16_16: - case Maxwell::VertexAttribute::Size::Size_16_16_16: - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return GL_SHORT; - case Maxwell::VertexAttribute::Size::Size_32: - case Maxwell::VertexAttribute::Size::Size_32_32: - case 
Maxwell::VertexAttribute::Size::Size_32_32_32: - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32: + case Maxwell::VertexAttribute::Size::Size_R32_G32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32: return GL_INT; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return GL_INT_2_10_10_10_REV; default: break; @@ -176,17 +187,17 @@ inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) { break; case Maxwell::VertexAttribute::Type::Float: switch (attrib.size) { - case Maxwell::VertexAttribute::Size::Size_16: - case Maxwell::VertexAttribute::Size::Size_16_16: - case Maxwell::VertexAttribute::Size::Size_16_16_16: - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return GL_HALF_FLOAT; - case Maxwell::VertexAttribute::Size::Size_32: - case Maxwell::VertexAttribute::Size::Size_32_32: - case Maxwell::VertexAttribute::Size::Size_32_32_32: - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32: + case Maxwell::VertexAttribute::Size::Size_R32_G32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32: return GL_FLOAT; - case Maxwell::VertexAttribute::Size::Size_11_11_10: + case Maxwell::VertexAttribute::Size::Size_B10_G11_R11: return GL_UNSIGNED_INT_10F_11F_11F_REV; default: break; @@ -333,20 +344,20 @@ inline GLenum DepthCompareFunc(Tegra::Texture::DepthCompareFunc func) { inline GLenum BlendEquation(Maxwell::Blend::Equation equation) { switch (equation) { - case Maxwell::Blend::Equation::Add: - case Maxwell::Blend::Equation::AddGL: + case Maxwell::Blend::Equation::Add_D3D: + case Maxwell::Blend::Equation::Add_GL: return GL_FUNC_ADD; - case Maxwell::Blend::Equation::Subtract: - case Maxwell::Blend::Equation::SubtractGL: + case Maxwell::Blend::Equation::Subtract_D3D: + case Maxwell::Blend::Equation::Subtract_GL: return GL_FUNC_SUBTRACT; - case Maxwell::Blend::Equation::ReverseSubtract: - case Maxwell::Blend::Equation::ReverseSubtractGL: + case Maxwell::Blend::Equation::ReverseSubtract_D3D: + case Maxwell::Blend::Equation::ReverseSubtract_GL: return GL_FUNC_REVERSE_SUBTRACT; - case Maxwell::Blend::Equation::Min: - case Maxwell::Blend::Equation::MinGL: + case Maxwell::Blend::Equation::Min_D3D: + case Maxwell::Blend::Equation::Min_GL: return GL_MIN; - case Maxwell::Blend::Equation::Max: - case Maxwell::Blend::Equation::MaxGL: + case Maxwell::Blend::Equation::Max_D3D: + case Maxwell::Blend::Equation::Max_GL: return GL_MAX; } UNIMPLEMENTED_MSG("Unimplemented blend equation={}", equation); @@ -355,62 +366,62 @@ inline GLenum BlendEquation(Maxwell::Blend::Equation equation) { inline GLenum BlendFunc(Maxwell::Blend::Factor factor) { switch (factor) { - case Maxwell::Blend::Factor::Zero: - case Maxwell::Blend::Factor::ZeroGL: + case Maxwell::Blend::Factor::Zero_D3D: + case Maxwell::Blend::Factor::Zero_GL: return GL_ZERO; - case Maxwell::Blend::Factor::One: - case Maxwell::Blend::Factor::OneGL: + case Maxwell::Blend::Factor::One_D3D: + case Maxwell::Blend::Factor::One_GL: return GL_ONE; - case Maxwell::Blend::Factor::SourceColor: - case Maxwell::Blend::Factor::SourceColorGL: + case 
Maxwell::Blend::Factor::SourceColor_D3D: + case Maxwell::Blend::Factor::SourceColor_GL: return GL_SRC_COLOR; - case Maxwell::Blend::Factor::OneMinusSourceColor: - case Maxwell::Blend::Factor::OneMinusSourceColorGL: + case Maxwell::Blend::Factor::OneMinusSourceColor_D3D: + case Maxwell::Blend::Factor::OneMinusSourceColor_GL: return GL_ONE_MINUS_SRC_COLOR; - case Maxwell::Blend::Factor::SourceAlpha: - case Maxwell::Blend::Factor::SourceAlphaGL: + case Maxwell::Blend::Factor::SourceAlpha_D3D: + case Maxwell::Blend::Factor::SourceAlpha_GL: return GL_SRC_ALPHA; - case Maxwell::Blend::Factor::OneMinusSourceAlpha: - case Maxwell::Blend::Factor::OneMinusSourceAlphaGL: + case Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusSourceAlpha_GL: return GL_ONE_MINUS_SRC_ALPHA; - case Maxwell::Blend::Factor::DestAlpha: - case Maxwell::Blend::Factor::DestAlphaGL: + case Maxwell::Blend::Factor::DestAlpha_D3D: + case Maxwell::Blend::Factor::DestAlpha_GL: return GL_DST_ALPHA; - case Maxwell::Blend::Factor::OneMinusDestAlpha: - case Maxwell::Blend::Factor::OneMinusDestAlphaGL: + case Maxwell::Blend::Factor::OneMinusDestAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusDestAlpha_GL: return GL_ONE_MINUS_DST_ALPHA; - case Maxwell::Blend::Factor::DestColor: - case Maxwell::Blend::Factor::DestColorGL: + case Maxwell::Blend::Factor::DestColor_D3D: + case Maxwell::Blend::Factor::DestColor_GL: return GL_DST_COLOR; - case Maxwell::Blend::Factor::OneMinusDestColor: - case Maxwell::Blend::Factor::OneMinusDestColorGL: + case Maxwell::Blend::Factor::OneMinusDestColor_D3D: + case Maxwell::Blend::Factor::OneMinusDestColor_GL: return GL_ONE_MINUS_DST_COLOR; - case Maxwell::Blend::Factor::SourceAlphaSaturate: - case Maxwell::Blend::Factor::SourceAlphaSaturateGL: + case Maxwell::Blend::Factor::SourceAlphaSaturate_D3D: + case Maxwell::Blend::Factor::SourceAlphaSaturate_GL: return GL_SRC_ALPHA_SATURATE; - case Maxwell::Blend::Factor::Source1Color: - case Maxwell::Blend::Factor::Source1ColorGL: + case Maxwell::Blend::Factor::Source1Color_D3D: + case Maxwell::Blend::Factor::Source1Color_GL: return GL_SRC1_COLOR; - case Maxwell::Blend::Factor::OneMinusSource1Color: - case Maxwell::Blend::Factor::OneMinusSource1ColorGL: + case Maxwell::Blend::Factor::OneMinusSource1Color_D3D: + case Maxwell::Blend::Factor::OneMinusSource1Color_GL: return GL_ONE_MINUS_SRC1_COLOR; - case Maxwell::Blend::Factor::Source1Alpha: - case Maxwell::Blend::Factor::Source1AlphaGL: + case Maxwell::Blend::Factor::Source1Alpha_D3D: + case Maxwell::Blend::Factor::Source1Alpha_GL: return GL_SRC1_ALPHA; - case Maxwell::Blend::Factor::OneMinusSource1Alpha: - case Maxwell::Blend::Factor::OneMinusSource1AlphaGL: + case Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D: + case Maxwell::Blend::Factor::OneMinusSource1Alpha_GL: return GL_ONE_MINUS_SRC1_ALPHA; - case Maxwell::Blend::Factor::ConstantColor: - case Maxwell::Blend::Factor::ConstantColorGL: + case Maxwell::Blend::Factor::BlendFactor_D3D: + case Maxwell::Blend::Factor::ConstantColor_GL: return GL_CONSTANT_COLOR; - case Maxwell::Blend::Factor::OneMinusConstantColor: - case Maxwell::Blend::Factor::OneMinusConstantColorGL: + case Maxwell::Blend::Factor::OneMinusBlendFactor_D3D: + case Maxwell::Blend::Factor::OneMinusConstantColor_GL: return GL_ONE_MINUS_CONSTANT_COLOR; - case Maxwell::Blend::Factor::ConstantAlpha: - case Maxwell::Blend::Factor::ConstantAlphaGL: + case Maxwell::Blend::Factor::BothSourceAlpha_D3D: + case Maxwell::Blend::Factor::ConstantAlpha_GL: return 
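
Every blend factor appears twice in the register encoding, once with a D3D-style value and once with a GL-style value, and the translation collapses each alias pair onto one backend constant. That both aliases really agree is easy to pin down at compile time; a sketch with toy enum values (the real hardware encodings differ):

#include <cstdint>

// Toy model: the hardware accepts a D3D-style and a GL-style encoding of
// the same blend factor (values here are illustrative, not the real ones).
enum class Factor : std::uint32_t { Zero_D3D = 0x1, One_D3D = 0x2, Zero_GL = 0x4000, One_GL = 0x4001 };
enum class Backend { Zero, One };

constexpr Backend Translate(Factor factor) {
    switch (factor) {
    case Factor::Zero_D3D:
    case Factor::Zero_GL:
        return Backend::Zero;
    case Factor::One_D3D:
    case Factor::One_GL:
        return Backend::One;
    }
    return Backend::Zero;
}

// Each alias pair must land on the same backend constant.
static_assert(Translate(Factor::Zero_D3D) == Translate(Factor::Zero_GL));
static_assert(Translate(Factor::One_D3D) == Translate(Factor::One_GL));
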
GL_CONSTANT_ALPHA; - case Maxwell::Blend::Factor::OneMinusConstantAlpha: - case Maxwell::Blend::Factor::OneMinusConstantAlphaGL: + case Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusConstantAlpha_GL: return GL_ONE_MINUS_CONSTANT_ALPHA; } UNIMPLEMENTED_MSG("Unimplemented blend factor={}", factor); @@ -419,60 +430,60 @@ inline GLenum BlendFunc(Maxwell::Blend::Factor factor) { inline GLenum ComparisonOp(Maxwell::ComparisonOp comparison) { switch (comparison) { - case Maxwell::ComparisonOp::Never: - case Maxwell::ComparisonOp::NeverOld: + case Maxwell::ComparisonOp::Never_D3D: + case Maxwell::ComparisonOp::Never_GL: return GL_NEVER; - case Maxwell::ComparisonOp::Less: - case Maxwell::ComparisonOp::LessOld: + case Maxwell::ComparisonOp::Less_D3D: + case Maxwell::ComparisonOp::Less_GL: return GL_LESS; - case Maxwell::ComparisonOp::Equal: - case Maxwell::ComparisonOp::EqualOld: + case Maxwell::ComparisonOp::Equal_D3D: + case Maxwell::ComparisonOp::Equal_GL: return GL_EQUAL; - case Maxwell::ComparisonOp::LessEqual: - case Maxwell::ComparisonOp::LessEqualOld: + case Maxwell::ComparisonOp::LessEqual_D3D: + case Maxwell::ComparisonOp::LessEqual_GL: return GL_LEQUAL; - case Maxwell::ComparisonOp::Greater: - case Maxwell::ComparisonOp::GreaterOld: + case Maxwell::ComparisonOp::Greater_D3D: + case Maxwell::ComparisonOp::Greater_GL: return GL_GREATER; - case Maxwell::ComparisonOp::NotEqual: - case Maxwell::ComparisonOp::NotEqualOld: + case Maxwell::ComparisonOp::NotEqual_D3D: + case Maxwell::ComparisonOp::NotEqual_GL: return GL_NOTEQUAL; - case Maxwell::ComparisonOp::GreaterEqual: - case Maxwell::ComparisonOp::GreaterEqualOld: + case Maxwell::ComparisonOp::GreaterEqual_D3D: + case Maxwell::ComparisonOp::GreaterEqual_GL: return GL_GEQUAL; - case Maxwell::ComparisonOp::Always: - case Maxwell::ComparisonOp::AlwaysOld: + case Maxwell::ComparisonOp::Always_D3D: + case Maxwell::ComparisonOp::Always_GL: return GL_ALWAYS; } UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison); return GL_ALWAYS; } -inline GLenum StencilOp(Maxwell::StencilOp stencil) { +inline GLenum StencilOp(Maxwell::StencilOp::Op stencil) { switch (stencil) { - case Maxwell::StencilOp::Keep: - case Maxwell::StencilOp::KeepOGL: + case Maxwell::StencilOp::Op::Keep_D3D: + case Maxwell::StencilOp::Op::Keep_GL: return GL_KEEP; - case Maxwell::StencilOp::Zero: - case Maxwell::StencilOp::ZeroOGL: + case Maxwell::StencilOp::Op::Zero_D3D: + case Maxwell::StencilOp::Op::Zero_GL: return GL_ZERO; - case Maxwell::StencilOp::Replace: - case Maxwell::StencilOp::ReplaceOGL: + case Maxwell::StencilOp::Op::Replace_D3D: + case Maxwell::StencilOp::Op::Replace_GL: return GL_REPLACE; - case Maxwell::StencilOp::Incr: - case Maxwell::StencilOp::IncrOGL: + case Maxwell::StencilOp::Op::IncrSaturate_D3D: + case Maxwell::StencilOp::Op::IncrSaturate_GL: return GL_INCR; - case Maxwell::StencilOp::Decr: - case Maxwell::StencilOp::DecrOGL: + case Maxwell::StencilOp::Op::DecrSaturate_D3D: + case Maxwell::StencilOp::Op::DecrSaturate_GL: return GL_DECR; - case Maxwell::StencilOp::Invert: - case Maxwell::StencilOp::InvertOGL: + case Maxwell::StencilOp::Op::Invert_D3D: + case Maxwell::StencilOp::Op::Invert_GL: return GL_INVERT; - case Maxwell::StencilOp::IncrWrap: - case Maxwell::StencilOp::IncrWrapOGL: + case Maxwell::StencilOp::Op::Incr_D3D: + case Maxwell::StencilOp::Op::Incr_GL: return GL_INCR_WRAP; - case Maxwell::StencilOp::DecrWrap: - case Maxwell::StencilOp::DecrWrapOGL: + case Maxwell::StencilOp::Op::Decr_D3D: + 
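
The move from Maxwell::StencilOp to Maxwell::StencilOp::Op mirrors the register layout: StencilOp is now a struct grouping the per-face fields, with the enumeration nested inside it. A sketch of that shape, assuming 32-bit fields and abbreviating the encodings:

#include <cstdint>

struct StencilOp {
    enum class Op : std::uint32_t {
        Keep_D3D = 1,
        Zero_D3D = 2,
        // ... remaining D3D and GL encodings elided
    };
    // One 32-bit register word per field, as read by DynamicState::Refresh.
    Op fail;
    Op zfail;
    Op zpass;
    std::uint32_t func;
};

struct Regs {
    StencilOp stencil_front_op;
    StencilOp stencil_back_op;
};

static_assert(sizeof(StencilOp) == 16); // assumes no padding between words
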
case Maxwell::StencilOp::Op::Decr_GL: return GL_DECR_WRAP; } UNIMPLEMENTED_MSG("Unimplemented stencil op={}", stencil); @@ -503,39 +514,39 @@ inline GLenum CullFace(Maxwell::CullFace cull_face) { return GL_BACK; } -inline GLenum LogicOp(Maxwell::LogicOperation operation) { +inline GLenum LogicOp(Maxwell::LogicOp::Op operation) { switch (operation) { - case Maxwell::LogicOperation::Clear: + case Maxwell::LogicOp::Op::Clear: return GL_CLEAR; - case Maxwell::LogicOperation::And: + case Maxwell::LogicOp::Op::And: return GL_AND; - case Maxwell::LogicOperation::AndReverse: + case Maxwell::LogicOp::Op::AndReverse: return GL_AND_REVERSE; - case Maxwell::LogicOperation::Copy: + case Maxwell::LogicOp::Op::Copy: return GL_COPY; - case Maxwell::LogicOperation::AndInverted: + case Maxwell::LogicOp::Op::AndInverted: return GL_AND_INVERTED; - case Maxwell::LogicOperation::NoOp: + case Maxwell::LogicOp::Op::NoOp: return GL_NOOP; - case Maxwell::LogicOperation::Xor: + case Maxwell::LogicOp::Op::Xor: return GL_XOR; - case Maxwell::LogicOperation::Or: + case Maxwell::LogicOp::Op::Or: return GL_OR; - case Maxwell::LogicOperation::Nor: + case Maxwell::LogicOp::Op::Nor: return GL_NOR; - case Maxwell::LogicOperation::Equiv: + case Maxwell::LogicOp::Op::Equiv: return GL_EQUIV; - case Maxwell::LogicOperation::Invert: + case Maxwell::LogicOp::Op::Invert: return GL_INVERT; - case Maxwell::LogicOperation::OrReverse: + case Maxwell::LogicOp::Op::OrReverse: return GL_OR_REVERSE; - case Maxwell::LogicOperation::CopyInverted: + case Maxwell::LogicOp::Op::CopyInverted: return GL_COPY_INVERTED; - case Maxwell::LogicOperation::OrInverted: + case Maxwell::LogicOp::Op::OrInverted: return GL_OR_INVERTED; - case Maxwell::LogicOperation::Nand: + case Maxwell::LogicOp::Op::Nand: return GL_NAND; - case Maxwell::LogicOperation::Set: + case Maxwell::LogicOp::Op::Set: return GL_SET; } UNIMPLEMENTED_MSG("Unimplemented logic operation={}", operation); diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 34f3f7a67..8bd5eba7e 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -131,7 +131,7 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_, Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, std::unique_ptr<Core::Frontend::GraphicsContext> context_) : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_}, - emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu}, + emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{}, program_manager{device}, rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) { if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) { diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp index 733b454de..f85ed8e5b 100644 --- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp @@ -34,14 +34,15 @@ constexpr std::array POLYGON_OFFSET_ENABLE_LUT = { }; void RefreshXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs) { - std::ranges::transform(regs.tfb_layouts, state.layouts.begin(), [](const auto& layout) { - return VideoCommon::TransformFeedbackState::Layout{ - .stream = layout.stream, - .varying_count = layout.varying_count, - .stride = layout.stride, - }; - }); - 
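
The sixteen logic ops translate one-to-one because the hardware encoding tracks the GL enum values, which run contiguously from GL_CLEAR = 0x1500 to GL_SET = 0x150F; the explicit switch merely guards against enum drift. A sketch of the compact range-checked conversion this relies on:

#include <cassert>
#include <cstdint>

enum class Op : std::uint32_t { Clear = 0x1500, And = 0x1501, /* ... */ Set = 0x150F };

// GL_CLEAR through GL_SET occupy the contiguous range 0x1500..0x150F, so a
// cast suffices whenever the register value is known to be in range.
constexpr std::uint32_t ToGL(Op op) {
    const auto raw = static_cast<std::uint32_t>(op);
    assert(raw >= 0x1500 && raw <= 0x150F);
    return raw;
}

static_assert(ToGL(Op::Set) == 0x150F);
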
state.varyings = regs.tfb_varying_locs; + std::ranges::transform(regs.transform_feedback.controls, state.layouts.begin(), + [](const auto& layout) { + return VideoCommon::TransformFeedbackState::Layout{ + .stream = layout.stream, + .varying_count = layout.varying_count, + .stride = layout.stride, + }; + }); + state.varyings = regs.stream_out_layout; } } // Anonymous namespace @@ -58,33 +59,38 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, raw1 = 0; extended_dynamic_state.Assign(has_extended_dynamic_state ? 1 : 0); dynamic_vertex_input.Assign(has_dynamic_vertex_input ? 1 : 0); - xfb_enabled.Assign(regs.tfb_enabled != 0); + xfb_enabled.Assign(regs.transform_feedback_enabled != 0); primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0); depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0); - depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value()); + depth_clamp_disabled.Assign(regs.viewport_clip_control.geometry_clip == + Maxwell::ViewportClipControl::GeometryClip::Passthrough || + regs.viewport_clip_control.geometry_clip == + Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ || + regs.viewport_clip_control.geometry_clip == + Maxwell::ViewportClipControl::GeometryClip::FrustumZ); ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0); polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front)); patch_control_points_minus_one.Assign(regs.patch_vertices - 1); - tessellation_primitive.Assign(static_cast<u32>(regs.tess_mode.prim.Value())); - tessellation_spacing.Assign(static_cast<u32>(regs.tess_mode.spacing.Value())); - tessellation_clockwise.Assign(regs.tess_mode.cw.Value()); + tessellation_primitive.Assign(static_cast<u32>(regs.tessellation.params.domain_type.Value())); + tessellation_spacing.Assign(static_cast<u32>(regs.tessellation.params.spacing.Value())); + tessellation_clockwise.Assign(regs.tessellation.params.output_primitives.Value() == + Maxwell::Tessellation::OutputPrimitives::Triangles_CW); logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0); - logic_op.Assign(PackLogicOp(regs.logic_op.operation)); + logic_op.Assign(PackLogicOp(regs.logic_op.op)); topology.Assign(regs.draw.topology); - msaa_mode.Assign(regs.multisample_mode); + msaa_mode.Assign(regs.anti_alias_samples_mode); raw2 = 0; rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0); const auto test_func = - regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always; + regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always_GL; alpha_test_func.Assign(PackComparisonOp(test_func)); - early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0); + early_z.Assign(regs.mandated_early_z != 0 ? 1 : 0); depth_enabled.Assign(regs.zeta_enable != 0 ? 1 : 0); depth_format.Assign(static_cast<u32>(regs.zeta.format)); - y_negate.Assign(regs.screen_y_control.y_negate != 0 ? 1 : 0); - provoking_vertex_last.Assign(regs.provoking_vertex_last != 0 ? 1 : 0); - conservative_raster_enable.Assign(regs.conservative_raster_enable != 0 ? 1 : 0); - smooth_lines.Assign(regs.line_smooth_enable != 0 ? 1 : 0); + y_negate.Assign(regs.window_origin.mode != Maxwell::WindowOrigin::Mode::UpperLeft ? 1 : 0); + provoking_vertex_last.Assign(regs.provoking_vertex == Maxwell::ProvokingVertex::Last ? 1 : 0); + smooth_lines.Assign(regs.line_anti_alias_enable != 0 ? 
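
The transform-feedback refresh illustrates the general recipe for building the hashable pipeline key: project each register struct into plain-old-data layout entries with std::ranges::transform. A self-contained sketch under the assumption of simplified field types:

#include <algorithm>
#include <array>
#include <cstdint>

struct Control { std::uint32_t stream, varying_count, stride; }; // register side
struct Layout { std::uint32_t stream, varying_count, stride; };  // key side

int main() {
    std::array<Control, 4> controls{{{0, 8, 32}, {1, 4, 16}, {}, {}}};
    std::array<Layout, 4> layouts{};
    std::ranges::transform(controls, layouts.begin(), [](const Control& c) {
        return Layout{.stream = c.stream, .varying_count = c.varying_count, .stride = c.stride};
    });
    return layouts[1].stride == 16 ? 0 : 1;
}
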
1 : 0); for (size_t i = 0; i < regs.rt.size(); ++i) { color_formats[i] = static_cast<u8>(regs.rt[i].format); @@ -116,8 +122,8 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, maxwell3d.dirty.flags[Dirty::VertexInput] = false; enabled_divisors = 0; for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) { - const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index); - binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0; + const bool is_enabled = regs.vertex_stream_instances.IsInstancingEnabled(index); + binding_divisors[index] = is_enabled ? regs.vertex_streams[index].frequency : 0; enabled_divisors |= (is_enabled ? u64{1} : 0) << index; } for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) { @@ -164,17 +170,17 @@ void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t // TODO: C++20 Use templated lambda to deduplicate code - if (!regs.independent_blend_enable) { - const auto& src = regs.blend; - if (!src.enable[index]) { + if (!regs.blend_per_target_enabled) { + if (!regs.blend.enable[index]) { return; } - equation_rgb.Assign(PackBlendEquation(src.equation_rgb)); - equation_a.Assign(PackBlendEquation(src.equation_a)); - factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb)); - factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb)); - factor_source_a.Assign(PackBlendFactor(src.factor_source_a)); - factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a)); + const auto& src = regs.blend; + equation_rgb.Assign(PackBlendEquation(src.color_op)); + equation_a.Assign(PackBlendEquation(src.alpha_op)); + factor_source_rgb.Assign(PackBlendFactor(src.color_source)); + factor_dest_rgb.Assign(PackBlendFactor(src.color_dest)); + factor_source_a.Assign(PackBlendFactor(src.alpha_source)); + factor_dest_a.Assign(PackBlendFactor(src.alpha_dest)); enable.Assign(1); return; } @@ -182,34 +188,34 @@ void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t if (!regs.blend.enable[index]) { return; } - const auto& src = regs.independent_blend[index]; - equation_rgb.Assign(PackBlendEquation(src.equation_rgb)); - equation_a.Assign(PackBlendEquation(src.equation_a)); - factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb)); - factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb)); - factor_source_a.Assign(PackBlendFactor(src.factor_source_a)); - factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a)); + const auto& src = regs.blend_per_target[index]; + equation_rgb.Assign(PackBlendEquation(src.color_op)); + equation_a.Assign(PackBlendEquation(src.alpha_op)); + factor_source_rgb.Assign(PackBlendFactor(src.color_source)); + factor_dest_rgb.Assign(PackBlendFactor(src.color_dest)); + factor_source_a.Assign(PackBlendFactor(src.alpha_source)); + factor_dest_a.Assign(PackBlendFactor(src.alpha_dest)); enable.Assign(1); } void FixedPipelineState::DynamicState::Refresh(const Maxwell& regs) { - u32 packed_front_face = PackFrontFace(regs.front_face); - if (regs.screen_y_control.triangle_rast_flip != 0) { + u32 packed_front_face = PackFrontFace(regs.gl_front_face); + if (regs.window_origin.flip_y != 0) { // Flip front face packed_front_face = 1 - packed_front_face; } raw1 = 0; raw2 = 0; - front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op_fail)); - front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op_zfail)); - front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op_zpass)); - 
front.test_func.Assign(PackComparisonOp(regs.stencil_front_func_func)); + front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op.fail)); + front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op.zfail)); + front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op.zpass)); + front.test_func.Assign(PackComparisonOp(regs.stencil_front_op.func)); if (regs.stencil_two_side_enable) { - back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op_fail)); - back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op_zfail)); - back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op_zpass)); - back.test_func.Assign(PackComparisonOp(regs.stencil_back_func_func)); + back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op.fail)); + back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op.zfail)); + back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op.zpass)); + back.test_func.Assign(PackComparisonOp(regs.stencil_back_op.func)); } else { back.action_stencil_fail.Assign(front.action_stencil_fail); back.action_depth_fail.Assign(front.action_depth_fail); @@ -222,9 +228,9 @@ void FixedPipelineState::DynamicState::Refresh(const Maxwell& regs) { depth_test_enable.Assign(regs.depth_test_enable); front_face.Assign(packed_front_face); depth_test_func.Assign(PackComparisonOp(regs.depth_test_func)); - cull_face.Assign(PackCullFace(regs.cull_face)); - cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0); - std::ranges::transform(regs.vertex_array, vertex_strides.begin(), [](const auto& array) { + cull_face.Assign(PackCullFace(regs.gl_cull_face)); + cull_enable.Assign(regs.gl_cull_test_enabled != 0 ? 1 : 0); + std::ranges::transform(regs.vertex_streams, vertex_strides.begin(), [](const auto& array) { return static_cast<u16>(array.stride.Value()); }); } @@ -251,41 +257,42 @@ Maxwell::ComparisonOp FixedPipelineState::UnpackComparisonOp(u32 packed) noexcep return static_cast<Maxwell::ComparisonOp>(packed + 1); } -u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp op) noexcept { +u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp::Op op) noexcept { switch (op) { - case Maxwell::StencilOp::Keep: - case Maxwell::StencilOp::KeepOGL: + case Maxwell::StencilOp::Op::Keep_D3D: + case Maxwell::StencilOp::Op::Keep_GL: return 0; - case Maxwell::StencilOp::Zero: - case Maxwell::StencilOp::ZeroOGL: + case Maxwell::StencilOp::Op::Zero_D3D: + case Maxwell::StencilOp::Op::Zero_GL: return 1; - case Maxwell::StencilOp::Replace: - case Maxwell::StencilOp::ReplaceOGL: + case Maxwell::StencilOp::Op::Replace_D3D: + case Maxwell::StencilOp::Op::Replace_GL: return 2; - case Maxwell::StencilOp::Incr: - case Maxwell::StencilOp::IncrOGL: + case Maxwell::StencilOp::Op::IncrSaturate_D3D: + case Maxwell::StencilOp::Op::IncrSaturate_GL: return 3; - case Maxwell::StencilOp::Decr: - case Maxwell::StencilOp::DecrOGL: + case Maxwell::StencilOp::Op::DecrSaturate_D3D: + case Maxwell::StencilOp::Op::DecrSaturate_GL: return 4; - case Maxwell::StencilOp::Invert: - case Maxwell::StencilOp::InvertOGL: + case Maxwell::StencilOp::Op::Invert_D3D: + case Maxwell::StencilOp::Op::Invert_GL: return 5; - case Maxwell::StencilOp::IncrWrap: - case Maxwell::StencilOp::IncrWrapOGL: + case Maxwell::StencilOp::Op::Incr_D3D: + case Maxwell::StencilOp::Op::Incr_GL: return 6; - case Maxwell::StencilOp::DecrWrap: - case Maxwell::StencilOp::DecrWrapOGL: + case Maxwell::StencilOp::Op::Decr_D3D: + case Maxwell::StencilOp::Op::Decr_GL: return 7; } return 0; } -Maxwell::StencilOp 
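
Pack/Unpack pairs like these squeeze a sparse two-alias enum into three bits of the pipeline key, and unpacking always yields the _D3D alias, so keys built from either encoding hash and compare equal. A minimal round-trip sketch with illustrative values:

#include <array>
#include <cstdint>

// Illustrative encodings: the D3D values are small integers, the GL values
// are GLenum-sized, so the enum is sparse and needs an explicit mapping.
enum class Op : std::uint32_t { Keep_D3D = 1, Zero_D3D = 2, Keep_GL = 0x1E00, Zero_GL = 0 };

constexpr std::uint32_t Pack(Op op) {
    switch (op) {
    case Op::Keep_D3D:
    case Op::Keep_GL:
        return 0;
    case Op::Zero_D3D:
    case Op::Zero_GL:
        return 1;
    }
    return 0;
}

// Unpack returns the canonical _D3D alias for every packed index.
constexpr std::array LUT = {Op::Keep_D3D, Op::Zero_D3D};
constexpr Op Unpack(std::uint32_t packed) {
    return LUT[packed];
}

static_assert(Unpack(Pack(Op::Keep_GL)) == Op::Keep_D3D); // aliases normalize
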
FixedPipelineState::UnpackStencilOp(u32 packed) noexcept { - static constexpr std::array LUT = {Maxwell::StencilOp::Keep, Maxwell::StencilOp::Zero, - Maxwell::StencilOp::Replace, Maxwell::StencilOp::Incr, - Maxwell::StencilOp::Decr, Maxwell::StencilOp::Invert, - Maxwell::StencilOp::IncrWrap, Maxwell::StencilOp::DecrWrap}; +Maxwell::StencilOp::Op FixedPipelineState::UnpackStencilOp(u32 packed) noexcept { + static constexpr std::array LUT = { + Maxwell::StencilOp::Op::Keep_D3D, Maxwell::StencilOp::Op::Zero_D3D, + Maxwell::StencilOp::Op::Replace_D3D, Maxwell::StencilOp::Op::IncrSaturate_D3D, + Maxwell::StencilOp::Op::DecrSaturate_D3D, Maxwell::StencilOp::Op::Invert_D3D, + Maxwell::StencilOp::Op::Incr_D3D, Maxwell::StencilOp::Op::Decr_D3D}; return LUT[packed]; } @@ -318,30 +325,30 @@ Maxwell::PolygonMode FixedPipelineState::UnpackPolygonMode(u32 packed) noexcept return static_cast<Maxwell::PolygonMode>(packed + 0x1B00); } -u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOperation op) noexcept { +u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOp::Op op) noexcept { return static_cast<u32>(op) - 0x1500; } -Maxwell::LogicOperation FixedPipelineState::UnpackLogicOp(u32 packed) noexcept { - return static_cast<Maxwell::LogicOperation>(packed + 0x1500); +Maxwell::LogicOp::Op FixedPipelineState::UnpackLogicOp(u32 packed) noexcept { + return static_cast<Maxwell::LogicOp::Op>(packed + 0x1500); } u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noexcept { switch (equation) { - case Maxwell::Blend::Equation::Add: - case Maxwell::Blend::Equation::AddGL: + case Maxwell::Blend::Equation::Add_D3D: + case Maxwell::Blend::Equation::Add_GL: return 0; - case Maxwell::Blend::Equation::Subtract: - case Maxwell::Blend::Equation::SubtractGL: + case Maxwell::Blend::Equation::Subtract_D3D: + case Maxwell::Blend::Equation::Subtract_GL: return 1; - case Maxwell::Blend::Equation::ReverseSubtract: - case Maxwell::Blend::Equation::ReverseSubtractGL: + case Maxwell::Blend::Equation::ReverseSubtract_D3D: + case Maxwell::Blend::Equation::ReverseSubtract_GL: return 2; - case Maxwell::Blend::Equation::Min: - case Maxwell::Blend::Equation::MinGL: + case Maxwell::Blend::Equation::Min_D3D: + case Maxwell::Blend::Equation::Min_GL: return 3; - case Maxwell::Blend::Equation::Max: - case Maxwell::Blend::Equation::MaxGL: + case Maxwell::Blend::Equation::Max_D3D: + case Maxwell::Blend::Equation::Max_GL: return 4; } return 0; @@ -349,97 +356,99 @@ u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noe Maxwell::Blend::Equation FixedPipelineState::UnpackBlendEquation(u32 packed) noexcept { static constexpr std::array LUT = { - Maxwell::Blend::Equation::Add, Maxwell::Blend::Equation::Subtract, - Maxwell::Blend::Equation::ReverseSubtract, Maxwell::Blend::Equation::Min, - Maxwell::Blend::Equation::Max}; + Maxwell::Blend::Equation::Add_D3D, Maxwell::Blend::Equation::Subtract_D3D, + Maxwell::Blend::Equation::ReverseSubtract_D3D, Maxwell::Blend::Equation::Min_D3D, + Maxwell::Blend::Equation::Max_D3D}; return LUT[packed]; } u32 FixedPipelineState::PackBlendFactor(Maxwell::Blend::Factor factor) noexcept { switch (factor) { - case Maxwell::Blend::Factor::Zero: - case Maxwell::Blend::Factor::ZeroGL: + case Maxwell::Blend::Factor::Zero_D3D: + case Maxwell::Blend::Factor::Zero_GL: return 0; - case Maxwell::Blend::Factor::One: - case Maxwell::Blend::Factor::OneGL: + case Maxwell::Blend::Factor::One_D3D: + case Maxwell::Blend::Factor::One_GL: return 1; - case Maxwell::Blend::Factor::SourceColor: - 
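
Where the hardware encoding is already dense, as with the logic ops at 0x1500 and the polygon modes at GL_POINT = 0x1B00, packing degenerates to subtracting the base value. A sketch of that offset scheme:

#include <cstdint>

enum class PolygonMode : std::uint32_t { Point = 0x1B00, Line = 0x1B01, Fill = 0x1B02 };

// The encoding is contiguous from GL_POINT, so two bits of the key suffice.
constexpr std::uint32_t Pack(PolygonMode mode) {
    return static_cast<std::uint32_t>(mode) - 0x1B00;
}
constexpr PolygonMode Unpack(std::uint32_t packed) {
    return static_cast<PolygonMode>(packed + 0x1B00);
}

static_assert(Unpack(Pack(PolygonMode::Line)) == PolygonMode::Line);
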
case Maxwell::Blend::Factor::SourceColorGL: + case Maxwell::Blend::Factor::SourceColor_D3D: + case Maxwell::Blend::Factor::SourceColor_GL: return 2; - case Maxwell::Blend::Factor::OneMinusSourceColor: - case Maxwell::Blend::Factor::OneMinusSourceColorGL: + case Maxwell::Blend::Factor::OneMinusSourceColor_D3D: + case Maxwell::Blend::Factor::OneMinusSourceColor_GL: return 3; - case Maxwell::Blend::Factor::SourceAlpha: - case Maxwell::Blend::Factor::SourceAlphaGL: + case Maxwell::Blend::Factor::SourceAlpha_D3D: + case Maxwell::Blend::Factor::SourceAlpha_GL: return 4; - case Maxwell::Blend::Factor::OneMinusSourceAlpha: - case Maxwell::Blend::Factor::OneMinusSourceAlphaGL: + case Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusSourceAlpha_GL: return 5; - case Maxwell::Blend::Factor::DestAlpha: - case Maxwell::Blend::Factor::DestAlphaGL: + case Maxwell::Blend::Factor::DestAlpha_D3D: + case Maxwell::Blend::Factor::DestAlpha_GL: return 6; - case Maxwell::Blend::Factor::OneMinusDestAlpha: - case Maxwell::Blend::Factor::OneMinusDestAlphaGL: + case Maxwell::Blend::Factor::OneMinusDestAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusDestAlpha_GL: return 7; - case Maxwell::Blend::Factor::DestColor: - case Maxwell::Blend::Factor::DestColorGL: + case Maxwell::Blend::Factor::DestColor_D3D: + case Maxwell::Blend::Factor::DestColor_GL: return 8; - case Maxwell::Blend::Factor::OneMinusDestColor: - case Maxwell::Blend::Factor::OneMinusDestColorGL: + case Maxwell::Blend::Factor::OneMinusDestColor_D3D: + case Maxwell::Blend::Factor::OneMinusDestColor_GL: return 9; - case Maxwell::Blend::Factor::SourceAlphaSaturate: - case Maxwell::Blend::Factor::SourceAlphaSaturateGL: + case Maxwell::Blend::Factor::SourceAlphaSaturate_D3D: + case Maxwell::Blend::Factor::SourceAlphaSaturate_GL: return 10; - case Maxwell::Blend::Factor::Source1Color: - case Maxwell::Blend::Factor::Source1ColorGL: + case Maxwell::Blend::Factor::Source1Color_D3D: + case Maxwell::Blend::Factor::Source1Color_GL: return 11; - case Maxwell::Blend::Factor::OneMinusSource1Color: - case Maxwell::Blend::Factor::OneMinusSource1ColorGL: + case Maxwell::Blend::Factor::OneMinusSource1Color_D3D: + case Maxwell::Blend::Factor::OneMinusSource1Color_GL: return 12; - case Maxwell::Blend::Factor::Source1Alpha: - case Maxwell::Blend::Factor::Source1AlphaGL: + case Maxwell::Blend::Factor::Source1Alpha_D3D: + case Maxwell::Blend::Factor::Source1Alpha_GL: return 13; - case Maxwell::Blend::Factor::OneMinusSource1Alpha: - case Maxwell::Blend::Factor::OneMinusSource1AlphaGL: + case Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D: + case Maxwell::Blend::Factor::OneMinusSource1Alpha_GL: return 14; - case Maxwell::Blend::Factor::ConstantColor: - case Maxwell::Blend::Factor::ConstantColorGL: + case Maxwell::Blend::Factor::BlendFactor_D3D: + case Maxwell::Blend::Factor::ConstantColor_GL: return 15; - case Maxwell::Blend::Factor::OneMinusConstantColor: - case Maxwell::Blend::Factor::OneMinusConstantColorGL: + case Maxwell::Blend::Factor::OneMinusBlendFactor_D3D: + case Maxwell::Blend::Factor::OneMinusConstantColor_GL: return 16; - case Maxwell::Blend::Factor::ConstantAlpha: - case Maxwell::Blend::Factor::ConstantAlphaGL: + case Maxwell::Blend::Factor::BothSourceAlpha_D3D: + case Maxwell::Blend::Factor::ConstantAlpha_GL: return 17; - case Maxwell::Blend::Factor::OneMinusConstantAlpha: - case Maxwell::Blend::Factor::OneMinusConstantAlphaGL: + case Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D: + case 
Maxwell::Blend::Factor::OneMinusConstantAlpha_GL: return 18; } + UNIMPLEMENTED_MSG("Unknown blend factor {}", static_cast<u32>(factor)); return 0; } Maxwell::Blend::Factor FixedPipelineState::UnpackBlendFactor(u32 packed) noexcept { static constexpr std::array LUT = { - Maxwell::Blend::Factor::Zero, - Maxwell::Blend::Factor::One, - Maxwell::Blend::Factor::SourceColor, - Maxwell::Blend::Factor::OneMinusSourceColor, - Maxwell::Blend::Factor::SourceAlpha, - Maxwell::Blend::Factor::OneMinusSourceAlpha, - Maxwell::Blend::Factor::DestAlpha, - Maxwell::Blend::Factor::OneMinusDestAlpha, - Maxwell::Blend::Factor::DestColor, - Maxwell::Blend::Factor::OneMinusDestColor, - Maxwell::Blend::Factor::SourceAlphaSaturate, - Maxwell::Blend::Factor::Source1Color, - Maxwell::Blend::Factor::OneMinusSource1Color, - Maxwell::Blend::Factor::Source1Alpha, - Maxwell::Blend::Factor::OneMinusSource1Alpha, - Maxwell::Blend::Factor::ConstantColor, - Maxwell::Blend::Factor::OneMinusConstantColor, - Maxwell::Blend::Factor::ConstantAlpha, - Maxwell::Blend::Factor::OneMinusConstantAlpha, + Maxwell::Blend::Factor::Zero_D3D, + Maxwell::Blend::Factor::One_D3D, + Maxwell::Blend::Factor::SourceColor_D3D, + Maxwell::Blend::Factor::OneMinusSourceColor_D3D, + Maxwell::Blend::Factor::SourceAlpha_D3D, + Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D, + Maxwell::Blend::Factor::DestAlpha_D3D, + Maxwell::Blend::Factor::OneMinusDestAlpha_D3D, + Maxwell::Blend::Factor::DestColor_D3D, + Maxwell::Blend::Factor::OneMinusDestColor_D3D, + Maxwell::Blend::Factor::SourceAlphaSaturate_D3D, + Maxwell::Blend::Factor::Source1Color_D3D, + Maxwell::Blend::Factor::OneMinusSource1Color_D3D, + Maxwell::Blend::Factor::Source1Alpha_D3D, + Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D, + Maxwell::Blend::Factor::BlendFactor_D3D, + Maxwell::Blend::Factor::OneMinusBlendFactor_D3D, + Maxwell::Blend::Factor::BothSourceAlpha_D3D, + Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D, }; + ASSERT(packed < LUT.size()); return LUT[packed]; } diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h index 9d60756e5..43441209c 100644 --- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h @@ -21,8 +21,8 @@ struct FixedPipelineState { static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept; static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept; - static u32 PackStencilOp(Maxwell::StencilOp op) noexcept; - static Maxwell::StencilOp UnpackStencilOp(u32 packed) noexcept; + static u32 PackStencilOp(Maxwell::StencilOp::Op op) noexcept; + static Maxwell::StencilOp::Op UnpackStencilOp(u32 packed) noexcept; static u32 PackCullFace(Maxwell::CullFace cull) noexcept; static Maxwell::CullFace UnpackCullFace(u32 packed) noexcept; @@ -33,8 +33,8 @@ struct FixedPipelineState { static u32 PackPolygonMode(Maxwell::PolygonMode mode) noexcept; static Maxwell::PolygonMode UnpackPolygonMode(u32 packed) noexcept; - static u32 PackLogicOp(Maxwell::LogicOperation op) noexcept; - static Maxwell::LogicOperation UnpackLogicOp(u32 packed) noexcept; + static u32 PackLogicOp(Maxwell::LogicOp::Op op) noexcept; + static Maxwell::LogicOp::Op UnpackLogicOp(u32 packed) noexcept; static u32 PackBlendEquation(Maxwell::Blend::Equation equation) noexcept; static Maxwell::Blend::Equation UnpackBlendEquation(u32 packed) noexcept; @@ -113,15 +113,15 @@ struct FixedPipelineState { BitField<Position + 6, 3, u32> action_depth_pass; BitField<Position + 9, 3, 
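
The StencilFace template packs four stencil fields into a dozen bits starting at a compile-time Position, which is how the front and back faces share the key's raw words. A hand-rolled sketch of the same packing, without the project's BitField helper:

#include <cstddef>
#include <cstdint>

template <std::size_t Position>
struct StencilFace {
    std::uint32_t raw = 0;

    // Three bits per packed stencil action, mirroring PackStencilOp above.
    constexpr void SetFail(std::uint32_t v) { Put<Position + 0, 3>(v); }
    constexpr void SetZFail(std::uint32_t v) { Put<Position + 3, 3>(v); }
    constexpr void SetZPass(std::uint32_t v) { Put<Position + 6, 3>(v); }
    constexpr std::uint32_t Fail() const { return Get<Position + 0, 3>(); }

private:
    template <std::size_t Pos, std::size_t Bits>
    constexpr void Put(std::uint32_t v) {
        constexpr std::uint32_t mask = ((1u << Bits) - 1u) << Pos;
        raw = (raw & ~mask) | ((v << Pos) & mask);
    }
    template <std::size_t Pos, std::size_t Bits>
    constexpr std::uint32_t Get() const {
        return (raw >> Pos) & ((1u << Bits) - 1u);
    }
};

static_assert([] {
    StencilFace<16> face; // back face at bit offset 16, for example
    face.SetFail(5);
    return face.Fail() == 5;
}());
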
u32> test_func; - Maxwell::StencilOp ActionStencilFail() const noexcept { + Maxwell::StencilOp::Op ActionStencilFail() const noexcept { return UnpackStencilOp(action_stencil_fail); } - Maxwell::StencilOp ActionDepthFail() const noexcept { + Maxwell::StencilOp::Op ActionDepthFail() const noexcept { return UnpackStencilOp(action_depth_fail); } - Maxwell::StencilOp ActionDepthPass() const noexcept { + Maxwell::StencilOp::Op ActionDepthPass() const noexcept { return UnpackStencilOp(action_depth_pass); } @@ -193,7 +193,6 @@ struct FixedPipelineState { BitField<6, 5, u32> depth_format; BitField<11, 1, u32> y_negate; BitField<12, 1, u32> provoking_vertex_last; - BitField<13, 1, u32> conservative_raster_enable; BitField<14, 1, u32> smooth_lines; }; std::array<u8, Maxwell::NumRenderTargets> color_formats; diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index bdb71dc53..5c156087b 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -184,7 +184,7 @@ struct FormatTuple { {VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM - {VK_FORMAT_R4G4_UNORM_PACK8}, // R4G4_UNORM + {VK_FORMAT_R4G4_UNORM_PACK8}, // G4R4_UNORM {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB @@ -196,6 +196,8 @@ struct FormatTuple { {VK_FORMAT_ASTC_6x6_UNORM_BLOCK}, // ASTC_2D_6X6_UNORM {VK_FORMAT_ASTC_6x6_SRGB_BLOCK}, // ASTC_2D_6X6_SRGB {VK_FORMAT_ASTC_10x6_UNORM_BLOCK}, // ASTC_2D_10X6_UNORM + {VK_FORMAT_ASTC_10x5_UNORM_BLOCK}, // ASTC_2D_10X5_UNORM + {VK_FORMAT_ASTC_10x5_SRGB_BLOCK}, // ASTC_2D_10X5_SRGB {VK_FORMAT_ASTC_10x10_UNORM_BLOCK}, // ASTC_2D_10X10_UNORM {VK_FORMAT_ASTC_10x10_SRGB_BLOCK}, // ASTC_2D_10X10_SRGB {VK_FORMAT_ASTC_12x12_UNORM_BLOCK}, // ASTC_2D_12X12_UNORM @@ -321,161 +323,182 @@ VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) { const VkFormat format{([&]() { switch (type) { - case Maxwell::VertexAttribute::Type::UnsignedNorm: + case Maxwell::VertexAttribute::Type::UnusedEnumDoNotUseBecauseItWillGoAway: + ASSERT_MSG(false, "Invalid vertex attribute type!"); + break; + case Maxwell::VertexAttribute::Type::UNorm: switch (size) { - case Maxwell::VertexAttribute::Size::Size_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: return VK_FORMAT_R8_UNORM; - case Maxwell::VertexAttribute::Size::Size_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: return VK_FORMAT_R8G8_UNORM; - case Maxwell::VertexAttribute::Size::Size_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: return VK_FORMAT_R8G8B8_UNORM; - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return VK_FORMAT_R8G8B8A8_UNORM; - case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_R16: return VK_FORMAT_R16_UNORM; - case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: return VK_FORMAT_R16G16_UNORM; - case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: return VK_FORMAT_R16G16B16_UNORM; - case 
Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return VK_FORMAT_R16G16B16A16_UNORM; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return VK_FORMAT_A2B10G10R10_UNORM_PACK32; default: break; } break; - case Maxwell::VertexAttribute::Type::SignedNorm: + case Maxwell::VertexAttribute::Type::SNorm: switch (size) { - case Maxwell::VertexAttribute::Size::Size_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: return VK_FORMAT_R8_SNORM; - case Maxwell::VertexAttribute::Size::Size_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: return VK_FORMAT_R8G8_SNORM; - case Maxwell::VertexAttribute::Size::Size_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: return VK_FORMAT_R8G8B8_SNORM; - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return VK_FORMAT_R8G8B8A8_SNORM; - case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_R16: return VK_FORMAT_R16_SNORM; - case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: return VK_FORMAT_R16G16_SNORM; - case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: return VK_FORMAT_R16G16B16_SNORM; - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return VK_FORMAT_R16G16B16A16_SNORM; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return VK_FORMAT_A2B10G10R10_SNORM_PACK32; default: break; } break; - case Maxwell::VertexAttribute::Type::UnsignedScaled: + case Maxwell::VertexAttribute::Type::UScaled: switch (size) { - case Maxwell::VertexAttribute::Size::Size_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: return VK_FORMAT_R8_USCALED; - case Maxwell::VertexAttribute::Size::Size_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: return VK_FORMAT_R8G8_USCALED; - case Maxwell::VertexAttribute::Size::Size_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: return VK_FORMAT_R8G8B8_USCALED; - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return VK_FORMAT_R8G8B8A8_USCALED; - case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_R16: return VK_FORMAT_R16_USCALED; - case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: return VK_FORMAT_R16G16_USCALED; - case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: return VK_FORMAT_R16G16B16_USCALED; - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return VK_FORMAT_R16G16B16A16_USCALED; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return VK_FORMAT_A2B10G10R10_USCALED_PACK32; default: break; } break; - case Maxwell::VertexAttribute::Type::SignedScaled: + case Maxwell::VertexAttribute::Type::SScaled: switch 
(size) { - case Maxwell::VertexAttribute::Size::Size_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: return VK_FORMAT_R8_SSCALED; - case Maxwell::VertexAttribute::Size::Size_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: return VK_FORMAT_R8G8_SSCALED; - case Maxwell::VertexAttribute::Size::Size_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: return VK_FORMAT_R8G8B8_SSCALED; - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return VK_FORMAT_R8G8B8A8_SSCALED; - case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_R16: return VK_FORMAT_R16_SSCALED; - case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: return VK_FORMAT_R16G16_SSCALED; - case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: return VK_FORMAT_R16G16B16_SSCALED; - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return VK_FORMAT_R16G16B16A16_SSCALED; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return VK_FORMAT_A2B10G10R10_SSCALED_PACK32; default: break; } break; - case Maxwell::VertexAttribute::Type::UnsignedInt: + case Maxwell::VertexAttribute::Type::UInt: switch (size) { - case Maxwell::VertexAttribute::Size::Size_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: return VK_FORMAT_R8_UINT; - case Maxwell::VertexAttribute::Size::Size_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: return VK_FORMAT_R8G8_UINT; - case Maxwell::VertexAttribute::Size::Size_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: return VK_FORMAT_R8G8B8_UINT; - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return VK_FORMAT_R8G8B8A8_UINT; - case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_R16: return VK_FORMAT_R16_UINT; - case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: return VK_FORMAT_R16G16_UINT; - case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: return VK_FORMAT_R16G16B16_UINT; - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return VK_FORMAT_R16G16B16A16_UINT; - case Maxwell::VertexAttribute::Size::Size_32: + case Maxwell::VertexAttribute::Size::Size_R32: return VK_FORMAT_R32_UINT; - case Maxwell::VertexAttribute::Size::Size_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32: return VK_FORMAT_R32G32_UINT; - case Maxwell::VertexAttribute::Size::Size_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32: return VK_FORMAT_R32G32B32_UINT; - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32: return VK_FORMAT_R32G32B32A32_UINT; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return VK_FORMAT_A2B10G10R10_UINT_PACK32; default: break; } break; - case 
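
Maxwell vertex sizes with no exact Vulkan counterpart, such as Size_A8, Size_G8_R8 and Size_X8_B8_G8_R8, are folded onto the nearest component-compatible VkFormat, tolerating the component-order difference. A sketch of that normalization with a hypothetical helper (not part of the diff):

enum class Size { R8, A8, G8_R8, R8_G8, X8_B8_G8_R8, R8_G8_B8_A8 };

// Fold ordering/padding variants onto the canonical size that has a direct
// VkFormat; whether the component swap matters downstream is out of scope.
constexpr Size Canonical(Size size) {
    switch (size) {
    case Size::A8:
        return Size::R8;
    case Size::G8_R8:
        return Size::R8_G8;
    case Size::X8_B8_G8_R8:
        return Size::R8_G8_B8_A8;
    default:
        return size;
    }
}

static_assert(Canonical(Size::G8_R8) == Size::R8_G8);
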
Maxwell::VertexAttribute::Type::SignedInt: + case Maxwell::VertexAttribute::Type::SInt: switch (size) { - case Maxwell::VertexAttribute::Size::Size_8: + case Maxwell::VertexAttribute::Size::Size_R8: + case Maxwell::VertexAttribute::Size::Size_A8: return VK_FORMAT_R8_SINT; - case Maxwell::VertexAttribute::Size::Size_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8: + case Maxwell::VertexAttribute::Size::Size_G8_R8: return VK_FORMAT_R8G8_SINT; - case Maxwell::VertexAttribute::Size::Size_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8: return VK_FORMAT_R8G8B8_SINT; - case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8: + case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8: return VK_FORMAT_R8G8B8A8_SINT; - case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_R16: return VK_FORMAT_R16_SINT; - case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: return VK_FORMAT_R16G16_SINT; - case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: return VK_FORMAT_R16G16B16_SINT; - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return VK_FORMAT_R16G16B16A16_SINT; - case Maxwell::VertexAttribute::Size::Size_32: + case Maxwell::VertexAttribute::Size::Size_R32: return VK_FORMAT_R32_SINT; - case Maxwell::VertexAttribute::Size::Size_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32: return VK_FORMAT_R32G32_SINT; - case Maxwell::VertexAttribute::Size::Size_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32: return VK_FORMAT_R32G32B32_SINT; - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32: return VK_FORMAT_R32G32B32A32_SINT; - case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10: return VK_FORMAT_A2B10G10R10_SINT_PACK32; default: break; @@ -483,23 +506,23 @@ VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type, break; case Maxwell::VertexAttribute::Type::Float: switch (size) { - case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_R16: return VK_FORMAT_R16_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16: return VK_FORMAT_R16G16_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16: return VK_FORMAT_R16G16B16_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16: return VK_FORMAT_R16G16B16A16_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_32: + case Maxwell::VertexAttribute::Size::Size_R32: return VK_FORMAT_R32_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32: return VK_FORMAT_R32G32_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32: return VK_FORMAT_R32G32B32_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32: return VK_FORMAT_R32G32B32A32_SFLOAT; - case Maxwell::VertexAttribute::Size::Size_11_11_10: + case Maxwell::VertexAttribute::Size::Size_B10_G11_R11: return VK_FORMAT_B10G11R11_UFLOAT_PACK32; default: break; @@ -519,29 
+542,29 @@ VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type, VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison) { switch (comparison) { - case Maxwell::ComparisonOp::Never: - case Maxwell::ComparisonOp::NeverOld: + case Maxwell::ComparisonOp::Never_D3D: + case Maxwell::ComparisonOp::Never_GL: return VK_COMPARE_OP_NEVER; - case Maxwell::ComparisonOp::Less: - case Maxwell::ComparisonOp::LessOld: + case Maxwell::ComparisonOp::Less_D3D: + case Maxwell::ComparisonOp::Less_GL: return VK_COMPARE_OP_LESS; - case Maxwell::ComparisonOp::Equal: - case Maxwell::ComparisonOp::EqualOld: + case Maxwell::ComparisonOp::Equal_D3D: + case Maxwell::ComparisonOp::Equal_GL: return VK_COMPARE_OP_EQUAL; - case Maxwell::ComparisonOp::LessEqual: - case Maxwell::ComparisonOp::LessEqualOld: + case Maxwell::ComparisonOp::LessEqual_D3D: + case Maxwell::ComparisonOp::LessEqual_GL: return VK_COMPARE_OP_LESS_OR_EQUAL; - case Maxwell::ComparisonOp::Greater: - case Maxwell::ComparisonOp::GreaterOld: + case Maxwell::ComparisonOp::Greater_D3D: + case Maxwell::ComparisonOp::Greater_GL: return VK_COMPARE_OP_GREATER; - case Maxwell::ComparisonOp::NotEqual: - case Maxwell::ComparisonOp::NotEqualOld: + case Maxwell::ComparisonOp::NotEqual_D3D: + case Maxwell::ComparisonOp::NotEqual_GL: return VK_COMPARE_OP_NOT_EQUAL; - case Maxwell::ComparisonOp::GreaterEqual: - case Maxwell::ComparisonOp::GreaterEqualOld: + case Maxwell::ComparisonOp::GreaterEqual_D3D: + case Maxwell::ComparisonOp::GreaterEqual_GL: return VK_COMPARE_OP_GREATER_OR_EQUAL; - case Maxwell::ComparisonOp::Always: - case Maxwell::ComparisonOp::AlwaysOld: + case Maxwell::ComparisonOp::Always_D3D: + case Maxwell::ComparisonOp::Always_GL: return VK_COMPARE_OP_ALWAYS; } UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison); @@ -561,31 +584,31 @@ VkIndexType IndexFormat(Maxwell::IndexFormat index_format) { return {}; } -VkStencilOp StencilOp(Maxwell::StencilOp stencil_op) { +VkStencilOp StencilOp(Maxwell::StencilOp::Op stencil_op) { switch (stencil_op) { - case Maxwell::StencilOp::Keep: - case Maxwell::StencilOp::KeepOGL: + case Maxwell::StencilOp::Op::Keep_D3D: + case Maxwell::StencilOp::Op::Keep_GL: return VK_STENCIL_OP_KEEP; - case Maxwell::StencilOp::Zero: - case Maxwell::StencilOp::ZeroOGL: + case Maxwell::StencilOp::Op::Zero_D3D: + case Maxwell::StencilOp::Op::Zero_GL: return VK_STENCIL_OP_ZERO; - case Maxwell::StencilOp::Replace: - case Maxwell::StencilOp::ReplaceOGL: + case Maxwell::StencilOp::Op::Replace_D3D: + case Maxwell::StencilOp::Op::Replace_GL: return VK_STENCIL_OP_REPLACE; - case Maxwell::StencilOp::Incr: - case Maxwell::StencilOp::IncrOGL: + case Maxwell::StencilOp::Op::IncrSaturate_D3D: + case Maxwell::StencilOp::Op::IncrSaturate_GL: return VK_STENCIL_OP_INCREMENT_AND_CLAMP; - case Maxwell::StencilOp::Decr: - case Maxwell::StencilOp::DecrOGL: + case Maxwell::StencilOp::Op::DecrSaturate_D3D: + case Maxwell::StencilOp::Op::DecrSaturate_GL: return VK_STENCIL_OP_DECREMENT_AND_CLAMP; - case Maxwell::StencilOp::Invert: - case Maxwell::StencilOp::InvertOGL: + case Maxwell::StencilOp::Op::Invert_D3D: + case Maxwell::StencilOp::Op::Invert_GL: return VK_STENCIL_OP_INVERT; - case Maxwell::StencilOp::IncrWrap: - case Maxwell::StencilOp::IncrWrapOGL: + case Maxwell::StencilOp::Op::Incr_D3D: + case Maxwell::StencilOp::Op::Incr_GL: return VK_STENCIL_OP_INCREMENT_AND_WRAP; - case Maxwell::StencilOp::DecrWrap: - case Maxwell::StencilOp::DecrWrapOGL: + case Maxwell::StencilOp::Op::Decr_D3D: + case 
Maxwell::StencilOp::Op::Decr_GL: return VK_STENCIL_OP_DECREMENT_AND_WRAP; } UNIMPLEMENTED_MSG("Unimplemented stencil op={}", stencil_op); @@ -594,20 +617,20 @@ VkStencilOp StencilOp(Maxwell::StencilOp stencil_op) { VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) { switch (equation) { - case Maxwell::Blend::Equation::Add: - case Maxwell::Blend::Equation::AddGL: + case Maxwell::Blend::Equation::Add_D3D: + case Maxwell::Blend::Equation::Add_GL: return VK_BLEND_OP_ADD; - case Maxwell::Blend::Equation::Subtract: - case Maxwell::Blend::Equation::SubtractGL: + case Maxwell::Blend::Equation::Subtract_D3D: + case Maxwell::Blend::Equation::Subtract_GL: return VK_BLEND_OP_SUBTRACT; - case Maxwell::Blend::Equation::ReverseSubtract: - case Maxwell::Blend::Equation::ReverseSubtractGL: + case Maxwell::Blend::Equation::ReverseSubtract_D3D: + case Maxwell::Blend::Equation::ReverseSubtract_GL: return VK_BLEND_OP_REVERSE_SUBTRACT; - case Maxwell::Blend::Equation::Min: - case Maxwell::Blend::Equation::MinGL: + case Maxwell::Blend::Equation::Min_D3D: + case Maxwell::Blend::Equation::Min_GL: return VK_BLEND_OP_MIN; - case Maxwell::Blend::Equation::Max: - case Maxwell::Blend::Equation::MaxGL: + case Maxwell::Blend::Equation::Max_D3D: + case Maxwell::Blend::Equation::Max_GL: return VK_BLEND_OP_MAX; } UNIMPLEMENTED_MSG("Unimplemented blend equation={}", equation); @@ -616,62 +639,62 @@ VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) { VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor) { switch (factor) { - case Maxwell::Blend::Factor::Zero: - case Maxwell::Blend::Factor::ZeroGL: + case Maxwell::Blend::Factor::Zero_D3D: + case Maxwell::Blend::Factor::Zero_GL: return VK_BLEND_FACTOR_ZERO; - case Maxwell::Blend::Factor::One: - case Maxwell::Blend::Factor::OneGL: + case Maxwell::Blend::Factor::One_D3D: + case Maxwell::Blend::Factor::One_GL: return VK_BLEND_FACTOR_ONE; - case Maxwell::Blend::Factor::SourceColor: - case Maxwell::Blend::Factor::SourceColorGL: + case Maxwell::Blend::Factor::SourceColor_D3D: + case Maxwell::Blend::Factor::SourceColor_GL: return VK_BLEND_FACTOR_SRC_COLOR; - case Maxwell::Blend::Factor::OneMinusSourceColor: - case Maxwell::Blend::Factor::OneMinusSourceColorGL: + case Maxwell::Blend::Factor::OneMinusSourceColor_D3D: + case Maxwell::Blend::Factor::OneMinusSourceColor_GL: return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; - case Maxwell::Blend::Factor::SourceAlpha: - case Maxwell::Blend::Factor::SourceAlphaGL: + case Maxwell::Blend::Factor::SourceAlpha_D3D: + case Maxwell::Blend::Factor::SourceAlpha_GL: return VK_BLEND_FACTOR_SRC_ALPHA; - case Maxwell::Blend::Factor::OneMinusSourceAlpha: - case Maxwell::Blend::Factor::OneMinusSourceAlphaGL: + case Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusSourceAlpha_GL: return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; - case Maxwell::Blend::Factor::DestAlpha: - case Maxwell::Blend::Factor::DestAlphaGL: + case Maxwell::Blend::Factor::DestAlpha_D3D: + case Maxwell::Blend::Factor::DestAlpha_GL: return VK_BLEND_FACTOR_DST_ALPHA; - case Maxwell::Blend::Factor::OneMinusDestAlpha: - case Maxwell::Blend::Factor::OneMinusDestAlphaGL: + case Maxwell::Blend::Factor::OneMinusDestAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusDestAlpha_GL: return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA; - case Maxwell::Blend::Factor::DestColor: - case Maxwell::Blend::Factor::DestColorGL: + case Maxwell::Blend::Factor::DestColor_D3D: + case Maxwell::Blend::Factor::DestColor_GL: return VK_BLEND_FACTOR_DST_COLOR; - case 
Maxwell::Blend::Factor::OneMinusDestColor: - case Maxwell::Blend::Factor::OneMinusDestColorGL: + case Maxwell::Blend::Factor::OneMinusDestColor_D3D: + case Maxwell::Blend::Factor::OneMinusDestColor_GL: return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR; - case Maxwell::Blend::Factor::SourceAlphaSaturate: - case Maxwell::Blend::Factor::SourceAlphaSaturateGL: + case Maxwell::Blend::Factor::SourceAlphaSaturate_D3D: + case Maxwell::Blend::Factor::SourceAlphaSaturate_GL: return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE; - case Maxwell::Blend::Factor::Source1Color: - case Maxwell::Blend::Factor::Source1ColorGL: + case Maxwell::Blend::Factor::Source1Color_D3D: + case Maxwell::Blend::Factor::Source1Color_GL: return VK_BLEND_FACTOR_SRC1_COLOR; - case Maxwell::Blend::Factor::OneMinusSource1Color: - case Maxwell::Blend::Factor::OneMinusSource1ColorGL: + case Maxwell::Blend::Factor::OneMinusSource1Color_D3D: + case Maxwell::Blend::Factor::OneMinusSource1Color_GL: return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR; - case Maxwell::Blend::Factor::Source1Alpha: - case Maxwell::Blend::Factor::Source1AlphaGL: + case Maxwell::Blend::Factor::Source1Alpha_D3D: + case Maxwell::Blend::Factor::Source1Alpha_GL: return VK_BLEND_FACTOR_SRC1_ALPHA; - case Maxwell::Blend::Factor::OneMinusSource1Alpha: - case Maxwell::Blend::Factor::OneMinusSource1AlphaGL: + case Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D: + case Maxwell::Blend::Factor::OneMinusSource1Alpha_GL: return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA; - case Maxwell::Blend::Factor::ConstantColor: - case Maxwell::Blend::Factor::ConstantColorGL: + case Maxwell::Blend::Factor::BlendFactor_D3D: + case Maxwell::Blend::Factor::ConstantColor_GL: return VK_BLEND_FACTOR_CONSTANT_COLOR; - case Maxwell::Blend::Factor::OneMinusConstantColor: - case Maxwell::Blend::Factor::OneMinusConstantColorGL: + case Maxwell::Blend::Factor::OneMinusBlendFactor_D3D: + case Maxwell::Blend::Factor::OneMinusConstantColor_GL: return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR; - case Maxwell::Blend::Factor::ConstantAlpha: - case Maxwell::Blend::Factor::ConstantAlphaGL: + case Maxwell::Blend::Factor::BothSourceAlpha_D3D: + case Maxwell::Blend::Factor::ConstantAlpha_GL: return VK_BLEND_FACTOR_CONSTANT_ALPHA; - case Maxwell::Blend::Factor::OneMinusConstantAlpha: - case Maxwell::Blend::Factor::OneMinusConstantAlphaGL: + case Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D: + case Maxwell::Blend::Factor::OneMinusConstantAlpha_GL: return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA; } UNIMPLEMENTED_MSG("Unimplemented blend factor={}", factor); diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h index 356d46292..6f65502d6 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.h +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h @@ -55,7 +55,7 @@ VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison); VkIndexType IndexFormat(Maxwell::IndexFormat index_format); -VkStencilOp StencilOp(Maxwell::StencilOp stencil_op); +VkStencilOp StencilOp(Maxwell::StencilOp::Op stencil_op); VkBlendOp BlendEquation(Maxwell::Blend::Equation equation); diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 7c78d0299..d8131232a 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -102,13 +102,13 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, debug_callback(Settings::values.renderer_debug ? 
CreateDebugCallback(instance) : nullptr), surface(CreateSurface(instance, render_window)), device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), - state_tracker(gpu), scheduler(device, state_tracker), + state_tracker(), scheduler(device, state_tracker), swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, render_window.GetFramebufferLayout().height, false), blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, screen_info), - rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device, - memory_allocator, state_tracker, scheduler) { + rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator, + state_tracker, scheduler) { Report(); } catch (const vk::Exception& exception) { LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); @@ -142,7 +142,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { const auto recreate_swapchain = [&] { if (!has_been_recreated) { has_been_recreated = true; - scheduler.WaitWorker(); + scheduler.Finish(); } const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout(); swapchain.Create(layout.width, layout.height, is_srgb); diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 444c29f68..cb7fa2078 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp @@ -145,6 +145,11 @@ VkSemaphore BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, // Finish any pending renderpass scheduler.RequestOutsideRenderPassOperationContext(); + if (const auto swapchain_images = swapchain.GetImageCount(); swapchain_images != image_count) { + image_count = swapchain_images; + Recreate(); + } + const std::size_t image_index = swapchain.GetImageIndex(); scheduler.Wait(resource_ticks[image_index]); @@ -448,15 +453,15 @@ vk::Framebuffer BlitScreen::CreateFramebuffer(const VkImageView& image_view, VkE void BlitScreen::CreateStaticResources() { CreateShaders(); + CreateSampler(); +} + +void BlitScreen::CreateDynamicResources() { CreateSemaphores(); CreateDescriptorPool(); CreateDescriptorSetLayout(); CreateDescriptorSets(); CreatePipelineLayout(); - CreateSampler(); -} - -void BlitScreen::CreateDynamicResources() { CreateRenderPass(); CreateFramebuffers(); CreateGraphicsPipeline(); diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index b8c67bef0..29e2ea925 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h @@ -109,7 +109,7 @@ private: MemoryAllocator& memory_allocator; Swapchain& swapchain; Scheduler& scheduler; - const std::size_t image_count; + std::size_t image_count; const ScreenInfo& screen_info; vk::ShaderModule vertex_shader; diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index f17a5ccd6..241d7573e 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp @@ -26,8 +26,6 @@ namespace Vulkan { -using Tegra::Texture::SWIZZLE_TABLE; - namespace { constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0; diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp index 6447210e2..7906e11a8 100644 --- 
a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp @@ -126,8 +126,8 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute, const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + secondary_offset}; - const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; - const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; + const u32 lhs_raw{gpu_memory.Read<u32>(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr) << desc.secondary_shift_left}; return TexturePair(lhs_raw | rhs_raw, via_header_index); } } diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index c249b34d4..0214b103a 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp @@ -11,11 +11,8 @@ namespace Vulkan { -InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_) - : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {} - -InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_) - : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {} +InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_) + : FenceBase{is_stubbed_}, scheduler{scheduler_} {} InnerFence::~InnerFence() = default; @@ -48,12 +45,8 @@ FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::G : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_}, scheduler{scheduler_} {} -Fence FenceManager::CreateFence(u32 value, bool is_stubbed) { - return std::make_shared<InnerFence>(scheduler, value, is_stubbed); -} - -Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { - return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed); +Fence FenceManager::CreateFence(bool is_stubbed) { + return std::make_shared<InnerFence>(scheduler, is_stubbed); } void FenceManager::QueueFence(Fence& fence) { diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index 7c0bbd80a..7fe2afcd9 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h @@ -25,8 +25,7 @@ class Scheduler; class InnerFence : public VideoCommon::FenceBase { public: - explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_); - explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_); + explicit InnerFence(Scheduler& scheduler_, bool is_stubbed_); ~InnerFence(); void Queue(); @@ -50,8 +49,7 @@ public: QueryCache& query_cache, const Device& device, Scheduler& scheduler); protected: - Fence CreateFence(u32 value, bool is_stubbed) override; - Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; + Fence CreateFence(bool is_stubbed) override; void QueueFence(Fence& fence) override; bool IsFenceSignaled(Fence& fence) const override; void WaitFence(Fence& fence) override; diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 5aca8f038..c3f66c8a3 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -215,15 +215,14 @@ ConfigureFuncPtr ConfigureFunc(const 
std::array<vk::ShaderModule, NUM_STAGES>& m } // Anonymous namespace GraphicsPipeline::GraphicsPipeline( - Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_, - BufferCache& buffer_cache_, TextureCache& texture_cache_, + Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_, VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages, const std::array<const Shader::Info*, NUM_STAGES>& infos) - : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_}, - texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_}, + : key{key_}, device{device_}, texture_cache{texture_cache_}, + buffer_cache{buffer_cache_}, scheduler{scheduler_}, update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} { if (shader_notify) { shader_notify->MarkShaderBuilding(); @@ -288,8 +287,8 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes); - const auto& regs{maxwell3d.regs}; - const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; + const auto& regs{maxwell3d->regs}; + const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding}; const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { const Shader::Info& info{stage_infos[stage]}; buffer_cache.UnbindGraphicsStorageBuffers(stage); @@ -302,7 +301,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { ++ssbo_index; } } - const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; + const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers}; const auto read_handle{[&](const auto& desc, u32 index) { ASSERT(cbufs[desc.cbuf_index].enabled); const u32 index_offset{index << desc.size_shift}; @@ -315,13 +314,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + second_offset}; - const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; - const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; + const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left}; + const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr) + << desc.secondary_shift_left}; const u32 raw{lhs_raw | rhs_raw}; return TexturePair(raw, via_header_index); } } - return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); + return TexturePair(gpu_memory->Read<u32>(addr), via_header_index); }}; const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { for (u32 index = 0; index < desc.count; ++index) { @@ -664,15 +664,6 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) { .lineStippleFactor = 0, .lineStipplePattern = 0, }; - VkPipelineRasterizationConservativeStateCreateInfoEXT conservative_raster{ - .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT, - .pNext = nullptr, - .flags = 0, - .conservativeRasterizationMode = key.state.conservative_raster_enable != 0 - ? 
VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT - : VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT, - .extraPrimitiveOverestimationSize = 0.0f, - }; VkPipelineRasterizationProvokingVertexStateCreateInfoEXT provoking_vertex{ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT, .pNext = nullptr, @@ -683,9 +674,6 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) { if (IsLine(input_assembly_topology) && device.IsExtLineRasterizationSupported()) { line_state.pNext = std::exchange(rasterization_ci.pNext, &line_state); } - if (device.IsExtConservativeRasterizationSupported()) { - conservative_raster.pNext = std::exchange(rasterization_ci.pNext, &conservative_raster); - } if (device.IsExtProvokingVertexSupported()) { provoking_vertex.pNext = std::exchange(rasterization_ci.pNext, &provoking_vertex); } diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h index e8949a9ab..85602592b 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h @@ -69,15 +69,16 @@ class GraphicsPipeline { static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage; public: - explicit GraphicsPipeline( - Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, - Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache, - VideoCore::ShaderNotify* shader_notify, const Device& device, - DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue, - Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics, - RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key, - std::array<vk::ShaderModule, NUM_STAGES> stages, - const std::array<const Shader::Info*, NUM_STAGES>& infos); + explicit GraphicsPipeline(Scheduler& scheduler, BufferCache& buffer_cache, + TextureCache& texture_cache, VideoCore::ShaderNotify* shader_notify, + const Device& device, DescriptorPool& descriptor_pool, + UpdateDescriptorQueue& update_descriptor_queue, + Common::ThreadWorker* worker_thread, + PipelineStatistics* pipeline_statistics, + RenderPassCache& render_pass_cache, + const GraphicsPipelineCacheKey& key, + std::array<vk::ShaderModule, NUM_STAGES> stages, + const std::array<const Shader::Info*, NUM_STAGES>& infos); GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete; GraphicsPipeline(GraphicsPipeline&&) noexcept = delete; @@ -109,6 +110,11 @@ public: return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); }; } + void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) { + maxwell3d = maxwell3d_; + gpu_memory = gpu_memory_; + } + private: template <typename Spec> void ConfigureImpl(bool is_indexed); @@ -120,8 +126,8 @@ private: void Validate(); const GraphicsPipelineCacheKey key; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::MemoryManager& gpu_memory; + Tegra::Engines::Maxwell3D* maxwell3d; + Tegra::MemoryManager* gpu_memory; const Device& device; TextureCache& texture_cache; BufferCache& buffer_cache; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index 53dda5048..13d5a1f67 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -62,29 +62,29 @@ auto MakeSpan(Container& container) { Shader::CompareFunction 
MaxwellToCompareFunction(Maxwell::ComparisonOp comparison) { switch (comparison) { - case Maxwell::ComparisonOp::Never: - case Maxwell::ComparisonOp::NeverOld: + case Maxwell::ComparisonOp::Never_D3D: + case Maxwell::ComparisonOp::Never_GL: return Shader::CompareFunction::Never; - case Maxwell::ComparisonOp::Less: - case Maxwell::ComparisonOp::LessOld: + case Maxwell::ComparisonOp::Less_D3D: + case Maxwell::ComparisonOp::Less_GL: return Shader::CompareFunction::Less; - case Maxwell::ComparisonOp::Equal: - case Maxwell::ComparisonOp::EqualOld: + case Maxwell::ComparisonOp::Equal_D3D: + case Maxwell::ComparisonOp::Equal_GL: return Shader::CompareFunction::Equal; - case Maxwell::ComparisonOp::LessEqual: - case Maxwell::ComparisonOp::LessEqualOld: + case Maxwell::ComparisonOp::LessEqual_D3D: + case Maxwell::ComparisonOp::LessEqual_GL: return Shader::CompareFunction::LessThanEqual; - case Maxwell::ComparisonOp::Greater: - case Maxwell::ComparisonOp::GreaterOld: + case Maxwell::ComparisonOp::Greater_D3D: + case Maxwell::ComparisonOp::Greater_GL: return Shader::CompareFunction::Greater; - case Maxwell::ComparisonOp::NotEqual: - case Maxwell::ComparisonOp::NotEqualOld: + case Maxwell::ComparisonOp::NotEqual_D3D: + case Maxwell::ComparisonOp::NotEqual_GL: return Shader::CompareFunction::NotEqual; - case Maxwell::ComparisonOp::GreaterEqual: - case Maxwell::ComparisonOp::GreaterEqualOld: + case Maxwell::ComparisonOp::GreaterEqual_D3D: + case Maxwell::ComparisonOp::GreaterEqual_GL: return Shader::CompareFunction::GreaterThanEqual; - case Maxwell::ComparisonOp::Always: - case Maxwell::ComparisonOp::AlwaysOld: + case Maxwell::ComparisonOp::Always_D3D: + case Maxwell::ComparisonOp::Always_GL: return Shader::CompareFunction::Always; } UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison); @@ -96,15 +96,18 @@ Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribut return Shader::AttributeType::Disabled; } switch (attr.Type()) { - case Maxwell::VertexAttribute::Type::SignedNorm: - case Maxwell::VertexAttribute::Type::UnsignedNorm: - case Maxwell::VertexAttribute::Type::UnsignedScaled: - case Maxwell::VertexAttribute::Type::SignedScaled: + case Maxwell::VertexAttribute::Type::UnusedEnumDoNotUseBecauseItWillGoAway: + ASSERT_MSG(false, "Invalid vertex attribute type!"); + return Shader::AttributeType::Disabled; + case Maxwell::VertexAttribute::Type::SNorm: + case Maxwell::VertexAttribute::Type::UNorm: + case Maxwell::VertexAttribute::Type::UScaled: + case Maxwell::VertexAttribute::Type::SScaled: case Maxwell::VertexAttribute::Type::Float: return Shader::AttributeType::Float; - case Maxwell::VertexAttribute::Type::SignedInt: + case Maxwell::VertexAttribute::Type::SInt: return Shader::AttributeType::SignedInt; - case Maxwell::VertexAttribute::Type::UnsignedInt: + case Maxwell::VertexAttribute::Type::UInt: return Shader::AttributeType::UnsignedInt; } return Shader::AttributeType::Float; @@ -163,16 +166,14 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program } break; case Shader::Stage::TessellationEval: - // We have to flip tessellation clockwise for some reason... 
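(For context on the renames in this switch: the Maxwell method registers accept two encodings of the same comparison, small D3D-style indices and GLenum-style tokens, and the new *_D3D/*_GL suffixes make that explicit. A minimal sketch of the idea follows; the concrete values are an assumption for illustration, not part of this change:)

// Illustrative sketch (values assumed): both encodings select the same
// operation, so each translation switch maps a *_D3D enumerator and its
// *_GL twin to a single host value.
enum class ComparisonOp : u32 {
    Never_D3D = 1,     // D3D-style: dense indices starting at 1
    Always_D3D = 8,
    Never_GL = 0x200,  // GL-style: mirrors GL_NEVER..GL_ALWAYS
    Always_GL = 0x207,
};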
- info.tess_clockwise = key.state.tessellation_clockwise == 0; info.tess_primitive = [&key] { const u32 raw{key.state.tessellation_primitive.Value()}; - switch (static_cast<Maxwell::TessellationPrimitive>(raw)) { - case Maxwell::TessellationPrimitive::Isolines: + switch (static_cast<Maxwell::Tessellation::DomainType>(raw)) { + case Maxwell::Tessellation::DomainType::Isolines: return Shader::TessPrimitive::Isolines; - case Maxwell::TessellationPrimitive::Triangles: + case Maxwell::Tessellation::DomainType::Triangles: return Shader::TessPrimitive::Triangles; - case Maxwell::TessellationPrimitive::Quads: + case Maxwell::Tessellation::DomainType::Quads: return Shader::TessPrimitive::Quads; } ASSERT(false); @@ -180,12 +181,12 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program }(); info.tess_spacing = [&] { const u32 raw{key.state.tessellation_spacing}; - switch (static_cast<Maxwell::TessellationSpacing>(raw)) { - case Maxwell::TessellationSpacing::Equal: + switch (static_cast<Maxwell::Tessellation::Spacing>(raw)) { + case Maxwell::Tessellation::Spacing::Integer: return Shader::TessSpacing::Equal; - case Maxwell::TessellationSpacing::FractionalOdd: + case Maxwell::Tessellation::Spacing::FractionalOdd: return Shader::TessSpacing::FractionalOdd; - case Maxwell::TessellationSpacing::FractionalEven: + case Maxwell::Tessellation::Spacing::FractionalEven: return Shader::TessSpacing::FractionalEven; } ASSERT(false); @@ -260,20 +261,18 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c return std::memcmp(&rhs, this, Size()) == 0; } -PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_, const Device& device_, +PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_, UpdateDescriptorQueue& update_descriptor_queue_, RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) - : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, - device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, - update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_}, - buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_}, + : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_}, + descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_}, + render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_}, + texture_cache{texture_cache_}, shader_notify{shader_notify_}, use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()}, - workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "yuzu:PipelineBuilder"), - serialization_thread(1, "yuzu:PipelineSerialization") { + workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"), + serialization_thread(1, "VkPipelineSerialization") { const auto& float_control{device.FloatControlProperties()}; const VkDriverIdKHR driver_id{device.GetDriverID()}; profile = Shader::Profile{ @@ -338,7 +337,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() { current_pipeline = nullptr; return nullptr; } - graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(), + 
graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(), device.IsExtVertexInputDynamicStateSupported()); if (current_pipeline) { @@ -358,7 +357,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() { if (!shader) { return nullptr; } - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const ComputePipelineCacheKey key{ .unique_hash = shader->unique_hash, .shared_memory_size = qmd.shared_alloc, @@ -487,13 +486,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const } // If something is using depth, we can assume that games are not rendering anything which // will be used one time. - if (maxwell3d.regs.zeta_enable) { + if (maxwell3d->regs.zeta_enable) { return nullptr; } // If games are using a small index count, we can assume these are full screen quads. // Usually these shaders are only used once for building textures so we can assume they // can't be built async - if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { + if (maxwell3d->regs.index_buffer.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) { return pipeline; } return nullptr; @@ -558,10 +557,10 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline( previous_stage = &program; } Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr}; - return std::make_unique<GraphicsPipeline>( - maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, - descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key, - std::move(modules), infos); + return std::make_unique<GraphicsPipeline>(scheduler, buffer_cache, texture_cache, + &shader_notify, device, descriptor_pool, + update_descriptor_queue, thread_worker, statistics, + render_pass_cache, key, std::move(modules), infos); } catch (const Shader::Exception& exception) { LOG_ERROR(Render_Vulkan, "{}", exception.what()); @@ -593,9 +592,9 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() { std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline( const ComputePipelineCacheKey& key, const ShaderInfo* shader) { - const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; - const auto& qmd{kepler_compute.launch_description}; - ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; + const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; + const auto& qmd{kepler_compute->launch_description}; + ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; env.SetCachedSize(shader->size_bytes); main_pools.ReleaseContents(); diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 127957dbf..61f9e9366 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -100,10 +100,8 @@ struct ShaderPools { class PipelineCache : public VideoCommon::ShaderCache { public: - explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, - Tegra::Engines::KeplerCompute& kepler_compute, - Tegra::MemoryManager& gpu_memory, const Device& device, - Scheduler& scheduler, DescriptorPool& descriptor_pool, + explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler, + DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue, 
RenderPassCache& render_pass_cache, BufferCache& buffer_cache, TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 2b859c6b8..4b15c0f85 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -59,16 +59,16 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) { return query_pool == *pool; }); - ASSERT(it != std::end(pools)); - const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); - usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; + if (it != std::end(pools)) { + const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); + usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; + } } -QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, - const Device& device_, Scheduler& scheduler_) - : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_}, +QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, + Scheduler& scheduler_) + : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_}, query_pools{ QueryPool{device_, scheduler_, QueryType::SamplesPassed}, } {} diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h index b0d86c4f8..26762ee09 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.h +++ b/src/video_core/renderer_vulkan/vk_query_cache.h @@ -52,9 +52,8 @@ private: class QueryCache final : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { public: - explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, - const Device& device_, Scheduler& scheduler_); + explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, + Scheduler& scheduler_); ~QueryCache(); std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 7e40c2df1..47dfb45a1 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -11,6 +11,7 @@ #include "common/microprofile.h" #include "common/scope_exit.h" #include "common/settings.h" +#include "video_core/control/channel_state.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/blit_image.h" @@ -69,7 +70,7 @@ VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t in const float width = conv(src.scale_x * 2.0f); float y = conv(src.translate_y - src.scale_y); float height = conv(src.scale_y * 2.0f); - bool y_negate = regs.screen_y_control.y_negate; + bool y_negate = regs.window_origin.mode != Maxwell::WindowOrigin::Mode::UpperLeft; if (!device.IsNvViewportSwizzleSupported()) { y_negate = y_negate != (src.swizzle.y == Maxwell::ViewportSwizzle::NegativeY); @@ -129,11 +130,11 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index, u32 up_scale = 1, u3 DrawParams MakeDrawParams(const Maxwell& regs, u32 
num_instances, bool is_instanced, bool is_indexed) { DrawParams params{ - .base_instance = regs.vb_base_instance, + .base_instance = regs.global_base_instance_index, .num_instances = is_instanced ? num_instances : 1, - .base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first, - .num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count, - .first_index = is_indexed ? regs.index_array.first : 0, + .base_vertex = is_indexed ? regs.global_base_vertex_index : regs.vertex_buffer.first, + .num_vertices = is_indexed ? regs.index_buffer.count : regs.vertex_buffer.count, + .first_index = is_indexed ? regs.index_buffer.first : 0, .is_indexed = is_indexed, }; if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) { @@ -148,14 +149,11 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan } // Anonymous namespace RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, const Device& device_, MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, Scheduler& scheduler_) - : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, - gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, - screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_}, - state_tracker{state_tracker_}, scheduler{scheduler_}, + : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_}, + memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_}, staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), update_descriptor_queue(device, scheduler), blit_image(device, scheduler, state_tracker, descriptor_pool), @@ -165,14 +163,13 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra memory_allocator, staging_pool, blit_image, astc_decoder_pass, render_pass_cache}, - texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), + texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, update_descriptor_queue, descriptor_pool), - buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), - pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler, - descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache, - texture_cache, gpu.ShaderNotify()), - query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache}, + buffer_cache(*this, cpu_memory_, buffer_cache_runtime), + pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue, + render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()), + query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache}, fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), wfi_event(device.GetLogical().CreateEvent()) { scheduler.SetQueryCache(query_cache); @@ -193,14 +190,16 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { return; } std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + // Update the engine pointers, as the bound channel may have changed (see the sketch below).
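(A rough sketch of the channel model behind this call, inferred from the ChannelSetupCaches/ChannelInfo usage elsewhere in this diff; the channels container and member names here are assumptions, not a verbatim excerpt:)

// Sketch: caches and pipelines no longer capture engine references at
// construction. They hold pointers that are retargeted whenever a
// different GPU channel is bound, so one compiled pipeline can serve
// any channel.
struct ChannelInfo {
    Tegra::Engines::Maxwell3D* maxwell_3d{};
    Tegra::Engines::KeplerCompute* kepler_compute{};
    Tegra::MemoryManager* memory_manager{};
};

void BindToChannel(s32 id) {
    const ChannelInfo& info = channels.at(id); // per-channel state (assumed)
    maxwell3d = info.maxwell_3d;               // pointers read by Draw()/Clear()
    kepler_compute = info.kepler_compute;
    gpu_memory = info.memory_manager;
}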
+ pipeline->SetEngine(maxwell3d, gpu_memory); pipeline->Configure(is_indexed); BeginTransformFeedback(); UpdateDynamicStates(); - const auto& regs{maxwell3d.regs}; - const u32 num_instances{maxwell3d.mme_draw.instance_count}; + const auto& regs{maxwell3d->regs}; + const u32 num_instances{maxwell3d->mme_draw.instance_count}; const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { if (draw_params.is_indexed) { @@ -218,18 +217,18 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { void RasterizerVulkan::Clear() { MICROPROFILE_SCOPE(Vulkan_Clearing); - if (!maxwell3d.ShouldExecute()) { + if (!maxwell3d->ShouldExecute()) { return; } FlushWork(); query_cache.UpdateCounters(); - auto& regs = maxwell3d.regs; - const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || - regs.clear_buffers.A; - const bool use_depth = regs.clear_buffers.Z; - const bool use_stencil = regs.clear_buffers.S; + auto& regs = maxwell3d->regs; + const bool use_color = regs.clear_surface.R || regs.clear_surface.G || regs.clear_surface.B || + regs.clear_surface.A; + const bool use_depth = regs.clear_surface.Z; + const bool use_stencil = regs.clear_surface.S; if (!use_color && !use_depth && !use_stencil) { return; } @@ -248,9 +247,16 @@ void RasterizerVulkan::Clear() { } UpdateViewportsState(regs); + VkRect2D default_scissor; + default_scissor.offset.x = 0; + default_scissor.offset.y = 0; + default_scissor.extent.width = std::numeric_limits<s32>::max(); + default_scissor.extent.height = std::numeric_limits<s32>::max(); + VkClearRect clear_rect{ - .rect = GetScissorState(regs, 0, up_scale, down_shift), - .baseArrayLayer = regs.clear_buffers.layer, + .rect = regs.clear_control.use_scissor ? 
GetScissorState(regs, 0, up_scale, down_shift) + : default_scissor, + .baseArrayLayer = regs.clear_surface.layer, .layerCount = 1, }; if (clear_rect.rect.extent.width == 0 || clear_rect.rect.extent.height == 0) { @@ -261,7 +267,7 @@ void RasterizerVulkan::Clear() { .height = std::min(clear_rect.rect.extent.height, render_area.height), }; - const u32 color_attachment = regs.clear_buffers.RT; + const u32 color_attachment = regs.clear_surface.RT; if (use_color && framebuffer->HasAspectColorBit(color_attachment)) { VkClearValue clear_value; bool is_integer = false; @@ -283,7 +289,8 @@ void RasterizerVulkan::Clear() { break; } if (!is_integer) { - std::memcpy(clear_value.color.float32, regs.clear_color, sizeof(regs.clear_color)); + std::memcpy(clear_value.color.float32, regs.clear_color.data(), + regs.clear_color.size() * sizeof(f32)); } else if (!is_signed) { for (size_t i = 0; i < 4; i++) { clear_value.color.uint32[i] = static_cast<u32>( @@ -339,9 +346,9 @@ void RasterizerVulkan::DispatchCompute() { return; } std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; - pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache); + pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache); - const auto& qmd{kepler_compute.launch_description}; + const auto& qmd{kepler_compute->launch_description}; const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; scheduler.RequestOutsideRenderPassOperationContext(); scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); @@ -422,7 +429,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) { } } -void RasterizerVulkan::SyncGuestHost() { +void RasterizerVulkan::InvalidateGPUCache() { pipeline_cache.SyncGuestHost(); { std::scoped_lock lock{buffer_cache.mutex}; @@ -442,40 +449,30 @@ void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) { pipeline_cache.OnCPUWrite(addr, size); } -void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) { +void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { { std::scoped_lock lock{texture_cache.mutex}; - texture_cache.UnmapGPUMemory(addr, size); + texture_cache.UnmapGPUMemory(as_id, addr, size); } } -void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { - if (!gpu.IsAsync()) { - gpu_memory.Write<u32>(addr, value); - return; - } - fence_manager.SignalSemaphore(addr, value); +void RasterizerVulkan::SignalFence(std::function<void()>&& func) { + fence_manager.SignalFence(std::move(func)); +} + +void RasterizerVulkan::SyncOperation(std::function<void()>&& func) { + fence_manager.SyncOperation(std::move(func)); } void RasterizerVulkan::SignalSyncPoint(u32 value) { - if (!gpu.IsAsync()) { - gpu.IncrementSyncPoint(value); - return; - } fence_manager.SignalSyncPoint(value); } void RasterizerVulkan::SignalReference() { - if (!gpu.IsAsync()) { - return; - } fence_manager.SignalOrdering(); } void RasterizerVulkan::ReleaseFences() { - if (!gpu.IsAsync()) { - return; - } fence_manager.WaitPendingFences(); } @@ -552,13 +549,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA() } void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span<u8> memory) { - auto cpu_addr = gpu_memory.GpuToCpuAddress(address); + std::span<const u8> memory) { + auto cpu_addr = gpu_memory->GpuToCpuAddress(address); if (!cpu_addr) [[unlikely]] { - gpu_memory.WriteBlock(address, memory.data(), copy_size); + 
gpu_memory->WriteBlock(address, memory.data(), copy_size); return; } - gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); + gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size); { std::unique_lock<std::mutex> lock{buffer_cache.mutex}; if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { @@ -627,7 +624,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 } void RasterizerVulkan::UpdateDynamicStates() { - auto& regs = maxwell3d.regs; + auto& regs = maxwell3d->regs; UpdateViewportsState(regs); UpdateScissorsState(regs); UpdateDepthBias(regs); @@ -651,24 +648,24 @@ void RasterizerVulkan::UpdateDynamicStates() { } void RasterizerVulkan::BeginTransformFeedback() { - const auto& regs = maxwell3d.regs; - if (regs.tfb_enabled == 0) { + const auto& regs = maxwell3d->regs; + if (regs.transform_feedback_enabled == 0) { return; } if (!device.IsExtTransformFeedbackSupported()) { LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported"); return; } - UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) || - regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) || - regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry)); + UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) || + regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation) || + regs.IsShaderConfigEnabled(Maxwell::ShaderType::Geometry)); scheduler.Record( [](vk::CommandBuffer cmdbuf) { cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr); }); } void RasterizerVulkan::EndTransformFeedback() { - const auto& regs = maxwell3d.regs; - if (regs.tfb_enabled == 0) { + const auto& regs = maxwell3d->regs; + if (regs.transform_feedback_enabled == 0) { return; } if (!device.IsExtTransformFeedbackSupported()) { @@ -732,11 +729,11 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) { if (!state_tracker.TouchDepthBias()) { return; } - float units = regs.polygon_offset_units / 2.0f; - const bool is_d24 = regs.zeta.format == Tegra::DepthFormat::S8_UINT_Z24_UNORM || - regs.zeta.format == Tegra::DepthFormat::D24X8_UNORM || - regs.zeta.format == Tegra::DepthFormat::D24S8_UNORM || - regs.zeta.format == Tegra::DepthFormat::D24C8_UNORM; + float units = regs.depth_bias / 2.0f; + const bool is_d24 = regs.zeta.format == Tegra::DepthFormat::Z24_UNORM_S8_UINT || + regs.zeta.format == Tegra::DepthFormat::X8Z24_UNORM || + regs.zeta.format == Tegra::DepthFormat::S8Z24_UNORM || + regs.zeta.format == Tegra::DepthFormat::V8Z24_UNORM; if (is_d24 && !device.SupportsD24DepthBuffer()) { // the base formulas can be obtained from here: // https://docs.microsoft.com/en-us/windows/win32/direct3d11/d3d10-graphics-programming-guide-output-merger-stage-depth-bias @@ -744,8 +741,8 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) { static_cast<double>(1ULL << (32 - 24)) / (static_cast<double>(0x1.ep+127)); units = static_cast<float>(static_cast<double>(units) * rescale_factor); } - scheduler.Record([constant = units, clamp = regs.polygon_offset_clamp, - factor = regs.polygon_offset_factor](vk::CommandBuffer cmdbuf) { + scheduler.Record([constant = units, clamp = regs.depth_bias_clamp, + factor = regs.slope_scale_depth_bias](vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthBias(constant, clamp, factor); }); } @@ -775,8 +772,8 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs) if (regs.stencil_two_side_enable) { // Separate values per 
face scheduler.Record( - [front_ref = regs.stencil_front_func_ref, front_write_mask = regs.stencil_front_mask, - front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_func_ref, + [front_ref = regs.stencil_front_ref, front_write_mask = regs.stencil_front_mask, + front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_ref, back_write_mask = regs.stencil_back_mask, back_test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) { // Front face @@ -791,7 +788,7 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs) }); } else { // Front face defines both faces - scheduler.Record([ref = regs.stencil_front_func_ref, write_mask = regs.stencil_front_mask, + scheduler.Record([ref = regs.stencil_front_ref, write_mask = regs.stencil_front_mask, test_mask = regs.stencil_front_func_mask](vk::CommandBuffer cmdbuf) { cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_AND_BACK, ref); cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_AND_BACK, write_mask); @@ -804,7 +801,8 @@ void RasterizerVulkan::UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs) { if (!state_tracker.TouchLineWidth()) { return; } - const float width = regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased; + const float width = + regs.line_anti_alias_enable ? regs.line_width_smooth : regs.line_width_aliased; scheduler.Record([width](vk::CommandBuffer cmdbuf) { cmdbuf.SetLineWidth(width); }); } @@ -812,10 +810,10 @@ void RasterizerVulkan::UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs) { if (!state_tracker.TouchCullMode()) { return; } - scheduler.Record( - [enabled = regs.cull_test_enabled, cull_face = regs.cull_face](vk::CommandBuffer cmdbuf) { - cmdbuf.SetCullModeEXT(enabled ? MaxwellToVK::CullFace(cull_face) : VK_CULL_MODE_NONE); - }); + scheduler.Record([enabled = regs.gl_cull_test_enabled, + cull_face = regs.gl_cull_face](vk::CommandBuffer cmdbuf) { + cmdbuf.SetCullModeEXT(enabled ? MaxwellToVK::CullFace(cull_face) : VK_CULL_MODE_NONE); + }); } void RasterizerVulkan::UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Regs& regs) { @@ -864,8 +862,8 @@ void RasterizerVulkan::UpdateFrontFace(Tegra::Engines::Maxwell3D::Regs& regs) { return; } - VkFrontFace front_face = MaxwellToVK::FrontFace(regs.front_face); - if (regs.screen_y_control.triangle_rast_flip != 0) { + VkFrontFace front_face = MaxwellToVK::FrontFace(regs.gl_front_face); + if (regs.window_origin.flip_y != 0) { front_face = front_face == VK_FRONT_FACE_CLOCKWISE ? 
VK_FRONT_FACE_COUNTER_CLOCKWISE : VK_FRONT_FACE_CLOCKWISE; } @@ -877,16 +875,16 @@ void RasterizerVulkan::UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs) { if (!state_tracker.TouchStencilOp()) { return; } - const Maxwell::StencilOp fail = regs.stencil_front_op_fail; - const Maxwell::StencilOp zfail = regs.stencil_front_op_zfail; - const Maxwell::StencilOp zpass = regs.stencil_front_op_zpass; - const Maxwell::ComparisonOp compare = regs.stencil_front_func_func; + const Maxwell::StencilOp::Op fail = regs.stencil_front_op.fail; + const Maxwell::StencilOp::Op zfail = regs.stencil_front_op.zfail; + const Maxwell::StencilOp::Op zpass = regs.stencil_front_op.zpass; + const Maxwell::ComparisonOp compare = regs.stencil_front_op.func; if (regs.stencil_two_side_enable) { // Separate stencil op per face - const Maxwell::StencilOp back_fail = regs.stencil_back_op_fail; - const Maxwell::StencilOp back_zfail = regs.stencil_back_op_zfail; - const Maxwell::StencilOp back_zpass = regs.stencil_back_op_zpass; - const Maxwell::ComparisonOp back_compare = regs.stencil_back_func_func; + const Maxwell::StencilOp::Op back_fail = regs.stencil_back_op.fail; + const Maxwell::StencilOp::Op back_zfail = regs.stencil_back_op.zfail; + const Maxwell::StencilOp::Op back_zpass = regs.stencil_back_op.zpass; + const Maxwell::ComparisonOp back_compare = regs.stencil_back_op.func; scheduler.Record([fail, zfail, zpass, compare, back_fail, back_zfail, back_zpass, back_compare](vk::CommandBuffer cmdbuf) { cmdbuf.SetStencilOpEXT(VK_STENCIL_FACE_FRONT_BIT, MaxwellToVK::StencilOp(fail), @@ -917,7 +915,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& } void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { - auto& dirty{maxwell3d.dirty.flags}; + auto& dirty{maxwell3d->dirty.flags}; if (!dirty[Dirty::VertexInput]) { return; } @@ -958,15 +956,15 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) dirty[Dirty::VertexBinding0 + index] = false; const u32 binding{static_cast<u32>(index)}; - const auto& input_binding{regs.vertex_array[binding]}; - const bool is_instanced{regs.instanced_arrays.IsInstancingEnabled(binding)}; + const auto& input_binding{regs.vertex_streams[binding]}; + const bool is_instanced{regs.vertex_stream_instances.IsInstancingEnabled(binding)}; bindings.push_back({ .sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT, .pNext = nullptr, .binding = binding, .stride = input_binding.stride, .inputRate = is_instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX, - .divisor = is_instanced ? input_binding.divisor : 1, + .divisor = is_instanced ? 
input_binding.frequency : 1, }); } scheduler.Record([bindings, attributes](vk::CommandBuffer cmdbuf) { @@ -974,4 +972,41 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) }); } +void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) { + CreateChannel(channel); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.CreateChannel(channel); + buffer_cache.CreateChannel(channel); + } + pipeline_cache.CreateChannel(channel); + query_cache.CreateChannel(channel); + state_tracker.SetupTables(channel); +} + +void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) { + const s32 channel_id = channel.bind_id; + BindToChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.BindToChannel(channel_id); + buffer_cache.BindToChannel(channel_id); + } + pipeline_cache.BindToChannel(channel_id); + query_cache.BindToChannel(channel_id); + state_tracker.ChangeChannel(channel); + state_tracker.InvalidateState(); +} + +void RasterizerVulkan::ReleaseChannel(s32 channel_id) { + EraseChannel(channel_id); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + texture_cache.EraseChannel(channel_id); + buffer_cache.EraseChannel(channel_id); + } + pipeline_cache.EraseChannel(channel_id); + query_cache.EraseChannel(channel_id); +} + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 0370ea39b..4cde3c983 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -8,6 +8,7 @@ #include <boost/container/static_vector.hpp> #include "common/common_types.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/rasterizer_accelerated.h" #include "video_core/rasterizer_interface.h" @@ -54,13 +55,13 @@ private: BufferCache& buffer_cache; }; -class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { +class RasterizerVulkan final : public VideoCore::RasterizerAccelerated, + protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { public: explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, - ScreenInfo& screen_info_, const Device& device_, - MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, - Scheduler& scheduler_); + Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, + const Device& device_, MemoryAllocator& memory_allocator_, + StateTracker& state_tracker_, Scheduler& scheduler_); ~RasterizerVulkan() override; void Draw(bool is_indexed, bool is_instanced) override; @@ -75,10 +76,11 @@ public: bool MustFlushRegion(VAddr addr, u64 size) override; void InvalidateRegion(VAddr addr, u64 size) override; void OnCPUWrite(VAddr addr, u64 size) override; - void SyncGuestHost() override; + void InvalidateGPUCache() override; void UnmapMemory(VAddr addr, u64 size) override; - void ModifyGPUMemory(GPUVAddr addr, u64 size) override; - void SignalSemaphore(GPUVAddr addr, u32 value) override; + void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; + void SignalFence(std::function<void()>&& func) override; + void SyncOperation(std::function<void()>&& func) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; void ReleaseFences() override; @@ -93,12 +95,18 @@ public: const 
Tegra::Engines::Fermi2D::Config& copy_config) override; Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, - std::span<u8> memory) override; + std::span<const u8> memory) override; bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) override; void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; + void InitializeChannel(Tegra::Control::ChannelState& channel) override; + + void BindChannel(Tegra::Control::ChannelState& channel) override; + + void ReleaseChannel(s32 channel_id) override; + private: static constexpr size_t MAX_TEXTURES = 192; static constexpr size_t MAX_IMAGES = 48; @@ -134,9 +142,6 @@ private: void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); Tegra::GPU& gpu; - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; ScreenInfo& screen_info; const Device& device; diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp index a331ff37e..7934f2a51 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.cpp +++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp @@ -136,9 +136,10 @@ bool Scheduler::UpdateRescaling(bool is_rescaling) { } void Scheduler::WorkerThread(std::stop_token stop_token) { - Common::SetCurrentThreadName("yuzu:VulkanWorker"); + Common::SetCurrentThreadName("VulkanWorker"); do { std::unique_ptr<CommandChunk> work; + bool has_submit{false}; { std::unique_lock lock{work_mutex}; if (work_queue.empty()) { @@ -150,9 +151,10 @@ void Scheduler::WorkerThread(std::stop_token stop_token) { } work = std::move(work_queue.front()); work_queue.pop(); + + has_submit = work->HasSubmit(); + work->ExecuteAll(current_cmdbuf); } - const bool has_submit = work->HasSubmit(); - work->ExecuteAll(current_cmdbuf); if (has_submit) { AllocateWorkerCommandBuffer(); } diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp index 9ad096431..b87c3be66 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp +++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp @@ -7,9 +7,9 @@ #include "common/common_types.h" #include "core/core.h" +#include "video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/gpu.h" #include "video_core/renderer_vulkan/vk_state_tracker.h" #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) @@ -51,8 +51,8 @@ Flags MakeInvalidationFlags() { void SetupDirtyViewports(Tables& tables) { FillBlock(tables[0], OFF(viewport_transform), NUM(viewport_transform), Viewports); FillBlock(tables[0], OFF(viewports), NUM(viewports), Viewports); - tables[0][OFF(viewport_transform_enabled)] = Viewports; - tables[1][OFF(screen_y_control)] = Viewports; + tables[0][OFF(viewport_scale_offset_enbled)] = Viewports; + tables[1][OFF(window_origin)] = Viewports; } void SetupDirtyScissors(Tables& tables) { @@ -61,9 +61,9 @@ void SetupDirtyScissors(Tables& tables) { void SetupDirtyDepthBias(Tables& tables) { auto& table = tables[0]; - table[OFF(polygon_offset_units)] = DepthBias; - table[OFF(polygon_offset_clamp)] = DepthBias; - table[OFF(polygon_offset_factor)] = DepthBias; + table[OFF(depth_bias)] = DepthBias; + table[OFF(depth_bias_clamp)] = DepthBias; + table[OFF(slope_scale_depth_bias)] = 
DepthBias; } void SetupDirtyBlendConstants(Tables& tables) { @@ -77,10 +77,10 @@ void SetupDirtyDepthBounds(Tables& tables) { void SetupDirtyStencilProperties(Tables& tables) { auto& table = tables[0]; table[OFF(stencil_two_side_enable)] = StencilProperties; - table[OFF(stencil_front_func_ref)] = StencilProperties; + table[OFF(stencil_front_ref)] = StencilProperties; table[OFF(stencil_front_mask)] = StencilProperties; table[OFF(stencil_front_func_mask)] = StencilProperties; - table[OFF(stencil_back_func_ref)] = StencilProperties; + table[OFF(stencil_back_ref)] = StencilProperties; table[OFF(stencil_back_mask)] = StencilProperties; table[OFF(stencil_back_func_mask)] = StencilProperties; } @@ -92,8 +92,8 @@ void SetupDirtyLineWidth(Tables& tables) { void SetupDirtyCullMode(Tables& tables) { auto& table = tables[0]; - table[OFF(cull_face)] = CullMode; - table[OFF(cull_test_enabled)] = CullMode; + table[OFF(gl_cull_face)] = CullMode; + table[OFF(gl_cull_test_enabled)] = CullMode; } void SetupDirtyDepthBoundsEnable(Tables& tables) { @@ -114,20 +114,20 @@ void SetupDirtyDepthCompareOp(Tables& tables) { void SetupDirtyFrontFace(Tables& tables) { auto& table = tables[0]; - table[OFF(front_face)] = FrontFace; - table[OFF(screen_y_control)] = FrontFace; + table[OFF(gl_front_face)] = FrontFace; + table[OFF(window_origin)] = FrontFace; } void SetupDirtyStencilOp(Tables& tables) { auto& table = tables[0]; - table[OFF(stencil_front_op_fail)] = StencilOp; - table[OFF(stencil_front_op_zfail)] = StencilOp; - table[OFF(stencil_front_op_zpass)] = StencilOp; - table[OFF(stencil_front_func_func)] = StencilOp; - table[OFF(stencil_back_op_fail)] = StencilOp; - table[OFF(stencil_back_op_zfail)] = StencilOp; - table[OFF(stencil_back_op_zpass)] = StencilOp; - table[OFF(stencil_back_func_func)] = StencilOp; + table[OFF(stencil_front_op.fail)] = StencilOp; + table[OFF(stencil_front_op.zfail)] = StencilOp; + table[OFF(stencil_front_op.zpass)] = StencilOp; + table[OFF(stencil_front_op.func)] = StencilOp; + table[OFF(stencil_back_op.fail)] = StencilOp; + table[OFF(stencil_back_op.zfail)] = StencilOp; + table[OFF(stencil_back_op.zpass)] = StencilOp; + table[OFF(stencil_back_op.func)] = StencilOp; // Table 0 is used by StencilProperties tables[1][OFF(stencil_two_side_enable)] = StencilOp; @@ -139,10 +139,10 @@ void SetupDirtyStencilTestEnable(Tables& tables) { void SetupDirtyBlending(Tables& tables) { tables[0][OFF(color_mask_common)] = Blending; - tables[0][OFF(independent_blend_enable)] = Blending; + tables[0][OFF(blend_per_target_enabled)] = Blending; FillBlock(tables[0], OFF(color_mask), NUM(color_mask), Blending); FillBlock(tables[0], OFF(blend), NUM(blend), Blending); - FillBlock(tables[0], OFF(independent_blend), NUM(independent_blend), Blending); + FillBlock(tables[0], OFF(blend_per_target), NUM(blend_per_target), Blending); } void SetupDirtyViewportSwizzles(Tables& tables) { @@ -166,17 +166,16 @@ void SetupDirtyVertexBindings(Tables& tables) { static constexpr size_t divisor_offset = 3; for (size_t i = 0; i < Regs::NumVertexArrays; ++i) { const u8 flag = static_cast<u8>(VertexBinding0 + i); - tables[0][OFF(instanced_arrays) + i] = VertexInput; - tables[1][OFF(instanced_arrays) + i] = flag; - tables[0][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = VertexInput; - tables[1][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = flag; + tables[0][OFF(vertex_stream_instances) + i] = VertexInput; + tables[1][OFF(vertex_stream_instances) + i] = flag; + tables[0][OFF(vertex_streams) + i * 
NUM(vertex_streams[0]) + divisor_offset] = VertexInput; + tables[1][OFF(vertex_streams) + i * NUM(vertex_streams[0]) + divisor_offset] = flag; } } } // Anonymous namespace -StateTracker::StateTracker(Tegra::GPU& gpu) - : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} { - auto& tables{gpu.Maxwell3D().dirty.tables}; +void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) { + auto& tables{channel_state.maxwell_3d->dirty.tables}; SetupDirtyFlags(tables); SetupDirtyViewports(tables); SetupDirtyScissors(tables); @@ -199,4 +198,15 @@ StateTracker::StateTracker(Tegra::GPU& gpu) SetupDirtyVertexBindings(tables); } +void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { + flags = &channel_state.maxwell_3d->dirty.flags; +} + +void StateTracker::InvalidateState() { + flags->set(); +} + +StateTracker::StateTracker() + : flags{&default_flags}, default_flags{}, invalidation_flags{MakeInvalidationFlags()} {} + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h index a85bc1c10..2296dea60 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.h +++ b/src/video_core/renderer_vulkan/vk_state_tracker.h @@ -10,6 +10,12 @@ #include "video_core/dirty_flags.h" #include "video_core/engines/maxwell_3d.h" +namespace Tegra { +namespace Control { +struct ChannelState; +} +} // namespace Tegra + namespace Vulkan { namespace Dirty { @@ -53,19 +59,19 @@ class StateTracker { using Maxwell = Tegra::Engines::Maxwell3D::Regs; public: - explicit StateTracker(Tegra::GPU& gpu); + explicit StateTracker(); void InvalidateCommandBufferState() { - flags |= invalidation_flags; + (*flags) |= invalidation_flags; current_topology = INVALID_TOPOLOGY; } void InvalidateViewports() { - flags[Dirty::Viewports] = true; + (*flags)[Dirty::Viewports] = true; } void InvalidateScissors() { - flags[Dirty::Scissors] = true; + (*flags)[Dirty::Scissors] = true; } bool TouchViewports() { @@ -139,16 +145,23 @@ public: return has_changed; } + void SetupTables(Tegra::Control::ChannelState& channel_state); + + void ChangeChannel(Tegra::Control::ChannelState& channel_state); + + void InvalidateState(); + private: static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); bool Exchange(std::size_t id, bool new_value) const noexcept { - const bool is_dirty = flags[id]; - flags[id] = new_value; + const bool is_dirty = (*flags)[id]; + (*flags)[id] = new_value; return is_dirty; } - Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; + Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags; Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; }; diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp index a69ae7725..706d9ba74 100644 --- a/src/video_core/renderer_vulkan/vk_swapchain.cpp +++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp @@ -36,7 +36,8 @@ VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) { // Mailbox (triple buffering) doesn't lock the application like fifo (vsync), // prefer it if vsync option is not selected const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR); - if (found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { + if (Settings::values.fullscreen_mode.GetValue() == 
Settings::FullscreenMode::Borderless && + found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { return VK_PRESENT_MODE_MAILBOX_KHR; } if (!Settings::values.use_speed_limit.GetValue()) { @@ -156,8 +157,16 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u3 present_mode = ChooseSwapPresentMode(present_modes); u32 requested_image_count{capabilities.minImageCount + 1}; - if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { - requested_image_count = capabilities.maxImageCount; + // Ensure triple buffering if possible. + if (capabilities.maxImageCount > 0) { + if (requested_image_count > capabilities.maxImageCount) { + requested_image_count = capabilities.maxImageCount; + } else { + requested_image_count = + std::max(requested_image_count, std::min(3U, capabilities.maxImageCount)); + } + } else { + requested_image_count = std::max(requested_image_count, 3U); } VkSwapchainCreateInfoKHR swapchain_ci{ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index caca79d79..305ad8aee 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp @@ -592,7 +592,7 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4 case PixelFormat::A5B5G5R1_UNORM: std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial); break; - case PixelFormat::R4G4_UNORM: + case PixelFormat::G4R4_UNORM: std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed); break; default: @@ -1474,13 +1474,14 @@ bool Image::BlitScaleHelper(bool scale_up) { }; const VkExtent2D extent{ .width = std::max(scaled_width, info.size.width), - .height = std::max(scaled_height, info.size.width), + .height = std::max(scaled_height, info.size.height), }; auto* view_ptr = blit_view.get(); if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { if (!blit_framebuffer) { - blit_framebuffer = std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent); + blit_framebuffer = + std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up); } const auto color_view = blit_view->Handle(Shader::TextureType::Color2D); @@ -1488,7 +1489,8 @@ bool Image::BlitScaleHelper(bool scale_up) { src_region, operation, BLIT_OPERATION); } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { if (!blit_framebuffer) { - blit_framebuffer = std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent); + blit_framebuffer = + std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up); } runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(), blit_view->StencilView(), dst_region, @@ -1756,34 +1758,42 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM .width = key.size.width, .height = key.size.height, }} { - CreateFramebuffer(runtime, color_buffers, depth_buffer); + CreateFramebuffer(runtime, color_buffers, depth_buffer, key.is_rescaled); if (runtime.device.HasDebuggingToolAttached()) { framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str()); } } Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, - ImageView* depth_buffer, VkExtent2D extent) + ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled) : render_area{extent} { std::array<ImageView*, NUM_RT> color_buffers{color_buffer}; - 
CreateFramebuffer(runtime, color_buffers, depth_buffer); + CreateFramebuffer(runtime, color_buffers, depth_buffer, is_rescaled); } Framebuffer::~Framebuffer() = default; void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM_RT> color_buffers, - ImageView* depth_buffer) { + ImageView* depth_buffer, bool is_rescaled) { std::vector<VkImageView> attachments; RenderPassKey renderpass_key{}; s32 num_layers = 1; + const auto& resolution = runtime.resolution; + + u32 width = 0; + u32 height = 0; for (size_t index = 0; index < NUM_RT; ++index) { const ImageView* const color_buffer = color_buffers[index]; if (!color_buffer) { renderpass_key.color_formats[index] = PixelFormat::Invalid; continue; } + width = std::max(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width) + : color_buffer->size.width); + height = std::max(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height) + : color_buffer->size.height); attachments.push_back(color_buffer->RenderTarget()); renderpass_key.color_formats[index] = color_buffer->format; num_layers = std::max(num_layers, color_buffer->range.extent.layers); @@ -1794,6 +1804,10 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, } const size_t num_colors = attachments.size(); if (depth_buffer) { + width = std::max(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width) + : depth_buffer->size.width); + height = std::max(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height) + : depth_buffer->size.height); attachments.push_back(depth_buffer->RenderTarget()); renderpass_key.depth_format = depth_buffer->format; num_layers = std::max(num_layers, depth_buffer->range.extent.layers); @@ -1810,6 +1824,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, renderpass_key.samples = samples; renderpass = runtime.render_pass_cache.Get(renderpass_key); + render_area.width = std::min(render_area.width, width); + render_area.height = std::min(render_area.height, height); num_color_buffers = static_cast<u32>(num_colors); framebuffer = runtime.device.GetLogical().CreateFramebuffer({ diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 69f06ee7b..0b7ac0df1 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h @@ -268,7 +268,7 @@ public: ImageView* depth_buffer, const VideoCommon::RenderTargets& key); explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, - ImageView* depth_buffer, VkExtent2D extent); + ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled); ~Framebuffer(); @@ -279,7 +279,8 @@ public: Framebuffer& operator=(Framebuffer&&) = default; void CreateFramebuffer(TextureCacheRuntime& runtime, - std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer); + std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer, + bool is_rescaled = false); [[nodiscard]] VkFramebuffer Handle() const noexcept { return *framebuffer; diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp index 164e4ee0e..d9482371b 100644 --- a/src/video_core/shader_cache.cpp +++ b/src/video_core/shader_cache.cpp @@ -8,6 +8,7 @@ #include "common/assert.h" #include "shader_recompiler/frontend/maxwell/control_flow.h" #include "shader_recompiler/object_pool.h" +#include "video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" #include 
"video_core/engines/maxwell_3d.h" @@ -33,29 +34,25 @@ void ShaderCache::SyncGuestHost() { RemovePendingShaders(); } -ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_) - : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_}, - rasterizer{rasterizer_} {} +ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {} bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { - auto& dirty{maxwell3d.dirty.flags}; + auto& dirty{maxwell3d->dirty.flags}; if (!dirty[VideoCommon::Dirty::Shaders]) { return last_shaders_valid; } dirty[VideoCommon::Dirty::Shaders] = false; - const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; + const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()}; for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) { - if (!maxwell3d.regs.IsShaderConfigEnabled(index)) { + if (!maxwell3d->regs.IsShaderConfigEnabled(index)) { unique_hashes[index] = 0; continue; } - const auto& shader_config{maxwell3d.regs.shader_config[index]}; - const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; + const auto& shader_config{maxwell3d->regs.pipelines[index]}; + const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)}; const GPUVAddr shader_addr{base_addr + shader_config.offset}; - const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; + const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)}; if (!cpu_shader_addr) { LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); last_shaders_valid = false; @@ -64,7 +61,7 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)}; if (!shader_info) { const u32 start_address{shader_config.offset}; - GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address}; + GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address}; shader_info = MakeShaderInfo(env, *cpu_shader_addr); } shader_infos[index] = shader_info; @@ -75,10 +72,10 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { } const ShaderInfo* ShaderCache::ComputeShader() { - const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; - const auto& qmd{kepler_compute.launch_description}; + const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; + const auto& qmd{kepler_compute->launch_description}; const GPUVAddr shader_addr{program_base + qmd.program_start}; - const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; + const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)}; if (!cpu_shader_addr) { LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); return nullptr; @@ -86,22 +83,22 @@ const ShaderInfo* ShaderCache::ComputeShader() { if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) { return shader; } - ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; + ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; return MakeShaderInfo(env, *cpu_shader_addr); } void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result, const std::array<u64, NUM_PROGRAMS>& 
unique_hashes) { size_t env_index{}; - const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; + const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()}; for (size_t index = 0; index < NUM_PROGRAMS; ++index) { if (unique_hashes[index] == 0) { continue; } - const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; + const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)}; auto& env{result.envs[index]}; - const u32 start_address{maxwell3d.regs.shader_config[index].offset}; - env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address}; + const u32 start_address{maxwell3d->regs.pipelines[index].offset}; + env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address}; env.SetCachedSize(shader_infos[index]->size_bytes); result.env_ptrs[env_index++] = &env; } diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h index f67cea8c4..a4391202d 100644 --- a/src/video_core/shader_cache.h +++ b/src/video_core/shader_cache.h @@ -12,6 +12,7 @@ #include <vector> #include "common/common_types.h" +#include "video_core/control/channel_state_cache.h" #include "video_core/rasterizer_interface.h" #include "video_core/shader_environment.h" @@ -19,6 +20,10 @@ namespace Tegra { class MemoryManager; } +namespace Tegra::Control { +struct ChannelState; +} + namespace VideoCommon { class GenericEnvironment; @@ -28,7 +33,7 @@ struct ShaderInfo { size_t size_bytes{}; }; -class ShaderCache { +class ShaderCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { static constexpr u64 YUZU_PAGEBITS = 14; static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; @@ -71,9 +76,7 @@ protected: } }; - explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_, - Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_); + explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_); /// @brief Update the hashes and information of shader stages /// @param unique_hashes Shader hashes to store into when a stage is enabled @@ -88,10 +91,6 @@ protected: void GetGraphicsEnvironments(GraphicsEnvironments& result, const std::array<u64, NUM_PROGRAMS>& unique_hashes); - Tegra::MemoryManager& gpu_memory; - Tegra::Engines::Maxwell3D& maxwell3d; - Tegra::Engines::KeplerCompute& kepler_compute; - std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{}; bool last_shaders_valid = false; diff --git a/src/video_core/shader_environment.cpp b/src/video_core/shader_environment.cpp index 5f7625947..fbabb3219 100644 --- a/src/video_core/shader_environment.cpp +++ b/src/video_core/shader_environment.cpp @@ -250,34 +250,34 @@ Shader::TextureType GenericEnvironment::ReadTextureTypeImpl(GPUVAddr tic_addr, u GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, - Maxwell::ShaderProgram program, GPUVAddr program_base_, + Maxwell::ShaderType program, GPUVAddr program_base_, u32 start_address_) : GenericEnvironment{gpu_memory_, program_base_, start_address_}, maxwell3d{&maxwell3d_} { gpu_memory->ReadBlock(program_base + start_address, &sph, sizeof(sph)); initial_offset = sizeof(sph); - gp_passthrough_mask = maxwell3d->regs.gp_passthrough_mask; + gp_passthrough_mask = maxwell3d->regs.post_vtg_shader_attrib_skip_mask; switch (program) { - case Maxwell::ShaderProgram::VertexA: + case Maxwell::ShaderType::VertexA: stage = Shader::Stage::VertexA; stage_index 
= 0; break; - case Maxwell::ShaderProgram::VertexB: + case Maxwell::ShaderType::VertexB: stage = Shader::Stage::VertexB; stage_index = 0; break; - case Maxwell::ShaderProgram::TesselationControl: + case Maxwell::ShaderType::TessellationInit: stage = Shader::Stage::TessellationControl; stage_index = 1; break; - case Maxwell::ShaderProgram::TesselationEval: + case Maxwell::ShaderType::Tessellation: stage = Shader::Stage::TessellationEval; stage_index = 2; break; - case Maxwell::ShaderProgram::Geometry: + case Maxwell::ShaderType::Geometry: stage = Shader::Stage::Geometry; stage_index = 3; break; - case Maxwell::ShaderProgram::Fragment: + case Maxwell::ShaderType::Pixel: stage = Shader::Stage::Fragment; stage_index = 4; break; @@ -288,7 +288,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_, const u64 local_size{sph.LocalMemorySize()}; ASSERT(local_size <= std::numeric_limits<u32>::max()); local_memory_size = static_cast<u32>(local_size) + sph.common3.shader_local_memory_crs_size; - texture_bound = maxwell3d->regs.tex_cb_index; + texture_bound = maxwell3d->regs.bindless_texture_const_buffer_slot; } u32 GraphicsEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) { @@ -304,8 +304,9 @@ u32 GraphicsEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) { Shader::TextureType GraphicsEnvironment::ReadTextureType(u32 handle) { const auto& regs{maxwell3d->regs}; - const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; - return ReadTextureTypeImpl(regs.tic.Address(), regs.tic.limit, via_header_index, handle); + const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding}; + return ReadTextureTypeImpl(regs.tex_header.Address(), regs.tex_header.limit, via_header_index, + handle); } ComputeEnvironment::ComputeEnvironment(Tegra::Engines::KeplerCompute& kepler_compute_, diff --git a/src/video_core/shader_environment.h b/src/video_core/shader_environment.h index 5a145f33a..8b3b8e9f5 100644 --- a/src/video_core/shader_environment.h +++ b/src/video_core/shader_environment.h @@ -93,7 +93,7 @@ public: explicit GraphicsEnvironment() = default; explicit GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, - Tegra::Engines::Maxwell3D::Regs::ShaderProgram program, + Tegra::Engines::Maxwell3D::Regs::ShaderType program, GPUVAddr program_base_, u32 start_address_); ~GraphicsEnvironment() override = default; diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index 079d5f028..6bd133d10 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp @@ -73,17 +73,17 @@ bool SurfaceTargetIsArray(SurfaceTarget target) { PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format) { switch (format) { - case Tegra::DepthFormat::S8_UINT_Z24_UNORM: + case Tegra::DepthFormat::Z24_UNORM_S8_UINT: return PixelFormat::S8_UINT_D24_UNORM; - case Tegra::DepthFormat::D24S8_UNORM: + case Tegra::DepthFormat::S8Z24_UNORM: return PixelFormat::D24_UNORM_S8_UINT; - case Tegra::DepthFormat::D32_FLOAT: + case Tegra::DepthFormat::Z32_FLOAT: return PixelFormat::D32_FLOAT; - case Tegra::DepthFormat::D16_UNORM: + case Tegra::DepthFormat::Z16_UNORM: return PixelFormat::D16_UNORM; case Tegra::DepthFormat::S8_UINT: return PixelFormat::S8_UINT; - case Tegra::DepthFormat::D32_FLOAT_S8X24_UINT: + case Tegra::DepthFormat::Z32_FLOAT_X24S8_UINT: return PixelFormat::D32_FLOAT_S8_UINT; default: UNIMPLEMENTED_MSG("Unimplemented format={}", format); @@ -117,9 +117,9 @@ 
PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format) return PixelFormat::R32G32_UINT; case Tegra::RenderTargetFormat::R16G16B16X16_FLOAT: return PixelFormat::R16G16B16X16_FLOAT; - case Tegra::RenderTargetFormat::B8G8R8A8_UNORM: + case Tegra::RenderTargetFormat::A8R8G8B8_UNORM: return PixelFormat::B8G8R8A8_UNORM; - case Tegra::RenderTargetFormat::B8G8R8A8_SRGB: + case Tegra::RenderTargetFormat::A8R8G8B8_SRGB: return PixelFormat::B8G8R8A8_SRGB; case Tegra::RenderTargetFormat::A2B10G10R10_UNORM: return PixelFormat::A2B10G10R10_UNORM; @@ -247,6 +247,8 @@ bool IsPixelFormatASTC(PixelFormat format) { case PixelFormat::ASTC_2D_6X6_UNORM: case PixelFormat::ASTC_2D_6X6_SRGB: case PixelFormat::ASTC_2D_10X6_UNORM: + case PixelFormat::ASTC_2D_10X5_UNORM: + case PixelFormat::ASTC_2D_10X5_SRGB: case PixelFormat::ASTC_2D_10X10_UNORM: case PixelFormat::ASTC_2D_10X10_SRGB: case PixelFormat::ASTC_2D_12X12_UNORM: @@ -276,6 +278,7 @@ bool IsPixelFormatSRGB(PixelFormat format) { case PixelFormat::ASTC_2D_5X5_SRGB: case PixelFormat::ASTC_2D_10X8_SRGB: case PixelFormat::ASTC_2D_6X6_SRGB: + case PixelFormat::ASTC_2D_10X5_SRGB: case PixelFormat::ASTC_2D_10X10_SRGB: case PixelFormat::ASTC_2D_12X12_SRGB: case PixelFormat::ASTC_2D_8X6_SRGB: diff --git a/src/video_core/surface.h b/src/video_core/surface.h index 16273f185..57ca7f597 100644 --- a/src/video_core/surface.h +++ b/src/video_core/surface.h @@ -82,7 +82,7 @@ enum class PixelFormat { BC3_SRGB, BC7_SRGB, A4B4G4R4_UNORM, - R4G4_UNORM, + G4R4_UNORM, ASTC_2D_4X4_SRGB, ASTC_2D_8X8_SRGB, ASTC_2D_8X5_SRGB, @@ -94,6 +94,8 @@ enum class PixelFormat { ASTC_2D_6X6_UNORM, ASTC_2D_6X6_SRGB, ASTC_2D_10X6_UNORM, + ASTC_2D_10X5_UNORM, + ASTC_2D_10X5_SRGB, ASTC_2D_10X10_UNORM, ASTC_2D_10X10_SRGB, ASTC_2D_12X12_UNORM, @@ -216,7 +218,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{ 4, // BC3_SRGB 4, // BC7_SRGB 1, // A4B4G4R4_UNORM - 1, // R4G4_UNORM + 1, // G4R4_UNORM 4, // ASTC_2D_4X4_SRGB 8, // ASTC_2D_8X8_SRGB 8, // ASTC_2D_8X5_SRGB @@ -228,6 +230,8 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{ 6, // ASTC_2D_6X6_UNORM 6, // ASTC_2D_6X6_SRGB 10, // ASTC_2D_10X6_UNORM + 10, // ASTC_2D_10X5_UNORM + 10, // ASTC_2D_10X5_SRGB 10, // ASTC_2D_10X10_UNORM 10, // ASTC_2D_10X10_SRGB 12, // ASTC_2D_12X12_UNORM @@ -319,7 +323,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{ 4, // BC3_SRGB 4, // BC7_SRGB 1, // A4B4G4R4_UNORM - 1, // R4G4_UNORM + 1, // G4R4_UNORM 4, // ASTC_2D_4X4_SRGB 8, // ASTC_2D_8X8_SRGB 5, // ASTC_2D_8X5_SRGB @@ -331,6 +335,8 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{ 6, // ASTC_2D_6X6_UNORM 6, // ASTC_2D_6X6_SRGB 6, // ASTC_2D_10X6_UNORM + 5, // ASTC_2D_10X5_UNORM + 5, // ASTC_2D_10X5_SRGB 10, // ASTC_2D_10X10_UNORM 10, // ASTC_2D_10X10_SRGB 12, // ASTC_2D_12X12_UNORM @@ -422,7 +428,7 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{ 128, // BC3_SRGB 128, // BC7_UNORM 16, // A4B4G4R4_UNORM - 8, // R4G4_UNORM + 8, // G4R4_UNORM 128, // ASTC_2D_4X4_SRGB 128, // ASTC_2D_8X8_SRGB 128, // ASTC_2D_8X5_SRGB @@ -434,6 +440,8 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{ 128, // ASTC_2D_6X6_UNORM 128, // ASTC_2D_6X6_SRGB 128, // ASTC_2D_10X6_UNORM + 128, // ASTC_2D_10X5_UNORM + 128, // ASTC_2D_10X5_SRGB 128, // ASTC_2D_10X10_UNORM 128, // ASTC_2D_10X10_SRGB 128, // ASTC_2D_12X12_UNORM diff --git a/src/video_core/texture_cache/descriptor_table.h b/src/video_core/texture_cache/descriptor_table.h index b18e3838f..ee4240288 
100644 --- a/src/video_core/texture_cache/descriptor_table.h +++ b/src/video_core/texture_cache/descriptor_table.h @@ -18,7 +18,7 @@ class DescriptorTable { public: explicit DescriptorTable(Tegra::MemoryManager& gpu_memory_) : gpu_memory{gpu_memory_} {} - [[nodiscard]] bool Synchornize(GPUVAddr gpu_addr, u32 limit) { + [[nodiscard]] bool Synchronize(GPUVAddr gpu_addr, u32 limit) { [[likely]] if (current_gpu_addr == gpu_addr && current_limit == limit) { return false; } diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp index 1412aa076..08aa8ca33 100644 --- a/src/video_core/texture_cache/format_lookup_table.cpp +++ b/src/video_core/texture_cache/format_lookup_table.cpp @@ -63,7 +63,7 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red, case Hash(TextureFormat::A4B4G4R4, UNORM): return PixelFormat::A4B4G4R4_UNORM; case Hash(TextureFormat::G4R4, UNORM): - return PixelFormat::R4G4_UNORM; + return PixelFormat::G4R4_UNORM; case Hash(TextureFormat::A5B5G5R1, UNORM): return PixelFormat::A5B5G5R1_UNORM; case Hash(TextureFormat::R8, UNORM): @@ -150,6 +150,8 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red, return PixelFormat::D24_UNORM_S8_UINT; case Hash(TextureFormat::D32S8, FLOAT, UINT, UNORM, UNORM, LINEAR): return PixelFormat::D32_FLOAT_S8_UINT; + case Hash(TextureFormat::R32_B24G8, FLOAT, UINT, UNORM, UNORM, LINEAR): + return PixelFormat::D32_FLOAT_S8_UINT; case Hash(TextureFormat::BC1_RGBA, UNORM, LINEAR): return PixelFormat::BC1_RGBA_UNORM; case Hash(TextureFormat::BC1_RGBA, UNORM, SRGB): @@ -208,6 +210,10 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red, return PixelFormat::ASTC_2D_6X6_SRGB; case Hash(TextureFormat::ASTC_2D_10X6, UNORM, LINEAR): return PixelFormat::ASTC_2D_10X6_UNORM; + case Hash(TextureFormat::ASTC_2D_10X5, UNORM, LINEAR): + return PixelFormat::ASTC_2D_10X5_UNORM; + case Hash(TextureFormat::ASTC_2D_10X5, UNORM, SRGB): + return PixelFormat::ASTC_2D_10X5_SRGB; case Hash(TextureFormat::ASTC_2D_10X10, UNORM, LINEAR): return PixelFormat::ASTC_2D_10X10_UNORM; case Hash(TextureFormat::ASTC_2D_10X10, UNORM, SRGB): diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h index 95a572604..acc854715 100644 --- a/src/video_core/texture_cache/formatter.h +++ b/src/video_core/texture_cache/formatter.h @@ -153,8 +153,8 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str return "BC7_SRGB"; case PixelFormat::A4B4G4R4_UNORM: return "A4B4G4R4_UNORM"; - case PixelFormat::R4G4_UNORM: - return "R4G4_UNORM"; + case PixelFormat::G4R4_UNORM: + return "G4R4_UNORM"; case PixelFormat::ASTC_2D_4X4_SRGB: return "ASTC_2D_4X4_SRGB"; case PixelFormat::ASTC_2D_8X8_SRGB: @@ -177,6 +177,10 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str return "ASTC_2D_6X6_SRGB"; case PixelFormat::ASTC_2D_10X6_UNORM: return "ASTC_2D_10X6_UNORM"; + case PixelFormat::ASTC_2D_10X5_UNORM: + return "ASTC_2D_10X5_UNORM"; + case PixelFormat::ASTC_2D_10X5_SRGB: + return "ASTC_2D_10X5_SRGB"; case PixelFormat::ASTC_2D_10X10_UNORM: return "ASTC_2D_10X10_UNORM"; case PixelFormat::ASTC_2D_10X10_SRGB: diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp index f61e09ac7..91512022f 100644 --- a/src/video_core/texture_cache/image_base.cpp +++ b/src/video_core/texture_cache/image_base.cpp @@ -7,6 +7,7 @@ #include <vector> 
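// Illustrative sketch, not part of the patch: the Synchornize -> Synchronize rename in the
// descriptor_table.h hunk above refers to the caching pattern sketched here. The guest
// TIC/TSC table is re-read only when its GPU address or limit register actually changed; a
// true return tells the caller to resize and re-poison its cached SamplerId/ImageViewId
// vectors. The body below is a simplified assumption, not the exact upstream implementation.
#include <cstddef>
#include <cstdint>
#include <vector>
using GPUVAddr = std::uint64_t; // as in common/common_types.h
using u32 = std::uint32_t;
template <typename Descriptor>
class DescriptorTableSketch {
public:
    [[nodiscard]] bool Synchronize(GPUVAddr gpu_addr, u32 limit) {
        [[likely]] if (current_gpu_addr == gpu_addr && current_limit == limit) {
            return false; // Same table as the previous draw/dispatch; caches stay valid.
        }
        current_gpu_addr = gpu_addr;
        current_limit = limit;
        descriptors.resize(static_cast<std::size_t>(limit) + 1);
        return true; // Caller must invalidate IDs cached against the old table.
    }
private:
    GPUVAddr current_gpu_addr{};
    u32 current_limit{};
    std::vector<Descriptor> descriptors;
};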
#include "common/common_types.h" +#include "common/div_ceil.h" #include "video_core/surface.h" #include "video_core/texture_cache/formatter.h" #include "video_core/texture_cache/image_base.h" @@ -182,10 +183,6 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i }; const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1; const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1; - if (is_lhs_compressed && is_rhs_compressed) { - LOG_ERROR(HW_GPU, "Compressed to compressed image aliasing is not implemented"); - return; - } const s32 lhs_mips = lhs.info.resources.levels; const s32 rhs_mips = rhs.info.resources.levels; const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips); @@ -199,12 +196,12 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level); Extent3D rhs_size = MipSize(rhs.info.size, mip_level); if (is_lhs_compressed) { - lhs_size.width /= lhs_block.width; - lhs_size.height /= lhs_block.height; + lhs_size.width = Common::DivCeil(lhs_size.width, lhs_block.width); + lhs_size.height = Common::DivCeil(lhs_size.height, lhs_block.height); } if (is_rhs_compressed) { - rhs_size.width /= rhs_block.width; - rhs_size.height /= rhs_block.height; + rhs_size.width = Common::DivCeil(rhs_size.width, rhs_block.width); + rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height); } const Extent3D copy_size{ .width = std::min(lhs_size.width, rhs_size.width), diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h index 1f85ec9da..620565684 100644 --- a/src/video_core/texture_cache/image_base.h +++ b/src/video_core/texture_cache/image_base.h @@ -88,6 +88,9 @@ struct ImageBase { u32 scale_rating = 0; u64 scale_tick = 0; bool has_scaled = false; + + size_t channel = 0; + ImageFlagBits flags = ImageFlagBits::CpuModified; GPUVAddr gpu_addr = 0; diff --git a/src/video_core/texture_cache/image_info.cpp b/src/video_core/texture_cache/image_info.cpp index 6c073ee57..852ec2519 100644 --- a/src/video_core/texture_cache/image_info.cpp +++ b/src/video_core/texture_cache/image_info.cpp @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include <fmt/format.h> + #include "common/assert.h" #include "video_core/surface.h" #include "video_core/texture_cache/format_lookup_table.h" @@ -12,6 +14,7 @@ namespace VideoCommon { +using Tegra::Engines::Maxwell3D; using Tegra::Texture::TextureType; using Tegra::Texture::TICEntry; using VideoCore::Surface::PixelFormat; @@ -107,12 +110,13 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept { } } -ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs, size_t index) noexcept { +ImageInfo::ImageInfo(const Maxwell3D::Regs& regs, size_t index) noexcept { const auto& rt = regs.rt[index]; format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(rt.format); rescaleable = false; if (rt.tile_mode.is_pitch_linear) { - ASSERT(rt.tile_mode.is_3d == 0); + ASSERT(rt.tile_mode.dim_control == + Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesArray); type = ImageType::Linear; pitch = rt.width; size = Extent3D{ @@ -124,15 +128,16 @@ ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs, size_t index) } size.width = rt.width; size.height = rt.height; - layer_stride = rt.layer_stride * 4; + layer_stride = rt.array_pitch * 4; maybe_unaligned_layer_stride = layer_stride; 
- num_samples = NumSamples(regs.multisample_mode); + num_samples = NumSamples(regs.anti_alias_samples_mode); block = Extent3D{ .width = rt.tile_mode.block_width, .height = rt.tile_mode.block_height, .depth = rt.tile_mode.block_depth, }; - if (rt.tile_mode.is_3d) { + if (rt.tile_mode.dim_control == + Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesDepth) { type = ImageType::e3D; size.depth = rt.depth; } else { @@ -146,31 +151,37 @@ ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs, size_t index) ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs) noexcept { format = VideoCore::Surface::PixelFormatFromDepthFormat(regs.zeta.format); - size.width = regs.zeta_width; - size.height = regs.zeta_height; + size.width = regs.zeta_size.width; + size.height = regs.zeta_size.height; rescaleable = false; resources.levels = 1; - layer_stride = regs.zeta.layer_stride * 4; + layer_stride = regs.zeta.array_pitch * 4; maybe_unaligned_layer_stride = layer_stride; - num_samples = NumSamples(regs.multisample_mode); + num_samples = NumSamples(regs.anti_alias_samples_mode); block = Extent3D{ .width = regs.zeta.tile_mode.block_width, .height = regs.zeta.tile_mode.block_height, .depth = regs.zeta.tile_mode.block_depth, }; if (regs.zeta.tile_mode.is_pitch_linear) { - ASSERT(regs.zeta.tile_mode.is_3d == 0); + ASSERT(regs.zeta.tile_mode.dim_control == + Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesArray); type = ImageType::Linear; pitch = size.width * BytesPerBlock(format); - } else if (regs.zeta.tile_mode.is_3d) { + } else if (regs.zeta.tile_mode.dim_control == + Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesDepth) { ASSERT(regs.zeta.tile_mode.is_pitch_linear == 0); + ASSERT(regs.zeta_size.dim_control == + Maxwell3D::Regs::ZetaSize::DimensionControl::ArraySizeOne); type = ImageType::e3D; - size.depth = regs.zeta_depth; + size.depth = regs.zeta_size.depth; } else { + ASSERT(regs.zeta_size.dim_control == + Maxwell3D::Regs::ZetaSize::DimensionControl::DepthDefinesArray); rescaleable = block.depth == 0; downscaleable = size.height > 512; type = ImageType::e2D; - resources.layers = regs.zeta_depth; + resources.layers = regs.zeta_size.depth; } } diff --git a/src/video_core/texture_cache/render_targets.h b/src/video_core/texture_cache/render_targets.h index da8ffe9ec..1efbd6507 100644 --- a/src/video_core/texture_cache/render_targets.h +++ b/src/video_core/texture_cache/render_targets.h @@ -26,6 +26,7 @@ struct RenderTargets { ImageViewId depth_buffer_id{}; std::array<u8, NUM_RT> draw_buffers{}; Extent2D size{}; + bool is_rescaled{}; }; } // namespace VideoCommon diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp new file mode 100644 index 000000000..8a9a32f44 --- /dev/null +++ b/src/video_core/texture_cache/texture_cache.cpp @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "video_core/control/channel_state_cache.inc" +#include "video_core/texture_cache/texture_cache_base.h" + +namespace VideoCommon { + +TextureCacheChannelInfo::TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept + : ChannelInfo(state), graphics_image_table{gpu_memory}, graphics_sampler_table{gpu_memory}, + compute_image_table{gpu_memory}, compute_sampler_table{gpu_memory} {} + +template class VideoCommon::ChannelSetupCaches<VideoCommon::TextureCacheChannelInfo>; + +} // namespace VideoCommon diff --git 
a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 1dbe01bc0..8ef75fe73 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -1,5 +1,5 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once @@ -7,6 +7,7 @@ #include "common/alignment.h" #include "common/settings.h" +#include "video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" #include "video_core/texture_cache/image_view_base.h" @@ -29,12 +30,8 @@ using VideoCore::Surface::SurfaceType; using namespace Common::Literals; template <class P> -TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_, - Tegra::Engines::Maxwell3D& maxwell3d_, - Tegra::Engines::KeplerCompute& kepler_compute_, - Tegra::MemoryManager& gpu_memory_) - : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, - kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_} { +TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_) + : runtime{runtime_}, rasterizer{rasterizer_} { // Configure null sampler TSCEntry sampler_descriptor{}; sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear); @@ -93,7 +90,7 @@ void TextureCache<P>::RunGarbageCollector() { const auto copies = FullDownloadCopies(image.info); image.DownloadMemory(map, copies); runtime.Finish(); - SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); + SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); } if (True(image.flags & ImageFlagBits::Tracked)) { UntrackImage(image, image_id); @@ -152,22 +149,24 @@ void TextureCache<P>::MarkModification(ImageId id) noexcept { template <class P> template <bool has_blacklists> void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) { - FillImageViews<has_blacklists>(graphics_image_table, graphics_image_view_ids, views); + FillImageViews<has_blacklists>(channel_state->graphics_image_table, + channel_state->graphics_image_view_ids, views); } template <class P> void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) { - FillImageViews<true>(compute_image_table, compute_image_view_ids, views); + FillImageViews<true>(channel_state->compute_image_table, channel_state->compute_image_view_ids, + views); } template <class P> typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) { - if (index > graphics_sampler_table.Limit()) { + if (index > channel_state->graphics_sampler_table.Limit()) { LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); return &slot_samplers[NULL_SAMPLER_ID]; } - const auto [descriptor, is_new] = graphics_sampler_table.Read(index); - SamplerId& id = graphics_sampler_ids[index]; + const auto [descriptor, is_new] = channel_state->graphics_sampler_table.Read(index); + SamplerId& id = channel_state->graphics_sampler_ids[index]; if (is_new) { id = FindSampler(descriptor); } @@ -176,12 +175,12 @@ typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) { template <class P> typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) { - if (index > compute_sampler_table.Limit()) { + if (index > channel_state->compute_sampler_table.Limit()) { LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); 
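// Illustrative sketch, not part of the patch: the channel_state-> indirection used by the
// sampler and image-table lookups above. Per-channel descriptor state now lives in a storage
// pool owned by the ChannelSetupCaches base, so switching channels is a pointer swap instead
// of a cache flush. Names loosely mirror the diff (channel_storage, channel_map,
// channel_state); the container choices and the BindToChannel signature are assumptions.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <unordered_map>
#include <vector>
struct ChannelInfoSketch {
    std::vector<std::uint32_t> graphics_sampler_ids;    // indexed by TSC handle
    std::vector<std::uint32_t> graphics_image_view_ids; // indexed by TIC handle
};
class ChannelSetupCachesSketch {
public:
    void BindToChannel(std::int32_t bind_id) {
        // O(1) switch; std::deque keeps element addresses stable as channels register.
        channel_state = &channel_storage[channel_map.at(bind_id)];
    }
protected:
    ChannelInfoSketch* channel_state = nullptr;
    std::deque<ChannelInfoSketch> channel_storage;
    std::unordered_map<std::int32_t, std::size_t> channel_map;
};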
return &slot_samplers[NULL_SAMPLER_ID]; } - const auto [descriptor, is_new] = compute_sampler_table.Read(index); - SamplerId& id = compute_sampler_ids[index]; + const auto [descriptor, is_new] = channel_state->compute_sampler_table.Read(index); + SamplerId& id = channel_state->compute_sampler_ids[index]; if (is_new) { id = FindSampler(descriptor); } @@ -190,35 +189,38 @@ typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) { template <class P> void TextureCache<P>::SynchronizeGraphicsDescriptors() { - using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex; - const bool linked_tsc = maxwell3d.regs.sampler_index == SamplerIndex::ViaHeaderIndex; - const u32 tic_limit = maxwell3d.regs.tic.limit; - const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d.regs.tsc.limit; - if (graphics_sampler_table.Synchornize(maxwell3d.regs.tsc.Address(), tsc_limit)) { - graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); + using SamplerBinding = Tegra::Engines::Maxwell3D::Regs::SamplerBinding; + const bool linked_tsc = maxwell3d->regs.sampler_binding == SamplerBinding::ViaHeaderBinding; + const u32 tic_limit = maxwell3d->regs.tex_header.limit; + const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tex_sampler.limit; + if (channel_state->graphics_sampler_table.Synchronize(maxwell3d->regs.tex_sampler.Address(), + tsc_limit)) { + channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); } - if (graphics_image_table.Synchornize(maxwell3d.regs.tic.Address(), tic_limit)) { - graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); + if (channel_state->graphics_image_table.Synchronize(maxwell3d->regs.tex_header.Address(), + tic_limit)) { + channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); } } template <class P> void TextureCache<P>::SynchronizeComputeDescriptors() { - const bool linked_tsc = kepler_compute.launch_description.linked_tsc; - const u32 tic_limit = kepler_compute.regs.tic.limit; - const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute.regs.tsc.limit; - const GPUVAddr tsc_gpu_addr = kepler_compute.regs.tsc.Address(); - if (compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { - compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); + const bool linked_tsc = kepler_compute->launch_description.linked_tsc; + const u32 tic_limit = kepler_compute->regs.tic.limit; + const u32 tsc_limit = linked_tsc ? 
tic_limit : kepler_compute->regs.tsc.limit; + const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address(); + if (channel_state->compute_sampler_table.Synchronize(tsc_gpu_addr, tsc_limit)) { + channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); } - if (compute_image_table.Synchornize(kepler_compute.regs.tic.Address(), tic_limit)) { - compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); + if (channel_state->compute_image_table.Synchronize(kepler_compute->regs.tic.Address(), + tic_limit)) { + channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); } } template <class P> bool TextureCache<P>::RescaleRenderTargets(bool is_clear) { - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; u32 scale_rating = 0; bool rescaled = false; std::array<ImageId, NUM_RT> tmp_color_images{}; @@ -315,7 +317,7 @@ bool TextureCache<P>::RescaleRenderTargets(bool is_clear) { template <class P> void TextureCache<P>::UpdateRenderTargets(bool is_clear) { using namespace VideoCommon::Dirty; - auto& flags = maxwell3d.dirty.flags; + auto& flags = maxwell3d->dirty.flags; if (!flags[Dirty::RenderTargets]) { for (size_t index = 0; index < NUM_RT; ++index) { ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index]; @@ -342,7 +344,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) { PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id)); for (size_t index = 0; index < NUM_RT; ++index) { - render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d.regs.rt_control.Map(index)); + render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d->regs.rt_control.Map(index)); } u32 up_scale = 1; u32 down_shift = 0; @@ -351,9 +353,10 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) { down_shift = Settings::values.resolution_info.down_shift; } render_targets.size = Extent2D{ - (maxwell3d.regs.render_area.width * up_scale) >> down_shift, - (maxwell3d.regs.render_area.height * up_scale) >> down_shift, + (maxwell3d->regs.surface_clip.width * up_scale) >> down_shift, + (maxwell3d->regs.surface_clip.height * up_scale) >> down_shift, }; + render_targets.is_rescaled = is_rescaling; flags[Dirty::DepthBiasGlobal] = true; } @@ -439,7 +442,7 @@ void TextureCache<P>::WriteMemory(VAddr cpu_addr, size_t size) { template <class P> void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) { std::vector<ImageId> images; - ForEachImageInRegion(cpu_addr, size, [this, &images](ImageId image_id, ImageBase& image) { + ForEachImageInRegion(cpu_addr, size, [&images](ImageId image_id, ImageBase& image) { if (!image.IsSafeDownload()) { return; } @@ -458,7 +461,7 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) { const auto copies = FullDownloadCopies(image.info); image.DownloadMemory(map, copies); runtime.Finish(); - SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); + SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); } } @@ -477,12 +480,20 @@ void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) { } template <class P> -void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) { +void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size) { std::vector<ImageId> deleted_images; - ForEachImageInRegionGPU(gpu_addr, size, + ForEachImageInRegionGPU(as_id, gpu_addr, size, [&](ImageId id, Image&) { deleted_images.push_back(id); }); for (const ImageId id : deleted_images) { Image& image = slot_images[id]; + if 
(True(image.flags & ImageFlagBits::CpuModified)) { + continue; + } + image.flags |= ImageFlagBits::CpuModified; + if (True(image.flags & ImageFlagBits::Tracked)) { + UntrackImage(image, id); + } + /* if (True(image.flags & ImageFlagBits::Remapped)) { continue; } @@ -490,6 +501,7 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) { if (True(image.flags & ImageFlagBits::Tracked)) { UntrackImage(image, id); } + */ } } @@ -655,7 +667,7 @@ void TextureCache<P>::PopAsyncFlushes() { for (const ImageId image_id : download_ids) { const ImageBase& image = slot_images[image_id]; const auto copies = FullDownloadCopies(image.info); - SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, download_span); + SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span); download_map.offset += image.unswizzled_size_bytes; download_span = download_span.subspan(image.unswizzled_size_bytes); } @@ -714,26 +726,26 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging) const GPUVAddr gpu_addr = image.gpu_addr; if (True(image.flags & ImageFlagBits::AcceleratedUpload)) { - gpu_memory.ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes()); + gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes()); const auto uploads = FullUploadSwizzles(image.info); runtime.AccelerateImageUpload(image, staging, uploads); } else if (True(image.flags & ImageFlagBits::Converted)) { std::vector<u8> unswizzled_data(image.unswizzled_size_bytes); - auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, unswizzled_data); + auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data); ConvertImage(unswizzled_data, image.info, mapped_span, copies); image.UploadMemory(staging, copies); } else { - const auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, mapped_span); + const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span); image.UploadMemory(staging, copies); } } template <class P> ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) { - if (!IsValidEntry(gpu_memory, config)) { + if (!IsValidEntry(*gpu_memory, config)) { return NULL_IMAGE_VIEW_ID; } - const auto [pair, is_new] = image_views.try_emplace(config); + const auto [pair, is_new] = channel_state->image_views.try_emplace(config); ImageViewId& image_view_id = pair->second; if (is_new) { image_view_id = CreateImageView(config); } @@ -777,9 +789,9 @@ ImageId TextureCache<P>::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a template <class P> ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr, RelaxedOptions options) { - std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr) { - cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); + cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); if (!cpu_addr) { return ImageId{}; } @@ -860,7 +872,7 @@ void TextureCache<P>::InvalidateScale(Image& image) { image.scale_tick = frame_tick + 1; } const std::span<const ImageViewId> image_view_ids = image.image_view_ids; - auto& dirty = maxwell3d.dirty.flags; + auto& dirty = maxwell3d->dirty.flags; dirty[Dirty::RenderTargets] = true; dirty[Dirty::ZetaBuffer] = true; for (size_t rt = 0; rt < NUM_RT; ++rt) { @@ -880,12 +892,15 @@ } image.image_view_ids.clear(); 
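// Illustrative sketch, not part of the patch: the per-address-space page tables consumed by
// ForEachImageInRegionGPU below (getStorageID, gpu_page_table_storage) and wired up further
// down in CreateChannel/OnGPUASRegister. Channels sharing one GPU address space share one
// page map, so each registered address space appends a map to a std::deque (element
// addresses stay stable on growth) and every channel keeps a non-owning pointer into that
// pool. Common::IdentityHash, the map hasher in the diff, simply forwards the page index.
// Types and member names below are simplified assumptions.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <unordered_map>
#include <vector>
using PageMapSketch = std::unordered_map<std::uint64_t, std::vector<std::uint32_t>>;
struct ChannelSketch {
    PageMapSketch* gpu_page_table = nullptr; // non-owning; shared across one address space
};
class TextureCacheSketch {
public:
    std::size_t RegisterAddressSpace() { // cf. OnGPUASRegister
        gpu_page_table_storage.emplace_back();
        return gpu_page_table_storage.size() - 1;
    }
    void CreateChannel(ChannelSketch& channel, std::size_t as_storage_id) {
        channel.gpu_page_table = &gpu_page_table_storage[as_storage_id];
    }
private:
    std::deque<PageMapSketch> gpu_page_table_storage; // one map per GPU address space
};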
image.image_view_infos.clear(); - if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(compute_image_view_ids, CORRUPT_ID); + for (size_t c : active_channel_ids) { + auto& channel_info = channel_storage[c]; + if constexpr (ENABLE_VALIDATION) { + std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID); + std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID); + } + channel_info.graphics_image_table.Invalidate(); + channel_info.compute_image_table.Invalidate(); } - graphics_image_table.Invalidate(); - compute_image_table.Invalidate(); has_deleted_images = true; } @@ -929,10 +944,10 @@ bool TextureCache<P>::ScaleDown(Image& image) { template <class P> ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr, RelaxedOptions options) { - std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr) { const auto size = CalculateGuestSizeInBytes(info); - cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, size); + cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size); if (!cpu_addr) { const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space; virtual_invalid_space += Common::AlignUp(size, 32); @@ -1050,7 +1065,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr); Image& new_image = slot_images[new_image_id]; - if (!gpu_memory.IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) { + if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) { new_image.flags |= ImageFlagBits::Sparse; } @@ -1192,7 +1207,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) { if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) { return NULL_SAMPLER_ID; } - const auto [pair, is_new] = samplers.try_emplace(config); + const auto [pair, is_new] = channel_state->samplers.try_emplace(config); if (is_new) { pair->second = slot_samplers.insert(runtime, config); } @@ -1201,7 +1216,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) { template <class P> ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (index >= regs.rt_control.count) { return ImageViewId{}; } @@ -1219,7 +1234,7 @@ ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) { template <class P> ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) { - const auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d->regs; if (!regs.zeta_enable) { return ImageViewId{}; } @@ -1316,11 +1331,17 @@ void TextureCache<P>::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& f template <class P> template <typename Func> -void TextureCache<P>::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func) { +void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, + Func&& func) { using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type; static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>; boost::container::small_vector<ImageId, 8> images; - ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) { + auto storage_id = getStorageID(as_id); + if (!storage_id) { + return; + } + auto& gpu_page_table = gpu_page_table_storage[*storage_id]; + 
ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) { const auto it = gpu_page_table.find(page); if (it == gpu_page_table.end()) { if constexpr (BOOL_BREAK) { @@ -1403,9 +1424,9 @@ template <typename Func> void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) { using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type; static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>; - const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); + const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); for (const auto& [gpu_addr, size] : segments) { - std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); ASSERT(cpu_addr); if constexpr (RETURNS_BOOL) { if (func(gpu_addr, *cpu_addr, size)) { @@ -1448,8 +1469,9 @@ void TextureCache<P>::RegisterImage(ImageId image_id) { } image.lru_index = lru_cache.Insert(image_id, frame_tick); - ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, - [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); }); + ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) { + (*channel_state->gpu_page_table)[page].push_back(image_id); + }); if (False(image.flags & ImageFlagBits::Sparse)) { auto map_id = slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id); @@ -1480,9 +1502,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) { image.flags &= ~ImageFlagBits::BadOverlap; lru_cache.Free(image.lru_index); const auto& clear_page_table = - [this, image_id]( - u64 page, - std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) { + [image_id](u64 page, + std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>& + selected_page_table) { const auto page_it = selected_page_table.find(page); if (page_it == selected_page_table.end()) { ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); @@ -1497,8 +1519,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) { } image_ids.erase(vector_it); }; - ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, - [this, &clear_page_table](u64 page) { clear_page_table(page, gpu_page_table); }); + ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) { + clear_page_table(page, (*channel_state->gpu_page_table)); + }); if (False(image.flags & ImageFlagBits::Sparse)) { const auto map_id = image.map_view_id; ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) { @@ -1631,7 +1654,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) { ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered"); // Mark render targets as dirty - auto& dirty = maxwell3d.dirty.flags; + auto& dirty = maxwell3d->dirty.flags; dirty[Dirty::RenderTargets] = true; dirty[Dirty::ZetaBuffer] = true; for (size_t rt = 0; rt < NUM_RT; ++rt) { @@ -1681,24 +1704,30 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) { if (alloc_images.empty()) { image_allocs_table.erase(alloc_it); } - if constexpr (ENABLE_VALIDATION) { - std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); - std::ranges::fill(compute_image_view_ids, CORRUPT_ID); + for (size_t c : active_channel_ids) { + auto& channel_info = channel_storage[c]; + if 
constexpr (ENABLE_VALIDATION) {
+            std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
+            std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
+        }
+        channel_info.graphics_image_table.Invalidate();
+        channel_info.compute_image_table.Invalidate();
     }
-    graphics_image_table.Invalidate();
-    compute_image_table.Invalidate();
     has_deleted_images = true;
 }
 
 template <class P>
 void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) {
-    auto it = image_views.begin();
-    while (it != image_views.end()) {
-        const auto found = std::ranges::find(removed_views, it->second);
-        if (found != removed_views.end()) {
-            it = image_views.erase(it);
-        } else {
-            ++it;
+    for (size_t c : active_channel_ids) {
+        auto& channel_info = channel_storage[c];
+        auto it = channel_info.image_views.begin();
+        while (it != channel_info.image_views.end()) {
+            const auto found = std::ranges::find(removed_views, it->second);
+            if (found != removed_views.end()) {
+                it = channel_info.image_views.erase(it);
+            } else {
+                ++it;
+            }
         }
     }
 }
@@ -1729,6 +1758,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
     boost::container::small_vector<const AliasedImage*, 1> aliased_images;
     Image& image = slot_images[image_id];
     bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled);
+    bool any_modified = True(image.flags & ImageFlagBits::GpuModified);
     u64 most_recent_tick = image.modification_tick;
     for (const AliasedImage& aliased : image.aliased_images) {
         ImageBase& aliased_image = slot_images[aliased.id];
@@ -1736,9 +1766,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
             most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
             aliased_images.push_back(&aliased);
             any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
-            if (True(aliased_image.flags & ImageFlagBits::GpuModified)) {
-                image.flags |= ImageFlagBits::GpuModified;
-            }
+            any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified);
         }
     }
     if (aliased_images.empty()) {
@@ -1753,6 +1781,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
         }
     }
     image.modification_tick = most_recent_tick;
+    if (any_modified) {
+        image.flags |= ImageFlagBits::GpuModified;
+    }
     std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) {
         const ImageBase& lhs_image = slot_images[lhs->id];
         const ImageBase& rhs_image = slot_images[rhs->id];
@@ -1931,6 +1962,7 @@ std::pair<FramebufferId, ImageViewId> TextureCache<P>::RenderTargetFromImage(
         .color_buffer_ids = {color_view_id},
         .depth_buffer_id = depth_view_id,
         .size = {extent.width >> samples_x, extent.height >> samples_y},
+        .is_rescaled = is_rescaled,
     });
     return {framebuffer_id, view_id};
 }
@@ -1943,13 +1975,13 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
     const ImageViewBase& image_view = slot_image_views[id];
     const ImageBase& image = slot_images[image_view.image_id];
     const Extent3D size = image_view.size;
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     const auto& scissor = regs.scissor_test[0];
     if (image.info.resources.levels > 1 || image.info.resources.layers > 1) {
         // Images with multiple resources can't be cleared in a single call
         return false;
     }
-    if (regs.clear_flags.scissor == 0) {
+    if (regs.clear_control.use_scissor == 0) {
         // If scissor testing is disabled, the clear is always full
         return true;
     }
@@ -1958,4 +1990,19 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
            scissor.max_y >= size.height;
 }
 
+template <class P>
+void TextureCache<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
+    VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo>::CreateChannel(channel);
+    const auto it = channel_map.find(channel.bind_id);
+    auto* this_state = &channel_storage[it->second];
+    const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()];
+    this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id];
+}
+
+/// Bind a channel for execution.
+template <class P>
+void TextureCache<P>::OnGPUASRegister([[maybe_unused]] size_t map_id) {
+    gpu_page_table_storage.emplace_back();
+}
+
 } // namespace VideoCommon
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 7e6c6cef2..2fa8445eb 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -1,8 +1,10 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
+#include <deque>
+#include <limits>
 #include <mutex>
 #include <span>
 #include <type_traits>
@@ -11,9 +13,11 @@
 #include <queue>
 
 #include "common/common_types.h"
+#include "common/hash.h"
 #include "common/literals.h"
 #include "common/lru_cache.h"
 #include "video_core/compatible_formats.h"
+#include "video_core/control/channel_state_cache.h"
 #include "video_core/delayed_destruction_ring.h"
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/surface.h"
@@ -26,6 +30,10 @@
 #include "video_core/texture_cache/types.h"
 #include "video_core/textures/texture.h"
 
+namespace Tegra::Control {
+struct ChannelState;
+}
+
 namespace VideoCommon {
 
 using Tegra::Texture::SwizzleSource;
@@ -44,8 +52,35 @@ struct ImageViewInOut {
     ImageViewId id{};
 };
 
+using TextureCacheGPUMap = std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>;
+
+class TextureCacheChannelInfo : public ChannelInfo {
+public:
+    TextureCacheChannelInfo() = delete;
+    TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept;
+    TextureCacheChannelInfo(const TextureCacheChannelInfo& state) = delete;
+    TextureCacheChannelInfo& operator=(const TextureCacheChannelInfo&) = delete;
+    TextureCacheChannelInfo(TextureCacheChannelInfo&& other) noexcept = default;
+    TextureCacheChannelInfo& operator=(TextureCacheChannelInfo&& other) noexcept = default;
+
+    DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
+    DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
+    std::vector<SamplerId> graphics_sampler_ids;
+    std::vector<ImageViewId> graphics_image_view_ids;
+
+    DescriptorTable<TICEntry> compute_image_table{gpu_memory};
+    DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
+    std::vector<SamplerId> compute_sampler_ids;
+    std::vector<ImageViewId> compute_image_view_ids;
+
+    std::unordered_map<TICEntry, ImageViewId> image_views;
+    std::unordered_map<TSCEntry, SamplerId> samplers;
+
+    TextureCacheGPUMap* gpu_page_table;
+};
+
 template <class P>
-class TextureCache {
+class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo> {
     /// Address shift for caching images into a hash table
     static constexpr u64 YUZU_PAGEBITS = 20;
@@ -58,6 +93,8 @@ class TextureCache {
     /// True when the API can provide info about the memory of the device.
     static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;
 
+    static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
+
     static constexpr s64 TARGET_THRESHOLD = 4_GiB;
     static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;
     static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB;
@@ -77,16 +114,8 @@ class TextureCache {
         PixelFormat src_format;
     };
 
-    template <typename T>
-    struct IdentityHash {
-        [[nodiscard]] size_t operator()(T value) const noexcept {
-            return static_cast<size_t>(value);
-        }
-    };
-
 public:
-    explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&, Tegra::Engines::Maxwell3D&,
-                          Tegra::Engines::KeplerCompute&, Tegra::MemoryManager&);
+    explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&);
 
     /// Notify the cache that a new frame has been queued
     void TickFrame();
@@ -142,7 +171,7 @@ public:
     void UnmapMemory(VAddr cpu_addr, size_t size);
 
     /// Remove images in a region
-    void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size);
+    void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size);
 
     /// Blit an image with the given parameters
     void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
@@ -171,6 +200,9 @@ public:
 
     [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept;
 
+    /// Create channel state.
+    void CreateChannel(Tegra::Control::ChannelState& channel) final override;
+
     std::mutex mutex;
 
 private:
@@ -205,6 +237,8 @@ private:
         }
     }
 
+    void OnGPUASRegister(size_t map_id) final override;
+
     /// Runs the Garbage Collector.
     void RunGarbageCollector();
@@ -273,7 +307,7 @@ private:
     void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func);
 
     template <typename Func>
-    void ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func);
+    void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);
 
     template <typename Func>
     void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func);
@@ -338,31 +372,16 @@ private:
     u64 GetScaledImageSizeBytes(ImageBase& image);
 
     Runtime& runtime;
-    VideoCore::RasterizerInterface& rasterizer;
-    Tegra::Engines::Maxwell3D& maxwell3d;
-    Tegra::Engines::KeplerCompute& kepler_compute;
-    Tegra::MemoryManager& gpu_memory;
 
-    DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
-    DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
-    std::vector<SamplerId> graphics_sampler_ids;
-    std::vector<ImageViewId> graphics_image_view_ids;
-
-    DescriptorTable<TICEntry> compute_image_table{gpu_memory};
-    DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
-    std::vector<SamplerId> compute_sampler_ids;
-    std::vector<ImageViewId> compute_image_view_ids;
+    VideoCore::RasterizerInterface& rasterizer;
+    std::deque<TextureCacheGPUMap> gpu_page_table_storage;
 
     RenderTargets render_targets;
 
-    std::unordered_map<TICEntry, ImageViewId> image_views;
-    std::unordered_map<TSCEntry, SamplerId> samplers;
     std::unordered_map<RenderTargets, FramebufferId> framebuffers;
 
-    std::unordered_map<u64, std::vector<ImageMapId>, IdentityHash<u64>> page_table;
-    std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table;
-    std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> sparse_page_table;
-
+    std::unordered_map<u64, std::vector<ImageMapId>, Common::IdentityHash<u64>> page_table;
+    std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table;
 
     std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views;
 
     VAddr virtual_invalid_space{};
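Editor's note: the page tables above are keyed on already-well-distributed page numbers, so the local IdentityHash removed here is replaced by a shared Common::IdentityHash. A minimal sketch of what that helper presumably looks like, reconstructed from the deleted local template (the Common:: version is assumed to be identical in spirit):

#include <cstddef>

namespace Common {
// The u64 key is already a page index; re-hashing it is wasted work,
// so the value is forwarded as its own hash.
template <typename T>
struct IdentityHash {
    [[nodiscard]] size_t operator()(T value) const noexcept {
        return static_cast<size_t>(value);
    }
};
} // namespace Common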
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 1820823b2..1223df5a0 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -517,7 +517,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
     const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block;
 
     UNIMPLEMENTED_IF(info.tile_width_spacing > 0);
-    UNIMPLEMENTED_IF(copy.image_offset.x != 0);
     UNIMPLEMENTED_IF(copy.image_offset.y != 0);
     UNIMPLEMENTED_IF(copy.image_offset.z != 0);
@@ -755,7 +754,7 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config
     if (address == 0) {
         return false;
     }
-    if (address > (1ULL << 48)) {
+    if (address >= (1ULL << 40)) {
         return false;
     }
     if (gpu_memory.GpuToCpuAddress(address).has_value()) {
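Editor's note: the IsValidEntry change above tightens the bound from 48 to 40 address bits and fixes the off-by-one (the boundary value itself is now rejected). The assumption, per the new code, is a 40-bit GPU virtual address space; a small illustration of the corrected predicate:

#include <cstdint>

// First invalid address in a 40-bit VA space is 1 << 40; the old check
// allowed 8 extra address bits and accepted the boundary value.
constexpr bool IsBelowGpuAddressSpace(std::uint64_t address) {
    return address < (std::uint64_t{1} << 40);
}
static_assert(!IsBelowGpuAddressSpace(std::uint64_t{1} << 40));
static_assert(IsBelowGpuAddressSpace((std::uint64_t{1} << 40) - 1));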
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index e3f3d3c5d..69a32819a 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -13,7 +13,9 @@
 
 #include <boost/container/static_vector.hpp>
 
+#include "common/alignment.h"
 #include "common/common_types.h"
+#include "common/thread_worker.h"
 #include "video_core/textures/astc.h"
 
 class InputBitStream {
@@ -1411,7 +1413,7 @@ static void FillVoidExtentLDR(InputBitStream& strm, std::span<u32> outBuf, u32 b
 static void FillError(std::span<u32> outBuf, u32 blockWidth, u32 blockHeight) {
     for (u32 j = 0; j < blockHeight; j++) {
         for (u32 i = 0; i < blockWidth; i++) {
-            outBuf[j * blockWidth + i] = 0xFFFF00FF;
+            outBuf[j * blockWidth + i] = 0x00000000;
         }
     }
 }
@@ -1650,29 +1652,41 @@ static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth,
 
 void Decompress(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
                 uint32_t block_width, uint32_t block_height, std::span<uint8_t> output) {
-    u32 block_index = 0;
-    std::size_t depth_offset = 0;
-    for (u32 z = 0; z < depth; z++) {
-        for (u32 y = 0; y < height; y += block_height) {
-            for (u32 x = 0; x < width; x += block_width) {
-                const std::span<const u8, 16> blockPtr{data.subspan(block_index * 16, 16)};
-
-                // Blocks can be at most 12x12
-                std::array<u32, 12 * 12> uncompData;
-                DecompressBlock(blockPtr, block_width, block_height, uncompData);
-
-                u32 decompWidth = std::min(block_width, width - x);
-                u32 decompHeight = std::min(block_height, height - y);
-
-                const std::span<u8> outRow = output.subspan(depth_offset + (y * width + x) * 4);
-                for (u32 jj = 0; jj < decompHeight; jj++) {
-                    std::memcpy(outRow.data() + jj * width * 4,
-                                uncompData.data() + jj * block_width, decompWidth * 4);
+    const u32 rows = Common::DivideUp(height, block_height);
+    const u32 cols = Common::DivideUp(width, block_width);
+
+    Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2,
+                                 "ASTCDecompress"};
+
+    for (u32 z = 0; z < depth; ++z) {
+        const u32 depth_offset = z * height * width * 4;
+        for (u32 y_index = 0; y_index < rows; ++y_index) {
+            auto decompress_stride = [data, width, height, block_width, block_height, output, rows,
                                      cols, z, depth_offset, y_index] {
+                const u32 y = y_index * block_height;
+                for (u32 x_index = 0; x_index < cols; ++x_index) {
+                    const u32 block_index = (z * rows * cols) + (y_index * cols) + x_index;
+                    const u32 x = x_index * block_width;
+
+                    const std::span<const u8, 16> blockPtr{data.subspan(block_index * 16, 16)};
+
+                    // Blocks can be at most 12x12
+                    std::array<u32, 12 * 12> uncompData;
+                    DecompressBlock(blockPtr, block_width, block_height, uncompData);
+
+                    u32 decompWidth = std::min(block_width, width - x);
+                    u32 decompHeight = std::min(block_height, height - y);
+
+                    const std::span<u8> outRow = output.subspan(depth_offset + (y * width + x) * 4);
+                    for (u32 h = 0; h < decompHeight; ++h) {
+                        std::memcpy(outRow.data() + h * width * 4,
+                                    uncompData.data() + h * block_width, decompWidth * 4);
+                    }
                 }
-                ++block_index;
-            }
+            };
+            workers.QueueWork(std::move(decompress_stride));
         }
-        depth_offset += height * width * 4;
+        workers.WaitForRequests();
     }
 }
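Editor's note: the rewritten Decompress queues one row of ASTC blocks per work item on a thread pool sized `max(hardware_concurrency(), 2) / 2` (so at least one worker), and waits for the queued rows once per depth slice. `Common::DivideUp` is integer ceiling division, which keeps partial blocks at the right and bottom edges in play; a sketch of the assumed helper with a worked example:

#include <cstdint>

// assumed behavior of Common::DivideUp used above: integer ceiling division
template <typename T>
constexpr T DivideUp(T value, T divisor) {
    return static_cast<T>((value + divisor - 1) / divisor);
}

// A 100x60 surface with 12x12 ASTC blocks decodes as 5 rows x 9 columns of
// work; the ninth column only covers the remaining 4 texels.
static_assert(DivideUp<std::uint32_t>(60, 12) == 5);
static_assert(DivideUp<std::uint32_t>(100, 12) == 9);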
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 913f8ebcb..fd1a4b987 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -21,7 +21,7 @@ constexpr u32 pdep(u32 value) {
     u32 m = mask;
     for (u32 bit = 1; m; bit += bit) {
         if (value & bit)
-            result |= m & -m;
+            result |= m & (~m + 1);
         m &= m - 1;
     }
     return result;
@@ -35,7 +35,7 @@ void incrpdep(u32& value) {
 
 template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
 void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth,
-                 u32 block_height, u32 block_depth, u32 stride_alignment) {
+                 u32 block_height, u32 block_depth, u32 stride) {
     // The origin of the transformation can be configured here, leave it as zero as the current API
     // doesn't expose it.
     static constexpr u32 origin_x = 0;
@@ -45,7 +45,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
     // We can configure here a custom pitch
     // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
     const u32 pitch = width * BYTES_PER_PIXEL;
-    const u32 stride = Common::AlignUpLog2(width, stride_alignment) * BYTES_PER_PIXEL;
 
     const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
     const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
@@ -89,6 +88,69 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
     }
 }
 
+template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
+void SwizzleSubrectImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height,
+                        u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 num_lines,
+                        u32 block_height, u32 block_depth, u32 pitch_linear) {
+    // The origin of the transformation can be configured here, leave it as zero as the current API
+    // doesn't expose it.
+    static constexpr u32 origin_z = 0;
+
+    // We can configure here a custom pitch
+    // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
+    const u32 pitch = pitch_linear;
+    const u32 stride = Common::AlignUpLog2(width * BYTES_PER_PIXEL, GOB_SIZE_X_SHIFT);
+
+    const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
+    const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
+    const u32 slice_size =
+        Common::DivCeilLog2(height, block_height + GOB_SIZE_Y_SHIFT) * block_size;
+
+    const u32 block_height_mask = (1U << block_height) - 1;
+    const u32 block_depth_mask = (1U << block_depth) - 1;
+    const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth;
+
+    u32 unprocessed_lines = num_lines;
+    u32 extent_y = std::min(num_lines, height - origin_y);
+
+    for (u32 slice = 0; slice < depth; ++slice) {
+        const u32 z = slice + origin_z;
+        const u32 offset_z = (z >> block_depth) * slice_size +
+                             ((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height));
+        const u32 lines_in_y = std::min(unprocessed_lines, extent_y);
+        for (u32 line = 0; line < lines_in_y; ++line) {
+            const u32 y = line + origin_y;
+            const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(y);
+
+            const u32 block_y = y >> GOB_SIZE_Y_SHIFT;
+            const u32 offset_y = (block_y >> block_height) * block_size +
+                                 ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
+
+            u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
+            for (u32 column = 0; column < extent_x;
+                 ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
+                const u32 x = (column + origin_x) * BYTES_PER_PIXEL;
+                const u32 offset_x = (x >> GOB_SIZE_X_SHIFT) << x_shift;
+
+                const u32 base_swizzled_offset = offset_z + offset_y + offset_x;
+                const u32 swizzled_offset = base_swizzled_offset + (swizzled_x | swizzled_y);
+
+                const u32 unswizzled_offset =
+                    slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL;
+
+                u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset];
+                const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset];
+
+                std::memcpy(dst, src, BYTES_PER_PIXEL);
+            }
+        }
+        unprocessed_lines -= lines_in_y;
+        if (unprocessed_lines == 0) {
+            return;
+        }
+    }
+}
+
 template <bool TO_LINEAR>
 void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
              u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) {
@@ -111,122 +173,39 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
     }
 }
 
-template <u32 BYTES_PER_PIXEL>
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
-                    u8* swizzled_data, const u8* unswizzled_data, u32 block_height_bit,
-                    u32 offset_x, u32 offset_y) {
-    const u32 block_height = 1U << block_height_bit;
-    const u32 image_width_in_gobs =
-        (swizzled_width * BYTES_PER_PIXEL + (GOB_SIZE_X - 1)) / GOB_SIZE_X;
-    for (u32 line = 0; line < subrect_height; ++line) {
-        const u32 dst_y = line + offset_y;
-        const u32 gob_address_y =
-            (dst_y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
-            ((dst_y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
-
-        const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(dst_y);
-        u32 swizzled_x = pdep<SWIZZLE_X_BITS>(offset_x * BYTES_PER_PIXEL);
-        for (u32 x = 0; x < subrect_width;
-             ++x, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
-            const u32 dst_x = x + offset_x;
-            const u32 gob_address =
-                gob_address_y + (dst_x * BYTES_PER_PIXEL / GOB_SIZE_X) * GOB_SIZE * block_height;
-            const u32 swizzled_offset = gob_address + (swizzled_x | swizzled_y);
-            const u32 unswizzled_offset = line * source_pitch + x * BYTES_PER_PIXEL;
-
-            const u8* const source_line = unswizzled_data + unswizzled_offset;
-            u8* const dest_addr = swizzled_data + swizzled_offset;
-            std::memcpy(dest_addr, source_line, BYTES_PER_PIXEL);
-        }
-    }
-}
-
-template <u32 BYTES_PER_PIXEL>
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 block_height,
-                      u32 origin_x, u32 origin_y, u8* output, const u8* input) {
-    const u32 stride = width * BYTES_PER_PIXEL;
-    const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
-    const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height);
-
-    const u32 block_height_mask = (1U << block_height) - 1;
-    const u32 x_shift = GOB_SIZE_SHIFT + block_height;
-
-    for (u32 line = 0; line < line_count; ++line) {
-        const u32 src_y = line + origin_y;
-        const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(src_y);
-
-        const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT;
-        const u32 src_offset_y = (block_y >> block_height) * block_size +
-                                 ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
-
-        u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
-        for (u32 column = 0; column < line_length_in;
-             ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
-            const u32 src_x = (column + origin_x) * BYTES_PER_PIXEL;
-            const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift;
-
-            const u32 swizzled_offset = src_offset_y + src_offset_x + (swizzled_x | swizzled_y);
-            const u32 unswizzled_offset = line * pitch + column * BYTES_PER_PIXEL;
-
-            std::memcpy(output + unswizzled_offset, input + swizzled_offset, BYTES_PER_PIXEL);
-        }
-    }
-}
-
-template <u32 BYTES_PER_PIXEL>
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
-                         u32 block_height, u32 block_depth, u32 origin_x, u32 origin_y, u8* output,
-                         const u8* input) {
-    UNIMPLEMENTED_IF(origin_x > 0);
-    UNIMPLEMENTED_IF(origin_y > 0);
-
-    const u32 stride = width * BYTES_PER_PIXEL;
-    const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
-    const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
-
-    const u32 block_height_mask = (1U << block_height) - 1;
-    const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height + block_depth;
-
-    for (u32 line = 0; line < line_count; ++line) {
-        const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(line);
-        const u32 block_y = line / GOB_SIZE_Y;
-        const u32 dst_offset_y =
-            (block_y >> block_height) * block_size + (block_y & block_height_mask) * GOB_SIZE;
-
-        u32 swizzled_x = 0;
-        for (u32 x = 0; x < line_length_in; ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
-            const u32 dst_offset =
-                ((x / GOB_SIZE_X) << x_shift) + dst_offset_y + (swizzled_x | swizzled_y);
-            const u32 src_offset = x * BYTES_PER_PIXEL + line * pitch;
-            std::memcpy(output + dst_offset, input + src_offset, BYTES_PER_PIXEL);
-        }
-    }
-}
 } // Anonymous namespace
 
 void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
                       u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth,
                       u32 stride_alignment) {
+    const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
+    const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
+    width = (width * bytes_per_pixel) >> new_bpp;
+    bytes_per_pixel = 1U << new_bpp;
     Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
-                   stride_alignment);
+                   stride);
 }
 
 void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
                     u32 height, u32 depth, u32 block_height, u32 block_depth,
                     u32 stride_alignment) {
+    const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
+    const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
+    width = (width * bytes_per_pixel) >> new_bpp;
+    bytes_per_pixel = 1U << new_bpp;
     Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
-                  stride_alignment);
+                  stride);
 }
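Editor's note: UnswizzleTexture and SwizzleTexture now coalesce texels before dispatching: the row is reinterpreted as raw bytes and regrouped into the widest power-of-two element (capped at 16 bytes) that divides the row size, so each inner-loop memcpy moves more data. A worked example of that arithmetic, mirroring the names above (illustration only, not new API):

#include <algorithm>
#include <bit>
#include <cstdint>

// A 100-pixel row of 3-byte texels is 300 bytes; the widest power-of-two
// element dividing 300 (capped at 2^4 = 16) is 4 bytes, so the row becomes
// 75 four-byte "pixels" with an identical byte layout.
constexpr std::uint32_t width = 100, bytes_per_pixel = 3;
constexpr std::uint32_t row_bytes = width * bytes_per_pixel; // 300
constexpr std::uint32_t new_bpp =
    std::min(4U, static_cast<std::uint32_t>(std::countr_zero(row_bytes)));
static_assert((row_bytes >> new_bpp) == 75 && (1U << new_bpp) == 4);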
void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
-                    u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data,
-                    u32 block_height_bit, u32 offset_x, u32 offset_y) {
+void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
+                    u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
+                    u32 block_height, u32 block_depth, u32 pitch_linear) {
     switch (bytes_per_pixel) {
 #define BPP_CASE(x)                                                                               \
     case x:                                                                                       \
-        return SwizzleSubrect<x>(subrect_width, subrect_height, source_pitch, swizzled_width,     \
-                                 swizzled_data, unswizzled_data, block_height_bit, offset_x,      \
-                                 offset_y);
+        return SwizzleSubrectImpl<true, x>(output, input, width, height, depth, origin_x,         \
+                                           origin_y, extent_x, extent_y, block_height,            \
+                                           block_depth, pitch_linear);
         BPP_CASE(1)
         BPP_CASE(2)
         BPP_CASE(3)
@@ -241,13 +220,15 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
     }
 }
 
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel,
-                      u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) {
+void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
+                      u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
+                      u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear) {
     switch (bytes_per_pixel) {
 #define BPP_CASE(x)                                                                               \
     case x:                                                                                       \
-        return UnswizzleSubrect<x>(line_length_in, line_count, pitch, width, block_height,        \
-                                   origin_x, origin_y, output, input);
+        return SwizzleSubrectImpl<false, x>(output, input, width, height, depth, origin_x,        \
+                                            origin_y, extent_x, extent_y, block_height,           \
+                                            block_depth, pitch_linear);
         BPP_CASE(1)
         BPP_CASE(2)
         BPP_CASE(3)
@@ -262,55 +243,6 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width,
     }
 }
 
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
-                         u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
-                         u32 origin_y, u8* output, const u8* input) {
-    switch (bytes_per_pixel) {
-#define BPP_CASE(x)                                                                               \
-    case x:                                                                                       \
-        return SwizzleSliceToVoxel<x>(line_length_in, line_count, pitch, width, height,           \
-                                      block_height, block_depth, origin_x, origin_y, output,      \
-                                      input);
-    BPP_CASE(1)
-    BPP_CASE(2)
-    BPP_CASE(3)
-    BPP_CASE(4)
-    BPP_CASE(6)
-    BPP_CASE(8)
-    BPP_CASE(12)
-    BPP_CASE(16)
-#undef BPP_CASE
-    default:
-        ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
-    }
-}
-
-void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
-                   const u32 block_height_bit, const std::size_t copy_size, const u8* source_data,
-                   u8* swizzle_data) {
-    const u32 block_height = 1U << block_height_bit;
-    const u32 image_width_in_gobs{(width + GOB_SIZE_X - 1) / GOB_SIZE_X};
-    std::size_t count = 0;
-    for (std::size_t y = dst_y; y < height && count < copy_size; ++y) {
-        const std::size_t gob_address_y =
-            (y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
-            ((y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
-        const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(static_cast<u32>(y));
-        u32 swizzled_x = pdep<SWIZZLE_X_BITS>(dst_x);
-        for (std::size_t x = dst_x; x < width && count < copy_size;
-             ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
-            const std::size_t gob_address =
-                gob_address_y + (x / GOB_SIZE_X) * GOB_SIZE * block_height;
-            const std::size_t swizzled_offset = gob_address + (swizzled_x | swizzled_y);
-            const u8* source_line = source_data + count;
-            u8* dest_addr = swizzle_data + swizzled_offset;
-            count++;
-
-            *dest_addr = *source_line;
-        }
-    }
-}
-
 std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
                           u32 block_height, u32 block_depth) {
     if (tiled) {
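Editor's note: both public subrectangle paths now funnel into SwizzleSubrectImpl, which walks the tiled address space with the software pdep (parallel bit deposit) helper at the top of this file. The `m & -m` to `m & (~m + 1)` change keeps the lowest-set-bit trick while avoiding unary minus on an unsigned type. A standalone sketch of the same helper, with a compile-time check of the bit scattering:

#include <cstdint>

// Scatter the low bits of 'value' into the set-bit positions of 'mask';
// a software equivalent of the BMI2 PDEP instruction.
constexpr uint32_t pdep(uint32_t value, uint32_t mask) {
    uint32_t result = 0;
    for (uint32_t bit = 1; mask; bit += bit) {
        if (value & bit) {
            result |= mask & (~mask + 1); // lowest set bit of mask
        }
        mask &= mask - 1; // clear the lowest set bit
    }
    return result;
}
static_assert(pdep(0b1011, 0b1111'0000) == 0b1011'0000);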
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index 31a11708f..e70407692 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -40,7 +40,6 @@ constexpr SwizzleTable MakeSwizzleTable() {
     }
     return table;
 }
-constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable();
 
 /// Unswizzles a block linear texture into linear memory.
 void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
@@ -57,34 +56,14 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
                           u32 block_height, u32 block_depth);
 
 /// Copies an untiled subrectangle into a tiled surface.
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
-                    u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data,
-                    u32 block_height_bit, u32 offset_x, u32 offset_y);
+void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
+                    u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
+                    u32 block_height, u32 block_depth, u32 pitch_linear);
 
 /// Copies a tiled subrectangle into a linear surface.
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel,
-                      u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input);
-
-/// @brief Swizzles a 2D array of pixels into a 3D texture
-/// @param line_length_in Number of pixels per line
-/// @param line_count Number of lines
-/// @param pitch Number of bytes per line
-/// @param width Width of the swizzled texture
-/// @param height Height of the swizzled texture
-/// @param bytes_per_pixel Number of bytes used per pixel
-/// @param block_height Block height shift
-/// @param block_depth Block depth shift
-/// @param origin_x Column offset in pixels of the swizzled texture
-/// @param origin_y Row offset in pixels of the swizzled texture
-/// @param output Pointer to the pixels of the swizzled texture
-/// @param input Pointer to the 2D array of pixels used as input
-/// @pre input and output points to an array large enough to hold the number of bytes used
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
-                         u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
-                         u32 origin_y, u8* output, const u8* input);
-
-void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
-                   std::size_t copy_size, const u8* source_data, u8* swizzle_data);
+void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
+                      u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
+                      u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear);
 
 /// Obtains the offset of the gob for positions 'dst_x' & 'dst_y'
 u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
diff --git a/src/video_core/transform_feedback.cpp b/src/video_core/transform_feedback.cpp
index 7e605981c..45071185a 100644
--- a/src/video_core/transform_feedback.cpp
+++ b/src/video_core/transform_feedback.cpp
@@ -15,51 +15,51 @@ namespace VideoCommon {
 std::vector<Shader::TransformFeedbackVarying> MakeTransformFeedbackVaryings(
     const TransformFeedbackState& state) {
     static constexpr std::array VECTORS{
-        28,  // gl_Position
-        32,  // Generic 0
-        36,  // Generic 1
-        40,  // Generic 2
-        44,  // Generic 3
-        48,  // Generic 4
-        52,  // Generic 5
-        56,  // Generic 6
-        60,  // Generic 7
-        64,  // Generic 8
-        68,  // Generic 9
-        72,  // Generic 10
-        76,  // Generic 11
-        80,  // Generic 12
-        84,  // Generic 13
-        88,  // Generic 14
-        92,  // Generic 15
-        96,  // Generic 16
-        100, // Generic 17
-        104, // Generic 18
-        108, // Generic 19
-        112, // Generic 20
-        116, // Generic 21
-        120, // Generic 22
-        124, // Generic 23
-        128, // Generic 24
-        132, // Generic 25
-        136, // Generic 26
-        140, // Generic 27
-        144, // Generic 28
-        148, // Generic 29
-        152, // Generic 30
-        156, // Generic 31
-        160, // gl_FrontColor
-        164, // gl_FrontSecondaryColor
-        160, // gl_BackColor
-        164, // gl_BackSecondaryColor
-        192, // gl_TexCoord[0]
-        196, // gl_TexCoord[1]
-        200, // gl_TexCoord[2]
-        204, // gl_TexCoord[3]
-        208, // gl_TexCoord[4]
-        212, // gl_TexCoord[5]
-        216, // gl_TexCoord[6]
-        220, // gl_TexCoord[7]
+        28U,  // gl_Position
+        32U,  // Generic 0
+        36U,  // Generic 1
+        40U,  // Generic 2
+        44U,  // Generic 3
+        48U,  // Generic 4
+        52U,  // Generic 5
+        56U,  // Generic 6
+        60U,  // Generic 7
+        64U,  // Generic 8
+        68U,  // Generic 9
+        72U,  // Generic 10
+        76U,  // Generic 11
+        80U,  // Generic 12
+        84U,  // Generic 13
+        88U,  // Generic 14
+        92U,  // Generic 15
+        96U,  // Generic 16
+        100U, // Generic 17
+        104U, // Generic 18
+        108U, // Generic 19
+        112U, // Generic 20
+        116U, // Generic 21
+        120U, // Generic 22
+        124U, // Generic 23
+        128U, // Generic 24
+        132U, // Generic 25
+        136U, // Generic 26
+        140U, // Generic 27
+        144U, // Generic 28
+        148U, // Generic 29
+        152U, // Generic 30
+        156U, // Generic 31
+        160U, // gl_FrontColor
+        164U, // gl_FrontSecondaryColor
+        160U, // gl_BackColor
+        164U, // gl_BackSecondaryColor
+        192U, // gl_TexCoord[0]
+        196U, // gl_TexCoord[1]
+        200U, // gl_TexCoord[2]
+        204U, // gl_TexCoord[3]
+        208U, // gl_TexCoord[4]
+        212U, // gl_TexCoord[5]
+        216U, // gl_TexCoord[6]
+        220U, // gl_TexCoord[7]
     };
     std::vector<Shader::TransformFeedbackVarying> xfb(256);
     for (size_t buffer = 0; buffer < state.layouts.size(); ++buffer) {
@@ -68,8 +68,20 @@ std::vector<Shader::TransformFeedbackVarying> MakeTransformFeedbackVaryings(
         const u32 varying_count = layout.varying_count;
         u32 highest = 0;
         for (u32 offset = 0; offset < varying_count; ++offset) {
-            const u32 base_offset = offset;
-            const u8 location = locations[offset];
+            const auto get_attribute = [&locations](u32 index) -> u32 {
+                switch (index % 4) {
+                case 0:
+                    return locations[index / 4].attribute0.Value();
+                case 1:
+                    return locations[index / 4].attribute1.Value();
+                case 2:
+                    return locations[index / 4].attribute2.Value();
+                case 3:
+                    return locations[index / 4].attribute3.Value();
+                }
+                UNREACHABLE();
+                return 0;
+            };
 
             UNIMPLEMENTED_IF_MSG(layout.stream != 0, "Stream is not zero: {}", layout.stream);
             Shader::TransformFeedbackVarying varying{
@@ -78,16 +90,18 @@ std::vector<Shader::TransformFeedbackVarying> MakeTransformFeedbackVaryings(
                 .offset = offset * 4,
                 .components = 1,
             };
-            if (std::ranges::find(VECTORS, Common::AlignDown(location, 4)) != VECTORS.end()) {
-                UNIMPLEMENTED_IF_MSG(location % 4 != 0, "Unaligned TFB");
+            const u32 base_offset = offset;
+            const auto attribute{get_attribute(offset)};
+            if (std::ranges::find(VECTORS, Common::AlignDown(attribute, 4)) != VECTORS.end()) {
+                UNIMPLEMENTED_IF_MSG(attribute % 4 != 0, "Unaligned TFB {}", attribute);
 
-                const u8 base_index = location / 4;
-                while (offset + 1 < varying_count && base_index == locations[offset + 1] / 4) {
+                const auto base_index = attribute / 4;
+                while (offset + 1 < varying_count && base_index == get_attribute(offset + 1) / 4) {
                     ++offset;
                     ++varying.components;
                 }
             }
-            xfb[location] = varying;
+            xfb[attribute] = varying;
             highest = std::max(highest, (base_offset + varying.components) * 4);
         }
         UNIMPLEMENTED_IF(highest != layout.stride);
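Editor's note: the varyings table switches from raw bytes to packed register words in the header change that follows, and the get_attribute lambda above is what unpacks them: each 32-bit StreamOutLayout entry is assumed to carry four 8-bit attribute selectors, so entry index / 4 is read and field index % 4 is selected. A sketch of the assumed register shape (the real definition lives in maxwell_3d.h and may differ in detail):

// assumed shape of Maxwell3D::Regs::StreamOutLayout: four 8-bit attribute
// indices packed into one 32-bit register, lowest byte first
union StreamOutLayout {
    u32 raw;
    BitField<0, 8, u32> attribute0;
    BitField<8, 8, u32> attribute1;
    BitField<16, 8, u32> attribute2;
    BitField<24, 8, u32> attribute3;
};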
diff --git a/src/video_core/transform_feedback.h b/src/video_core/transform_feedback.h
index a519adb59..d13eb16c3 100644
--- a/src/video_core/transform_feedback.h
+++ b/src/video_core/transform_feedback.h
@@ -19,7 +19,8 @@ struct TransformFeedbackState {
         u32 stride;
     };
     std::array<Layout, Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers> layouts;
-    std::array<std::array<u8, 128>, Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers>
+    std::array<std::array<Tegra::Engines::Maxwell3D::Regs::StreamOutLayout, 32>,
+               Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers>
         varyings;
 };
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 795f16bfb..1b3f493bd 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -519,9 +519,7 @@ public:
           dld{rhs.dld} {}
 
     /// Assign an allocation transferring ownership from another allocation.
-    /// Releases any previously held allocation.
     PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
-        Release();
         allocations = std::move(rhs.allocations);
         num = rhs.num;
         device = rhs.device;
@@ -530,11 +528,6 @@ public:
         return *this;
     }
 
-    /// Destroys any held allocation.
-    ~PoolAllocations() {
-        Release();
-    }
-
     /// Returns the number of allocations.
     std::size_t size() const noexcept {
         return num;
@@ -557,19 +550,6 @@ public:
     }
 
 private:
-    /// Destroys the held allocations if they exist.
-    void Release() noexcept {
-        if (!allocations) {
-            return;
-        }
-        const Span<AllocationType> span(allocations.get(), num);
-        const VkResult result = Free(device, pool, span, *dld);
-        // There's no way to report errors from a destructor.
-        if (result != VK_SUCCESS) {
-            std::terminate();
-        }
-    }
-
     std::unique_ptr<AllocationType[]> allocations;
     std::size_t num = 0;
    VkDevice device = nullptr;
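Editor's note: removing PoolAllocations::Release() presumably leans on the Vulkan lifetime rule that pool allocations are reclaimed wholesale when their pool is reset or destroyed, so per-object frees (which previously terminated the process on failure) are unnecessary. The core API guarantee, for reference:

// Descriptor sets allocated from a pool created without
// VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT may not be freed one by
// one; they are all reclaimed when the pool itself is reset or destroyed.
vkResetDescriptorPool(device, descriptor_pool, 0);      // invalidates every set at once
vkDestroyDescriptorPool(device, descriptor_pool, nullptr);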
diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp
index 378804c08..12a7e4922 100644
--- a/src/web_service/web_backend.cpp
+++ b/src/web_service/web_backend.cpp
@@ -111,7 +111,8 @@ struct Client::Impl {
         httplib::Error error;
 
         if (!cli->send(request, response, error)) {
-            LOG_ERROR(WebService, "{} to {} returned null", method, host + path);
+            LOG_ERROR(WebService, "{} to {} returned null (httplib Error: {})", method, host + path,
+                      httplib::to_string(error));
             return WebResult{WebResult::Code::LibError, "Null response", ""};
         }
diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt
index 50007338f..29d506c47 100644
--- a/src/yuzu/CMakeLists.txt
+++ b/src/yuzu/CMakeLists.txt
@@ -208,6 +208,16 @@ add_executable(yuzu
     yuzu.rc
 )
 
+if (WIN32 AND YUZU_CRASH_DUMPS)
+    target_sources(yuzu PRIVATE
+        mini_dump.cpp
+        mini_dump.h
+    )
+
+    target_link_libraries(yuzu PRIVATE ${DBGHELP_LIBRARY})
+    target_compile_definitions(yuzu PRIVATE -DYUZU_DBGHELP)
+endif()
+
 file(GLOB COMPAT_LIST
      ${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.qrc
      ${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.json)
diff --git a/src/yuzu/applets/qt_controller.cpp b/src/yuzu/applets/qt_controller.cpp
index 8be311fcb..12efdc216 100644
--- a/src/yuzu/applets/qt_controller.cpp
+++ b/src/yuzu/applets/qt_controller.cpp
@@ -63,7 +63,7 @@ QtControllerSelectorDialog::QtControllerSelectorDialog(
     InputCommon::InputSubsystem* input_subsystem_, Core::System& system_)
     : QDialog(parent), ui(std::make_unique<Ui::QtControllerSelectorDialog>()),
       parameters(std::move(parameters_)), input_subsystem{input_subsystem_},
-      input_profiles(std::make_unique<InputProfiles>(system_)), system{system_} {
+      input_profiles(std::make_unique<InputProfiles>()), system{system_} {
     ui->setupUi(this);
 
     player_widgets = {
@@ -291,7 +291,7 @@ bool QtControllerSelectorDialog::CheckIfParametersMet() {
     // Here, we check and validate the current configuration against all applicable parameters.
     const auto num_connected_players = static_cast<int>(
         std::count_if(player_groupboxes.begin(), player_groupboxes.end(),
-                      [this](const QGroupBox* player) { return player->isChecked(); }));
+                      [](const QGroupBox* player) { return player->isChecked(); }));
 
     const auto min_supported_players = parameters.enable_single_mode ? 1 : parameters.min_players;
     const auto max_supported_players = parameters.enable_single_mode ? 1 : parameters.max_players;
diff --git a/src/yuzu/applets/qt_controller.ui b/src/yuzu/applets/qt_controller.ui
index c8cb6bcf3..f5eccba70 100644
--- a/src/yuzu/applets/qt_controller.ui
+++ b/src/yuzu/applets/qt_controller.ui
@@ -2300,7 +2300,7 @@
        <item>
         <widget class="QRadioButton" name="radioUndocked">
          <property name="text">
-          <string>Undocked</string>
+          <string>Handheld</string>
          </property>
         </widget>
        </item>
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index d3fbdb09d..6acfb7b06 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -47,7 +47,7 @@ EmuThread::EmuThread(Core::System& system_) : system{system_} {}
 EmuThread::~EmuThread() = default;
 
 void EmuThread::run() {
-    std::string name = "yuzu:EmuControlThread";
+    std::string name = "EmuControlThread";
     MicroProfileOnThreadCreate(name.c_str());
     Common::SetCurrentThreadName(name.c_str());
 
@@ -120,8 +120,8 @@ void EmuThread::run() {
         }
     }
 
-    // Shutdown the core emulation
-    system.Shutdown();
+    // Shutdown the main emulated process
+    system.ShutdownMainProcess();
 
 #if MICROPROFILE_ENABLED
     MicroProfileOnThreadExit();
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 8ecd87150..195074bf2 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -15,8 +15,7 @@
 
 namespace FS = Common::FS;
 
-Config::Config(Core::System& system_, const std::string& config_name, ConfigType config_type)
-    : type(config_type), system{system_} {
+Config::Config(const std::string& config_name, ConfigType config_type) : type(config_type) {
     global = config_type == ConfigType::GlobalConfig;
 
     Initialize(config_name);
@@ -546,6 +545,8 @@ void Config::ReadDebuggingValues() {
     ReadBasicSetting(Settings::values.use_debug_asserts);
     ReadBasicSetting(Settings::values.use_auto_stub);
     ReadBasicSetting(Settings::values.enable_all_controllers);
+    ReadBasicSetting(Settings::values.create_crash_dumps);
+    ReadBasicSetting(Settings::values.perform_vulkan_check);
 
     qt_config->endGroup();
 }
@@ -1161,6 +1162,8 @@ void Config::SaveDebuggingValues() {
     WriteBasicSetting(Settings::values.use_debug_asserts);
     WriteBasicSetting(Settings::values.disable_macro_jit);
     WriteBasicSetting(Settings::values.enable_all_controllers);
+    WriteBasicSetting(Settings::values.create_crash_dumps);
+    WriteBasicSetting(Settings::values.perform_vulkan_check);
 
     qt_config->endGroup();
 }
@@ -1547,7 +1550,6 @@ void Config::Reload() {
     ReadValues();
     // To apply default value changes
     SaveValues();
-    system.ApplySettings();
 }
 
 void Config::Save() {
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h
index 486ceea94..06fa7d2d0 100644
--- a/src/yuzu/configuration/config.h
+++ b/src/yuzu/configuration/config.h
@@ -25,7 +25,7 @@ public:
         InputProfile,
     };
 
-    explicit Config(Core::System& system_, const std::string& config_name = "qt-config",
+    explicit Config(const std::string& config_name = "qt-config",
                     ConfigType config_type = ConfigType::GlobalConfig);
     ~Config();
 
@@ -194,8 +194,6 @@ private:
     std::unique_ptr<QSettings> qt_config;
     std::string qt_config_loc;
     bool global;
-
-    Core::System& system;
 };
 
 // These metatype declarations cannot be in common/settings.h because core is devoid of QT
diff --git a/src/yuzu/configuration/configure_audio.cpp b/src/yuzu/configuration/configure_audio.cpp
index 19b8b15ef..70cc6f84b 100644
--- a/src/yuzu/configuration/configure_audio.cpp
+++ b/src/yuzu/configuration/configure_audio.cpp
@@ -161,8 +161,8 @@ void ConfigureAudio::InitializeAudioSinkComboBox() {
     ui->sink_combo_box->clear();
     ui->sink_combo_box->addItem(QString::fromUtf8(AudioCore::Sink::auto_device_name));
 
-    for (const char* id : AudioCore::Sink::GetSinkIDs()) {
-        ui->sink_combo_box->addItem(QString::fromUtf8(id));
+    for (const auto& id : AudioCore::Sink::GetSinkIDs()) {
+        ui->sink_combo_box->addItem(QString::fromUtf8(id.data(), static_cast<s32>(id.length())));
     }
 }
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index 04d397750..dacc75a20 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <QDesktopServices>
+#include <QMessageBox>
 #include <QUrl>
 #include "common/fs/path_util.h"
 #include "common/logging/backend.h"
@@ -26,6 +27,16 @@ ConfigureDebug::ConfigureDebug(const Core::System& system_, QWidget* parent)
 
     connect(ui->toggle_gdbstub, &QCheckBox::toggled,
             [&]() { ui->gdbport_spinbox->setEnabled(ui->toggle_gdbstub->isChecked()); });
+
+    connect(ui->create_crash_dumps, &QCheckBox::stateChanged, [&](int) {
+        if (crash_dump_warning_shown) {
+            return;
+        }
+        QMessageBox::warning(this, tr("Restart Required"),
+                             tr("yuzu is required to restart in order to apply this setting."),
+                             QMessageBox::Ok, QMessageBox::Ok);
+        crash_dump_warning_shown = true;
+    });
 }
 
 ConfigureDebug::~ConfigureDebug() = default;
@@ -66,12 +77,20 @@ void ConfigureDebug::SetConfiguration() {
     ui->disable_loop_safety_checks->setChecked(
         Settings::values.disable_shader_loop_safety_checks.GetValue());
     ui->extended_logging->setChecked(Settings::values.extended_logging.GetValue());
+    ui->perform_vulkan_check->setChecked(Settings::values.perform_vulkan_check.GetValue());
 
 #ifdef YUZU_USE_QT_WEB_ENGINE
     ui->disable_web_applet->setChecked(UISettings::values.disable_web_applet.GetValue());
 #else
     ui->disable_web_applet->setEnabled(false);
-    ui->disable_web_applet->setText(QString::fromUtf8("Web applet not compiled"));
+    ui->disable_web_applet->setText(tr("Web applet not compiled"));
+#endif
+
+#ifdef YUZU_DBGHELP
+    ui->create_crash_dumps->setChecked(Settings::values.create_crash_dumps.GetValue());
+#else
+    ui->create_crash_dumps->setEnabled(false);
+    ui->create_crash_dumps->setText(tr("MiniDump creation not compiled"));
 #endif
 }
 
@@ -84,6 +103,7 @@ void ConfigureDebug::ApplyConfiguration() {
     Settings::values.enable_fs_access_log = ui->fs_access_log->isChecked();
     Settings::values.reporting_services = ui->reporting_services->isChecked();
     Settings::values.dump_audio_commands = ui->dump_audio_commands->isChecked();
+    Settings::values.create_crash_dumps = ui->create_crash_dumps->isChecked();
     Settings::values.quest_flag = ui->quest_flag->isChecked();
     Settings::values.use_debug_asserts = ui->use_debug_asserts->isChecked();
     Settings::values.use_auto_stub = ui->use_auto_stub->isChecked();
@@ -98,6 +118,7 @@ void ConfigureDebug::ApplyConfiguration() {
         ui->disable_loop_safety_checks->isChecked();
     Settings::values.disable_macro_jit = ui->disable_macro_jit->isChecked();
     Settings::values.extended_logging = ui->extended_logging->isChecked();
+    Settings::values.perform_vulkan_check = ui->perform_vulkan_check->isChecked();
     UISettings::values.disable_web_applet = ui->disable_web_applet->isChecked();
     Debugger::ToggleConsole();
     Common::Log::Filter filter;
diff --git a/src/yuzu/configuration/configure_debug.h b/src/yuzu/configuration/configure_debug.h
index 42d30f170..030a0b7f7 100644
--- a/src/yuzu/configuration/configure_debug.h
+++ b/src/yuzu/configuration/configure_debug.h
@@ -32,4 +32,6 @@ private:
 
     std::unique_ptr<Ui::ConfigureDebug> ui;
     const Core::System& system;
+
+    bool crash_dump_warning_shown{false};
 };
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index 47b8b80f1..102c8c66c 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -7,60 +7,60 @@
   </property>
   <widget class="QWidget">
    <layout class="QVBoxLayout" name="verticalLayout_1">
-   <item>
-    <layout class="QVBoxLayout" name="verticalLayout_2">
-     <item>
-      <widget class="QGroupBox" name="groupBox">
-       <property name="title">
-        <string>Debugger</string>
-       </property>
-       <layout class="QVBoxLayout" name="verticalLayout_3">
+    <item>
+     <layout class="QVBoxLayout" name="verticalLayout_2">
+      <item>
+       <widget class="QGroupBox" name="groupBox">
+        <property name="title">
+         <string>Debugger</string>
+        </property>
+        <layout class="QVBoxLayout" name="verticalLayout_3">
+         <item>
+          <layout class="QHBoxLayout" name="horizontalLayout_11">
           <item>
-          <layout class="QHBoxLayout" name="horizontalLayout_11">
-           <item>
-            <widget class="QCheckBox" name="toggle_gdbstub">
-             <property name="text">
-              <string>Enable GDB Stub</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <spacer name="horizontalSpacer">
-             <property name="orientation">
-              <enum>Qt::Horizontal</enum>
-             </property>
-             <property name="sizeHint" stdset="0">
-              <size>
-               <width>40</width>
-               <height>20</height>
-              </size>
-             </property>
-            </spacer>
-           </item>
-           <item>
-            <widget class="QLabel" name="label_11">
-             <property name="text">
-              <string>Port:</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <widget class="QSpinBox" name="gdbport_spinbox">
-             <property name="minimum">
-              <number>1024</number>
-             </property>
-             <property name="maximum">
-              <number>65535</number>
-             </property>
-            </widget>
-           </item>
-          </layout>
+           <widget class="QCheckBox" name="toggle_gdbstub">
+            <property name="text">
+             <string>Enable GDB Stub</string>
+            </property>
+           </widget>
+          </item>
+          <item>
+           <spacer name="horizontalSpacer">
+            <property name="orientation">
+             <enum>Qt::Horizontal</enum>
+            </property>
+            <property name="sizeHint" stdset="0">
+             <size>
+              <width>40</width>
+              <height>20</height>
+             </size>
+            </property>
+           </spacer>
+          </item>
+          <item>
+           <widget class="QLabel" name="label_11">
+            <property name="text">
+             <string>Port:</string>
+            </property>
+           </widget>
+          </item>
+          <item>
+           <widget class="QSpinBox" name="gdbport_spinbox">
+            <property name="minimum">
+             <number>1024</number>
+            </property>
+            <property name="maximum">
+             <number>65535</number>
+            </property>
+           </widget>
          </item>
         </layout>
-      </widget>
-     </item>
-    </layout>
+        </item>
+       </layout>
+      </widget>
     </item>
+   </layout>
+  </item>
   <item>
    <widget class="QGroupBox" name="groupBox_2">
    <property name="title">
@@ -231,6 +231,13 @@
     <string>Debugging</string>
    </property>
    <layout class="QGridLayout" name="gridLayout_3">
+    <item row="2" column="0">
+     <widget class="QCheckBox" name="reporting_services">
+      <property name="text">
+       <string>Enable Verbose Reporting Services**</string>
+      </property>
+     </widget>
+    </item>
    <item row="0" column="0">
     <widget class="QCheckBox" name="fs_access_log">
     <property name="text">
@@ -238,20 +245,20 @@
      </property>
     </widget>
    </item>
-   <item row="1" column="0">
+   <item row="0" column="1">
     <widget class="QCheckBox" name="dump_audio_commands">
-     <property name="text">
-      <string>Dump Audio Commands To Console**</string>
-     </property>
      <property name="toolTip">
      <string>Enable this to output the latest generated audio command list to the console. Only affects games using the audio renderer.</string>
      </property>
+     <property name="text">
+      <string>Dump Audio Commands To Console**</string>
+     </property>
    </widget>
   </item>
-   <item row="2" column="0">
-    <widget class="QCheckBox" name="reporting_services">
+   <item row="2" column="1">
+    <widget class="QCheckBox" name="create_crash_dumps">
     <property name="text">
-     <string>Enable Verbose Reporting Services**</string>
+     <string>Create Minidump After Crash</string>
     </property>
    </widget>
   </item>
@@ -306,6 +313,16 @@
     </property>
    </widget>
   </item>
+  <item row="3" column="0">
+   <widget class="QCheckBox" name="perform_vulkan_check">
+    <property name="toolTip">
+     <string>Enables yuzu to check for a working Vulkan environment when the program starts up. Disable this if this is causing issues with external programs seeing yuzu.</string>
+    </property>
+    <property name="text">
+     <string>Perform Startup Vulkan Check</string>
+    </property>
+   </widget>
+  </item>
  </layout>
 </widget>
 </item>
@@ -340,7 +357,6 @@
  <tabstop>disable_loop_safety_checks</tabstop>
  <tabstop>fs_access_log</tabstop>
  <tabstop>reporting_services</tabstop>
- <tabstop>dump_audio_commands</tabstop>
  <tabstop>quest_flag</tabstop>
  <tabstop>enable_cpu_debugging</tabstop>
  <tabstop>use_debug_asserts</tabstop>
diff --git a/src/yuzu/configuration/configure_graphics.cpp b/src/yuzu/configuration/configure_graphics.cpp
index 87e5d0f48..bd69d04a6 100644
--- a/src/yuzu/configuration/configure_graphics.cpp
+++ b/src/yuzu/configuration/configure_graphics.cpp
@@ -57,9 +57,10 @@ ConfigureGraphics::ConfigureGraphics(const Core::System& system_, QWidget* paren
         UpdateBackgroundColorButton(new_bg_color);
     });
 
-    ui->api->setEnabled(!UISettings::values.has_broken_vulkan);
-    ui->api_widget->setEnabled(!UISettings::values.has_broken_vulkan ||
-                               Settings::IsConfiguringGlobal());
+    ui->api->setEnabled(!UISettings::values.has_broken_vulkan && ui->api->isEnabled());
+    ui->api_widget->setEnabled(
+        (!UISettings::values.has_broken_vulkan || Settings::IsConfiguringGlobal()) &&
+        ui->api_widget->isEnabled());
     ui->bg_label->setVisible(Settings::IsConfiguringGlobal());
     ui->bg_combobox->setVisible(!Settings::IsConfiguringGlobal());
 }
diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui
index 1e4f74704..fdbb33372 100644
--- a/src/yuzu/configuration/configure_graphics.ui
+++ b/src/yuzu/configuration/configure_graphics.ui
@@ -301,6 +301,11 @@
       </item>
       <item>
        <property name="text">
+        <string>Force 16:10</string>
+       </property>
+      </item>
+      <item>
+       <property name="text">
         <string>Stretch to Window</string>
        </property>
       </item>
diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp
index 16fba3deb..1db374d4a 100644
--- a/src/yuzu/configuration/configure_input.cpp
+++ b/src/yuzu/configuration/configure_input.cpp
@@ -65,7 +65,7 @@ void OnDockedModeChanged(bool last_state, bool new_state, Core::System& system)
 
 ConfigureInput::ConfigureInput(Core::System& system_, QWidget* parent)
     : QWidget(parent), ui(std::make_unique<Ui::ConfigureInput>()),
-      profiles(std::make_unique<InputProfiles>(system_)), system{system_} {
+      profiles(std::make_unique<InputProfiles>()), system{system_} {
     ui->setupUi(this);
 }
 
@@ -163,10 +163,9 @@ void ConfigureInput::Initialize(InputCommon::InputSubsystem* input_subsystem,
             [this, input_subsystem, &hid_core] {
                 CallConfigureDialog<ConfigureRingController>(*this, input_subsystem, hid_core);
             });
-    connect(advanced, &ConfigureInputAdvanced::CallCameraDialog,
-            [this, input_subsystem, &hid_core] {
-                CallConfigureDialog<ConfigureCamera>(*this, input_subsystem);
-            });
+    connect(advanced, &ConfigureInputAdvanced::CallCameraDialog, [this, input_subsystem] {
+        CallConfigureDialog<ConfigureCamera>(*this, input_subsystem);
+    });
 
     connect(ui->vibrationButton, &QPushButton::clicked,
             [this, &hid_core] { CallConfigureDialog<ConfigureVibration>(*this, hid_core); });
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 9b4f765ce..9e5a40fe7 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -1417,7 +1417,7 @@ void ConfigureInputPlayer::HandleClick(
         ui->controllerFrame->BeginMappingAnalog(button_id);
     }
 
-    timeout_timer->start(2500); // Cancel after 2.5 seconds
+    timeout_timer->start(4000); // Cancel after 4 seconds
     poll_timer->start(25);      // Check for new inputs every 25ms
 }
diff --git a/src/yuzu/configuration/configure_per_game.cpp b/src/yuzu/configuration/configure_per_game.cpp
index af8343b2e..c3cb8f61d 100644
--- a/src/yuzu/configuration/configure_per_game.cpp
+++ b/src/yuzu/configuration/configure_per_game.cpp
@@ -42,8 +42,7 @@ ConfigurePerGame::ConfigurePerGame(QWidget* parent, u64 title_id_, const std::st
     const auto file_path = std::filesystem::path(Common::FS::ToU8String(file_name));
     const auto config_file_name = title_id == 0 ? Common::FS::PathToUTF8String(file_path.filename())
                                                 : fmt::format("{:016X}", title_id);
-    game_config =
-        std::make_unique<Config>(system, config_file_name, Config::ConfigType::PerGameConfig);
+    game_config = std::make_unique<Config>(config_file_name, Config::ConfigType::PerGameConfig);
 
     addons_tab = std::make_unique<ConfigurePerGameAddons>(system_, this);
     audio_tab = std::make_unique<ConfigureAudio>(system_, this);
diff --git a/src/yuzu/configuration/configure_tas.ui b/src/yuzu/configuration/configure_tas.ui
index cf88a5bf0..625af0c89 100644
--- a/src/yuzu/configuration/configure_tas.ui
+++ b/src/yuzu/configuration/configure_tas.ui
@@ -16,6 +16,9 @@
      <property name="text">
       <string><html><head/><body><p>Reads controller input from scripts in the same format as TAS-nx scripts.<br/>For a more detailed explanation, please consult the <a href="https://yuzu-emu.org/help/feature/tas/"><span style=" text-decoration: underline; color:#039be5;">help page</span></a> on the yuzu website.</p></body></html></string>
      </property>
+     <property name="openExternalLinks">
+      <bool>true</bool>
+     </property>
     </widget>
    </item>
    <item row="1" column="0" colspan="4">
diff --git a/src/yuzu/configuration/configure_web.cpp b/src/yuzu/configuration/configure_web.cpp
index d668c992b..ab526e4ca 100644
--- a/src/yuzu/configuration/configure_web.cpp
+++ b/src/yuzu/configuration/configure_web.cpp
@@ -128,20 +128,25 @@ void ConfigureWeb::RefreshTelemetryID() {
 void ConfigureWeb::OnLoginChanged() {
     if (ui->edit_token->text().isEmpty()) {
         user_verified = true;
-
-        const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("checked")).pixmap(16);
-        ui->label_token_verified->setPixmap(pixmap);
+        // Empty = no icon
+        ui->label_token_verified->setPixmap(QPixmap());
+        ui->label_token_verified->setToolTip(QString());
     } else {
         user_verified = false;
 
-        const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("failed")).pixmap(16);
+        // Show an info icon if it's been changed, clearer than showing failure
+        const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("info")).pixmap(16);
         ui->label_token_verified->setPixmap(pixmap);
+        ui->label_token_verified->setToolTip(
+            tr("Unverified, please click Verify before saving configuration", "Tooltip"));
     }
 }
 
 void ConfigureWeb::VerifyLogin() {
     ui->button_verify_login->setDisabled(true);
     ui->button_verify_login->setText(tr("Verifying..."));
+    ui->label_token_verified->setPixmap(QIcon::fromTheme(QStringLiteral("sync")).pixmap(16));
+    ui->label_token_verified->setToolTip(tr("Verifying..."));
     verify_watcher.setFuture(QtConcurrent::run(
         [username = UsernameFromDisplayToken(ui->edit_token->text().toStdString()),
          token = TokenFromDisplayToken(ui->edit_token->text().toStdString())] {
@@ -155,13 +160,13 @@ void ConfigureWeb::OnLoginVerified() {
     if (verify_watcher.result()) {
         user_verified = true;
 
-        const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("checked")).pixmap(16);
-        ui->label_token_verified->setPixmap(pixmap);
+        ui->label_token_verified->setPixmap(QIcon::fromTheme(QStringLiteral("checked")).pixmap(16));
+        ui->label_token_verified->setToolTip(tr("Verified", "Tooltip"));
         ui->username->setText(
             QString::fromStdString(UsernameFromDisplayToken(ui->edit_token->text().toStdString())));
     } else {
-        const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("failed")).pixmap(16);
-        ui->label_token_verified->setPixmap(pixmap);
+        ui->label_token_verified->setPixmap(QIcon::fromTheme(QStringLiteral("failed")).pixmap(16));
+        ui->label_token_verified->setToolTip(tr("Verification failed", "Tooltip"));
         ui->username->setText(tr("Unspecified"));
         QMessageBox::critical(this, tr("Verification failed"),
                               tr("Verification failed. Check that you have entered your token "
diff --git a/src/yuzu/configuration/input_profiles.cpp b/src/yuzu/configuration/input_profiles.cpp
index 20b22e7de..9bb69cab1 100644
--- a/src/yuzu/configuration/input_profiles.cpp
+++ b/src/yuzu/configuration/input_profiles.cpp
@@ -27,7 +27,7 @@ std::filesystem::path GetNameWithoutExtension(std::filesystem::path filename) {
 
 } // namespace
 
-InputProfiles::InputProfiles(Core::System& system_) : system{system_} {
+InputProfiles::InputProfiles() {
     const auto input_profile_loc = FS::GetYuzuPath(FS::YuzuPath::ConfigDir) / "input";
 
     if (!FS::IsDir(input_profile_loc)) {
@@ -43,8 +43,8 @@ InputProfiles::InputProfiles(Core::System& system_) : system{system_} {
 
         if (IsINI(filename) && IsProfileNameValid(name_without_ext)) {
             map_profiles.insert_or_assign(
-                name_without_ext, std::make_unique<Config>(system, name_without_ext,
-                                                           Config::ConfigType::InputProfile));
+                name_without_ext,
+                std::make_unique<Config>(name_without_ext, Config::ConfigType::InputProfile));
         }
 
         return true;
@@ -67,6 +67,8 @@ std::vector<std::string> InputProfiles::GetInputProfileNames() {
         profile_names.push_back(profile_name);
     }
 
+    std::stable_sort(profile_names.begin(), profile_names.end());
+
     return profile_names;
 }
 
@@ -80,8 +82,7 @@ bool InputProfiles::CreateProfile(const std::string& profile_name, std::size_t p
     }
 
     map_profiles.insert_or_assign(
-        profile_name,
-        std::make_unique<Config>(system, profile_name, Config::ConfigType::InputProfile));
+        profile_name, std::make_unique<Config>(profile_name, Config::ConfigType::InputProfile));
 
     return SaveProfile(profile_name, player_index);
 }
diff --git a/src/yuzu/configuration/input_profiles.h b/src/yuzu/configuration/input_profiles.h
index 65fc9e62c..2bf3e4250 100644
--- a/src/yuzu/configuration/input_profiles.h
+++ b/src/yuzu/configuration/input_profiles.h
@@ -15,7 +15,7 @@ class Config;
 
 class InputProfiles {
 public:
-    explicit InputProfiles(Core::System& system_);
+    explicit InputProfiles();
     virtual ~InputProfiles();
 
     std::vector<std::string> GetInputProfileNames();
@@ -31,6 +31,4 @@ private:
     bool ProfileExistsInMap(const std::string& profile_name) const;
 
     std::unordered_map<std::string, std::unique_ptr<Config>> map_profiles;
-
-    Core::System& system;
 };
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index a85adc072..7b16d7f7e 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -3,6 +3,7 @@
 
 #include <cinttypes>
 #include <clocale>
+#include <cmath>
 #include <memory>
 #include <thread>
 #ifdef __APPLE__
@@ -105,12 +106,12 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/service/am/am.h"
 #include "core/hle/service/filesystem/filesystem.h"
-#include "core/hle/service/nfp/nfp.h"
 #include "core/hle/service/sm/sm.h"
 #include "core/loader/loader.h"
 #include "core/perf_stats.h"
 #include "core/telemetry_session.h"
 #include "input_common/drivers/tas_input.h"
+#include "input_common/drivers/virtual_amiibo.h"
 #include "input_common/main.h"
 #include "ui_main.h"
 #include "util/overlay_dialog.h"
@@ -138,6 +139,10 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
 #include "yuzu/uisettings.h"
 #include "yuzu/util/clickable_label.h"
 
+#ifdef YUZU_DBGHELP
+#include "yuzu/mini_dump.h"
+#endif
+
 using namespace Common::Literals;
 
 #ifdef USE_DISCORD_PRESENCE
@@ -257,6 +262,18 @@ static QString PrettyProductName() {
     return QSysInfo::prettyProductName();
 }
 
+#ifdef _WIN32
+static void OverrideWindowsFont() {
+    // Qt5 chooses these fonts on Windows and they have fairly ugly alphanumeric/cyrillic characters
+    // Asking to use "MS Shell Dlg 2" gives better other chars while leaving the Chinese Characters.
+    const QString startup_font = QApplication::font().family();
+    const QStringList ugly_fonts = {QStringLiteral("SimSun"), QStringLiteral("PMingLiU")};
+    if (ugly_fonts.contains(startup_font)) {
+        QApplication::setFont(QFont(QStringLiteral("MS Shell Dlg 2"), 9, QFont::Normal));
+    }
+}
+#endif
+
 bool GMainWindow::CheckDarkMode() {
 #ifdef __linux__
     const QPalette test_palette(qApp->palette());
@@ -269,15 +286,15 @@ bool GMainWindow::CheckDarkMode() {
 #endif // __linux__
 }
 
-GMainWindow::GMainWindow(bool has_broken_vulkan)
+GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan)
     : ui{std::make_unique<Ui::MainWindow>()}, system{std::make_unique<Core::System>()},
-      input_subsystem{std::make_shared<InputCommon::InputSubsystem>()},
-      config{std::make_unique<Config>(*system)},
+      input_subsystem{std::make_shared<InputCommon::InputSubsystem>()}, config{std::move(config_)},
       vfs{std::make_shared<FileSys::RealVfsFilesystem>()},
       provider{std::make_unique<FileSys::ManualContentProvider>()} {
 #ifdef __linux__
     SetupSigInterrupts();
 #endif
+    system->Initialize();
 
     Common::Log::Initialize();
     LoadTranslation();
@@ -896,8 +913,8 @@ void GMainWindow::InitializeWidgets() {
     }
 
     // TODO (flTobi): Add the widget when multiplayer is fully implemented
-    // statusBar()->addPermanentWidget(multiplayer_state->GetStatusText(), 0);
-    // statusBar()->addPermanentWidget(multiplayer_state->GetStatusIcon(), 0);
+    statusBar()->addPermanentWidget(multiplayer_state->GetStatusText(), 0);
+    statusBar()->addPermanentWidget(multiplayer_state->GetStatusIcon(), 0);
 
     tas_label = new QLabel();
     tas_label->setObjectName(QStringLiteral("TASlabel"));
@@ -1296,6 +1313,7 @@ void GMainWindow::ConnectMenuEvents() {
             &MultiplayerState::OnDirectConnectToRoom);
     connect(ui->action_Show_Room, &QAction::triggered, multiplayer_state,
             &MultiplayerState::OnOpenNetworkRoom);
+    connect(multiplayer_state, &MultiplayerState::SaveConfig, this, &GMainWindow::OnSaveConfig);
 
     // Tools
     connect_menu(ui->action_Rederive, std::bind(&GMainWindow::OnReinitializeKeys, this,
@@ -1336,6 +1354,8 @@ void GMainWindow::UpdateMenuState() {
     } else {
         ui->action_Pause->setText(tr("&Pause"));
     }
+
+    multiplayer_state->UpdateNotificationStatus();
 }
 
 void GMainWindow::OnDisplayTitleBars(bool show) {
@@ -1637,7 +1657,8 @@ void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t
         const auto config_file_name = title_id == 0
                                           ? Common::FS::PathToUTF8String(file_path.filename())
                                           : fmt::format("{:016X}", title_id);
-        Config per_game_config(*system, config_file_name, Config::ConfigType::PerGameConfig);
+        Config per_game_config(config_file_name, Config::ConfigType::PerGameConfig);
+        system->ApplySettings();
     }
 
     // Save configurations
@@ -1875,6 +1896,8 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
     case GameListOpenTarget::SaveData: {
         open_target = tr("Save Data");
         const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir);
+        auto vfs_nand_dir =
+            vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read);
 
         if (has_user_save) {
             // User save data
@@ -1901,15 +1924,15 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
             ASSERT(user_id);
 
             const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
-                *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
-                program_id, user_id->AsU128(), 0);
+                *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+                FileSys::SaveDataType::SaveData, program_id, user_id->AsU128(), 0);
 
             path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path);
         } else {
             // Device save data
             const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath(
-                *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
-                program_id, {}, 0);
+                *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+                FileSys::SaveDataType::SaveData, program_id, {}, 0);
 
             path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path);
         }
@@ -1996,7 +2019,7 @@ static bool RomFSRawCopy(QProgressDialog& dialog, const FileSys::VirtualDir& src
 }
 
 void GMainWindow::OnGameListRemoveInstalledEntry(u64 program_id, InstalledEntryType type) {
-    const QString entry_type = [this, type] {
+    const QString entry_type = [type] {
         switch (type) {
         case InstalledEntryType::Game:
            return tr("Contents");
@@ -2093,7 +2116,7 @@ void GMainWindow::RemoveAddOnContent(u64 program_id, const QString& entry_type)
 
 void GMainWindow::OnGameListRemoveFile(u64 program_id, GameListRemoveTarget target,
                                        const std::string& game_path) {
-    const QString question = [this, target] {
+    const QString question = [target] {
         switch (target) {
         case GameListRemoveTarget::GlShaderCache:
             return tr("Delete OpenGL Transferable Shader Cache?");
@@ -2766,6 +2789,11 @@ void GMainWindow::OnExit() {
     OnStopGame();
 }
 
+void GMainWindow::OnSaveConfig() {
+    system->ApplySettings();
+    config->Save();
+}
+
 void GMainWindow::ErrorDisplayDisplayError(QString error_code, QString error_text) {
     OverlayDialog dialog(render_window, *system, error_code, error_text, QString{}, tr("OK"),
                          Qt::AlignLeft | Qt::AlignVCenter);
@@ -2981,7 +3009,7 @@ void GMainWindow::OnConfigure() {
 
         Settings::values.disabled_addons.clear();
 
-        config = std::make_unique<Config>(*system);
+        config = std::make_unique<Config>();
         UISettings::values.reset_to_defaults = false;
         UISettings::values.game_dirs = std::move(old_game_dirs);
@@ -3042,6 +3070,7 @@ void GMainWindow::OnConfigure() {
     UpdateStatusButtons();
     controller_dialog->refreshConfiguration();
+    system->ApplySettings();
 }
 
 void GMainWindow::OnConfigureTas() {
nfc_state = nfc->GetCurrentState(); - if (nfc_state == Service::NFP::DeviceState::TagFound || - nfc_state == Service::NFP::DeviceState::TagMounted) { - nfc->CloseAmiibo(); + auto* virtual_amiibo = input_subsystem->GetVirtualAmiibo(); + + // Remove amiibo if one is connected + if (virtual_amiibo->GetCurrentState() == InputCommon::VirtualAmiibo::State::AmiiboIsOpen) { + virtual_amiibo->CloseAmiibo(); QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed")); return; } - if (nfc_state != Service::NFP::DeviceState::SearchingForTag) { + if (virtual_amiibo->GetCurrentState() != InputCommon::VirtualAmiibo::State::WaitingForAmiibo) { QMessageBox::warning(this, tr("Error"), tr("The current game is not looking for amiibos")); return; } @@ -3239,43 +3263,30 @@ void GMainWindow::OnLoadAmiibo() { } void GMainWindow::LoadAmiibo(const QString& filename) { - Service::SM::ServiceManager& sm = system->ServiceManager(); - auto nfc = sm.GetService<Service::NFP::Module::Interface>("nfp:user"); - if (nfc == nullptr) { - return; - } - + auto* virtual_amiibo = input_subsystem->GetVirtualAmiibo(); + const QString title = tr("Error loading Amiibo data"); // Remove amiibo if one is connected - const auto nfc_state = nfc->GetCurrentState(); - if (nfc_state == Service::NFP::DeviceState::TagFound || - nfc_state == Service::NFP::DeviceState::TagMounted) { - nfc->CloseAmiibo(); + if (virtual_amiibo->GetCurrentState() == InputCommon::VirtualAmiibo::State::AmiiboIsOpen) { + virtual_amiibo->CloseAmiibo(); QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed")); return; } - QFile nfc_file{filename}; - if (!nfc_file.open(QIODevice::ReadOnly)) { - QMessageBox::warning(this, tr("Error opening Amiibo data file"), - tr("Unable to open Amiibo file \"%1\" for reading.").arg(filename)); - return; - } - - const u64 nfc_file_size = nfc_file.size(); - std::vector<u8> buffer(nfc_file_size); - const u64 read_size = nfc_file.read(reinterpret_cast<char*>(buffer.data()), nfc_file_size); - if (nfc_file_size != read_size) { - QMessageBox::warning(this, tr("Error reading Amiibo data file"), - tr("Unable to fully read Amiibo data. 
Expected to read %1 bytes, but " - "was only able to read %2 bytes.") - .arg(nfc_file_size) - .arg(read_size)); - return; - } - - if (!nfc->LoadAmiibo(buffer)) { - QMessageBox::warning(this, tr("Error loading Amiibo data"), - tr("Unable to load Amiibo data.")); + switch (virtual_amiibo->LoadAmiibo(filename.toStdString())) { + case InputCommon::VirtualAmiibo::Info::NotAnAmiibo: + QMessageBox::warning(this, title, tr("The selected file is not a valid amiibo")); + break; + case InputCommon::VirtualAmiibo::Info::UnableToLoad: + QMessageBox::warning(this, title, tr("The selected file is already in use")); + break; + case InputCommon::VirtualAmiibo::Info::WrongDeviceState: + QMessageBox::warning(this, title, tr("The current game is not looking for amiibos")); + break; + case InputCommon::VirtualAmiibo::Info::Unknown: + QMessageBox::warning(this, title, tr("An unknown error occurred")); + break; + default: + break; } } @@ -3456,9 +3467,10 @@ void GMainWindow::UpdateStatusBar() { } if (!Settings::values.use_speed_limit) { game_fps_label->setText( - tr("Game: %1 FPS (Unlocked)").arg(results.average_game_fps, 0, 'f', 0)); + tr("Game: %1 FPS (Unlocked)").arg(std::round(results.average_game_fps), 0, 'f', 0)); } else { - game_fps_label->setText(tr("Game: %1 FPS").arg(results.average_game_fps, 0, 'f', 0)); + game_fps_label->setText( + tr("Game: %1 FPS").arg(std::round(results.average_game_fps), 0, 'f', 0)); } emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2)); @@ -4082,8 +4094,26 @@ void GMainWindow::changeEvent(QEvent* event) { #endif int main(int argc, char* argv[]) { + std::unique_ptr<Config> config = std::make_unique<Config>(); bool has_broken_vulkan = false; - if (StartupChecks(argv[0], &has_broken_vulkan)) { + bool is_child = false; + if (CheckEnvVars(&is_child)) { + return 0; + } + +#ifdef YUZU_DBGHELP + PROCESS_INFORMATION pi; + if (!is_child && Settings::values.create_crash_dumps.GetValue() && + MiniDump::SpawnDebuggee(argv[0], pi)) { + // Delete the config object so that it doesn't save when the program exits + config.reset(nullptr); + MiniDump::DebugDebuggee(pi); + return 0; + } +#endif + + if (StartupChecks(argv[0], &has_broken_vulkan, + Settings::values.perform_vulkan_check.GetValue())) { return 0; } @@ -4122,6 +4152,10 @@ int main(int argc, char* argv[]) { QCoreApplication::setAttribute(Qt::AA_DontCheckOpenGLContextThreadAffinity); QApplication app(argc, argv); +#ifdef _WIN32 + OverrideWindowsFont(); +#endif + // Workaround for QTBUG-85409, for Suzhou numerals the number 1 is actually \u3021 // so we can see if we get \u3008 instead // TL;DR all other number formats are consecutive in unicode code points @@ -4135,7 +4169,7 @@ // generating shaders setlocale(LC_ALL, "C"); - GMainWindow main_window{has_broken_vulkan}; + GMainWindow main_window{std::move(config), has_broken_vulkan}; // After settings have been loaded by GMainWindow, apply the filter main_window.show(); diff --git a/src/yuzu/main.h b/src/yuzu/main.h index 1ae2b93d9..f7aa8e417 100644 --- a/src/yuzu/main.h +++ b/src/yuzu/main.h @@ -120,7 +120,7 @@ class GMainWindow : public QMainWindow { public: void filterBarSetChecked(bool state); void UpdateUITheme(); - explicit GMainWindow(bool has_broken_vulkan); + explicit GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan); ~GMainWindow() override; bool DropAction(QDropEvent* event); @@ -169,6 +169,7 @@ public slots: void OnLoadComplete(); void OnExecuteProgram(std::size_t program_index); void
OnExit(); + void OnSaveConfig(); void ControllerSelectorReconfigureControllers( const Core::Frontend::ControllerParameters& parameters); void SoftwareKeyboardInitialize( diff --git a/src/yuzu/main.ui b/src/yuzu/main.ui index cdf31b417..74d49dbd4 100644 --- a/src/yuzu/main.ui +++ b/src/yuzu/main.ui @@ -120,6 +120,20 @@ <addaction name="menu_Reset_Window_Size"/> <addaction name="menu_View_Debugging"/> </widget> + <widget class="QMenu" name="menu_Multiplayer"> + <property name="enabled"> + <bool>true</bool> + </property> + <property name="title"> + <string>&amp;Multiplayer</string> + </property> + <addaction name="action_View_Lobby"/> + <addaction name="action_Start_Room"/> + <addaction name="action_Connect_To_Room"/> + <addaction name="separator"/> + <addaction name="action_Show_Room"/> + <addaction name="action_Leave_Room"/> + </widget> <widget class="QMenu" name="menu_Tools"> <property name="title"> <string>&amp;Tools</string> </property> @@ -251,7 +265,7 @@ <bool>true</bool> </property> <property name="text"> - <string>Browse Public Game Lobby</string> + <string>&amp;Browse Public Game Lobby</string> </property> </action> <action name="action_Start_Room"> @@ -259,7 +273,7 @@ <bool>true</bool> </property> <property name="text"> - <string>Create Room</string> + <string>&amp;Create Room</string> </property> </action> <action name="action_Leave_Room"> @@ -267,12 +281,12 @@ <bool>false</bool> </property> <property name="text"> - <string>Leave Room</string> + <string>&amp;Leave Room</string> </property> </action> <action name="action_Connect_To_Room"> <property name="text"> - <string>Direct Connect to Room</string> + <string>&amp;Direct Connect to Room</string> </property> </action> <action name="action_Show_Room"> @@ -280,7 +294,7 @@ <bool>false</bool> </property> <property name="text"> - <string>Show Current Room</string> + <string>&amp;Show Current Room</string> </property> </action> <action name="action_Fullscreen"> diff --git a/src/yuzu/mini_dump.cpp b/src/yuzu/mini_dump.cpp new file mode 100644 index 000000000..a34dc6a9c --- /dev/null +++ b/src/yuzu/mini_dump.cpp @@ -0,0 +1,202 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <cstdio> +#include <cstring> +#include <ctime> +#include <filesystem> +#include <fmt/format.h> +#include <windows.h> +#include "yuzu/mini_dump.h" +#include "yuzu/startup_checks.h" + +// dbghelp.h must be included after windows.h +#include <dbghelp.h> + +namespace MiniDump { + +void CreateMiniDump(HANDLE process_handle, DWORD process_id, MINIDUMP_EXCEPTION_INFORMATION* info, + EXCEPTION_POINTERS* pep) { + char file_name[255]; + const std::time_t the_time = std::time(nullptr); + std::strftime(file_name, 255, "yuzu-crash-%Y%m%d%H%M%S.dmp", std::localtime(&the_time)); + + // Open the file + HANDLE file_handle = CreateFileA(file_name, GENERIC_READ | GENERIC_WRITE, 0, nullptr, + CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr); + + if (file_handle == nullptr || file_handle == INVALID_HANDLE_VALUE) { + fmt::print(stderr, "CreateFileA failed. Error: {}", GetLastError()); + return; + } + + // Create the minidump + const MINIDUMP_TYPE dump_type = MiniDumpNormal; + + const bool write_dump_status = MiniDumpWriteDump(process_handle, process_id, file_handle, + dump_type, (pep != 0) ? info : 0, 0, 0); + + if (write_dump_status) { + fmt::print(stderr, "MiniDump created: {}", file_name); + } else { + fmt::print(stderr, "MiniDumpWriteDump failed.
Error: {}", GetLastError()); + } + + // Close the file + CloseHandle(file_handle); +} + +void DumpFromDebugEvent(DEBUG_EVENT& deb_ev, PROCESS_INFORMATION& pi) { + EXCEPTION_RECORD& record = deb_ev.u.Exception.ExceptionRecord; + + HANDLE thread_handle = OpenThread(THREAD_GET_CONTEXT, false, deb_ev.dwThreadId); + if (thread_handle == nullptr) { + fmt::print(stderr, "OpenThread failed ({})", GetLastError()); + return; + } + + // Get child process context + CONTEXT context = {}; + context.ContextFlags = CONTEXT_ALL; + if (!GetThreadContext(thread_handle, &context)) { + fmt::print(stderr, "GetThreadContext failed ({})", GetLastError()); + return; + } + + // Create exception pointers for minidump + EXCEPTION_POINTERS ep; + ep.ExceptionRecord = &record; + ep.ContextRecord = &context; + + MINIDUMP_EXCEPTION_INFORMATION info; + info.ThreadId = deb_ev.dwThreadId; + info.ExceptionPointers = &ep; + info.ClientPointers = false; + + CreateMiniDump(pi.hProcess, pi.dwProcessId, &info, &ep); + + if (CloseHandle(thread_handle) == 0) { + fmt::print(stderr, "error: CloseHandle(thread_handle) failed ({})", GetLastError()); + } +} + +bool SpawnDebuggee(const char* arg0, PROCESS_INFORMATION& pi) { + std::memset(&pi, 0, sizeof(pi)); + + // Don't debug if we are already being debugged + if (IsDebuggerPresent()) { + return false; + } + + if (!SpawnChild(arg0, &pi, 0)) { + fmt::print(stderr, "warning: continuing without crash dumps"); + return false; + } + + const bool can_debug = DebugActiveProcess(pi.dwProcessId); + if (!can_debug) { + fmt::print(stderr, + "warning: DebugActiveProcess failed ({}), continuing without crash dumps", + GetLastError()); + return false; + } + + return true; +} + +static const char* ExceptionName(DWORD exception) { + switch (exception) { + case EXCEPTION_ACCESS_VIOLATION: + return "EXCEPTION_ACCESS_VIOLATION"; + case EXCEPTION_DATATYPE_MISALIGNMENT: + return "EXCEPTION_DATATYPE_MISALIGNMENT"; + case EXCEPTION_BREAKPOINT: + return "EXCEPTION_BREAKPOINT"; + case EXCEPTION_SINGLE_STEP: + return "EXCEPTION_SINGLE_STEP"; + case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: + return "EXCEPTION_ARRAY_BOUNDS_EXCEEDED"; + case EXCEPTION_FLT_DENORMAL_OPERAND: + return "EXCEPTION_FLT_DENORMAL_OPERAND"; + case EXCEPTION_FLT_DIVIDE_BY_ZERO: + return "EXCEPTION_FLT_DIVIDE_BY_ZERO"; + case EXCEPTION_FLT_INEXACT_RESULT: + return "EXCEPTION_FLT_INEXACT_RESULT"; + case EXCEPTION_FLT_INVALID_OPERATION: + return "EXCEPTION_FLT_INVALID_OPERATION"; + case EXCEPTION_FLT_OVERFLOW: + return "EXCEPTION_FLT_OVERFLOW"; + case EXCEPTION_FLT_STACK_CHECK: + return "EXCEPTION_FLT_STACK_CHECK"; + case EXCEPTION_FLT_UNDERFLOW: + return "EXCEPTION_FLT_UNDERFLOW"; + case EXCEPTION_INT_DIVIDE_BY_ZERO: + return "EXCEPTION_INT_DIVIDE_BY_ZERO"; + case EXCEPTION_INT_OVERFLOW: + return "EXCEPTION_INT_OVERFLOW"; + case EXCEPTION_PRIV_INSTRUCTION: + return "EXCEPTION_PRIV_INSTRUCTION"; + case EXCEPTION_IN_PAGE_ERROR: + return "EXCEPTION_IN_PAGE_ERROR"; + case EXCEPTION_ILLEGAL_INSTRUCTION: + return "EXCEPTION_ILLEGAL_INSTRUCTION"; + case EXCEPTION_NONCONTINUABLE_EXCEPTION: + return "EXCEPTION_NONCONTINUABLE_EXCEPTION"; + case EXCEPTION_STACK_OVERFLOW: + return "EXCEPTION_STACK_OVERFLOW"; + case EXCEPTION_INVALID_DISPOSITION: + return "EXCEPTION_INVALID_DISPOSITION"; + case EXCEPTION_GUARD_PAGE: + return "EXCEPTION_GUARD_PAGE"; + case EXCEPTION_INVALID_HANDLE: + return "EXCEPTION_INVALID_HANDLE"; + default: + return "unknown exception type"; + } +} + +void DebugDebuggee(PROCESS_INFORMATION& pi) { + DEBUG_EVENT deb_ev = {}; + + while 
(deb_ev.dwDebugEventCode != EXIT_PROCESS_DEBUG_EVENT) { + const bool wait_success = WaitForDebugEvent(&deb_ev, INFINITE); + if (!wait_success) { + fmt::print(stderr, "error: WaitForDebugEvent failed ({})", GetLastError()); + return; + } + + switch (deb_ev.dwDebugEventCode) { + case OUTPUT_DEBUG_STRING_EVENT: + case CREATE_PROCESS_DEBUG_EVENT: + case CREATE_THREAD_DEBUG_EVENT: + case EXIT_PROCESS_DEBUG_EVENT: + case EXIT_THREAD_DEBUG_EVENT: + case LOAD_DLL_DEBUG_EVENT: + case RIP_EVENT: + case UNLOAD_DLL_DEBUG_EVENT: + // Continue on all other debug events + ContinueDebugEvent(deb_ev.dwProcessId, deb_ev.dwThreadId, DBG_CONTINUE); + break; + case EXCEPTION_DEBUG_EVENT: + EXCEPTION_RECORD& record = deb_ev.u.Exception.ExceptionRecord; + + // We want to generate a crash dump if we are seeing the same exception again. + if (!deb_ev.u.Exception.dwFirstChance) { + fmt::print(stderr, "Creating MiniDump on ExceptionCode: 0x{:08x} {}\n", + record.ExceptionCode, ExceptionName(record.ExceptionCode)); + DumpFromDebugEvent(deb_ev, pi); + } + + // Continue without handling the exception. + // Lets the debuggee use its own exception handler. + // - If one does not exist, we will see the exception once more, at which point we make + // a minidump. Then, when it reaches here again, yuzu will probably crash. + // - DBG_CONTINUE on an exception that the debuggee does not handle can set us up for an + // infinite loop of exceptions. + ContinueDebugEvent(deb_ev.dwProcessId, deb_ev.dwThreadId, DBG_EXCEPTION_NOT_HANDLED); + break; + } + } +} + +} // namespace MiniDump diff --git a/src/yuzu/mini_dump.h b/src/yuzu/mini_dump.h new file mode 100644 index 000000000..d6b6cca84 --- /dev/null +++ b/src/yuzu/mini_dump.h @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <windows.h> + +#include <dbghelp.h> + +namespace MiniDump { + +void CreateMiniDump(HANDLE process_handle, DWORD process_id, MINIDUMP_EXCEPTION_INFORMATION* info, + EXCEPTION_POINTERS* pep); + +void DumpFromDebugEvent(DEBUG_EVENT& deb_ev, PROCESS_INFORMATION& pi); +bool SpawnDebuggee(const char* arg0, PROCESS_INFORMATION& pi); +void DebugDebuggee(PROCESS_INFORMATION& pi); + +} // namespace MiniDump diff --git a/src/yuzu/multiplayer/chat_room.cpp b/src/yuzu/multiplayer/chat_room.cpp index 9e672f82e..dec9696c1 100644 --- a/src/yuzu/multiplayer/chat_room.cpp +++ b/src/yuzu/multiplayer/chat_room.cpp @@ -61,7 +61,10 @@ public: /// Format the message using the player's color QString GetPlayerChatMessage(u16 player) const { - auto color = player_color[player % 16]; + const bool is_dark_theme = QIcon::themeName().contains(QStringLiteral("dark")) || + QIcon::themeName().contains(QStringLiteral("midnight")); + auto color = + is_dark_theme ?
player_color_dark[player % 16] : player_color_default[player % 16]; QString name; if (username.isEmpty() || username == nickname) { name = nickname; @@ -84,9 +87,12 @@ public: } private: - static constexpr std::array<const char*, 16> player_color = { + static constexpr std::array<const char*, 16> player_color_default = { {"#0000FF", "#FF0000", "#8A2BE2", "#FF69B4", "#1E90FF", "#008000", "#00FF7F", "#B22222", - "#DAA520", "#FF4500", "#2E8B57", "#5F9EA0", "#D2691E", "#9ACD32", "#FF7F50", "FFFF00"}}; + "#DAA520", "#FF4500", "#2E8B57", "#5F9EA0", "#D2691E", "#9ACD32", "#FF7F50", "#FFFF00"}}; + static constexpr std::array<const char*, 16> player_color_dark = { + {"#559AD1", "#4EC9A8", "#D69D85", "#C6C923", "#B975B5", "#D81F1F", "#7EAE39", "#4F8733", + "#F7CD8A", "#6FCACF", "#CE4897", "#8A2BE2", "#D2691E", "#9ACD32", "#FF7F50", "#152ccd"}}; static constexpr char ping_color[] = "#FFFF00"; QString timestamp; diff --git a/src/yuzu/multiplayer/client_room.cpp b/src/yuzu/multiplayer/client_room.cpp index b34a8d004..caf34a414 100644 --- a/src/yuzu/multiplayer/client_room.cpp +++ b/src/yuzu/multiplayer/client_room.cpp @@ -97,8 +97,9 @@ void ClientRoomWindow::UpdateView() { auto memberlist = member->GetMemberInformation(); ui->chat->SetPlayerList(memberlist); const auto information = member->GetRoomInformation(); - setWindowTitle(QString(tr("%1 (%2/%3 members) - connected")) + setWindowTitle(QString(tr("%1 - %2 (%3/%4 members) - connected")) .arg(QString::fromStdString(information.name)) + .arg(QString::fromStdString(information.preferred_game.name)) .arg(memberlist.size()) .arg(information.member_slots)); ui->description->setText(QString::fromStdString(information.description)); diff --git a/src/yuzu/multiplayer/direct_connect.cpp b/src/yuzu/multiplayer/direct_connect.cpp index 017063074..10bf0a4fb 100644 --- a/src/yuzu/multiplayer/direct_connect.cpp +++ b/src/yuzu/multiplayer/direct_connect.cpp @@ -106,6 +106,8 @@ void DirectConnectWindow::Connect() { UISettings::values.multiplayer_port = UISettings::values.multiplayer_port.GetDefault(); } + emit SaveConfig(); + // attempt to connect in a different thread QFuture<void> f = QtConcurrent::run([&] { if (auto room_member = room_network.GetRoomMember().lock()) { diff --git a/src/yuzu/multiplayer/direct_connect.h b/src/yuzu/multiplayer/direct_connect.h index e39dd1e0d..b8f66cfb2 100644 --- a/src/yuzu/multiplayer/direct_connect.h +++ b/src/yuzu/multiplayer/direct_connect.h @@ -31,6 +31,7 @@ signals: * connections that it might have. 
*/ void Closed(); + void SaveConfig(); private slots: void OnConnection(); diff --git a/src/yuzu/multiplayer/host_room.cpp b/src/yuzu/multiplayer/host_room.cpp index 0c6adfd04..a8faa5b24 100644 --- a/src/yuzu/multiplayer/host_room.cpp +++ b/src/yuzu/multiplayer/host_room.cpp @@ -232,6 +232,7 @@ void HostRoomWindow::Host() { } UISettings::values.multiplayer_room_description = ui->room_description->toPlainText(); ui->host->setEnabled(true); + emit SaveConfig(); close(); } } diff --git a/src/yuzu/multiplayer/host_room.h b/src/yuzu/multiplayer/host_room.h index 034cb2eef..ae816e2e0 100644 --- a/src/yuzu/multiplayer/host_room.h +++ b/src/yuzu/multiplayer/host_room.h @@ -46,6 +46,9 @@ public: void UpdateGameList(QStandardItemModel* list); void RetranslateUi(); +signals: + void SaveConfig(); + private: void Host(); std::unique_ptr<Network::VerifyUser::Backend> CreateVerifyBackend(bool use_validation) const; diff --git a/src/yuzu/multiplayer/lobby.cpp b/src/yuzu/multiplayer/lobby.cpp index 107d40547..08c275696 100644 --- a/src/yuzu/multiplayer/lobby.cpp +++ b/src/yuzu/multiplayer/lobby.cpp @@ -7,6 +7,7 @@ #include "common/logging/log.h" #include "common/settings.h" #include "core/core.h" +#include "core/hle/service/acc/profile_manager.h" #include "core/internal_network/network_interface.h" #include "network/network.h" #include "ui_lobby.h" @@ -26,9 +27,9 @@ Lobby::Lobby(QWidget* parent, QStandardItemModel* list, std::shared_ptr<Core::AnnounceMultiplayerSession> session, Core::System& system_) : QDialog(parent, Qt::WindowTitleHint | Qt::WindowCloseButtonHint | Qt::WindowSystemMenuHint), - ui(std::make_unique<Ui::Lobby>()), - announce_multiplayer_session(session), system{system_}, room_network{ - system.GetRoomNetwork()} { + ui(std::make_unique<Ui::Lobby>()), announce_multiplayer_session(session), + profile_manager(std::make_unique<Service::Account::ProfileManager>()), system{system_}, + room_network{system.GetRoomNetwork()} { ui->setupUi(this); // setup the watcher for background connections @@ -60,9 +61,17 @@ Lobby::Lobby(QWidget* parent, QStandardItemModel* list, ui->nickname->setValidator(validation.GetNickname()); ui->nickname->setText(UISettings::values.multiplayer_nickname.GetValue()); - if (ui->nickname->text().isEmpty() && !Settings::values.yuzu_username.GetValue().empty()) { - // Use yuzu Web Service user name as nickname by default - ui->nickname->setText(QString::fromStdString(Settings::values.yuzu_username.GetValue())); + + // Try to find the best nickname by default + if (ui->nickname->text().isEmpty() || ui->nickname->text() == QStringLiteral("yuzu")) { + if (!Settings::values.yuzu_username.GetValue().empty()) { + ui->nickname->setText( + QString::fromStdString(Settings::values.yuzu_username.GetValue())); + } else if (!GetProfileUsername().empty()) { + ui->nickname->setText(QString::fromStdString(GetProfileUsername())); + } else { + ui->nickname->setText(QStringLiteral("yuzu")); + } } // UI Buttons @@ -76,12 +85,6 @@ Lobby::Lobby(QWidget* parent, QStandardItemModel* list, // Actions connect(&room_list_watcher, &QFutureWatcher<AnnounceMultiplayerRoom::RoomList>::finished, this, &Lobby::OnRefreshLobby); - - // manually start a refresh when the window is opening - // TODO(jroweboy): if this refresh is slow for people with bad internet, then don't do it as - // part of the constructor, but offload the refresh until after the window shown.
perhaps emit a - // refreshroomlist signal from places that open the lobby - RefreshLobby(); } Lobby::~Lobby() = default; @@ -96,6 +99,7 @@ void Lobby::UpdateGameList(QStandardItemModel* list) { } if (proxy) proxy->UpdateGameList(game_list); + ui->room_list->sortByColumn(Column::GAME_NAME, Qt::AscendingOrder); } void Lobby::RetranslateUi() { @@ -117,6 +121,11 @@ void Lobby::OnExpandRoom(const QModelIndex& index) { void Lobby::OnJoinRoom(const QModelIndex& source) { if (!Network::GetSelectedNetworkInterface()) { + LOG_INFO(WebService, "Automatically selected network interface for room network."); + Network::SelectFirstNetworkInterface(); + } + + if (!Network::GetSelectedNetworkInterface()) { NetworkMessage::ErrorManager::ShowError( NetworkMessage::ErrorManager::NO_INTERFACE_SELECTED); return; @@ -197,16 +206,16 @@ void Lobby::OnJoinRoom(const QModelIndex& source) { proxy->data(connection_index, LobbyItemHost::HostIPRole).toString(); UISettings::values.multiplayer_port = proxy->data(connection_index, LobbyItemHost::HostPortRole).toInt(); + emit SaveConfig(); } void Lobby::ResetModel() { model->clear(); model->insertColumns(0, Column::TOTAL); - model->setHeaderData(Column::EXPAND, Qt::Horizontal, QString(), Qt::DisplayRole); + model->setHeaderData(Column::MEMBER, Qt::Horizontal, tr("Players"), Qt::DisplayRole); model->setHeaderData(Column::ROOM_NAME, Qt::Horizontal, tr("Room Name"), Qt::DisplayRole); model->setHeaderData(Column::GAME_NAME, Qt::Horizontal, tr("Preferred Game"), Qt::DisplayRole); model->setHeaderData(Column::HOST, Qt::Horizontal, tr("Host"), Qt::DisplayRole); - model->setHeaderData(Column::MEMBER, Qt::Horizontal, tr("Players"), Qt::DisplayRole); } void Lobby::RefreshLobby() { @@ -229,6 +238,7 @@ void Lobby::OnRefreshLobby() { for (int r = 0; r < game_list->rowCount(); ++r) { auto index = game_list->index(r, 0); auto game_id = game_list->data(index, GameListItemPath::ProgramIdRole).toULongLong(); + if (game_id != 0 && room.information.preferred_game.id == game_id) { smdh_icon = game_list->data(index, Qt::DecorationRole).value<QPixmap>(); } @@ -243,17 +253,16 @@ void Lobby::OnRefreshLobby() { members.append(var); } - auto first_item = new LobbyItem(); + auto first_item = new LobbyItemGame( + room.information.preferred_game.id, + QString::fromStdString(room.information.preferred_game.name), smdh_icon); auto row = QList<QStandardItem*>({ first_item, new LobbyItemName(room.has_password, QString::fromStdString(room.information.name)), - new LobbyItemGame(room.information.preferred_game.id, - QString::fromStdString(room.information.preferred_game.name), - smdh_icon), + new LobbyItemMemberList(members, room.information.member_slots), new LobbyItemHost(QString::fromStdString(room.information.host_username), QString::fromStdString(room.ip), room.information.port, QString::fromStdString(room.verify_uid)), - new LobbyItemMemberList(members, room.information.member_slots), }); model->appendRow(row); // To make the rows expandable, add the member data as a child of the first column of the @@ -283,6 +292,26 @@ void Lobby::OnRefreshLobby() { ui->room_list->setFirstColumnSpanned(j, proxy->index(i, 0), true); } } + + ui->room_list->sortByColumn(Column::GAME_NAME, Qt::AscendingOrder); +} + +std::string Lobby::GetProfileUsername() { + const auto& current_user = profile_manager->GetUser(Settings::values.current_user.GetValue()); + Service::Account::ProfileBase profile{}; + + if (!current_user.has_value()) { + return ""; + } + + if (!profile_manager->GetProfileBase(*current_user, profile)) { + 
return ""; + } + + const auto text = Common::StringFromFixedZeroTerminatedBuffer( + reinterpret_cast<const char*>(profile.username.data()), profile.username.size()); + + return text; } LobbyFilterProxyModel::LobbyFilterProxyModel(QWidget* parent, QStandardItemModel* list) diff --git a/src/yuzu/multiplayer/lobby.h b/src/yuzu/multiplayer/lobby.h index 2696aec21..300dad13e 100644 --- a/src/yuzu/multiplayer/lobby.h +++ b/src/yuzu/multiplayer/lobby.h @@ -24,6 +24,10 @@ namespace Core { class System; } +namespace Service::Account { +class ProfileManager; +} + /** * Listing of all public games pulled from services. The lobby should be simple enough for users to * find the game they want to play, and join it. @@ -75,8 +79,11 @@ private slots: signals: void StateChanged(const Network::RoomMember::State&); + void SaveConfig(); private: + std::string GetProfileUsername(); + /** * Removes all entries in the Lobby before refreshing. */ @@ -96,6 +103,7 @@ private: QFutureWatcher<AnnounceMultiplayerRoom::RoomList> room_list_watcher; std::weak_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session; + std::unique_ptr<Service::Account::ProfileManager> profile_manager; QFutureWatcher<void>* watcher; Validation validation; Core::System& system; diff --git a/src/yuzu/multiplayer/lobby_p.h b/src/yuzu/multiplayer/lobby_p.h index 8071cede4..068c95aca 100644 --- a/src/yuzu/multiplayer/lobby_p.h +++ b/src/yuzu/multiplayer/lobby_p.h @@ -11,11 +11,10 @@ namespace Column { enum List { - EXPAND, - ROOM_NAME, GAME_NAME, - HOST, + ROOM_NAME, MEMBER, + HOST, TOTAL, }; } @@ -91,6 +90,8 @@ public: setData(game_name, GameNameRole); if (!smdh_icon.isNull()) { setData(smdh_icon, GameIconRole); + } else { + setData(QIcon::fromTheme(QStringLiteral("chip")).pixmap(32), GameIconRole); } } @@ -98,7 +99,12 @@ public: if (role == Qt::DecorationRole) { auto val = data(GameIconRole); if (val.isValid()) { - val = val.value<QPixmap>().scaled(16, 16, Qt::KeepAspectRatio); + val = val.value<QPixmap>().scaled(32, 32, Qt::KeepAspectRatio, + Qt::TransformationMode::SmoothTransformation); + } else { + auto blank_image = QPixmap(32, 32); + blank_image.fill(Qt::black); + val = blank_image; } return val; } else if (role != Qt::DisplayRole) { @@ -191,8 +197,8 @@ public: return LobbyItem::data(role); } auto members = data(MemberListRole).toList(); - return QStringLiteral("%1 / %2").arg(QString::number(members.size()), - data(MaxPlayerRole).toString()); + return QStringLiteral("%1 / %2 ") + .arg(QString::number(members.size()), data(MaxPlayerRole).toString()); } bool operator<(const QStandardItem& other) const override { diff --git a/src/yuzu/multiplayer/message.cpp b/src/yuzu/multiplayer/message.cpp index 758b5b731..6d8f18274 100644 --- a/src/yuzu/multiplayer/message.cpp +++ b/src/yuzu/multiplayer/message.cpp @@ -49,9 +49,9 @@ const ConnectionError ErrorManager::PERMISSION_DENIED( QT_TR_NOOP("You do not have enough permission to perform this action.")); const ConnectionError ErrorManager::NO_SUCH_USER(QT_TR_NOOP( "The user you are trying to kick/ban could not be found.\nThey may have left the room.")); -const ConnectionError ErrorManager::NO_INTERFACE_SELECTED( - QT_TR_NOOP("No network interface is selected.\nPlease go to Configure -> System -> Network and " - "make a selection.")); +const ConnectionError ErrorManager::NO_INTERFACE_SELECTED(QT_TR_NOOP( + "No valid network interface is selected.\nPlease go to Configure -> System -> Network and " + "make a selection.")); static bool WarnMessage(const std::string& title, const std::string& 
text) { return QMessageBox::Ok == QMessageBox::warning(nullptr, QObject::tr(title.c_str()), diff --git a/src/yuzu/multiplayer/state.cpp b/src/yuzu/multiplayer/state.cpp index 66e098296..285bb150d 100644 --- a/src/yuzu/multiplayer/state.cpp +++ b/src/yuzu/multiplayer/state.cpp @@ -44,9 +44,6 @@ MultiplayerState::MultiplayerState(QWidget* parent, QStandardItemModel* game_lis status_text = new ClickableLabel(this); status_icon = new ClickableLabel(this); - status_text->setToolTip(tr("Current connection status")); - status_text->setText(tr("Not Connected. Click here to find a room!")); - status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16)); connect(status_text, &ClickableLabel::clicked, this, &MultiplayerState::OnOpenNetworkRoom); connect(status_icon, &ClickableLabel::clicked, this, &MultiplayerState::OnOpenNetworkRoom); @@ -57,6 +54,8 @@ MultiplayerState::MultiplayerState(QWidget* parent, QStandardItemModel* game_lis HideNotification(); } }); + + retranslateUi(); } MultiplayerState::~MultiplayerState() = default; @@ -90,14 +89,7 @@ void MultiplayerState::Close() { void MultiplayerState::retranslateUi() { status_text->setToolTip(tr("Current connection status")); - if (current_state == Network::RoomMember::State::Uninitialized) { - status_text->setText(tr("Not Connected. Click here to find a room!")); - } else if (current_state == Network::RoomMember::State::Joined || - current_state == Network::RoomMember::State::Moderator) { - status_text->setText(tr("Connected")); - } else { - status_text->setText(tr("Not Connected")); - } + UpdateNotificationStatus(); if (lobby) { lobby->RetranslateUi(); @@ -113,21 +105,55 @@ } +void MultiplayerState::SetNotificationStatus(NotificationStatus status) { + notification_status = status; + UpdateNotificationStatus(); +} + +void MultiplayerState::UpdateNotificationStatus() { + switch (notification_status) { + case NotificationStatus::Uninitialized: + status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16)); + status_text->setText(tr("Not Connected.
Click here to find a room!")); + leave_room->setEnabled(false); + show_room->setEnabled(false); + break; + case NotificationStatus::Disconnected: + status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16)); + status_text->setText(tr("Not Connected")); + leave_room->setEnabled(false); + show_room->setEnabled(false); + break; + case NotificationStatus::Connected: + status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected")).pixmap(16)); + status_text->setText(tr("Connected")); + leave_room->setEnabled(true); + show_room->setEnabled(true); + break; + case NotificationStatus::Notification: + status_icon->setPixmap( + QIcon::fromTheme(QStringLiteral("connected_notification")).pixmap(16)); + status_text->setText(tr("New Messages Received")); + leave_room->setEnabled(true); + show_room->setEnabled(true); + break; + } + + // Clean up status bar if game is running + if (system.IsPoweredOn()) { + status_text->clear(); + } +} + void MultiplayerState::OnNetworkStateChanged(const Network::RoomMember::State& state) { LOG_DEBUG(Frontend, "Network State: {}", Network::GetStateStr(state)); if (state == Network::RoomMember::State::Joined || state == Network::RoomMember::State::Moderator) { OnOpenNetworkRoom(); - status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected")).pixmap(16)); - status_text->setText(tr("Connected")); - leave_room->setEnabled(true); - show_room->setEnabled(true); + SetNotificationStatus(NotificationStatus::Connected); } else { - status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16)); - status_text->setText(tr("Not Connected")); - leave_room->setEnabled(false); - show_room->setEnabled(false); + SetNotificationStatus(NotificationStatus::Disconnected); } current_state = state; @@ -185,6 +211,10 @@ void MultiplayerState::OnAnnounceFailed(const WebService::WebResult& result) { QMessageBox::Ok); } +void MultiplayerState::OnSaveConfig() { + emit SaveConfig(); +} + void MultiplayerState::UpdateThemedIcons() { if (show_notification) { status_icon->setPixmap( @@ -209,13 +239,16 @@ static void BringWidgetToFront(QWidget* widget) { void MultiplayerState::OnViewLobby() { if (lobby == nullptr) { lobby = new Lobby(this, game_list_model, announce_multiplayer_session, system); + connect(lobby, &Lobby::SaveConfig, this, &MultiplayerState::OnSaveConfig); } + lobby->RefreshLobby(); BringWidgetToFront(lobby); } void MultiplayerState::OnCreateRoom() { if (host_room == nullptr) { host_room = new HostRoomWindow(this, game_list_model, announce_multiplayer_session, system); + connect(host_room, &HostRoomWindow::SaveConfig, this, &MultiplayerState::OnSaveConfig); } BringWidgetToFront(host_room); } @@ -235,7 +268,7 @@ bool MultiplayerState::OnCloseRoom() { return true; } // Save ban list - UISettings::values.multiplayer_ban_list = std::move(room->GetBanList()); + UISettings::values.multiplayer_ban_list = room->GetBanList(); room->Destroy(); announce_multiplayer_session->Stop(); @@ -249,14 +282,13 @@ void MultiplayerState::ShowNotification() { return; // Do not show notification if the chat window currently has focus show_notification = true; QApplication::alert(nullptr); - status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected_notification")).pixmap(16)); - status_text->setText(tr("New Messages Received")); + QApplication::beep(); + SetNotificationStatus(NotificationStatus::Notification); } void MultiplayerState::HideNotification() { show_notification = false; - 
status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected")).pixmap(16)); - status_text->setText(tr("Connected")); + SetNotificationStatus(NotificationStatus::Connected); } void MultiplayerState::OnOpenNetworkRoom() { @@ -279,6 +311,8 @@ void MultiplayerState::OnDirectConnectToRoom() { if (direct_connect == nullptr) { direct_connect = new DirectConnectWindow(system, this); + connect(direct_connect, &DirectConnectWindow::SaveConfig, this, + &MultiplayerState::OnSaveConfig); } BringWidgetToFront(direct_connect); } diff --git a/src/yuzu/multiplayer/state.h b/src/yuzu/multiplayer/state.h index c92496413..5d681c5c6 100644 --- a/src/yuzu/multiplayer/state.h +++ b/src/yuzu/multiplayer/state.h @@ -22,6 +22,13 @@ class MultiplayerState : public QWidget { Q_OBJECT; public: + enum class NotificationStatus { + Uninitialized, + Disconnected, + Connected, + Notification, + }; + explicit MultiplayerState(QWidget* parent, QStandardItemModel* game_list, QAction* leave_room, QAction* show_room, Core::System& system_); ~MultiplayerState(); @@ -31,6 +38,10 @@ public: */ void Close(); + void SetNotificationStatus(NotificationStatus state); + + void UpdateNotificationStatus(); + ClickableLabel* GetStatusText() const { return status_text; } @@ -64,6 +75,7 @@ public slots: void OnOpenNetworkRoom(); void OnDirectConnectToRoom(); void OnAnnounceFailed(const WebService::WebResult&); + void OnSaveConfig(); void UpdateThemedIcons(); void ShowNotification(); void HideNotification(); @@ -72,6 +84,7 @@ signals: void NetworkStateChanged(const Network::RoomMember::State&); void NetworkError(const Network::RoomMember::Error&); void AnnounceFailed(const WebService::WebResult&); + void SaveConfig(); private: Lobby* lobby = nullptr; @@ -85,6 +98,7 @@ private: QAction* show_room; std::shared_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session; Network::RoomMember::State current_state = Network::RoomMember::State::Uninitialized; + NotificationStatus notification_status = NotificationStatus::Uninitialized; bool has_mod_perms = false; Network::RoomMember::CallbackHandle<Network::RoomMember::State> state_callback_handle; Network::RoomMember::CallbackHandle<Network::RoomMember::Error> error_callback_handle; diff --git a/src/yuzu/startup_checks.cpp b/src/yuzu/startup_checks.cpp index 8421280bf..6a91212e2 100644 --- a/src/yuzu/startup_checks.cpp +++ b/src/yuzu/startup_checks.cpp @@ -31,103 +31,126 @@ void CheckVulkan() { } } -bool StartupChecks(const char* arg0, bool* has_broken_vulkan) { +bool CheckEnvVars(bool* is_child) { #ifdef _WIN32 // Check environment variable to see if we are the child char variable_contents[8]; const DWORD startup_check_var = GetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, variable_contents, 8); - if (startup_check_var > 0 && std::strncmp(variable_contents, "ON", 8) == 0) { + if (startup_check_var > 0 && std::strncmp(variable_contents, ENV_VAR_ENABLED_TEXT, 8) == 0) { CheckVulkan(); return true; } - // Set the startup variable for child processes - const bool env_var_set = SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, "ON"); - if (!env_var_set) { - std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %d\n", - STARTUP_CHECK_ENV_VAR, GetLastError()); + // Don't perform startup checks if we are a child process + char is_child_s[8]; + const DWORD is_child_len = GetEnvironmentVariableA(IS_CHILD_ENV_VAR, is_child_s, 8); + if (is_child_len > 0 && std::strncmp(is_child_s, ENV_VAR_ENABLED_TEXT, 8) == 0) { + *is_child = true; + return
false; + } else if (!SetEnvironmentVariableA(IS_CHILD_ENV_VAR, ENV_VAR_ENABLED_TEXT)) { + std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %lu\n", + IS_CHILD_ENV_VAR, GetLastError()); + return true; } +#endif + return false; +} - PROCESS_INFORMATION process_info; - std::memset(&process_info, '\0', sizeof(process_info)); - - if (!SpawnChild(arg0, &process_info)) { +bool StartupChecks(const char* arg0, bool* has_broken_vulkan, bool perform_vulkan_check) { +#ifdef _WIN32 + // Set the startup variable for child processes + const bool env_var_set = SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, ENV_VAR_ENABLED_TEXT); + if (!env_var_set) { + std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %lu\n", + STARTUP_CHECK_ENV_VAR, GetLastError()); return false; } - // Wait until the processs exits and get exit code from it - WaitForSingleObject(process_info.hProcess, INFINITE); - DWORD exit_code = STILL_ACTIVE; - const int err = GetExitCodeProcess(process_info.hProcess, &exit_code); - if (err == 0) { - std::fprintf(stderr, "GetExitCodeProcess failed with error %d\n", GetLastError()); - } - - // Vulkan is broken if the child crashed (return value is not zero) - *has_broken_vulkan = (exit_code != 0); - - if (CloseHandle(process_info.hProcess) == 0) { - std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError()); - } - if (CloseHandle(process_info.hThread) == 0) { - std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError()); + if (perform_vulkan_check) { + // Spawn child process that performs Vulkan check + PROCESS_INFORMATION process_info; + std::memset(&process_info, '\0', sizeof(process_info)); + + if (!SpawnChild(arg0, &process_info, 0)) { + return false; + } + + // Wait until the process exits and get exit code from it + WaitForSingleObject(process_info.hProcess, INFINITE); + DWORD exit_code = STILL_ACTIVE; + const int err = GetExitCodeProcess(process_info.hProcess, &exit_code); + if (err == 0) { + std::fprintf(stderr, "GetExitCodeProcess failed with error %lu\n", GetLastError()); + } + + // Vulkan is broken if the child crashed (return value is not zero) + *has_broken_vulkan = (exit_code != 0); + + if (CloseHandle(process_info.hProcess) == 0) { + std::fprintf(stderr, "CloseHandle failed with error %lu\n", GetLastError()); + } + if (CloseHandle(process_info.hThread) == 0) { + std::fprintf(stderr, "CloseHandle failed with error %lu\n", GetLastError()); + } } if (!SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, nullptr)) { - std::fprintf(stderr, "SetEnvironmentVariableA failed to clear %s with error %d\n", + std::fprintf(stderr, "SetEnvironmentVariableA failed to clear %s with error %lu\n", STARTUP_CHECK_ENV_VAR, GetLastError()); } #elif defined(YUZU_UNIX) - const pid_t pid = fork(); - if (pid == 0) { - CheckVulkan(); - return true; - } else if (pid == -1) { - const int err = errno; - std::fprintf(stderr, "fork failed with error %d\n", err); - return false; - } - - // Get exit code from child process - int status; - const int r_val = wait(&status); - if (r_val == -1) { - const int err = errno; - std::fprintf(stderr, "wait failed with error %d\n", err); - return false; + if (perform_vulkan_check) { + const pid_t pid = fork(); + if (pid == 0) { + CheckVulkan(); + return true; + } else if (pid == -1) { + const int err = errno; + std::fprintf(stderr, "fork failed with error %d\n", err); + return false; + } + + // Get exit code from child process + int status; + const int r_val = wait(&status); + if (r_val == -1) { + const int
err = errno; + std::fprintf(stderr, "wait failed with error %d\n", err); + return false; + } + // Vulkan is broken if the child crashed (return value is not zero) + *has_broken_vulkan = (status != 0); } - // Vulkan is broken if the child crashed (return value is not zero) - *has_broken_vulkan = (status != 0); #endif return false; } #ifdef _WIN32 -bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi) { +bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi, int flags) { STARTUPINFOA startup_info; std::memset(&startup_info, '\0', sizeof(startup_info)); startup_info.cb = sizeof(startup_info); char p_name[255]; - std::strncpy(p_name, arg0, 255); + std::strncpy(p_name, arg0, 254); + p_name[254] = '\0'; const bool process_created = CreateProcessA(nullptr, // lpApplicationName p_name, // lpCommandLine nullptr, // lpProcessAttributes nullptr, // lpThreadAttributes false, // bInheritHandles - 0, // dwCreationFlags + flags, // dwCreationFlags nullptr, // lpEnvironment nullptr, // lpCurrentDirectory &startup_info, // lpStartupInfo pi // lpProcessInformation ); if (!process_created) { - std::fprintf(stderr, "CreateProcessA failed with error %d\n", GetLastError()); + std::fprintf(stderr, "CreateProcessA failed with error %lu\n", GetLastError()); return false; } diff --git a/src/yuzu/startup_checks.h b/src/yuzu/startup_checks.h index 096dd54a8..d8e563be6 100644 --- a/src/yuzu/startup_checks.h +++ b/src/yuzu/startup_checks.h @@ -7,11 +7,14 @@ #include <windows.h> #endif +constexpr char IS_CHILD_ENV_VAR[] = "YUZU_IS_CHILD"; constexpr char STARTUP_CHECK_ENV_VAR[] = "YUZU_DO_STARTUP_CHECKS"; +constexpr char ENV_VAR_ENABLED_TEXT[] = "ON"; void CheckVulkan(); -bool StartupChecks(const char* arg0, bool* has_broken_vulkan); +bool CheckEnvVars(bool* is_child); +bool StartupChecks(const char* arg0, bool* has_broken_vulkan, bool perform_vulkan_check); #ifdef _WIN32 -bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi); +bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi, int flags); #endif diff --git a/src/yuzu/uisettings.h b/src/yuzu/uisettings.h index e12d414d9..753797efc 100644 --- a/src/yuzu/uisettings.h +++ b/src/yuzu/uisettings.h @@ -102,7 +102,7 @@ struct Values { Settings::Setting<uint32_t> callout_flags{0, "calloutFlags"}; // multiplayer settings - Settings::Setting<QString> multiplayer_nickname{QStringLiteral("yuzu"), "nickname"}; + Settings::Setting<QString> multiplayer_nickname{{}, "nickname"}; Settings::Setting<QString> multiplayer_ip{{}, "ip"}; Settings::SwitchableSetting<uint, true> multiplayer_port{24872, 0, UINT16_MAX, "port"}; Settings::Setting<QString> multiplayer_room_nickname{{}, "room_nickname"}; diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp index 3a0f33cba..e16f79eb4 100644 --- a/src/yuzu_cmd/yuzu.cpp +++ b/src/yuzu_cmd/yuzu.cpp @@ -302,6 +302,8 @@ int main(int argc, char** argv) { } Core::System system{}; + system.Initialize(); + InputCommon::InputSubsystem input_subsystem{}; // Apply the command line arguments @@ -392,7 +394,7 @@ } system.DetachDebugger(); void(system.Pause()); - system.Shutdown(); + system.ShutdownMainProcess(); detached_tasks.WaitForAllTasks(); return 0;
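
Several of the patterns introduced above generalize beyond this codebase; the sketches below illustrate them. First, the input_profiles.cpp change sorts the collected profile names with std::stable_sort before returning them, so the configuration UI lists profiles deterministically instead of in directory-iteration order. A minimal sketch of that call (standard C++ only; the surrounding names are illustrative):

```cpp
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main() {
    // Stand-in for the names gathered while walking the input profile directory.
    std::vector<std::string> profile_names{"player2", "Default", "player1"};

    // Same call as in InputProfiles::GetInputProfileNames: lexicographic sort
    // that preserves the relative order of equivalent elements.
    std::stable_sort(profile_names.begin(), profile_names.end());

    for (const auto& name : profile_names) {
        std::puts(name.c_str());
    }
}
```

For plain strings a stable sort behaves like an unstable one, but it keeps the result predictable if the comparison is later weakened (for example, made case-insensitive).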
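
In UpdateStatusBar, the FPS value is now passed through std::round before formatting with zero decimals, so the displayed integer is the nearest whole frame rate rather than depending on the formatter's handling of the fractional part. A plain printf stand-in for the QString::arg call:

```cpp
#include <cmath>
#include <cstdio>

int main() {
    const double average_game_fps = 59.94; // illustrative value

    // Rounding first, as the diff does, makes the intent explicit:
    std::printf("Game: %.0f FPS\n", std::round(average_game_fps)); // prints 60

    // A naive integer conversion would truncate instead:
    std::printf("Game: %d FPS\n", static_cast<int>(average_game_fps)); // prints 59
}
```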
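
The startup_checks.cpp rework distinguishes two environment variables, both set to "ON": YUZU_DO_STARTUP_CHECKS marks the Vulkan-probe child, and YUZU_IS_CHILD marks the crash-dump debuggee, so each spawned copy of the executable knows which role it is playing. The handshake reduced to a portable sketch, assuming POSIX getenv/setenv (the names here are illustrative, not yuzu's):

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Illustrative marker; the real names live in startup_checks.h.
constexpr const char* kMarker = "DEMO_DO_STARTUP_CHECKS";

// True when this process was re-spawned by a parent that set the marker.
bool IsProbeChild() {
    const char* value = std::getenv(kMarker);
    return value != nullptr && std::strcmp(value, "ON") == 0;
}

int main() {
    if (IsProbeChild()) {
        std::puts("child: run the probe and exit");
        return 0; // a crash here (non-zero exit) tells the parent the probe failed
    }
    // Parent: mark the environment so the re-spawned copy takes the probe path.
    setenv(kMarker, "ON", /*overwrite=*/1);
    std::puts("parent: would now spawn a copy of itself and inspect its exit code");
    return 0;
}
```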
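
On the YUZU_UNIX side, StartupChecks performs the Vulkan check only when perform_vulkan_check is set, by forking and treating a non-zero child status as a broken driver. A self-contained sketch of that fork/wait pattern, with the probe stubbed out (waitpid is used here instead of wait for clarity):

```cpp
#include <cstdio>
#include <sys/wait.h>
#include <unistd.h>

// Stand-in for the real probe; imagine this creating a Vulkan instance and
// crashing if the driver is broken.
static void CheckVulkan() {}

bool ProbeInChild(bool* has_broken_vulkan) {
    const pid_t pid = fork();
    if (pid == 0) {
        CheckVulkan();
        _exit(0); // child exits cleanly if the probe survived
    }
    if (pid == -1) {
        std::perror("fork");
        return false;
    }
    int status = 0;
    if (waitpid(pid, &status, 0) == -1) {
        std::perror("waitpid");
        return false;
    }
    // Any crash or non-zero exit in the child marks the driver as broken.
    *has_broken_vulkan = (status != 0);
    return true;
}

int main() {
    bool broken = false;
    if (ProbeInChild(&broken)) {
        std::printf("broken vulkan: %s\n", broken ? "yes" : "no");
    }
}
```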
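
SpawnChild's change from `std::strncpy(p_name, arg0, 255)` to copying 254 bytes plus a manual terminator addresses a classic strncpy pitfall: when the source is at least as long as the count, strncpy leaves the destination without a NUL. A small demonstration:

```cpp
#include <cstdio>
#include <cstring>

int main() {
    const char* src = "a-string-longer-than-the-buffer";
    char dst[8];

    // std::strncpy(dst, src, sizeof(dst)) would fill all 8 bytes with no NUL,
    // leaving dst unterminated. Copy at most N-1 bytes and terminate manually,
    // as the SpawnChild fix does:
    std::strncpy(dst, src, sizeof(dst) - 1);
    dst[sizeof(dst) - 1] = '\0';

    std::printf("%s\n", dst); // prints "a-strin"
}
```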
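
Lobby::GetProfileUsername converts the profile's fixed-size username field with Common::StringFromFixedZeroTerminatedBuffer. A plausible re-implementation of such a helper, shown only to illustrate the contract (take at most `size` bytes, stop at the first NUL if one is present; the real helper lives in yuzu's common library):

```cpp
#include <array>
#include <cstdio>
#include <string>
#include <string_view>

// Illustrative stand-in for StringFromFixedZeroTerminatedBuffer.
std::string FromFixedZeroTerminated(const char* data, std::size_t size) {
    std::string_view view(data, size);
    const auto nul = view.find('\0');
    if (nul != std::string_view::npos) {
        view = view.substr(0, nul); // trim at the embedded terminator
    }
    return std::string(view);
}

int main() {
    // Fixed-size field as it would appear in a profile record.
    std::array<char, 8> username{'y', 'u', 'z', 'u', '\0', '\0', '\0', '\0'};
    std::printf("%s\n", FromFixedZeroTerminated(username.data(), username.size()).c_str());
}
```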
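
The MultiplayerState changes route every status-bar update through SetNotificationStatus/UpdateNotificationStatus, so the icon, text, and menu-action enablement are always derived from a single NotificationStatus value instead of being set piecemeal by each event handler. A Qt-free sketch of the same shape, with the widgets replaced by stdout:

```cpp
#include <cstdio>

// Mirrors the enum added to MultiplayerState in state.h.
enum class NotificationStatus { Uninitialized, Disconnected, Connected, Notification };

class StatusBarModel {
public:
    void SetNotificationStatus(NotificationStatus status) {
        notification_status = status;
        UpdateNotificationStatus();
    }

    // The single place that derives every widget state from the current status.
    void UpdateNotificationStatus() const {
        switch (notification_status) {
        case NotificationStatus::Uninitialized:
            std::puts("icon=disconnected text='Not Connected. Click here to find a room!'");
            break;
        case NotificationStatus::Disconnected:
            std::puts("icon=disconnected text='Not Connected'");
            break;
        case NotificationStatus::Connected:
            std::puts("icon=connected text='Connected'");
            break;
        case NotificationStatus::Notification:
            std::puts("icon=connected_notification text='New Messages Received'");
            break;
        }
    }

private:
    NotificationStatus notification_status = NotificationStatus::Uninitialized;
};

int main() {
    StatusBarModel status_bar;
    status_bar.UpdateNotificationStatus();                                 // initial state
    status_bar.SetNotificationStatus(NotificationStatus::Connected);      // joined a room
    status_bar.SetNotificationStatus(NotificationStatus::Notification);   // new chat message
}
```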
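
Finally, the new SaveConfig signals form a chain: Lobby, HostRoomWindow, and DirectConnectWindow emit SaveConfig after changing multiplayer settings, MultiplayerState forwards it, and GMainWindow::OnSaveConfig applies the settings and persists the config. A Qt-free sketch of that forwarding, with std::function callbacks standing in for signals and slots:

```cpp
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

// Minimal signal emitter: registered callbacks are invoked on emit.
class Emitter {
public:
    void ConnectSaveConfig(std::function<void()> slot) {
        save_config_slots.push_back(std::move(slot));
    }
    void EmitSaveConfig() {
        for (const auto& slot : save_config_slots) {
            slot();
        }
    }

private:
    std::vector<std::function<void()>> save_config_slots;
};

int main() {
    Emitter lobby;       // stands in for Lobby / HostRoomWindow / DirectConnectWindow
    Emitter multiplayer; // stands in for MultiplayerState

    // connect(lobby, &Lobby::SaveConfig, multiplayer_state, &MultiplayerState::OnSaveConfig)
    lobby.ConnectSaveConfig([&multiplayer] { multiplayer.EmitSaveConfig(); });

    // connect(multiplayer_state, &MultiplayerState::SaveConfig, this, &GMainWindow::OnSaveConfig)
    multiplayer.ConnectSaveConfig([] { std::puts("ApplySettings(); config->Save();"); });

    lobby.EmitSaveConfig(); // e.g. after the user joins or hosts a room
}
```

Forwarding through MultiplayerState keeps the child windows decoupled from the main window: they only know that "something should save the config now", not who does it.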