-rw-r--r--   src/common/CMakeLists.txt                |   1
-rw-r--r--   src/common/bit_util.h                    |  39
-rw-r--r--   src/common/multi_level_queue.h           | 337
-rw-r--r--   src/core/hle/kernel/scheduler.cpp        |  42
-rw-r--r--   src/core/hle/kernel/scheduler.h          |   4
-rw-r--r--   src/tests/CMakeLists.txt                 |   2
-rw-r--r--   src/tests/common/bit_utils.cpp           |  23
-rw-r--r--   src/tests/common/multi_level_queue.cpp   |  55
8 files changed, 484 insertions, 19 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 43ae8a9e7..850ce8006 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -98,6 +98,7 @@ add_library(common STATIC
     microprofile.h
     microprofileui.h
     misc.cpp
+    multi_level_queue.h
     page_table.cpp
     page_table.h
     param_package.cpp
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index 1eea17ba1..a4f9ed4aa 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -58,4 +58,43 @@ inline u64 CountLeadingZeroes64(u64 value) {
     return __builtin_clzll(value);
 }
 #endif
+
+#ifdef _MSC_VER
+inline u32 CountTrailingZeroes32(u32 value) {
+    unsigned long trailing_zero = 0;
+
+    if (_BitScanForward(&trailing_zero, value) != 0) {
+        return trailing_zero;
+    }
+
+    return 32;
+}
+
+inline u64 CountTrailingZeroes64(u64 value) {
+    unsigned long trailing_zero = 0;
+
+    if (_BitScanForward64(&trailing_zero, value) != 0) {
+        return trailing_zero;
+    }
+
+    return 64;
+}
+#else
+inline u32 CountTrailingZeroes32(u32 value) {
+    if (value == 0) {
+        return 32;
+    }
+
+    return __builtin_ctz(value);
+}
+
+inline u64 CountTrailingZeroes64(u64 value) {
+    if (value == 0) {
+        return 64;
+    }
+
+    return __builtin_ctzll(value);
+}
+#endif
+
 } // namespace Common
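The CountTrailingZeroes helpers above exist to service the container introduced next: MultiLevelQueue keeps a 64-bit mask with bit n set whenever priority level n is non-empty, so finding the most or least urgent populated level is a single bit scan. A minimal sketch of that idea, assuming the helpers above and the u32/u64 aliases from common_types.h; the free functions and the sentinel value 64 are illustrative only, the real logic lives in highest_priority_set()/lowest_priority_set() in the header below:

#include "common/bit_util.h"
#include "common/common_types.h"

// Bit n of the mask is set when priority level n holds at least one element.
u32 HighestPrioritySet(u64 used_priorities) {
    // The lowest set bit is the most urgent non-empty level; 64 acts as an "all empty" sentinel.
    return used_priorities == 0 ? 64 : static_cast<u32>(Common::CountTrailingZeroes64(used_priorities));
}

u32 LowestPrioritySet(u64 used_priorities) {
    // The highest set bit is the least urgent non-empty level.
    return used_priorities == 0 ? 64
                                : static_cast<u32>(63 - Common::CountLeadingZeroes64(used_priorities));
}
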
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
new file mode 100644
index 000000000..2b61b91e0
--- /dev/null
+++ b/src/common/multi_level_queue.h
@@ -0,0 +1,337 @@
+// Copyright 2019 TuxSH
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <iterator>
+#include <list>
+#include <utility>
+
+#include "common/bit_util.h"
+#include "common/common_types.h"
+
+namespace Common {
+
+/**
+ * A MultiLevelQueue is a type of priority queue which has the following characteristics:
+ * - iterable through each of its elements.
+ * - back can be obtained.
+ * - O(1) add, lookup (both front and back)
+ * - discrete priorities and a max of 64 priorities (limited domain)
+ * This type of priority queue is normally used for managing threads within a scheduler.
+ */
+template <typename T, std::size_t Depth>
+class MultiLevelQueue {
+public:
+    using value_type = T;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+    using pointer = value_type*;
+    using const_pointer = const value_type*;
+
+    using difference_type = typename std::pointer_traits<pointer>::difference_type;
+    using size_type = std::size_t;
+
+    template <bool is_constant>
+    class iterator_impl {
+    public:
+        using iterator_category = std::bidirectional_iterator_tag;
+        using value_type = T;
+        using pointer = std::conditional_t<is_constant, const T*, T*>;
+        using reference = std::conditional_t<is_constant, const T&, T&>;
+        using difference_type = typename std::pointer_traits<pointer>::difference_type;
+
+        friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
+            if (lhs.IsEnd() && rhs.IsEnd())
+                return true;
+            return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
+        }
+
+        friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
+            return !operator==(lhs, rhs);
+        }
+
+        reference operator*() const {
+            return *it;
+        }
+
+        pointer operator->() const {
+            return it.operator->();
+        }
+
+        iterator_impl& operator++() {
+            if (IsEnd()) {
+                return *this;
+            }
+
+            ++it;
+
+            if (it == GetEndItForPrio()) {
+                u64 prios = mlq.used_priorities;
+                prios &= ~((1ULL << (current_priority + 1)) - 1);
+                if (prios == 0) {
+                    current_priority = mlq.depth();
+                } else {
+                    current_priority = CountTrailingZeroes64(prios);
+                    it = GetBeginItForPrio();
+                }
+            }
+            return *this;
+        }
+
+        iterator_impl& operator--() {
+            if (IsEnd()) {
+                if (mlq.used_priorities != 0) {
+                    current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
+                    it = GetEndItForPrio();
+                    --it;
+                }
+            } else if (it == GetBeginItForPrio()) {
+                u64 prios = mlq.used_priorities;
+                prios &= (1ULL << current_priority) - 1;
+                if (prios != 0) {
+                    current_priority = CountTrailingZeroes64(prios);
+                    it = GetEndItForPrio();
+                    --it;
+                }
+            } else {
+                --it;
+            }
+            return *this;
+        }
+
+        iterator_impl operator++(int) {
+            const iterator_impl v{*this};
+            ++(*this);
+            return v;
+        }
+
+        iterator_impl operator--(int) {
+            const iterator_impl v{*this};
+            --(*this);
+            return v;
+        }
+
+        // allow implicit conversion between const and non-const iterators
+        iterator_impl(const iterator_impl<false>& other)
+            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
+
+        iterator_impl(const iterator_impl<true>& other)
+            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
+
+        iterator_impl& operator=(const iterator_impl<false>& other) {
+            mlq = other.mlq;
+            it = other.it;
+            current_priority = other.current_priority;
+            return *this;
+        }
+
+        friend class iterator_impl<true>;
+        iterator_impl() = default;
+
+    private:
+        friend class MultiLevelQueue;
+        using container_ref =
+            std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
+        using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
+                                                 typename std::list<T>::iterator>;
+
+        explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
+            : mlq(mlq), it(it), current_priority(current_priority) {}
+        explicit iterator_impl(container_ref mlq, u32 current_priority)
+            : mlq(mlq), it(), current_priority(current_priority) {}
+
+        bool IsEnd() const {
+            return current_priority == mlq.depth();
+        }
+
+        list_iterator GetBeginItForPrio() const {
+            return mlq.levels[current_priority].begin();
+        }
+
+        list_iterator GetEndItForPrio() const {
+            return mlq.levels[current_priority].end();
+        }
+
+        container_ref mlq;
+        list_iterator it;
+        u32 current_priority;
+    };
+
+    using iterator = iterator_impl<false>;
+    using const_iterator = iterator_impl<true>;
+
+    void add(const T& element, u32 priority, bool send_back = true) {
+        if (send_back)
+            levels[priority].push_back(element);
+        else
+            levels[priority].push_front(element);
+        used_priorities |= 1ULL << priority;
+    }
+
+    void remove(const T& element, u32 priority) {
+        auto it = ListIterateTo(levels[priority], element);
+        if (it == levels[priority].end())
+            return;
+        levels[priority].erase(it);
+        if (levels[priority].empty()) {
+            used_priorities &= ~(1ULL << priority);
+        }
+    }
+
+    void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
+        remove(element, old_priority);
+        add(element, new_priority, !adjust_front);
+    }
+    void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
+        adjust(*it, old_priority, new_priority, adjust_front);
+    }
+
+    void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
+        ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
+                   ListIterateTo(levels[priority], element));
+
+        other.used_priorities |= 1ULL << priority;
+
+        if (levels[priority].empty()) {
+            used_priorities &= ~(1ULL << priority);
+        }
+    }
+
+    void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
+        transfer_to_front(*it, priority, other);
+    }
+
+    void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
+        ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
+                   ListIterateTo(levels[priority], element));
+
+        other.used_priorities |= 1ULL << priority;
+
+        if (levels[priority].empty()) {
+            used_priorities &= ~(1ULL << priority);
+        }
+    }
+
+    void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
+        transfer_to_back(*it, priority, other);
+    }
+
+    void yield(u32 priority, std::size_t n = 1) {
+        ListShiftForward(levels[priority], n);
+    }
+
+    std::size_t depth() const {
+        return Depth;
+    }
+
+    std::size_t size(u32 priority) const {
+        return levels[priority].size();
+    }
+
+    std::size_t size() const {
+        u64 priorities = used_priorities;
+        std::size_t size = 0;
+        while (priorities != 0) {
+            const u64 current_priority = CountTrailingZeroes64(priorities);
+            size += levels[current_priority].size();
+            priorities &= ~(1ULL << current_priority);
+        }
+        return size;
+    }
+
+    bool empty() const {
+        return used_priorities == 0;
+    }
+
+    bool empty(u32 priority) const {
+        return (used_priorities & (1ULL << priority)) == 0;
+    }
+
+    u32 highest_priority_set(u32 max_priority = 0) const {
+        const u64 priorities =
+            max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
+        return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
+    }
+
+    u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
+        const u64 priorities = min_priority >= Depth - 1
+                                   ? used_priorities
+                                   : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
+        return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
+    }
+
+    const_iterator cbegin(u32 max_prio = 0) const {
+        const u32 priority = highest_priority_set(max_prio);
+        return priority == Depth ? cend()
+                                 : const_iterator{*this, levels[priority].cbegin(), priority};
+    }
+    const_iterator begin(u32 max_prio = 0) const {
+        return cbegin(max_prio);
+    }
+    iterator begin(u32 max_prio = 0) {
+        const u32 priority = highest_priority_set(max_prio);
+        return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
+    }
+
+    const_iterator cend(u32 min_prio = Depth - 1) const {
+        return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
+    }
+    const_iterator end(u32 min_prio = Depth - 1) const {
+        return cend(min_prio);
+    }
+    iterator end(u32 min_prio = Depth - 1) {
+        return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
+    }
+
+    T& front(u32 max_priority = 0) {
+        const u32 priority = highest_priority_set(max_priority);
+        return levels[priority == Depth ? 0 : priority].front();
+    }
+    const T& front(u32 max_priority = 0) const {
+        const u32 priority = highest_priority_set(max_priority);
+        return levels[priority == Depth ? 0 : priority].front();
+    }
+
+    T back(u32 min_priority = Depth - 1) {
+        const u32 priority = lowest_priority_set(min_priority); // intended
+        return levels[priority == Depth ? 63 : priority].back();
+    }
+    const T& back(u32 min_priority = Depth - 1) const {
+        const u32 priority = lowest_priority_set(min_priority); // intended
+        return levels[priority == Depth ? 63 : priority].back();
+    }
+
+private:
+    using const_list_iterator = typename std::list<T>::const_iterator;
+
+    static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
+        if (shift >= list.size()) {
+            return;
+        }
+
+        const auto begin_range = list.begin();
+        const auto end_range = std::next(begin_range, shift);
+        list.splice(list.end(), list, begin_range, end_range);
+    }
+
+    static void ListSplice(std::list<T>& in_list, const_list_iterator position,
+                           std::list<T>& out_list, const_list_iterator element) {
+        in_list.splice(position, out_list, element);
+    }
+
+    static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) {
+        auto it = list.cbegin();
+        while (it != list.cend() && *it != element) {
+            ++it;
+        }
+        return it;
+    }
+
+    std::array<std::list<T>, Depth> levels;
+    u64 used_priorities = 0;
+};
+
+} // namespace Common
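To make the interface above concrete, here is a small, self-contained usage sketch; it is not part of the commit and simply mirrors the calls exercised by the unit test at the end of this diff. Elements live in one of Depth buckets, lower indices are more urgent, and iteration walks the populated levels from most to least urgent:

#include <iostream>

#include "common/multi_level_queue.h"

int main() {
    Common::MultiLevelQueue<int, 64> queue; // 64 discrete levels, level 0 is the most urgent

    queue.add(10, 3);        // push_back onto level 3
    queue.add(20, 3);
    queue.add(30, 1);        // level 1 outranks level 3
    queue.add(40, 3, false); // send_back = false pushes to the front of level 3

    std::cout << queue.front() << '\n'; // 30: the front of the highest populated level
    queue.yield(3);                     // rotate level 3 so its former front moves to the back
    queue.adjust(20, 3, 0);             // reprioritise element 20 from level 3 to level 0

    for (const int value : queue) {     // visits level 0, then 1, then 3
        std::cout << value << ' ';
    }
    std::cout << '\n';
}

The scheduler change that follows maps the old ThreadQueueList operations onto exactly this surface: push_back/push_front become add(), move() becomes adjust(), and the pop_first/get_first_filter lookups become front() and plain iteration.
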
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index cc189cc64..6d0f13ecf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -30,7 +30,7 @@ Scheduler::~Scheduler() {
 
 bool Scheduler::HaveReadyThreads() const {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
-    return ready_queue.get_first() != nullptr;
+    return !ready_queue.empty();
 }
 
 Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
     Thread* thread = GetCurrentThread();
 
     if (thread && thread->GetStatus() == ThreadStatus::Running) {
+        if (ready_queue.empty()) {
+            return thread;
+        }
         // We have to do better than the current thread.
         // This call returns null when that's not possible.
-        next = ready_queue.pop_first_better(thread->GetPriority());
-        if (!next) {
-            // Otherwise just keep going with the current thread
+        next = ready_queue.front();
+        if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
             next = thread;
         }
     } else {
-        next = ready_queue.pop_first();
+        if (ready_queue.empty()) {
+            return nullptr;
+        }
+        next = ready_queue.front();
     }
 
     return next;
 }
 
 void Scheduler::SwitchContext(Thread* new_thread) {
-    Thread* const previous_thread = GetCurrentThread();
+    Thread* previous_thread = GetCurrentThread();
     Process* const previous_process = system.Kernel().CurrentProcess();
 
     UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
         if (previous_thread->GetStatus() == ThreadStatus::Running) {
             // This is only the case when a reschedule is triggered without the current thread
             // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+            ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
             previous_thread->SetStatus(ThreadStatus::Ready);
         }
     }
@@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
 
         current_thread = new_thread;
 
-        ready_queue.remove(new_thread->GetPriority(), new_thread);
+        ready_queue.remove(new_thread, new_thread->GetPriority());
         new_thread->SetStatus(ThreadStatus::Running);
 
         auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -147,7 +152,6 @@ void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
     thread_list.push_back(std::move(thread));
-    ready_queue.prepare(priority);
 }
 
 void Scheduler::RemoveThread(Thread* thread) {
@@ -161,33 +165,37 @@ void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
     ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.push_back(priority, thread);
+    ready_queue.add(thread, priority);
 }
 
 void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
     ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.remove(priority, thread);
+    ready_queue.remove(thread, priority);
 }
 
 void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
+    if (thread->GetPriority() == priority) {
+        return;
+    }
 
     // If thread was ready, adjust queues
     if (thread->GetStatus() == ThreadStatus::Ready)
-        ready_queue.move(thread, thread->GetPriority(), priority);
-    else
-        ready_queue.prepare(priority);
+        ready_queue.adjust(thread, thread->GetPriority(), priority);
 }
 
 Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
     const u32 mask = 1U << core;
-    return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
-        return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
-    });
+    for (auto* thread : ready_queue) {
+        if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
+            return thread;
+        }
+    }
+    return nullptr;
 }
 
 void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
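Aside from the early-out added to SetThreadPriority() and the dropped prepare() call (every level always exists in the new container), the one behavioural subtlety above is in PopNextReadyThread(): ThreadQueueList answered "is there a strictly better candidate?" internally through pop_first_better(), while MultiLevelQueue only exposes front(), so the priority comparison now lives in the scheduler itself. A condensed restatement of the running-thread branch, assuming the types from the surrounding code; PickNextWhileRunning is a hypothetical helper name, not something this commit adds:

#include "common/multi_level_queue.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

// Assumes `running` is the currently executing thread and is non-null.
Thread* PickNextWhileRunning(Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1>& ready_queue,
                             Thread* running) {
    if (ready_queue.empty()) {
        return running; // nothing is ready, so keep the current thread
    }
    Thread* const candidate = ready_queue.front(); // best-priority ready thread
    // Lower numeric priority is more urgent; only switch when the candidate is strictly better.
    if (candidate == nullptr || candidate->GetPriority() >= running->GetPriority()) {
        return running;
    }
    return candidate;
}

} // namespace Kernel
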
"common/common_types.h" -#include "common/thread_queue_list.h" +#include "common/multi_level_queue.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/thread.h" @@ -156,7 +156,7 @@ private: std::vector<SharedPtr<Thread>> thread_list; /// Lists only ready thread ids. - Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; + Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue; SharedPtr<Thread> current_thread = nullptr; diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index d0284bdf4..c7038b217 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -1,5 +1,7 @@ add_executable(tests common/bit_field.cpp + common/bit_utils.cpp + common/multi_level_queue.cpp common/param_package.cpp common/ring_buffer.cpp core/arm/arm_test_common.cpp diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp new file mode 100644 index 000000000..479b5995a --- /dev/null +++ b/src/tests/common/bit_utils.cpp @@ -0,0 +1,23 @@ +// Copyright 2017 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <catch2/catch.hpp> +#include <math.h> +#include "common/bit_util.h" + +namespace Common { + +TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") { + REQUIRE(Common::CountTrailingZeroes32(0) == 32); + REQUIRE(Common::CountTrailingZeroes64(0) == 64); + REQUIRE(Common::CountTrailingZeroes32(9) == 0); + REQUIRE(Common::CountTrailingZeroes32(8) == 3); + REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12); + REQUIRE(Common::CountTrailingZeroes64(9) == 0); + REQUIRE(Common::CountTrailingZeroes64(8) == 3); + REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12); + REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36); +} + +} // namespace Common diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp new file mode 100644 index 000000000..cca7ec7da --- /dev/null +++ b/src/tests/common/multi_level_queue.cpp @@ -0,0 +1,55 @@ +// Copyright 2019 Yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <catch2/catch.hpp> +#include <math.h> +#include "common/common_types.h" +#include "common/multi_level_queue.h" + +namespace Common { + +TEST_CASE("MultiLevelQueue", "[common]") { + std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0}; + Common::MultiLevelQueue<f32, 64> mlq; + REQUIRE(mlq.empty()); + mlq.add(values[2], 2); + mlq.add(values[7], 7); + mlq.add(values[3], 3); + mlq.add(values[4], 4); + mlq.add(values[0], 0); + mlq.add(values[5], 5); + mlq.add(values[6], 6); + mlq.add(values[1], 1); + u32 index = 0; + bool all_set = true; + for (auto& f : mlq) { + all_set &= (f == values[index]); + index++; + } + REQUIRE(all_set); + REQUIRE(!mlq.empty()); + f32 v = 8.0; + mlq.add(v, 2); + v = -7.0; + mlq.add(v, 2, false); + REQUIRE(mlq.front(2) == -7.0); + mlq.yield(2); + REQUIRE(mlq.front(2) == values[2]); + REQUIRE(mlq.back(2) == -7.0); + REQUIRE(mlq.empty(8)); + v = 10.0; + mlq.add(v, 8); + mlq.adjust(v, 8, 9); + REQUIRE(mlq.front(9) == v); + REQUIRE(mlq.empty(8)); + REQUIRE(!mlq.empty(9)); + mlq.adjust(values[0], 0, 9); + REQUIRE(mlq.highest_priority_set() == 1); + REQUIRE(mlq.lowest_priority_set() == 9); + mlq.remove(values[1], 1); + REQUIRE(mlq.highest_priority_set() == 2); + REQUIRE(mlq.empty(1)); +} + +} // namespace Common |