sched: Remove multi-level queue priority limit

Change the single bitmask into a bitmask array so that the multi-level
queue scheduler is no longer limited to 32 priorities.

The bitmask array is scanned to find which queue has a ready thread.

Only as many queues as there are configured priorities are needed,
because the priority is already validated when the thread is created.

Signed-off-by: TaiJu Wu <tjwu1217@gmail.com>
TaiJu Wu 2024-03-07 03:44:22 +08:00 committed by Anas Nashif
parent a63b3d1de6
commit 1f5f0cf838
5 changed files with 75 additions and 32 deletions
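The core of the change is that a thread priority is first normalized to an offset from the highest priority, then split into a word index and a bit position. A standalone sketch of that mapping (constants and names are illustrative, not the kernel code): on a 32-bit target, offset 37 lands in word 1, bit 5.

/* Standalone illustration of the word/bit split behind the bitmask array.
 * Not the kernel implementation; the constants below are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_PRIORITIES 40   /* example: more priorities than the old 32 limit */
#define NBITS          32   /* bits per bitmask word on a 32-bit target */
#define BITMAP_SIZE    ((NUM_PRIORITIES + NBITS - 1) / NBITS)

int main(void)
{
	uint32_t bitmask[BITMAP_SIZE] = {0};
	unsigned int offset = 37;           /* priority offset from the highest priority */
	unsigned int idx = offset / NBITS;  /* which bitmask word: 37 / 32 = 1 */
	unsigned int bit = offset % NBITS;  /* which bit in that word: 37 % 32 = 5 */

	bitmask[idx] |= 1u << bit;          /* mark queues[offset] as non-empty */
	printf("offset %u -> word %u, bit %u, mask 0x%08x\n",
	       offset, idx, bit, (unsigned)bitmask[idx]);
	return 0;
}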


@@ -82,7 +82,7 @@ runtime overhead and performance scaling when many threads are added.
 * Traditional multi-queue ready queue (:kconfig:option:`CONFIG_SCHED_MULTIQ`)
   When selected, the scheduler ready queue will be implemented as the
-  classic/textbook array of lists, one per priority (max 32 priorities).
+  classic/textbook array of lists, one per priority.
   This corresponds to the scheduler algorithm used in Zephyr versions prior to
   1.12.


@@ -32,6 +32,15 @@
 #include <zephyr/sys/rb.h>
 #endif
 
+#define K_NUM_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + CONFIG_NUM_COOP_PRIORITIES + 1)
+
+#if defined(CONFIG_64BIT)
+#define PRIQ_BITMAP_SIZE (DIV_ROUND_UP(K_NUM_THREAD_PRIO, 8 * sizeof(uint64_t)))
+#else
+#define PRIQ_BITMAP_SIZE (DIV_ROUND_UP(K_NUM_THREAD_PRIO, 8 * sizeof(uint32_t)))
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -117,8 +126,12 @@ struct _priq_rb {
  * to represent their requirements.
  */
 struct _priq_mq {
-	sys_dlist_t queues[32];
-	unsigned int bitmask; /* bit 1<<i set if queues[i] is non-empty */
+	sys_dlist_t queues[K_NUM_THREAD_PRIO];
+#ifdef CONFIG_64BIT
+	uint64_t bitmask[PRIQ_BITMAP_SIZE];
+#else
+	uint32_t bitmask[PRIQ_BITMAP_SIZE];
+#endif
 };
 
 struct _ready_q {

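PRIQ_BITMAP_SIZE simply rounds the priority count up to whole bitmask words. As a rough, hypothetical sizing check (the numbers are not from the commit), 40 priorities need two 32-bit words but only one 64-bit word:

/* Hypothetical sizing check (not from the commit): how many bitmask words
 * DIV_ROUND_UP yields for a given priority count on 32-bit vs. 64-bit.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t num_prio = 40;  /* example priority count above the old limit */

	printf("32-bit words: %zu\n", DIV_ROUND_UP(num_prio, 8 * sizeof(uint32_t)));  /* 2 */
	printf("64-bit words: %zu\n", DIV_ROUND_UP(num_prio, 8 * sizeof(uint64_t)));  /* 1 */
	return 0;
}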

@@ -317,18 +317,17 @@ config SCHED_MULTIQ
 	depends on !SCHED_DEADLINE
 	help
 	  When selected, the scheduler ready queue will be implemented
-	  as the classic/textbook array of lists, one per priority
-	  (max 32 priorities). This corresponds to the scheduler
-	  algorithm used in Zephyr versions prior to 1.12. It incurs
-	  only a tiny code size overhead vs. the "dumb" scheduler and
-	  runs in O(1) time in almost all circumstances with very low
-	  constant factor. But it requires a fairly large RAM budget
-	  to store those list heads, and the limited features make it
-	  incompatible with features like deadline scheduling that
-	  need to sort threads more finely, and SMP affinity which
-	  need to traverse the list of threads. Typical applications
-	  with small numbers of runnable threads probably want the
-	  DUMB scheduler.
+	  as the classic/textbook array of lists, one per priority.
+	  This corresponds to the scheduler algorithm used in Zephyr
+	  versions prior to 1.12. It incurs only a tiny code size
+	  overhead vs. the "dumb" scheduler and runs in O(1) time
+	  in almost all circumstances with very low constant factor.
+	  But it requires a fairly large RAM budget to store those list
+	  heads, and the limited features make it incompatible with
+	  features like deadline scheduling that need to sort threads
+	  more finely, and SMP affinity which need to traverse the list
+	  of threads. Typical applications with small numbers of runnable
+	  threads probably want the DUMB scheduler.
 
 endchoice # SCHED_ALGORITHM
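The "fairly large RAM budget" mentioned in the help text is essentially one list head per priority plus the bitmask words. A rough standalone estimate, assuming a list head is two pointers (illustrative only, not the kernel's actual sizes):

/* Rough RAM estimate for the multiqueue ready queue. Illustrative only:
 * a sys_dlist_t head is modelled here as two pointers.
 */
#include <stdint.h>
#include <stdio.h>

struct dlist_head { void *head, *tail; };  /* stand-in for sys_dlist_t */

#define NUM_PRIORITIES 40
#define NBITS          32
#define BITMAP_SIZE    ((NUM_PRIORITIES + NBITS - 1) / NBITS)

int main(void)
{
	size_t bytes = NUM_PRIORITIES * sizeof(struct dlist_head)
		     + BITMAP_SIZE * sizeof(uint32_t);

	/* On a 32-bit target: 40 * 8 + 2 * 4 = 328 bytes of list heads and bitmask. */
	printf("%zu bytes\n", bytes);
	return 0;
}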


@@ -24,6 +24,13 @@
 #define _priq_run_best z_priq_rb_best
 /* Multi Queue Scheduling */
 #elif defined(CONFIG_SCHED_MULTIQ)
+
+# if defined(CONFIG_64BIT)
+# define NBITS 64
+# else
+# define NBITS 32
+# endif
+
 #define _priq_run_add z_priq_mq_add
 #define _priq_run_remove z_priq_mq_remove
 #define _priq_run_best z_priq_mq_best
@@ -60,27 +67,41 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 #ifdef CONFIG_SCHED_MULTIQ
-# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
-# error Too many priorities for multiqueue scheduler (max 32)
-# endif
+
+struct prio_info {
+	uint8_t offset_prio;
+	uint8_t idx;
+	uint8_t bit;
+};
+
+static ALWAYS_INLINE struct prio_info get_prio_info(int8_t old_prio)
+{
+	struct prio_info ret;
+
+	ret.offset_prio = old_prio - K_HIGHEST_THREAD_PRIO;
+	ret.idx = ret.offset_prio / NBITS;
+	ret.bit = ret.offset_prio % NBITS;
+
+	return ret;
+}
 
 static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
 					struct k_thread *thread)
 {
-	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
+	struct prio_info pos = get_prio_info(thread->base.prio);
 
-	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
-	pq->bitmask |= BIT(priority_bit);
+	sys_dlist_append(&pq->queues[pos.offset_prio], &thread->base.qnode_dlist);
+	pq->bitmask[pos.idx] |= BIT(pos.bit);
 }
 
 static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
 					    struct k_thread *thread)
 {
-	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
+	struct prio_info pos = get_prio_info(thread->base.prio);
 
 	sys_dlist_remove(&thread->base.qnode_dlist);
-	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
-		pq->bitmask &= ~BIT(priority_bit);
+	if (sys_dlist_is_empty(&pq->queues[pos.offset_prio])) {
+		pq->bitmask[pos.idx] &= ~BIT(pos.bit);
 	}
 }
 #endif /* CONFIG_SCHED_MULTIQ */

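The add/remove paths above keep a simple invariant: a bitmask bit is set while its queue is non-empty and cleared only when the queue drains. A minimal standalone sketch of that invariant, using plain counters in place of the kernel's sys_dlist_t (all names here are illustrative):

/* Minimal sketch of the bitmask invariant kept by the add/remove paths:
 * a bit is set while its queue is non-empty and cleared only when the
 * queue drains. Plain counters stand in for the kernel's sys_dlist_t;
 * none of these names are kernel APIs.
 */
#include <assert.h>
#include <stdint.h>

#define NUM_PRIORITIES 40
#define NBITS          32
#define BITMAP_SIZE    ((NUM_PRIORITIES + NBITS - 1) / NBITS)

static unsigned int queue_len[NUM_PRIORITIES];
static uint32_t bitmask[BITMAP_SIZE];

static void mq_add(unsigned int offset)
{
	queue_len[offset]++;
	bitmask[offset / NBITS] |= 1u << (offset % NBITS);
}

static void mq_remove(unsigned int offset)
{
	queue_len[offset]--;
	if (queue_len[offset] == 0) {              /* last entry gone: clear the bit */
		bitmask[offset / NBITS] &= ~(1u << (offset % NBITS));
	}
}

int main(void)
{
	mq_add(37);
	mq_add(37);
	mq_remove(37);
	assert(bitmask[1] & (1u << 5));            /* still one thread queued at offset 37 */
	mq_remove(37);
	assert((bitmask[1] & (1u << 5)) == 0);     /* queue drained, bit cleared */
	return 0;
}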

@@ -6,6 +6,7 @@
 #include <zephyr/kernel.h>
 #include <ksched.h>
+#include <zephyr/sys/math_extras.h>
 
 void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
 {
@@ -94,16 +95,25 @@ struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
 
 struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
 {
-	if (!pq->bitmask) {
-		return NULL;
-	}
-
 	struct k_thread *thread = NULL;
-	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
-	sys_dnode_t *n = sys_dlist_peek_head(l);
 
-	if (n != NULL) {
-		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
+	for (int i = 0; i < PRIQ_BITMAP_SIZE; ++i) {
+		if (!pq->bitmask[i]) {
+			continue;
+		}
+
+#ifdef CONFIG_64BIT
+		sys_dlist_t *l = &pq->queues[i * 64 + u64_count_trailing_zeros(pq->bitmask[i])];
+#else
+		sys_dlist_t *l = &pq->queues[i * 32 + u32_count_trailing_zeros(pq->bitmask[i])];
+#endif
+		sys_dnode_t *n = sys_dlist_peek_head(l);
+
+		if (n != NULL) {
+			thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
+			break;
+		}
 	}
+
 	return thread;
 }
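The lookup therefore costs at most one test per bitmask word plus one count-trailing-zeros on the first non-empty word; the lowest set bit corresponds to the numerically smallest offset, i.e. the highest priority. A standalone sketch of that scan, with __builtin_ctz standing in for u32_count_trailing_zeros (constants are illustrative):

/* Standalone sketch of the best-thread lookup: scan bitmask words in
 * order and apply count-trailing-zeros to the first non-empty word, so
 * the lowest set bit (smallest offset = highest priority) wins.
 * __builtin_ctz stands in for u32_count_trailing_zeros here.
 */
#include <stdint.h>
#include <stdio.h>

#define NBITS       32
#define BITMAP_SIZE 2

int main(void)
{
	/* Offsets 37 and 50 marked ready: both live in word 1. */
	uint32_t bitmask[BITMAP_SIZE] = {0, (1u << 5) | (1u << 18)};
	int best = -1;

	for (int i = 0; i < BITMAP_SIZE; ++i) {
		if (!bitmask[i]) {
			continue;                  /* nothing ready in this word */
		}
		best = i * NBITS + __builtin_ctz(bitmask[i]);
		break;                             /* first hit is the best priority */
	}

	printf("best offset: %d\n", best);         /* prints 37 */
	return 0;
}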