kernel: rename Z_KERNEL_STACK_BUFFER to K_KERNEL_STACK_BUFFER

Simple rename to align with the kernel naming scheme. The macro is
used throughout the tree, especially in architecture code. Since it
is not a private API internal to the kernel, prefix it with K_
accordingly.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Authored by Daniel Leung on 2024-03-22 12:56:12 -07:00, committed by Anas Nashif
parent f05111dea0, commit b69d2486fe
22 changed files with 30 additions and 30 deletions
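
For reference, a minimal sketch of the pattern this rename touches across the
tree (the stack name and size below are hypothetical, not taken from this
commit): a kernel-only stack is defined with K_KERNEL_STACK_DEFINE(),
K_KERNEL_STACK_BUFFER() returns the start of its usable buffer, and adding
K_KERNEL_STACK_SIZEOF() yields the initial top-of-stack pointer, which is
exactly what the architecture code in the hunks below computes.

#include <zephyr/kernel.h>

/* Hypothetical kernel-only stack: 1024 bytes of usable space plus any
 * reserved area the kernel places in front of the buffer.
 */
K_KERNEL_STACK_DEFINE(my_isr_stack, 1024);

static char *my_isr_stack_top(void)
{
	/* K_KERNEL_STACK_BUFFER() (formerly Z_KERNEL_STACK_BUFFER()) skips
	 * the reserved region at the base of the stack object; adding the
	 * usable size gives the highest address, i.e. the initial stack
	 * pointer on a full-descending stack.
	 */
	return K_KERNEL_STACK_BUFFER(my_isr_stack) +
	       K_KERNEL_STACK_SIZEOF(my_isr_stack);
}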

@@ -44,11 +44,11 @@ K_KERNEL_STACK_DEFINE(_firq_interrupt_stack, CONFIG_ARC_FIRQ_STACK_SIZE);
 void z_arc_firq_stack_set(void)
 {
 #ifdef CONFIG_SMP
-	char *firq_sp = Z_KERNEL_STACK_BUFFER(
+	char *firq_sp = K_KERNEL_STACK_BUFFER(
 		_firq_interrupt_stack[z_arc_v2_core_id()]) +
 		CONFIG_ARC_FIRQ_STACK_SIZE;
 #else
-	char *firq_sp = Z_KERNEL_STACK_BUFFER(_firq_interrupt_stack) +
+	char *firq_sp = K_KERNEL_STACK_BUFFER(_firq_interrupt_stack) +
 		CONFIG_ARC_FIRQ_STACK_SIZE;
 #endif

@@ -50,7 +50,7 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
 	 * arc_cpu_wake_flag will protect arc_cpu_sp that
 	 * only one slave cpu can read it per time
 	 */
-	arc_cpu_sp = Z_KERNEL_STACK_BUFFER(stack) + sz;
+	arc_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;
 	arc_cpu_wake_flag = cpu_num;

@@ -283,7 +283,7 @@ FUNC_NORETURN void z_arc_switch_to_main_no_multithreading(k_thread_entry_t main_
 							  void *p1, void *p2, void *p3)
 {
 	_kernel.cpus[0].id = 0;
-	_kernel.cpus[0].irq_stack = (Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
+	_kernel.cpus[0].irq_stack = (K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));
 	void *main_stack = (Z_THREAD_STACK_BUFFER(z_main_stack) +

@@ -120,16 +120,16 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz, arch_cpustart_
 	}
 	/* Pass stack address to secondary core */
-	arm_cpu_boot_params.irq_sp = Z_KERNEL_STACK_BUFFER(stack) + sz;
-	arm_cpu_boot_params.fiq_sp = Z_KERNEL_STACK_BUFFER(z_arm_fiq_stack[cpu_num])
+	arm_cpu_boot_params.irq_sp = K_KERNEL_STACK_BUFFER(stack) + sz;
+	arm_cpu_boot_params.fiq_sp = K_KERNEL_STACK_BUFFER(z_arm_fiq_stack[cpu_num])
				     + CONFIG_ARMV7_FIQ_STACK_SIZE;
-	arm_cpu_boot_params.abt_sp = Z_KERNEL_STACK_BUFFER(z_arm_abort_stack[cpu_num])
+	arm_cpu_boot_params.abt_sp = K_KERNEL_STACK_BUFFER(z_arm_abort_stack[cpu_num])
				     + CONFIG_ARMV7_EXCEPTION_STACK_SIZE;
-	arm_cpu_boot_params.udf_sp = Z_KERNEL_STACK_BUFFER(z_arm_undef_stack[cpu_num])
+	arm_cpu_boot_params.udf_sp = K_KERNEL_STACK_BUFFER(z_arm_undef_stack[cpu_num])
				     + CONFIG_ARMV7_EXCEPTION_STACK_SIZE;
-	arm_cpu_boot_params.svc_sp = Z_KERNEL_STACK_BUFFER(z_arm_svc_stack[cpu_num])
+	arm_cpu_boot_params.svc_sp = K_KERNEL_STACK_BUFFER(z_arm_svc_stack[cpu_num])
				     + CONFIG_ARMV7_SVC_STACK_SIZE;
-	arm_cpu_boot_params.sys_sp = Z_KERNEL_STACK_BUFFER(z_arm_sys_stack[cpu_num])
+	arm_cpu_boot_params.sys_sp = K_KERNEL_STACK_BUFFER(z_arm_sys_stack[cpu_num])
				     + CONFIG_ARMV7_SYS_STACK_SIZE;
 	arm_cpu_boot_params.fn = fn;

@@ -28,7 +28,7 @@ void z_arm_init_stacks(void)
 	memset(z_arm_svc_stack, 0xAA, CONFIG_ARMV7_SVC_STACK_SIZE);
 	memset(z_arm_abort_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
 	memset(z_arm_undef_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
-	memset(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]), 0xAA,
+	memset(K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]), 0xAA,
	       K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));
 }
 #endif

@@ -40,7 +40,7 @@ K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
 static ALWAYS_INLINE void z_arm_interrupt_stack_setup(void)
 {
 	uint32_t msp =
-		(uint32_t)(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0])) +
+		(uint32_t)(K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0])) +
		K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]);
 	__set_MSP(msp);

@@ -33,7 +33,7 @@ void z_arm64_safe_exception_stack_init(void)
 	char *safe_exc_sp;
 	cpu_id = arch_curr_cpu()->id;
-	safe_exc_sp = Z_KERNEL_STACK_BUFFER(z_arm64_safe_exception_stacks[cpu_id]) +
+	safe_exc_sp = K_KERNEL_STACK_BUFFER(z_arm64_safe_exception_stacks[cpu_id]) +
		      CONFIG_ARM64_SAFE_EXCEPTION_STACK_SIZE;
 	arch_curr_cpu()->arch.safe_exception_stack = (uint64_t)safe_exc_sp;
 	write_sp_el0((uint64_t)safe_exc_sp);

@@ -84,7 +84,7 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		     "The count of CPU Cores nodes in dts is not equal to CONFIG_MP_MAX_NUM_CPUS\n");
 #endif
-	arm64_cpu_boot_params.sp = Z_KERNEL_STACK_BUFFER(stack) + sz;
+	arm64_cpu_boot_params.sp = K_KERNEL_STACK_BUFFER(stack) + sz;
 	arm64_cpu_boot_params.fn = fn;
 	arm64_cpu_boot_params.arg = arg;
 	arm64_cpu_boot_params.cpu_num = cpu_num;

@@ -33,7 +33,7 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
 	riscv_cpu_init[cpu_num].fn = fn;
 	riscv_cpu_init[cpu_num].arg = arg;
-	riscv_cpu_sp = Z_KERNEL_STACK_BUFFER(stack) + sz;
+	riscv_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;
 	riscv_cpu_boot_flag = 0U;
 #ifdef CONFIG_PM_CPU_OPS

@@ -206,7 +206,7 @@ FUNC_NORETURN void z_riscv_switch_to_main_no_multithreading(k_thread_entry_t mai
 	ARG_UNUSED(p3);
 	_kernel.cpus[0].id = 0;
-	_kernel.cpus[0].irq_stack = (Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
+	_kernel.cpus[0].irq_stack = (K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));
 	main_stack = (Z_THREAD_STACK_BUFFER(z_main_stack) +

@@ -59,7 +59,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 #else
 	cpu_id = 0;
 #endif
-	start = (uintptr_t)Z_KERNEL_STACK_BUFFER(
+	start = (uintptr_t)K_KERNEL_STACK_BUFFER(
			z_interrupt_stacks[cpu_id]);
 	end = start + CONFIG_ISR_STACK_SIZE;
 #ifdef CONFIG_USERSPACE

@@ -206,7 +206,7 @@ static FUNC_NORETURN __used void df_handler_top(void)
 	_df_esf.eflags = _main_tss.eflags;
 	/* Restore the main IA task to a runnable state */
-	_main_tss.esp = (uint32_t)(Z_KERNEL_STACK_BUFFER(
+	_main_tss.esp = (uint32_t)(K_KERNEL_STACK_BUFFER(
		z_interrupt_stacks[0]) + CONFIG_ISR_STACK_SIZE);
 	_main_tss.cs = CODE_SEG;
 	_main_tss.ds = DATA_SEG;

@@ -156,7 +156,7 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
 	apic_id = x86_cpu_loapics[cpu_num];
-	x86_cpuboot[cpu_num].sp = (uint64_t) Z_KERNEL_STACK_BUFFER(stack) + sz;
+	x86_cpuboot[cpu_num].sp = (uint64_t) K_KERNEL_STACK_BUFFER(stack) + sz;
 	x86_cpuboot[cpu_num].stack_size = sz;
 	x86_cpuboot[cpu_num].fn = fn;
 	x86_cpuboot[cpu_num].arg = arg;

@@ -51,7 +51,7 @@ static ALWAYS_INLINE void arch_kernel_init(void)
 	XTENSA_WSR(ZSR_CPU_STR, cpu0);
 #ifdef CONFIG_INIT_STACKS
-	char *stack_start = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]);
+	char *stack_start = K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]);
 	size_t stack_sz = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]);
 	char *stack_end = stack_start + stack_sz;

@@ -283,7 +283,7 @@ static inline char *z_stack_ptr_align(char *ptr)
 /** @} */
-static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
+static inline char *K_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
 {
 	return (char *)sym + K_KERNEL_STACK_RESERVED;
 }
@@ -294,7 +294,7 @@ static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
 #define K_THREAD_STACK_DEFINE K_KERNEL_STACK_DEFINE
 #define K_THREAD_STACK_ARRAY_DEFINE K_KERNEL_STACK_ARRAY_DEFINE
 #define K_THREAD_STACK_MEMBER K_KERNEL_STACK_MEMBER
-#define Z_THREAD_STACK_BUFFER Z_KERNEL_STACK_BUFFER
+#define Z_THREAD_STACK_BUFFER K_KERNEL_STACK_BUFFER
 #define K_THREAD_STACK_DECLARE K_KERNEL_STACK_DECLARE
 #define K_THREAD_STACK_ARRAY_DECLARE K_KERNEL_STACK_ARRAY_DECLARE
 #define K_THREAD_PINNED_STACK_DEFINE K_KERNEL_PINNED_STACK_DEFINE
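
A brief aside on the hunk above (hedged: the alias block appears to be the
CONFIG_USERSPACE=n fallback path of thread_stack.h, and the names below are
illustrative assumptions, not from this commit): with userspace disabled, the
K_THREAD_STACK_* macros and the legacy Z_THREAD_STACK_BUFFER alias simply
resolve to their kernel-stack counterparts, so after this commit the alias
points at K_KERNEL_STACK_BUFFER().

/* Hypothetical thread stack; with CONFIG_USERSPACE=n this expands to a
 * kernel-only stack object via K_KERNEL_STACK_DEFINE().
 */
K_THREAD_STACK_DEFINE(my_thread_stack, 2048);

static char *my_thread_stack_buf(void)
{
	/* Legacy alias; after this commit it maps to K_KERNEL_STACK_BUFFER(). */
	return Z_THREAD_STACK_BUFFER(my_thread_stack);
}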

@@ -469,7 +469,7 @@ void z_init_cpu(int id)
 	_kernel.cpus[id].idle_thread = &z_idle_threads[id];
 	_kernel.cpus[id].id = id;
 	_kernel.cpus[id].irq_stack =
-		(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
+		(K_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
		 K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[id]));
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 	_kernel.cpus[id].usage = &_kernel.usage[id];

@@ -392,7 +392,7 @@ static char *setup_thread_stack(struct k_thread *new_thread,
 	{
 		/* Object cannot host a user mode thread */
 		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
-		stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
+		stack_buf_start = K_KERNEL_STACK_BUFFER(stack);
 		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
 		/* Zephyr treats stack overflow as an app bug. But

@@ -258,12 +258,12 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
 	sr.cpu = cpu_num;
 	sr.fn = fn;
-	sr.stack_top = Z_KERNEL_STACK_BUFFER(stack) + sz;
+	sr.stack_top = K_KERNEL_STACK_BUFFER(stack) + sz;
 	sr.arg = arg;
 	sr.vecbase = vb;
 	sr.alive = &alive_flag;
-	appcpu_top = Z_KERNEL_STACK_BUFFER(stack) + sz;
+	appcpu_top = K_KERNEL_STACK_BUFFER(stack) + sz;
 	start_rec = &sr;

@@ -129,7 +129,7 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
 	start_rec.fn = fn;
 	start_rec.arg = arg;
-	z_mp_stack_top = Z_KERNEL_STACK_BUFFER(stack) + sz;
+	z_mp_stack_top = K_KERNEL_STACK_BUFFER(stack) + sz;
 	soc_start_core(cpu_num);
 }

@@ -134,7 +134,7 @@ static void isr_stacks(void)
 	unsigned int num_cpus = arch_num_cpus();
 	for (int i = 0; i < num_cpus; i++) {
-		const uint8_t *buf = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
+		const uint8_t *buf = K_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
 		size_t size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]);
 		size_t unused;
 		int err;

@@ -257,7 +257,7 @@ static int cmd_kernel_stacks(const struct shell *sh,
 	for (int i = 0; i < num_cpus; i++) {
 		size_t unused;
-		const uint8_t *buf = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
+		const uint8_t *buf = K_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
 		size_t size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]);
 		int err = z_stack_space_get(buf, size, &unused);

@@ -121,7 +121,7 @@ void stack_buffer_scenarios(void)
 #endif
 	{
 		reserved = K_KERNEL_STACK_RESERVED;
-		stack_buf = Z_KERNEL_STACK_BUFFER(stack_obj);
+		stack_buf = K_KERNEL_STACK_BUFFER(stack_obj);
 		alignment = Z_KERNEL_STACK_OBJ_ALIGN;
 	}