First commit

Signed-off-by: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
This commit is contained in:
Inaky Perez-Gonzalez 2015-04-10 16:44:37 -07:00
commit 8ddf82cf70
1063 changed files with 163901 additions and 0 deletions

5
.gitattributes vendored Normal file
View File

@ -0,0 +1,5 @@
# used to remove files from deployment using `git archive`
# git files
.gitattributes export-ignore
.gitignore export-ignore
.mailmap export-ignore

13
.gitignore vendored Normal file
View File

@ -0,0 +1,13 @@
*.o
*.a
*.d
.dir
outdir
samples/libc/minimal-*-O?
linux2/
host/x86-linux2/bin/gen*
host/x86-linux2/bin/bin2hex
host/x86-linux2/bin/dec2hex
host/x86-linux2/bin/mkevents
host/x86-linux2/bin/sysgen

1
.mailmap Normal file
View File

@ -0,0 +1 @@
Dirk Brandewie <dirk.j.brandewie@intel.com> <dirk.j.brandewie@intel.com>

115
arch/arc/bsp/fatal_error.c Normal file
View File

@ -0,0 +1,115 @@
/* fatal_error.c - ARCv2 system fatal error handler */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides the _SysFatalErrorHandler() routine for ARCv2 BSPs.
*/
/* includes */
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <sections.h>
#include "board.h"
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PRINTK(...) printk(__VA_ARGS__)
#else
#define PRINTK(...)
#endif
#ifdef CONFIG_MICROKERNEL
extern void _TaskAbort(void);
/* Abort the current (non-essential) task; only available when the
 * microkernel is present. */
static inline void nonEssentialTaskAbort(void)
{
PRINTK("Fatal fault in task ! Aborting task.\n");
_TaskAbort();
}
#define NON_ESSENTIAL_TASK_ABORT() nonEssentialTaskAbort()
#else
/* nanokernel-only build: there are no tasks to abort, so expand to a
 * no-op statement */
#define NON_ESSENTIAL_TASK_ABORT() \
do {/* nothing */ \
} while ((0))
#endif
/*******************************************************************************
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _SysFatalErrorHandler(
	unsigned int reason, /* fatal error reason */
	const NANO_ESF *pEsf /* pointer to exception stack frame */
	)
{
	nano_context_type_t ctxType = context_type_get();

	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);

	/*
	 * Faults in an ISR or in an essential context cannot be recovered
	 * from by aborting the offender: halt the system instead.
	 */
	if ((ctxType == NANO_CTX_ISR) || _context_essential_check(NULL)) {
		const char *faultIn;

		if (ctxType == NANO_CTX_ISR) {
			faultIn = "ISR";
		} else if (ctxType == NANO_CTX_FIBER) {
			faultIn = "essential fiber";
		} else {
			faultIn = "essential task";
		}
		PRINTK("Fatal fault in %s ! Spinning...\n", faultIn);
		for (;;) {
			/* spin forever */
		}
	}

	/* a non-essential fiber can simply be aborted */
	if (ctxType == NANO_CTX_FIBER) {
		PRINTK("Fatal fault in fiber ! Aborting fiber.\n");
		fiber_abort();
		return;
	}

	/* non-essential task: abort it (no-op on nanokernel-only builds) */
	NON_ESSENTIAL_TASK_ABORT();
}

108
arch/arc/bsp/prep_c.c Normal file
View File

@ -0,0 +1,108 @@
/* prep_c.c - full C support initialization */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Initialization of full C support: zero the .bss, copy the .data if XIP,
call _Cstart().
Stack is available in this module, but not the global data/bss until their
initialization is performed.
*/
#include <stdint.h>
#include <toolchain.h>
#include <linker-defs.h>
/*******************************************************************************
*
* bssZero - clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*
* RETURNS: N/A
*/
static void bssZero(void)
{
	/* word-sized zero fill of the entire .bss region */
	volatile uint32_t *pBss = (uint32_t *)&__bss_start;
	unsigned int remaining = (unsigned int)&__bss_num_words;

	while (remaining-- != 0) {
		*pBss++ = 0;
	}
}
/*******************************************************************************
*
* dataCopy - copy the data section from ROM to RAM
*
* This routine copies the data section from ROM to RAM.
*
* RETURNS: N/A
*/
#ifdef CONFIG_XIP
static void dataCopy(void)
{
	/* word-sized copy of initialized data from its ROM load address
	 * to its RAM run address */
	volatile uint32_t *src = (uint32_t *)&__data_rom_start;
	volatile uint32_t *dst = (uint32_t *)&__data_ram_start;
	unsigned int remaining = (unsigned int)&__data_num_words;

	while (remaining-- != 0) {
		*dst++ = *src++;
	}
}
#else
static void dataCopy(void)
{
	/* not XIP: .data already resides in RAM, nothing to copy */
}
#endif
extern FUNC_NORETURN void _Cstart(void);
/*******************************************************************************
*
* _PrepC - prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
* RETURNS: N/A
*/
void _PrepC(void)
{
/* order matters: globals must be valid (.bss zeroed, .data copied)
 * before any C code that reads them runs */
bssZero();
dataCopy();
_Cstart(); /* declared FUNC_NORETURN: never returns */
CODE_UNREACHABLE;
}

71
arch/arc/bsp/reset.s Normal file
View File

@ -0,0 +1,71 @@
/* reset.s - reset handler */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Reset handler that prepares the system for running C code.
*/
#define _ASMLANGUAGE
#include <board.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
#define _RAM_END (CONFIG_RAM_START + CONFIG_RAM_SIZE)
GTEXT(__reset)
/*******************************************************************************
*
* __reset - reset vector
*
* Ran when the system comes out of reset. The processor is at supervisor level.
*
* Locking interrupts prevents anything from interrupting the CPU.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,__reset)
/* lock interrupts: will get unlocked when switch to main task */
clri
/* setup a stack at the end of the RAM */
mov sp, _RAM_END
/* hand off to C initialization; _PrepC() does not return */
j @_PrepC

View File

@ -0,0 +1,89 @@
/* vector_table.c - populated exception vector table */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Vector table with exceptions filled in. The reset vector is the system entry
point, ie. the first instruction executed.
The table is populated with all the system exception handlers. No exception
should be triggered until the kernel is ready to handle them.
We are using a C file instead of an assembly file (like the ARM vector table)
to work around an issue with the assembler where:
.word <function>
statements would end up with the two half-words of the functions' addresses
swapped.
*/
#include <stdint.h>
#include <toolchain.h>
#include "vector_table.h"
/*
 * Layout of the ARCv2 exception vector table: one 32-bit handler address
 * per entry, in the architecturally mandated order.
 */
struct vector_table {
	uint32_t reset;
	uint32_t memory_error;
	uint32_t instruction_error;
	uint32_t ev_machine_check;
	uint32_t ev_tlb_miss_i;
	uint32_t ev_tlb_miss_d;
	uint32_t ev_prot_v;
	uint32_t ev_privilege_v;
	uint32_t ev_swi;
	uint32_t ev_trap;
	uint32_t ev_extension;
	uint32_t ev_div_zero;
	uint32_t ev_dc_error;
	uint32_t ev_maligned;
};

/*
 * Designated initializers (C99) bind each handler to its slot by name,
 * so a reordering of the struct or of the initializer list cannot
 * silently route an exception to the wrong handler.
 */
struct vector_table _VectorTable _GENERIC_SECTION(.exc_vector_table) = {
	.reset = (uint32_t)__reset,
	.memory_error = (uint32_t)__memory_error,
	.instruction_error = (uint32_t)__instruction_error,
	.ev_machine_check = (uint32_t)__ev_machine_check,
	.ev_tlb_miss_i = (uint32_t)__ev_tlb_miss_i,
	.ev_tlb_miss_d = (uint32_t)__ev_tlb_miss_d,
	.ev_prot_v = (uint32_t)__ev_prot_v,
	.ev_privilege_v = (uint32_t)__ev_privilege_v,
	.ev_swi = (uint32_t)__ev_swi,
	.ev_trap = (uint32_t)__ev_trap,
	.ev_extension = (uint32_t)__ev_extension,
	.ev_div_zero = (uint32_t)__ev_div_zero,
	.ev_dc_error = (uint32_t)__ev_dc_error,
	.ev_maligned = (uint32_t)__ev_maligned,
};

/* alternate system entry-point names, aliased to the vector table */
extern struct vector_table _VxMicroStart _ALIAS_OF(_VectorTable);
extern struct vector_table _start _ALIAS_OF(_VectorTable);

View File

@ -0,0 +1,95 @@
/* vector_table.h - definitions for the exception vector table */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Definitions for the boot vector table.
System exception handler names all have the same format:
__<exception name with underscores>
Refer to the ARCv2 manual for an explanation of the exceptions.
*/
#ifndef _VECTOR_TABLE__H_
#define _VECTOR_TABLE__H_
#ifdef _ASMLANGUAGE
/* assembly view: export the entry points and exception handlers */
#include <board.h>
#include <toolchain.h>
#include <sections.h>
GTEXT(__start)
GTEXT(_VxMicroStart)
GTEXT(_VectorTable)
GTEXT(__reset)
GTEXT(__memory_error)
GTEXT(__instruction_error)
GTEXT(__ev_machine_check)
GTEXT(__ev_tlb_miss_i)
GTEXT(__ev_tlb_miss_d)
GTEXT(__ev_prot_v)
GTEXT(__ev_privilege_v)
GTEXT(__ev_swi)
GTEXT(__ev_trap)
GTEXT(__ev_extension)
GTEXT(__ev_div_zero)
GTEXT(__ev_dc_error)
GTEXT(__ev_maligned)
GTEXT(_PrepC)
GTEXT(_IsrWrapper)
#else
/* C view: prototypes for the exception vector entry points */
extern void __reset(void);
extern void __memory_error(void);
extern void __instruction_error(void);
extern void __ev_machine_check(void);
extern void __ev_tlb_miss_i(void);
extern void __ev_tlb_miss_d(void);
extern void __ev_prot_v(void);
extern void __ev_privilege_v(void);
extern void __ev_swi(void);
extern void __ev_trap(void);
extern void __ev_extension(void);
extern void __ev_div_zero(void);
extern void __ev_dc_error(void);
extern void __ev_maligned(void);
#endif /* _ASMLANGUAGE */
#endif /* _VECTOR_TABLE__H_ */

445
arch/arc/core/atomic.s Normal file
View File

@ -0,0 +1,445 @@
/* atomic.s - ARC atomic operations library */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This library provides routines to perform a number of atomic operations
on a memory location: add, subtract, increment, decrement, bitwise OR,
bitwise XOR, bitwise AND, bitwise NAND, get, set, clear and
compare-and-swap.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
/* exports */
GTEXT(atomic_set)
GTEXT(atomic_get)
GTEXT(atomic_add)
GTEXT(atomic_nand)
GTEXT(atomic_and)
GTEXT(atomic_or)
GTEXT(atomic_xor)
GTEXT(atomic_clear)
GTEXT(atomic_dec)
GTEXT(atomic_inc)
GTEXT(atomic_sub)
GTEXT(atomic_cas)
.section .TEXT._Atomic, "ax"
.balign 2
/*******************************************************************************
*
* atomic_clear - atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
/* clear == set to 0: load the "value" argument and reuse atomic_set */
mov_s r1, 0
/* fall through into atomic_set */
/*******************************************************************************
*
* atomic_set - atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
/* EX swaps register and memory in a single instruction: no retry loop
 * is needed for an unconditional store */
ex r1, [r0] /* swap new value with old value */
j_s.d [blink]
mov_s r0, r1 /* return old value (executes in delay slot) */
/******************************************************************************
*
* atomic_get - Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* atomic_val_t atomic_get
* (
* atomic_t *target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
SECTION_FUNC(TEXT, atomic_get)
/* a single 32-bit load needs no lock
 * (NOTE(review): assumes the target is naturally aligned — confirm) */
ld_s r0, [r0, 0]
j_s [blink]
/*******************************************************************************
*
* atomic_inc - atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
/* increment == add 1: load the "value" argument and reuse atomic_add */
mov_s r1, 1
/* fall through into atomic_add */
/*******************************************************************************
*
* atomic_add - atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
/* LLOCK/SCOND retry loop: the SCOND fails (Z=0) if the location was
 * written between the LLOCK and the SCOND, in which case we retry */
llock r2, [r0] /* load old value and mark exclusive access */
add_s r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_add /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value (executes in delay slot) */
/*******************************************************************************
*
* atomic_dec - atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_dec)
/* decrement == subtract 1: load the "value" argument and reuse
 * atomic_sub */
mov_s r1, 1
/* fall through into atomic_sub */
/*******************************************************************************
*
* atomic_sub - atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub)
/* LLOCK/SCOND retry loop, same pattern as atomic_add */
llock r2, [r0] /* load old value and mark exclusive access */
sub r3, r2, r1
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_sub /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value (executes in delay slot) */
/******************************************************************************
*
* atomic_nand - atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_nand)
/* LLOCK/SCOND retry loop, same pattern as atomic_add */
llock r2, [r0] /* load old value and mark exclusive access */
and r3, r1, r2
not r3, r3 /* NAND = NOT(AND) */
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_nand /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value (executes in delay slot) */
/******************************************************************************
*
* atomic_and - atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_and)
/* LLOCK/SCOND retry loop, same pattern as atomic_add */
llock r2, [r0] /* load old value and mark exclusive access */
and r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_and /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value (executes in delay slot) */
/*******************************************************************************
*
* atomic_or - atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_or)
/* LLOCK/SCOND retry loop, same pattern as atomic_add */
llock r2, [r0] /* load old value and mark exclusive access */
or r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_or /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value (executes in delay slot) */
/*******************************************************************************
*
* atomic_xor - atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_xor)
/* LLOCK/SCOND retry loop, same pattern as atomic_add */
llock r2, [r0] /* load old value and mark exclusive access */
xor r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_xor /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value (executes in delay slot) */
/*******************************************************************************
*
* atomic_cas - atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap. testing that the contents of
* <target> contains <oldValue>, and if it does, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_cas)
llock r3, [r0] /* load old value and mark exclusive access */
cmp_s r1, r3
bne_s nanoAtomicCas_fail
scond r2, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_cas /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, 1 /* return TRUE */
/*
 * Failed comparison: clear the exclusive-access lock by storing back
 * the value actually read (r3), which leaves the location unchanged.
 * Storing r1 (the caller's expected value) here would corrupt the
 * location if the SCOND succeeded, since r1 != r3 on this path.
 */
nanoAtomicCas_fail:
scond r3, [r0] /* write the value just read to clear the access lock */
j_s.d [blink]
mov_s r0, 0 /* return FALSE */

171
arch/arc/core/context.c Normal file
View File

@ -0,0 +1,171 @@
/* context.c - new context creation for ARCv2 */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* Core nanokernel fiber related primitives for the ARCv2 processor
* architecture.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <nanok.h>
#include <nanocontextentry.h>
#include <offsets.h>
/* initial stack frame */
struct init_stack_frame {
uint32_t pc; /* set to _ContextEntryWrapper by _NewContext() */
uint32_t status32; /* initial STATUS32 value for the new context */
/* entry arguments: r0 = entry routine, r1-r3 = its parameters
 * (see the assignments in _NewContext()) */
uint32_t r3;
uint32_t r2;
uint32_t r1;
uint32_t r0;
};
/* NOTE(review): field order presumably matches the restore sequence in
 * __return_from_coop() — confirm against the asm before reordering */
/* the single system-wide nanokernel state structure, zero-initialized */
tNANO _NanoKernel = {0};
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
#define TOOLS_SUPPORT_INIT(pCcs) toolsSupportInit(pCcs)
#else
/* host-tools support disabled: expands to a no-op statement */
#define TOOLS_SUPPORT_INIT(pCcs) \
do {/* do nothing */ \
} while ((0))
#endif
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
/*
* toolsSupportInit - initialize host-tools support when needed
*
* Currently only inserts the new context in the list of active contexts.
*
* RETURNS: N/A
*/
static ALWAYS_INLINE void toolsSupportInit(struct s_CCS *pCcs /* context */
					   )
{
	unsigned int imask;

	/*
	 * Prepend the freshly created context to the kernel's singly
	 * linked list of contexts. The list tracks every context in
	 * the system -- tasks and fibers alike, runnable or not --
	 * so host tools can walk it.
	 */
	imask = irq_lock_inline();
	pCcs->activeLink = _NanoKernel.contexts;
	_NanoKernel.contexts = pCcs;
	irq_unlock_inline(imask);
}
#endif /* CONFIG_HOST_TOOLS_SUPPORT */
/*
* _NewContext - initialize a new context (thread) from its stack space
*
* The control structure (CCS) is put at the lower address of the stack. An
* initial context, to be "restored" by __return_from_coop(), is put at
* the other end of the stack, and thus reusable by the stack when not
* needed anymore.
*
* The initial context is a basic stack frame that contains arguments for
* _ContextEntryRtn() return address, that points at _ContextEntryRtn()
* and status register.
*
* <options> is currently unused.
*
* RETURNS: N/A
*/
void *_NewContext(
char *pStackMem, /* pointer to stack memory */
unsigned stackSize, /* stack size in bytes */
_ContextEntry pEntry, /* context (thread) entry point routine */
void *parameter1, /* first param to entry point */
void *parameter2, /* second param to entry point */
void *parameter3, /* third param to entry point */
int priority, /* fiber priority, -1 for task */
unsigned options /* unused, for expansion */
)
{
char *stackEnd = pStackMem + stackSize; /* one past the top of the stack */
struct init_stack_frame *pInitCtx;
/* the CCS lives at the word-aligned low end of the stack memory */
tCCS *pCcs = (void *)ROUND_UP(pStackMem, sizeof(uint32_t));
/* carve the context entry struct from the "base" of the stack
 * (assumes the stack grows downward from stackEnd — TODO confirm) */
pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd) -
sizeof(struct init_stack_frame));
/* frame restored on first switch-in: pc gets the entry wrapper,
 * r0-r3 carry the real entry point and its three arguments */
pInitCtx->pc = ((uint32_t)_ContextEntryWrapper);
pInitCtx->r0 = (uint32_t)pEntry;
pInitCtx->r1 = (uint32_t)parameter1;
pInitCtx->r2 = (uint32_t)parameter2;
pInitCtx->r3 = (uint32_t)parameter3;
/*
* For now set the interrupt priority to 15
* we can leave interrupt enable flag set to 0 as
* seti instruction in the end of the _Swap() will
* enable the interrupts based on intlock_key
* value.
*/
pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
pCcs->link = NULL;
/* priority of -1 marks a preemptible task; anything else is a fiber */
pCcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
pCcs->prio = priority;
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
pCcs->custom_data = NULL;
#endif
/*
* intlock_key is constructed based on ARCv2 ISA Programmer's
* Reference Manual CLRI instruction description:
* dst[31:6] dst[5] dst[4] dst[3:0]
* 26'd0 1 STATUS32.IE STATUS32.E[3:0]
*/
pCcs->intlock_key = 0x3F;
pCcs->relinquish_cause = _CAUSE_COOP;
/* initial SP leaves room below the init frame for the callee-saved
 * register area consumed by the context-switch code */
pCcs->preempReg.sp = (uint32_t)pInitCtx - __tCalleeSaved_SIZEOF;
/* initial values in all other registers/CCS entries are irrelevant */
TOOLS_SUPPORT_INIT(pCcs);
return pCcs;
}

View File

@ -0,0 +1,64 @@
/* ctx_wrapper.s - wrapper for _ContextEntryRtn */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* Wrapper for _ContextEntryRtn routine when called from
* the initial context
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
GTEXT(_ContextEntryWrapper)
GTEXT(_ContextEntryRtn)
/*
 * _ContextEntryWrapper - wrapper for _ContextEntryRtn
 *
 * Pops the four values that _NewContext() placed in the initial stack
 * frame -- r0 = the thread's entry routine, r1..r3 = its three arguments --
 * and transfers control to _ContextEntryRtn.
 *
 * RETURNS: N/A
 */
SECTION_FUNC(TEXT, _ContextEntryWrapper)

	pop_s r3	/* third argument for _ContextEntryRtn */
	pop_s r2	/* second argument */
	pop_s r1	/* first argument */
	pop_s r0	/* thread entry point routine */

	j _ContextEntryRtn
	nop

79
arch/arc/core/cpu_idle.s Normal file
View File

@ -0,0 +1,79 @@
/* cpu_idle.s - CPU power management */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
CPU power management routines.
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
GTEXT(nano_cpu_idle)
GTEXT(nano_cpu_atomic_idle)
GDATA(nano_cpu_sleep_mode)

/* base sleep operand; the idle routines OR in the IRQ-enable bit before sleeping */
SECTION_VAR(BSS, nano_cpu_sleep_mode)
	.word 0
/*
 * nano_cpu_idle - put the CPU in low-power mode
 *
 * This function always exits with interrupts unlocked.
 *
 * void nano_cpu_idle(void)
 */
SECTION_FUNC(TEXT, nano_cpu_idle)

	ld r1, [nano_cpu_sleep_mode]
	or r1, r1, (1 << 4) /* set IRQ-enabled bit in the sleep operand */
	sleep r1
	j_s.nd [blink]
	nop
/*
 * nano_cpu_atomic_idle - put the CPU in low-power mode, entered with IRQs locked
 *
 * This function exits with interrupts restored to <key>.
 *
 * void nano_cpu_atomic_idle(unsigned int key)
 */
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)

	ld r1, [nano_cpu_sleep_mode]
	or r1, r1, (1 << 4) /* set IRQ-enabled bit so an interrupt ends the sleep */
	sleep r1
	j_s.d [blink]
	seti r0	/* delay slot: restore the interrupt state from <key> (in r0) */

284
arch/arc/core/fast_irq.s Normal file
View File

@ -0,0 +1,284 @@
/* fast_irq.s - handling of transitions to-and-from fast IRQs (FIRQ) */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the code for handling entry to and exit from Fast IRQs.
See isr_wrapper.s for details.
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
#include "swap_macros.h"
GTEXT(_firq_enter)
GTEXT(_firq_exit)
GTEXT(_firq_stack_setup)
GDATA(_firq_stack)

/* dedicated stack used while servicing FIRQs; installed by _firq_stack_setup() */
SECTION_VAR(NOINIT, _firq_stack)
	.space CONFIG_FIRQ_STACK_SIZE
/*******************************************************************************
 *
 * _firq_enter - work to be done before handing control to a FIRQ ISR
 *
 * The processor switches to a second register bank so registers from the
 * current bank do not have to be preserved yet. The only issue is the LP_START/
 * LP_COUNT/LP_END registers, which are not banked.
 *
 * If all FIRQ ISRs are programmed such that there are no use of the LP
 * registers (ie. no LPcc instruction), then the kernel can be configured to
 * remove the use of _firq_enter().
 *
 * When entering a FIRQ, interrupts might as well be locked: the processor is
 * running at its highest priority, and cannot be preempted by anything.
 *
 * Assumption by _isr_demux: r3 is untouched by _firq_enter.
 *
 * RETURNS: N/A
 */
SECTION_FUNC(TEXT, _firq_enter)

#ifndef CONFIG_FIRQ_NO_LPCC
	/*
	 * Unlike the rest of context switching code, r2 is loaded with something
	 * else than 'current' in this routine: this is to preserve r3 so that it
	 * does not have to be fetched again in _isr_demux.
	 */

	/* save LP_START/LP_COUNT/LP_END variables */
	mov_s r1, _NanoKernel

	/*
	 * lp_count cannot be stored directly to memory, so stage it through
	 * r2 and store r2 (the original code stored lp_count directly, which
	 * contradicted this very constraint).
	 */
	mov r2, lp_count
	st r2, [r1, __tNANO_firq_regs_OFFSET + __tFirqRegs_lp_count_OFFSET]

	lr r2, [_ARC_V2_LP_START]
	st r2, [r1, __tNANO_firq_regs_OFFSET + __tFirqRegs_lp_start_OFFSET]
	lr r2, [_ARC_V2_LP_END]
	st r2, [r1, __tNANO_firq_regs_OFFSET + __tFirqRegs_lp_end_OFFSET]
#endif

	j @_isr_demux
/*******************************************************************************
 *
 * _firq_exit - work to be done exiting a FIRQ
 *
 * Restores the loop registers (unless configured out), then decides whether
 * the interrupted thread must be switched out: only a preemptible (task)
 * context can lose the CPU here, and only when a fiber is ready to run.
 *
 * RETURNS: N/A
 */
SECTION_FUNC(TEXT, _firq_exit)

	mov_s r1, _NanoKernel
	ld_s r2, [r1, __tNANO_current_OFFSET]

#ifndef CONFIG_FIRQ_NO_LPCC
	/* assumption: r1 contains _NanoKernel, r2 contains the current thread */

	/* restore LP_START/LP_COUNT/LP_END variables */

	/* cannot load lp_count from memory: stage it through r3 */
	ld r3, [r1, __tNANO_firq_regs_OFFSET + __tFirqRegs_lp_count_OFFSET]
	mov lp_count, r3

	ld r3, [r1, __tNANO_firq_regs_OFFSET + __tFirqRegs_lp_start_OFFSET]
	sr r3, [_ARC_V2_LP_START]
	ld r3, [r1, __tNANO_firq_regs_OFFSET + __tFirqRegs_lp_end_OFFSET]
	sr r3, [_ARC_V2_LP_END]

	/* exiting here: r1/r2 unchanged, r0/r3 destroyed */
#endif

#if CONFIG_NUM_IRQ_PRIO_LEVELS > 1
	/* check if we're a nested interrupt: if so, let the interrupted interrupt
	 * handle the reschedule */

	lr r3, [_ARC_V2_AUX_IRQ_ACT]

	/* Viper on ARCv2 always runs in kernel mode, so assume bit31 [U] in
	 * AUX_IRQ_ACT is always 0: if the contents of AUX_IRQ_ACT is not 1, it
	 * means that another bit is set so an interrupt was interrupted.
	 */

	breq.nd r3, 1, _check_if_current_is_the_task
	rtie
#endif

.balign 4
_check_if_current_is_the_task:

	/* only a preemptible context (task) can be switched out by an IRQ */
	ld r0, [r2, __tCCS_flags_OFFSET]
	and.f r0, r0, PREEMPTIBLE
	bnz.nd _check_if_a_fiber_is_ready
	rtie

.balign 4
_check_if_a_fiber_is_ready:

	ld r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
	brne.nd r0, 0, _firq_reschedule
	rtie

.balign 4
_firq_reschedule:

	/*
	 * We know there is no interrupted interrupt of lower priority at this
	 * point, so when switching back to register bank 0, it will contain the
	 * registers from the interrupted thread.
	 */

	/* chose register bank #0 */
	lr r0, [_ARC_V2_STATUS32]
	and r0, r0, ~_ARC_V2_STATUS32_RB(7)
	kflag r0

	/* we're back on the outgoing thread's stack */
	_create_irq_stack_frame

	/*
	 * In a FIRQ, STATUS32 of the outgoing thread is in STATUS32_P0 and the
	 * PC in ILINK: save them in status32/pc respectively.
	 */
	lr r0, [_ARC_V2_STATUS32_P0]
	st_s r0, [sp, __tISF_status32_OFFSET]
	st ilink, [sp, __tISF_pc_OFFSET] /* ilink into pc */

	mov_s r1, _NanoKernel
	ld r2, [r1, __tNANO_current_OFFSET]

	_save_callee_saved_regs

	st _CAUSE_FIRQ, [r2, __tCCS_relinquish_cause_OFFSET]

	/* the ready fiber becomes the new 'current'; advance the fiber list */
	ld r2, [r1, __tNANO_fiber_OFFSET]
	st r2, [r1, __tNANO_current_OFFSET]
	ld r3, [r2, __tCCS_link_OFFSET]
	st r3, [r1, __tNANO_fiber_OFFSET]

	/*
	 * _load_callee_saved_regs expects incoming thread in r2.
	 * _load_callee_saved_regs restores the stack pointer.
	 */
	_load_callee_saved_regs

	/* dispatch on how the incoming thread last relinquished the CPU */
	ld r3, [r2, __tCCS_relinquish_cause_OFFSET]

	breq.nd r3, _CAUSE_RIRQ, _firq_return_from_rirq
	nop
	breq.nd r3, _CAUSE_FIRQ, _firq_return_from_firq
	nop

	/* fall through */

.balign 4
_firq_return_from_coop:

	ld r3, [r2, __tCCS_intlock_key_OFFSET]
	st 0, [r2, __tCCS_intlock_key_OFFSET]

	/* pc into ilink */
	pop_s r0
	mov ilink, r0

	pop_s r0 /* status32 into r0 */

	/*
	 * There are only two interrupt lock states: locked and unlocked. When
	 * entering _Swap(), they are always locked, so the IE bit is unset in
	 * status32. If the incoming thread had them locked recursively, it means
	 * that the IE bit should stay unset. The only time the bit has to change
	 * is if they were not locked recursively.
	 */
	and.f r3, r3, (1 << 4)
	or.nz r0, r0, _ARC_V2_STATUS32_IE
	sr r0, [_ARC_V2_STATUS32_P0]

	/* _Swap()'s return value is kept in the CCS: put it back in r0 */
	ld r0, [r2, __tCCS_return_value_OFFSET]
	rtie

.balign 4
_firq_return_from_rirq:
_firq_return_from_firq:

	_pop_irq_stack_frame

	ld ilink, [sp, -4] /* status32 into ilink */
	sr ilink, [_ARC_V2_STATUS32_P0]
	ld ilink, [sp, -8] /* pc into ilink */

	/* fall through to rtie instruction */

.balign 4
_firq_no_reschedule:

	/* LP registers are already restored, just switch back to bank 0 */
	rtie
/*******************************************************************************
 *
 * _firq_stack_setup - install the FIRQ stack in register bank 1
 *
 * Temporarily switches to register bank 1, points its sp at the top of
 * _firq_stack (stacks grow down), then switches back to bank 0.
 *
 * RETURNS: N/A
 */
SECTION_FUNC(TEXT, _firq_stack_setup)

	/* select register bank 1 */
	lr r0, [_ARC_V2_STATUS32]
	and r0, r0, ~_ARC_V2_STATUS32_RB(7)
	or r0, r0, _ARC_V2_STATUS32_RB(1)
	kflag r0

	/* bank 1's sp = top of the dedicated FIRQ stack */
	mov sp, _firq_stack
	add sp, sp, CONFIG_FIRQ_STACK_SIZE

	/*
	 * We have to reload r0 here, because it is bank1 r0 which contains
	 * garbage, not bank0 r0 containing the previous value of status32.
	 */
	lr r0, [_ARC_V2_STATUS32]
	and r0, r0, ~_ARC_V2_STATUS32_RB(7)
	kflag r0

	j_s.nd [blink]

114
arch/arc/core/fatal.c Normal file
View File

@ -0,0 +1,114 @@
/* fatal.c - fatal fault handling */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the routines necessary for handling fatal faults on
ARCv2 CPUs.
*/
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
/*
 * Global default exception stack frame: handed to the fatal error routines
 * when a fault has no hardware-generated ESF of its own (see _Fault()).
 */
const NANO_ESF __defaultEsf = {
	0xdeaddead, /* placeholder */
};
/*******************************************************************************
 *
 * _NanoFatalErrorHandler - nanokernel fatal error handler
 *
 * This routine is called when fatal error conditions are detected by software
 * and is responsible only for reporting the error. Once reported, it then
 * invokes the user provided routine _SysFatalErrorHandler() which is
 * responsible for implementing the error handling policy.
 *
 * The caller is expected to always provide a usable ESF. In the event that the
 * fatal error does not have a hardware generated ESF, the caller should either
 * create its own or use a pointer to the global default ESF <__defaultEsf>.
 *
 * RETURNS: This function does not return.
 *
 * \NOMANUAL
 */
FUNC_NORETURN void _NanoFatalErrorHandler(
	unsigned int reason,  /* fatal error reason (_NANO_ERR_* code) */
	const NANO_ESF *pEsf  /* exception stack frame describing the fault */
	)
{
	switch (reason) {
	case _NANO_ERR_INVALID_TASK_EXIT:
		PR_EXC("***** Invalid Exit Software Error! *****\n");
		break;

#if defined(CONFIG_STACK_CANARIES)
	case _NANO_ERR_STACK_CHK_FAIL:
		PR_EXC("***** Stack Check Fail! *****\n");
		break;
#endif /* CONFIG_STACK_CANARIES */

#ifdef CONFIG_ENHANCED_SECURITY
	case _NANO_ERR_INVALID_STRING_OP:
		PR_EXC("**** Invalid string operation! ****\n");
		break;
#endif /* CONFIG_ENHANCED_SECURITY */

	default:
		/* <reason> is unsigned: %u (not %d) matches the argument type */
		PR_EXC("**** Unknown Fatal Error %u! ****\n", reason);
		break;
	}

	PR_EXC("Current context ID = 0x%x\n"
	       "Faulting instruction address = 0x%x\n",
	       context_self_get(),
	       _arc_v2_aux_reg_read(_ARC_V2_ERET));

	/*
	 * Now that the error has been reported, call the user implemented
	 * policy to respond to the error. The decisions as to what responses
	 * are appropriate to the various errors are something the customer
	 * must decide.
	 */
	_SysFatalErrorHandler(reason, pEsf);

	/* spin forever: this handler must never return to its caller */
	for (;;)
		;
}

109
arch/arc/core/fault.c Normal file
View File

@ -0,0 +1,109 @@
/* fault.c - common fault handler for ARCv2 */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* Common fault handler for ARCv2 processors.
*/
#include <toolchain.h>
#include <sections.h>
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <nanok.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
#if (CONFIG_FAULT_DUMP > 0)
/* fault dumping enabled: forward both arguments to _FaultDump() */
#define FAULT_DUMP(esf, fault) _FaultDump(esf, fault)
#else
/*
 * Fault dumping disabled: consume both arguments without side effects.
 * Uses the standard single-statement do/while (0) idiom (the original
 * "while ((0))" double parenthesization was non-idiomatic).
 */
#define FAULT_DUMP(esf, fault) \
	do {                   \
		(void) esf;    \
		(void) fault;  \
	} while (0)
#endif
#if (CONFIG_FAULT_DUMP > 0)
/*
 * _FaultDump - dump information regarding fault (FAULT_DUMP > 0)
 *
 * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
 * (short form): the vector, cause code and parameter decoded from the ECR
 * auxiliary register, plus the faulting address from the EFA register.
 *
 * RETURNS: N/A
 *
 * \NOMANUAL
 */
void _FaultDump(const NANO_ESF *esf, int fault)
{
	uint32_t exc_addr = _arc_v2_aux_reg_read(_ARC_V2_EFA);
	uint32_t ecr = _arc_v2_aux_reg_read(_ARC_V2_ECR);

	/* neither parameter is used by the short-form dump */
	ARG_UNUSED(esf);
	ARG_UNUSED(fault); /* was missing: silences unused-parameter warnings */

	PR_EXC("Exception vector: 0x%x, cause code: 0x%x, parameter 0x%x\n",
	       _ARC_V2_ECR_VECTOR(ecr),
	       _ARC_V2_ECR_CODE(ecr),
	       _ARC_V2_ECR_PARAMETER(ecr));
	PR_EXC("Address 0x%x\n", exc_addr);
}
#endif /* CONFIG_FAULT_DUMP */
/*
 * _Fault - fault handler
 *
 * Reached (via the assembly fault vectors) when hardware detects a fatal
 * error. It reports the fault, then defers to the user provided
 * _SysFatalErrorHandler() which implements the error handling policy.
 *
 * RETURNS: This function does not return.
 *
 * \NOMANUAL
 */
void _Fault(void)
{
	/* the exception cause register describes what went wrong */
	uint32_t cause = _arc_v2_aux_reg_read(_ARC_V2_ECR);

	FAULT_DUMP(&__defaultEsf, cause);

	_SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &__defaultEsf);
}

107
arch/arc/core/fault_s.s Normal file
View File

@ -0,0 +1,107 @@
/* fault_s.s - fault handlers for ARCv2 */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* Fault handlers for ARCv2 processors.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
#include "swap_macros.h"
GTEXT(_Fault)
GTEXT(__reset)
GTEXT(__memory_error)
GTEXT(__instruction_error)
GTEXT(__ev_machine_check)
GTEXT(__ev_tlb_miss_i)
GTEXT(__ev_tlb_miss_d)
GTEXT(__ev_prot_v)
GTEXT(__ev_privilege_v)
GTEXT(__ev_swi)
GTEXT(__ev_trap)
GTEXT(__ev_extension)
GTEXT(__ev_div_zero)
GTEXT(__ev_dc_error)
GTEXT(__ev_maligned)
GDATA(_firq_stack)
/* holds the faulting thread's sp while the fault handler runs on the FIRQ stack */
SECTION_VAR(BSS, saved_stack_pointer)
	.word 0
/*
 * __fault - fault handler installed in the fault and reserved vectors
 *
 * All fault vectors funnel into this single entry point.
 */
SECTION_SUBSEC_FUNC(TEXT,__fault,__memory_error)
SECTION_SUBSEC_FUNC(TEXT,__fault,__instruction_error)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_machine_check)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_i)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_d)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_prot_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_privilege_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_swi)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_extension)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_div_zero)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_dc_error)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_maligned)

	/*
	 * Before invoking the exception handler, the kernel switches to an
	 * exception stack, which is really the FIRQ stack, to save the
	 * faulting thread's registers. It can use the FIRQ stack because it
	 * knows it is unused, since it is safe to assume that if an exception
	 * has happened in a FIRQ handler, the problem is fatal and all the
	 * kernel can do is just print a diagnostic message and halt.
	 */

	st sp, [saved_stack_pointer]	/* remember the faulting thread's sp */
	mov_s sp, _firq_stack
	add sp, sp, CONFIG_FIRQ_STACK_SIZE	/* stacks grow down: start at top */

	/* save caller saved registers */
	_create_irq_stack_frame

	jl _Fault	/* C-level fault reporting */

	/* if _Fault returns, restore the registers */
	_pop_irq_stack_frame

	/* now restore the stack */
	ld sp,[saved_stack_pointer]
	rtie

94
arch/arc/core/ffs.s Normal file
View File

@ -0,0 +1,94 @@
/* ffs.S - ARCv2 find first set assembly routines */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This library implements nanoFfsMsb() and nanoFfsLsb() which returns the
most and least significant bit set respectively.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
/* Exports */
GTEXT(nanoFfsMsb)
GTEXT(nanoFfsLsb)
/*******************************************************************************
 *
 * nanoFfsMsb - find first set bit (searching from the most significant bit)
 *
 * This routine finds the first bit set in the argument passed it and
 * returns the index of that bit. Bits are numbered starting
 * at 1 from the least significant bit. A return value of zero indicates that
 * the value passed is zero.
 *
 * RETURNS: most significant bit set
 */
SECTION_FUNC(TEXT, nanoFfsMsb)

	/*
	 * The FLS instruction returns the bit number (0-31), and 0 if the operand
	 * is 0. So, the bit number must be incremented by 1 only in the case of
	 * an operand value that is non-zero (.f sets the Z flag for that test).
	 */
	fls.f r0, r0
	j_s.d [blink]
	add.nz r0, r0, 1	/* delay slot: +1 only when the input was non-zero */
/*******************************************************************************
 *
 * nanoFfsLsb - find first set bit (searching from the least significant bit)
 *
 * This routine finds the first bit set in the argument passed it and
 * returns the index of that bit. Bits are numbered starting
 * at 1 from the least significant bit. A return value of zero indicates that
 * the value passed is zero.
 *
 * RETURNS: least significant bit set
 */
SECTION_FUNC(TEXT, nanoFfsLsb)

	/*
	 * The FFS instruction returns the bit number (0-31), and 31 if the operand
	 * is 0. So, the bit number must be incremented by 1, or, in the case of an
	 * operand value of 0 (Z flag set by .f), forced to 0.
	 */
	ffs.f r0, r0
	add.nz r0, r0, 1	/* non-zero input: convert 0-based index to 1-based */
	j_s.d [blink]
	mov.z r0, 0	/* delay slot: zero input yields 0 */

97
arch/arc/core/irq_lock.s Normal file
View File

@ -0,0 +1,97 @@
/* irq_lock.s - interrupt locking */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _IRQ_LOCK__H_
#define _IRQ_LOCK__H_
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
/*******************************************************************************
 *
 * irq_lock - disable all interrupts on the local CPU
 *
 * This routine disables interrupts. It can be called from either interrupt,
 * task or fiber level. This routine returns an architecture-dependent
 * lock-out key representing the "interrupt disable state" prior to the call;
 * this key can be passed to irq_unlock() to re-enable interrupts.
 *
 * The lock-out key should only be used as the argument to the
 * irq_unlock() API. It should never be used to manually re-enable
 * interrupts or to inspect or manipulate the contents of the source register.
 *
 * WARNINGS
 * Invoking a VxMicro routine with interrupts locked may result in
 * interrupts being re-enabled for an unspecified period of time. If the
 * called routine blocks, interrupts will be re-enabled while another
 * context executes, or while the system is idle.
 *
 * The "interrupt disable state" is an attribute of a context. Thus, if a
 * fiber or task disables interrupts and subsequently invokes a VxMicro
 * system routine that causes the calling context to block, the interrupt
 * disable state will be restored when the context is later rescheduled
 * for execution.
 *
 * RETURNS: An architecture-dependent lock-out key representing the
 * "interrupt disable state" prior to the call.
 *
 * \NOMANUAL
 */
SECTION_FUNC(TEXT, irq_lock)
	j_s.d [blink]
	clri r0	/* delay slot: disable interrupts, previous state (key) into r0 */
/*******************************************************************************
 *
 * irq_unlock - enable all interrupts on the local CPU
 *
 * This routine re-enables interrupts on the local CPU. The <key> parameter
 * is an architecture-dependent lock-out key that is returned by a previous
 * invocation of irq_lock().
 *
 * This routine can be called from either interrupt, task or fiber level.
 *
 * RETURNS: N/A
 *
 * \NOMANUAL
 */
SECTION_FUNC(TEXT, irq_unlock)
	j_s.d [blink]
	seti r0	/* delay slot: restore the interrupt state from <key> (in r0) */
#endif /* _IRQ_LOCK__H_ */

205
arch/arc/core/irq_manage.c Normal file
View File

@ -0,0 +1,205 @@
/* irq_manage.c - ARCv2 interrupt management */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
*
* Interrupt management:
*
* - enabling/disabling
* - dynamic ISR connecting/replacing
*
* SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
*
* An IRQ number passed to the <irq> parameters found in this file is a
* number from 16 to last IRQ number on the platform.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <misc/__assert.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
/*
* irq_handler_set - replace an interrupt handler by another
*
* An interrupt's ISR can be replaced at runtime. Care must be taken that the
* interrupt is disabled before doing this.
*
* This routine will hang if <old> is not found in the table and ASSERT_ON is
* enabled.
*
* RETURNS: N/A
*/
void irq_handler_set(
	unsigned int irq,
	void (*old)(void *arg),
	void (*new)(void *arg),
	void *arg
	)
{
	/* IRQ numbers below 16 are not in the table; slot 0 is IRQ 16 */
	int slot = irq - 16;
	int lock_key = irq_lock_inline();

	__ASSERT(old == _IsrTable[slot].isr,
		 "expected ISR not found in table");

	/* only swap the handler if the caller knew the current one */
	if (_IsrTable[slot].isr == old) {
		_IsrTable[slot].arg = arg;
		_IsrTable[slot].isr = new;
	}

	irq_unlock_inline(lock_key);
}
/*
* irq_enable - enable an interrupt line
*
 * Enable the interrupt line. After this call, the CPU will receive
 * interrupts for the specified
 * <irq>.
*
* RETURNS: N/A
*/
void irq_enable(unsigned int irq)
{
	/* the interrupt unit is manipulated with interrupts locked */
	int lock_key = irq_lock_inline();

	_arc_v2_irq_unit_int_enable(irq);

	irq_unlock_inline(lock_key);
}
/*
* irq_disable - disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* RETURNS: N/A
*/
void irq_disable(unsigned int irq)
{
	/* the interrupt unit is manipulated with interrupts locked */
	int lock_key = irq_lock_inline();

	_arc_v2_irq_unit_int_disable(irq);

	irq_unlock_inline(lock_key);
}
/*
* irq_priority_set - set an interrupt's priority
*
* Valid values are from 0 to 15. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them. ISR
* installed with priority 0 interrupts cannot make kernel calls.
*
* The priority is verified if ASSERT_ON is enabled.
*
* RETURNS: N/A
*/
/*
 * Set the priority of interrupt line <irq> to <prio>.
 *
 * <prio> is unsigned, so the original `prio >= 0` check was a tautology
 * (always true) and only the upper bound was ever really verified; the
 * redundant comparison is dropped to avoid compiler warnings and to make
 * the actual contract explicit: prio must be < CONFIG_NUM_IRQ_PRIORITIES.
 *
 * RETURNS: N/A
 */
void irq_priority_set(
	unsigned int irq,
	unsigned int prio
	)
{
	int key = irq_lock_inline();

	/* unsigned: only the upper bound needs checking */
	__ASSERT(prio < CONFIG_NUM_IRQ_PRIORITIES,
		 "invalid priority!");
	_arc_v2_irq_unit_prio_set(irq, prio);
	irq_unlock_inline(key);
}
/*
* _SpuriousIRQ - spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* RETURNS: N/A
*/
#include <misc/printk.h>
/* default handler for unconnected interrupts: report, then spin forever */
void _SpuriousIRQ(void *unused)
{
	ARG_UNUSED(unused);

	printk("_SpuriousIRQ(). Spinning...\n");

	for (;;) {
		/* deliberately hang: a spurious interrupt is a fatal condition */
	}
}
/*
* irq_connect - connect an ISR to an interrupt line
*
* <isr> is connected to interrupt line <irq>, a number greater than or equal
* 16. No prior ISR can have been connected on <irq> interrupt line since the
* system booted.
*
* This routine will hang if another ISR was connected for interrupt line <irq>
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
*
* RETURNS: the interrupt line number
*/
int irq_connect(
	unsigned int irq,
	unsigned int prio,
	void (*isr)(void *arg),
	void *arg
	)
{
	/*
	 * irq_handler_set() asserts that the slot still holds _SpuriousIRQ,
	 * i.e. that no other ISR was connected to this line since boot.
	 */
	irq_handler_set(irq, _SpuriousIRQ, isr, arg);
	irq_priority_set(irq, prio);

	return irq;
}
/*
* irq_disconnect - disconnect an ISR from an interrupt line
*
* Interrupt line <irq> is disconnected from its ISR and the latter is
* replaced by _SpuriousIRQ(). irq_disable() should have been called before
* invoking this routine.
*
* RETURNS: N/A
*/
void irq_disconnect(unsigned int irq)
{
	/*
	 * Pass the currently installed handler as <old> so irq_handler_set()'s
	 * consistency check always succeeds; the slot reverts to _SpuriousIRQ.
	 */
	irq_handler_set(irq, _IsrTable[irq - 16].isr, _SpuriousIRQ, NULL);
}

240
arch/arc/core/isr_wrapper.s Normal file
View File

@ -0,0 +1,240 @@
/* isr_wrapper.s - wrapper around ISRs with logic for context switching */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Wrapper installed in vector table for handling dynamic interrupts that accept
a parameter.
*/
#define _ASMLANGUAGE
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
#include <nanok.h>
#include <nanokernel/cpu.h>
GTEXT(_isr_enter)
GTEXT(_isr_demux)
/*
The symbols in this file are not real functions, and neither are
_rirq_enter/_firq_enter: they are jump points.
The flow is the following:
ISR -> _isr_enter -- + -> _rirq_enter -> _isr_demux -> ISR -> _rirq_exit
|
+ -> _firq_enter -> _isr_demux -> ISR -> _firq_exit
Context switch explanation:
The context switch code is spread in these files:
isr_wrapper.s, swap.s, swap_macros.s, fast_irq.s, regular_irq.s
IRQ stack frame layout:
high address
status32
pc
[jli_base]
[ldi_base]
[ei_base]
lp_count
lp_start
lp_end
blink
r13
...
sp -> r0
low address
[registers not taken into account in the current implementation]
The context switch code adopts this standard so that it is easier to follow:
- r1 contains _NanoKernel ASAP and is not overwritten over the lifespan of
the functions.
- r2 contains _NanoKernel.current ASAP, and the incoming thread when we
transition from outgoing context to incoming context
Not loading _NanoKernel into r0 allows loading _NanoKernel without stomping on
the parameter in r0 in _Swap().
ARCv2 processor have two kinds of interrupts: fast (FIRQ) and regular. The
official documentation calls them regular interrupts IRQ, but the internals of
the kernel calls them RIRQ to differentiate with the 'irq' subsystem, which is
the interrupt API/layer of abstraction.
FIRQs can be used to allow ISRs to run without having to save any context,
since they work with a second register bank. However, they can be somewhat more
limited than RIRQs since the register bank does not copy every possible
register that is needed to implement all available instructions: an example is
that the 'loop' registers (lp_count, lp_end, lp_start) are not present in the
second bank. The kernel thus takes upon itself to save these extra registers,
if the FIRQ is made known to the kernel. It is possible for a FIRQ to operate
outside of the kernel, but care must be taken to only use instructions that
only use the banked registers. RIRQs must always use the kernel's interrupt
entry and exit mechanisms.
The kernel is able to handle transitions to and from FIRQ, RIRQ and threads
(fibers/task). The contexts are saved 'lazily': the minimum amount of work is
done upfront, and the rest is done when needed:
o RIRQ
All needed registers to run C code in the ISR are saved automatically
on the outgoing context's stack: loop, status32, pc, and the caller-
saved GPRs. That stack frame layout is pre-determined. If returning
to a fiber, the stack is popped and no registers have to be saved by
the kernel. If a context switch is required, the callee-saved GPRs
are then saved in the context control structure (CCS).
o FIRQ
First, a FIRQ can be interrupting a lower-priority RIRQ: if this is the case,
the FIRQ does not take a scheduling decision and leaves it to the RIRQ to
handle. This limits the amount of code that has to run at interrupt-level.
GPRs are banked, loop registers are saved in a global structure upon
interrupt entry. If returning to a fiber, loop registers are popped and the
CPU switches back to bank 0 for the GPRs. If a context switch is
needed, at this point only are all the registers saved. First, a
stack frame with the same layout as the automatic RIRQ one is created
and then the callee-saved GPRs are saved in the CCS. status32_p0 and
ilink are saved in this case, not status32 and pc.
To create the stack frame, the FIRQ handling code must first go back to using
bank0 of registers, since that is where the registers containing the exiting
thread's context are saved. Care must be taken not to touch any register before saving
them: the only one usable at that point is the stack pointer.
o coop
When a coop context switch is done, the callee-saved registers are
saved in the CCS. The other GPRs do not have to be saved, since the
compiler has already placed them on the stack.
For restoring the contexts, there are six cases. In all cases, the
callee-saved registers of the incoming thread have to be restored. Then, there
are specifics for each case:
From coop:
o to coop
Restore interrupt lock level and normal function call return.
o to any irq
The incoming interrupted thread has an IRQ stack frame containing the
caller-saved registers that has to be popped. status32 has to be restored,
then we jump to the interrupted instruction.
From FIRQ:
The processor is back to using bank0, not bank1 anymore, because it had to
save the outgoing context from bank0, and now has to load the incoming one
into bank0.
o to coop
The address of the returning instruction from _Swap() is loaded in ilink and
the saved status32 in status32_p0, taking care to adjust the interrupt lock
state desired in status32_p0. The return value is put in r0.
o to any irq
The IRQ has saved the caller-saved registers in a stack frame, which must be
popped, and status32 and pc loaded in status32_p0 and ilink.
From RIRQ:
o to coop
The interrupt return mechanism in the processor expects a stack frame, but
the outgoing context did not create one. A fake one is created here, with
only the relevant values filled in: pc, status32 and the return value in r0.
There is a discrepancy between the ABI from the ARCv2 docs, including the
way the processor pushes GPRs in pairs in the IRQ stack frame, and the ABI
GCC uses. r13 should be a callee-saved register, but GCC treats it as
caller-saved. This means that the processor pushes it in the stack frame
along with r12, but the compiler does not save it before entering a
function. So, it is saved as part of the callee-saved registers, and
restored there, but the processor restores it *a second time* when popping
the IRQ stack frame. Thus, the correct value must also be put in the fake
stack frame when returning to a thread that context switched out
cooperatively.
o to any irq
Both types of IRQs already have an IRQ stack frame: simply return from
interrupt.
*/
/*
 * Common interrupt entry point: decide whether the active interrupt is a
 * FIRQ or a RIRQ, pick the matching enter/exit stubs, and tail-jump to the
 * enter stub. r2 = enter stub, r3 = exit stub (consumed by _isr_demux).
 */
SECTION_FUNC(TEXT, _isr_enter)
lr r0, [_ARC_V2_AUX_IRQ_ACT] /* bitfield of active interrupt priorities */
ffs r0, r0 /* lowest set bit = priority of the active interrupt */
cmp r0, 0
/* priority 0 active -> FIRQ path; any other priority -> RIRQ path */
mov.z r3, _firq_exit
mov.z r2, _firq_enter
mov.nz r3, _rirq_exit
mov.nz r2, _rirq_enter
j_s.nd [r2]
/* when getting here, r3 contains the interrupt exit stub to call */
/*
 * Look up the ISR and its argument in _IsrTable for the interrupt that
 * fired, call the ISR, then jump to the exit stub left in r3 by _isr_enter.
 */
SECTION_FUNC(TEXT, _isr_demux)
push_s r3 /* preserve the exit stub across the ISR call */
lr r0, [_ARC_V2_ICAUSE] /* number of the interrupt being serviced */
sub r0, r0, 16 /* table slot 0 corresponds to IRQ 16 */
mov r1, _IsrTable
add3 r0, r1, r0 /* table entries are 8-bytes wide */
ld r1, [r0, 4] /* ISR into r1 */
jl_s.d [r1]
ld_s r0, [r0] /* delay slot: ISR parameter into r0 */
/* back from ISR, jump to exit stub */
pop_s r3
j_s.nd [r3]
nop

View File

@ -0,0 +1,126 @@
/* offsets.c - ARCv2 nano kernel structure member offset definition file */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module is responsible for the generation of the absolute symbols whose
value represents the member offsets for various ARCv2 nanokernel
structures.
All of the absolute symbols defined by this module will be present in the
final microkernel or nanokernel ELF image (due to the linker's reference to
the _OffsetAbsSyms symbol).
INTERNAL
It is NOT necessary to define the offset for every member of a structure.
Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of
completeness.
*/
#include <genOffset.h>
#include <nanok.h>
#include <offsets/common.h>
/* ARCv2-specific tNANO structure member offsets */
GEN_OFFSET_SYM(tNANO, rirq_sp);
GEN_OFFSET_SYM(tNANO, firq_regs);
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
GEN_OFFSET_SYM(tNANO, idle);
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/* ARCv2-specific tCCS structure member offsets */
GEN_OFFSET_SYM(tCCS, intlock_key);
GEN_OFFSET_SYM(tCCS, relinquish_cause);
GEN_OFFSET_SYM(tCCS, return_value);
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
GEN_OFFSET_SYM(tCCS, custom_data);
#endif
/* ARCv2-specific IRQ stack frame (tISF) structure member offsets,
 * consumed by the assembly context-switch code (swap.s, *_irq.s)
 */
GEN_OFFSET_SYM(tISF, r0);
GEN_OFFSET_SYM(tISF, r1);
GEN_OFFSET_SYM(tISF, r2);
GEN_OFFSET_SYM(tISF, r3);
GEN_OFFSET_SYM(tISF, r4);
GEN_OFFSET_SYM(tISF, r5);
GEN_OFFSET_SYM(tISF, r6);
GEN_OFFSET_SYM(tISF, r7);
GEN_OFFSET_SYM(tISF, r8);
GEN_OFFSET_SYM(tISF, r9);
GEN_OFFSET_SYM(tISF, r10);
GEN_OFFSET_SYM(tISF, r11);
GEN_OFFSET_SYM(tISF, r12);
GEN_OFFSET_SYM(tISF, r13);
GEN_OFFSET_SYM(tISF, blink);
GEN_OFFSET_SYM(tISF, lp_end);
GEN_OFFSET_SYM(tISF, lp_start);
GEN_OFFSET_SYM(tISF, lp_count);
GEN_OFFSET_SYM(tISF, pc);
GEN_OFFSET_SYM(tISF, status32);
GEN_ABSOLUTE_SYM(__tISF_SIZEOF, sizeof(tISF));
/* ARCv2-specific preempt registers structure member offsets */
GEN_OFFSET_SYM(tPreempt, sp);
GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt));
/* ARCv2-specific callee-saved stack */
GEN_OFFSET_SYM(tCalleeSaved, r13);
GEN_OFFSET_SYM(tCalleeSaved, r14);
GEN_OFFSET_SYM(tCalleeSaved, r15);
GEN_OFFSET_SYM(tCalleeSaved, r16);
GEN_OFFSET_SYM(tCalleeSaved, r17);
GEN_OFFSET_SYM(tCalleeSaved, r18);
GEN_OFFSET_SYM(tCalleeSaved, r19);
GEN_OFFSET_SYM(tCalleeSaved, r20);
GEN_OFFSET_SYM(tCalleeSaved, r21);
GEN_OFFSET_SYM(tCalleeSaved, r22);
GEN_OFFSET_SYM(tCalleeSaved, r23);
GEN_OFFSET_SYM(tCalleeSaved, r24);
GEN_OFFSET_SYM(tCalleeSaved, r25);
GEN_OFFSET_SYM(tCalleeSaved, r26);
GEN_OFFSET_SYM(tCalleeSaved, fp);
GEN_OFFSET_SYM(tCalleeSaved, r30);
GEN_ABSOLUTE_SYM(__tCalleeSaved_SIZEOF, sizeof(tCalleeSaved));
/* ARCv2-specific registers-saved-in-FIRQ structure member offsets
 * (loop registers are not banked and must be saved by software)
 */
GEN_OFFSET_SYM(tFirqRegs, lp_count);
GEN_OFFSET_SYM(tFirqRegs, lp_start);
GEN_OFFSET_SYM(tFirqRegs, lp_end);
GEN_ABSOLUTE_SYM(__tFirqRegs_SIZEOF, sizeof(tFirqRegs));
/* size of the tCCS structure sans save area for floating point regs */
GEN_ABSOLUTE_SYM(__tCCS_NOFLOAT_SIZEOF, sizeof(tCCS));
GEN_ABS_SYM_END

203
arch/arc/core/regular_irq.s Normal file
View File

@ -0,0 +1,203 @@
/* regular_irq.s - handling of transitions to-and-from regular IRQs (RIRQ) */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the code for handling entry to and exit from regular
IRQs.
See isr_wrapper.s for details.
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
#include "swap_macros.h"
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
/*******************************************************************************
*
* _rirq_enter - work to be done before handing control to an IRQ ISR
*
* The processor pushes automatically all registers that need to be saved.
* However, since the processor always runs at kernel privilege there is no
* automatic switch to the IRQ stack: this must be done in software.
*
* Assumption by _isr_demux: r3 is untouched by _rirq_enter.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _rirq_enter)
mov r1, _NanoKernel
ld r2, [r1, __tNANO_current_OFFSET] /* r2 = outgoing thread's tCCS */
#if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS == 1
/* save the outgoing thread's SP, then switch to the dedicated RIRQ stack */
st sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
ld sp, [r1, __tNANO_rirq_sp_OFFSET]
#else
#error regular irq nesting is not implemented
#endif
j _isr_demux
/*******************************************************************************
*
* _rirq_exit - work to be done exiting an IRQ
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _rirq_exit)
mov r1, _NanoKernel
ld r2, [r1, __tNANO_current_OFFSET] /* r2 = outgoing thread's tCCS */
#if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS > 1
/* check if we're a nested interrupt: if so, let the interrupted interrupt
 * handle the reschedule */
lr r3, [_ARC_V2_AUX_IRQ_ACT]
ffs r0, r3
asl r0, 1, r0
/* Viper on ARCv2 always runs in kernel mode, so assume bit31 [U] in
 * AUX_IRQ_ACT is always 0: if the contents of AUX_IRQ_ACT is greater
 * than FFS(AUX_IRQ_ACT), it means that another bit is set so an
 * interrupt was interrupted.
 */
cmp r0, r3
brgt.nd _rirq_return_from_rirq
ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
#endif
/*
 * Both (a)reschedule and (b)non-reschedule cases need to load the current
 * thread's stack, but don't have to use it until the decision is taken:
 * load the delay slots with the 'load stack pointer' instruction.
 *
 * a) needs to load it to save outgoing context.
 * b) needs to load it to restore the interrupted context.
 */
ld r0, [r2, __tCCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE /* non-preemptible -> just return from IRQ */
bz.d _rirq_no_reschedule
ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
ld r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
cmp r0, 0 /* no ready fiber -> no reschedule either */
bz.d _rirq_no_reschedule
ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
.balign 4
_rirq_reschedule:
/* _save_callee_saved_regs expects outgoing thread in r2 */
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, __tCCS_relinquish_cause_OFFSET]
/* incoming fiber is in r0: it becomes the new 'current' */
mov r2, r0
st r2, [r1, __tNANO_current_OFFSET]
ld r3, [r2, __tCCS_link_OFFSET] /* unlink fiber from the ready list */
st r3, [r1, __tNANO_fiber_OFFSET]
/*
 * _load_callee_saved_regs expects incoming thread in r2.
 * _load_callee_saved_regs restores the stack pointer.
 */
_load_callee_saved_regs
/* dispatch on how the incoming thread relinquished the CPU */
ld r3, [r2, __tCCS_relinquish_cause_OFFSET]
breq.nd r3, _CAUSE_RIRQ, _rirq_return_from_rirq
nop
breq.nd r3, _CAUSE_FIRQ, _rirq_return_from_firq
nop
/* fall through */
.balign 4
_rirq_return_from_coop:
/* status32 and pc (blink) are already on the stack in the right order */
/* update status32.ie (explanation in firq_exit:_firq_return_from_coop) */
ld r0, [sp, 4]
ld r3, [r2, __tCCS_intlock_key_OFFSET]
st 0, [r2, __tCCS_intlock_key_OFFSET]
cmp r3, 0
or.ne r0, r0, _ARC_V2_STATUS32_IE
st r0, [sp, 4]
/* carve fake stack */
sub sp, sp, (__tISF_SIZEOF - 12) /* a) status32/pc are already on the stack
 * b) a real value will be pushed in r0 */
/* push return value on stack */
ld r0, [r2, __tCCS_return_value_OFFSET]
push_s r0
/*
 * r13 is part of both the callee and caller-saved register sets because
 * the processor is only able to save registers in pair in the regular
 * IRQ prologue. r13 thus has to be set to its correct value in the IRQ
 * stack frame.
 */
st r13, [sp, __tISF_r13_OFFSET]
/* stack now has the IRQ stack frame layout, pointing to r0 */
/* fall through to rtie instruction */
.balign 4
_rirq_return_from_firq:
_rirq_return_from_rirq:
/* rtie will pop the rest from the stack */
/* fall through to rtie instruction */
.balign 4
_rirq_no_reschedule:
rtie

202
arch/arc/core/swap.s Normal file
View File

@ -0,0 +1,202 @@
/* swap.s - thread context switching */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the routines necessary for thread context switching
on ARCv2 CPUs.
See isr_wrapper.s for details.
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
#include <v2/irq.h>
#include "swap_macros.h"
GTEXT(_Swap)
GDATA(_NanoKernel)
/*******************************************************************************
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a
* parameter to _Swap(). The key is in fact the value stored in the register
* operand of a CLRI instruction.
*
* It stores the intlock key parameter into current->intlock_key.
* Given that _Swap() is called to effect a cooperative context switch,
* the caller-saved integer registers are saved on the stack by the function
* call preamble to _Swap(). This creates a custom stack frame that will be
* popped when returning from _Swap(), but is not suitable for handling a return
* from an exception. Thus, the fact that the thread is pending because of a
* cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
* the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code
* will take care of doing the right thing to restore the thread status.
*
* When _Swap() is invoked, we know the decision to perform a context switch or
* not has already been taken and a context switch must happen.
*
* RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int key);
*
*/
SECTION_FUNC(TEXT, _Swap)
/* interrupts are locked, interrupt key is in r0 */
mov r1, _NanoKernel
ld r2, [r1, __tNANO_current_OFFSET] /* r2 = outgoing thread's tCCS */
/* save intlock key */
st r0, [r2, __tCCS_intlock_key_OFFSET]
st _CAUSE_COOP, [r2, __tCCS_relinquish_cause_OFFSET]
/*
 * Save status32 and blink on the stack before the callee-saved registers.
 * This is the same layout as the start of an IRQ stack frame.
 */
lr r3, [_ARC_V2_STATUS32]
push_s r3
push_s blink
_save_callee_saved_regs
/* pick the incoming thread: the head fiber, or the task if none */
ld r2, [r1, __tNANO_fiber_OFFSET]
breq.nd r2, 0, _swap_to_the_task
.balign 4
_swap_to_a_fiber:
ld r3, [r2, __tCCS_link_OFFSET] /* unlink fiber from the ready list */
b.d _finish_swapping_to_context /* always execute delay slot */
st r3, [r1, __tNANO_fiber_OFFSET] /* delay slot */
.balign 4
_swap_to_the_task:
ld r2, [r1, __tNANO_task_OFFSET]
/* fall through */
.balign 4
_finish_swapping_to_context:
/* entering here, r2 contains the new current context */
#if 0
/* don't save flags in tNANO: slower, error-prone, and might not even give
 * a speed boost where it's supposed to */
ld r3, [r2, __tCCS_flags_OFFSET]
st r3, [r1, __tNANO_flags_OFFSET]
#endif
/* XXX - can be moved to delay slot of _CAUSE_RIRQ ? */
st r2, [r1, __tNANO_current_OFFSET]
_load_callee_saved_regs
/* dispatch on how the incoming thread relinquished the CPU */
ld r3, [r2, __tCCS_relinquish_cause_OFFSET]
breq.nd r3, _CAUSE_RIRQ, _swap_return_from_rirq
nop
breq.nd r3, _CAUSE_FIRQ, _swap_return_from_firq
nop
/* fall through to _swap_return_from_coop */
.balign 4
_swap_return_from_coop:
ld r1, [r2, __tCCS_intlock_key_OFFSET] /* saved intlock key -> r1 */
st 0, [r2, __tCCS_intlock_key_OFFSET]
ld r0, [r2, __tCCS_return_value_OFFSET] /* _Swap() return value -> r0 */
/*
 * Adjust the stack here in case we go to _return_from_exc: this allows
 * keeping handling both coop and irq cases in _return_from_exc without
 * adding extra logic.
 */
add_s sp, sp, 8
lr ilink, [_ARC_V2_STATUS32]
bbit1 ilink, _ARC_V2_STATUS32_AE_BIT, _return_from_exc
sub_s sp, sp, 8
pop_s blink /* pc into blink */
pop_s r3 /* status32 into r3 */
kflag r3 /* write status32 */
j_s.d [blink] /* always execute delay slot */
seti r1 /* delay slot */
.balign 4
_swap_return_from_rirq:
_swap_return_from_firq:
_pop_irq_stack_frame
lr ilink, [_ARC_V2_STATUS32]
bbit1 ilink, _ARC_V2_STATUS32_AE_BIT, _return_from_exc
ld ilink, [sp, -4] /* status32 into ilink */
and ilink, ilink, 0x7ffffffe // keep interrupts disabled until seti
kflag ilink
ld ilink, [sp, -8] /* pc into ilink */
j.d [ilink]
seti (_ARC_V2_DEF_IRQ_LEVEL | (1 << 4))
.balign 4
_return_from_exc:
/* an exception is active: return via ERET/ERSTATUS instead of kflag/jump */
/* put the return address to eret */
ld ilink, [sp, -8] /* pc into ilink */
sr ilink, [_ARC_V2_ERET]
/* put status32 into estatus */
ld ilink, [sp, -4] /* status32 into ilink */
sr ilink, [_ARC_V2_ERSTATUS]
rtie

176
arch/arc/core/swap_macros.h Normal file
View File

@ -0,0 +1,176 @@
/* swap_macros.h - helper macros for context switch */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SWAP_MACROS__H_
#define _SWAP_MACROS__H_
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
#ifdef _ASMLANGUAGE
/*
 * _save_callee_saved_regs - save ABI callee-saved registers on the stack
 *
 * Pushes r13-r26, fp and r30 onto the current stack in a tCalleeSaved
 * layout, then records the resulting stack pointer in the context's tCCS
 * (preempReg.sp) so _load_callee_saved_regs can find it later.
 *
 * Entering this macro, the current context (tCCS *) is in r2.
 */
.macro _save_callee_saved_regs
/* carve out one tCalleeSaved frame on the stack */
sub_s sp, sp, __tCalleeSaved_SIZEOF
/* save regs on stack */
st r13, [sp, __tCalleeSaved_r13_OFFSET]
st r14, [sp, __tCalleeSaved_r14_OFFSET]
st r15, [sp, __tCalleeSaved_r15_OFFSET]
st r16, [sp, __tCalleeSaved_r16_OFFSET]
st r17, [sp, __tCalleeSaved_r17_OFFSET]
st r18, [sp, __tCalleeSaved_r18_OFFSET]
st r19, [sp, __tCalleeSaved_r19_OFFSET]
st r20, [sp, __tCalleeSaved_r20_OFFSET]
st r21, [sp, __tCalleeSaved_r21_OFFSET]
st r22, [sp, __tCalleeSaved_r22_OFFSET]
st r23, [sp, __tCalleeSaved_r23_OFFSET]
st r24, [sp, __tCalleeSaved_r24_OFFSET]
st r25, [sp, __tCalleeSaved_r25_OFFSET]
st r26, [sp, __tCalleeSaved_r26_OFFSET]
st fp, [sp, __tCalleeSaved_fp_OFFSET]
st r30, [sp, __tCalleeSaved_r30_OFFSET]
/* save stack pointer in tCCS so the context can be resumed later */
st sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
.endm
/*
 * _load_callee_saved_regs - restore ABI callee-saved registers from the stack
 *
 * Mirror of _save_callee_saved_regs: reloads the stack pointer saved in the
 * context's tCCS (preempReg.sp), pops r13-r26, fp and r30 from the
 * tCalleeSaved frame and releases that frame.
 *
 * Entering this macro, the current context (tCCS *) is in r2.
 */
.macro _load_callee_saved_regs
/* restore stack pointer from tCCS */
ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
ld r13, [sp, __tCalleeSaved_r13_OFFSET]
ld r14, [sp, __tCalleeSaved_r14_OFFSET]
ld r15, [sp, __tCalleeSaved_r15_OFFSET]
ld r16, [sp, __tCalleeSaved_r16_OFFSET]
ld r17, [sp, __tCalleeSaved_r17_OFFSET]
ld r18, [sp, __tCalleeSaved_r18_OFFSET]
ld r19, [sp, __tCalleeSaved_r19_OFFSET]
ld r20, [sp, __tCalleeSaved_r20_OFFSET]
ld r21, [sp, __tCalleeSaved_r21_OFFSET]
ld r22, [sp, __tCalleeSaved_r22_OFFSET]
ld r23, [sp, __tCalleeSaved_r23_OFFSET]
ld r24, [sp, __tCalleeSaved_r24_OFFSET]
ld r25, [sp, __tCalleeSaved_r25_OFFSET]
ld r26, [sp, __tCalleeSaved_r26_OFFSET]
ld fp, [sp, __tCalleeSaved_fp_OFFSET]
ld r30, [sp, __tCalleeSaved_r30_OFFSET]
/* release the tCalleeSaved frame */
add_s sp, sp, __tCalleeSaved_SIZEOF
.endm
/*
 * _create_irq_stack_frame - build an interrupt stack frame (tISF) by hand
 *
 * Pushes the caller-saved registers (r0-r13), blink and the zero-overhead
 * loop registers (lp_count/lp_start/lp_end) into a freshly allocated tISF
 * on the stack. The pc/status32 slots of the frame are NOT filled here.
 *
 * Must be called with interrupts locked or in P0.
 * Upon exit, sp will be pointing to the stack frame.
 */
.macro _create_irq_stack_frame
sub_s sp, sp, __tISF_SIZEOF
st blink, [sp, __tISF_blink_OFFSET]
/* store these right away so we can use them if needed */
st_s r13, [sp, __tISF_r13_OFFSET]
st_s r12, [sp, __tISF_r12_OFFSET]
st r11, [sp, __tISF_r11_OFFSET]
st r10, [sp, __tISF_r10_OFFSET]
st r9, [sp, __tISF_r9_OFFSET]
st r8, [sp, __tISF_r8_OFFSET]
st r7, [sp, __tISF_r7_OFFSET]
st r6, [sp, __tISF_r6_OFFSET]
st r5, [sp, __tISF_r5_OFFSET]
st r4, [sp, __tISF_r4_OFFSET]
st_s r3, [sp, __tISF_r3_OFFSET]
st_s r2, [sp, __tISF_r2_OFFSET]
st_s r1, [sp, __tISF_r1_OFFSET]
st_s r0, [sp, __tISF_r0_OFFSET]
/* lp_count is a core register but not directly storable: bounce via r0 */
mov r0, lp_count
st_s r0, [sp, __tISF_lp_count_OFFSET]
/* lp_start/lp_end are aux registers: read with lr, then store */
lr r0, [_ARC_V2_LP_START]
st_s r0, [sp, __tISF_lp_start_OFFSET]
lr r0, [_ARC_V2_LP_END]
st_s r0, [sp, __tISF_lp_end_OFFSET]
.endm
/*
 * _pop_irq_stack_frame - unwind an interrupt stack frame (tISF)
 *
 * Mirror of _create_irq_stack_frame: restores blink, the zero-overhead loop
 * registers and r0-r13 from the tISF, then releases the frame. The pc and
 * status32 slots are intentionally left for the caller (see comment below).
 *
 * Must be called with interrupts locked or in P0.
 * sp must be pointing the to stack frame.
 */
.macro _pop_irq_stack_frame
ld blink, [sp, __tISF_blink_OFFSET]
/* lp_count must be reloaded via a GPR; r0 is restored last, so it is free */
ld_s r0, [sp, __tISF_lp_count_OFFSET]
mov lp_count, r0
/* lp_start/lp_end are aux registers: write back with sr */
ld_s r0, [sp, __tISF_lp_start_OFFSET]
sr r0, [_ARC_V2_LP_START]
ld_s r0, [sp, __tISF_lp_end_OFFSET]
sr r0, [_ARC_V2_LP_END]
ld_s r13, [sp, __tISF_r13_OFFSET]
ld_s r12, [sp, __tISF_r12_OFFSET]
ld r11, [sp, __tISF_r11_OFFSET]
ld r10, [sp, __tISF_r10_OFFSET]
ld r9, [sp, __tISF_r9_OFFSET]
ld r8, [sp, __tISF_r8_OFFSET]
ld r7, [sp, __tISF_r7_OFFSET]
ld r6, [sp, __tISF_r6_OFFSET]
ld r5, [sp, __tISF_r5_OFFSET]
ld r4, [sp, __tISF_r4_OFFSET]
ld_s r3, [sp, __tISF_r3_OFFSET]
ld_s r2, [sp, __tISF_r2_OFFSET]
ld_s r1, [sp, __tISF_r1_OFFSET]
ld_s r0, [sp, __tISF_r0_OFFSET]
/*
 * All gprs have been reloaded, the only one that is still usable is
 * ilink.
 *
 * The pc and status32 values will still be on the stack. We cannot
 * pop them yet because the callers of _pop_irq_stack_frame must reload
 * status32 differently depending on the context they are running in
 * (_Swap(), firq or exception).
 */
add_s sp, sp, __tISF_SIZEOF
.endm
#endif /* _ASMLANGUAGE */
#endif /* _SWAP_MACROS__H_ */

61
arch/arc/defs.objs Normal file
View File

@ -0,0 +1,61 @@
# defs.objs - build system
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3) Neither the name of Wind River Systems nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Private header search paths for the nanokernel library.
nkernel_INCLUDE_DIR = $(strip \
${vBASE}/arch/${vARCH}/include \
${vBASE}/kernel/nanokernel/include \
)
# Directories whose sources are compiled wholesale into the library.
nkernel_PATH = $(strip \
${vBASE}/arch/${vARCH}/core \
${vBASE}/kernel/nanokernel/core \
)
nkernel_SRC :=
# Sources that are always built, regardless of configuration.
nkernel_SRC += $(strip \
${vBASE}/kernel/common/version.c \
${vBASE}/misc/printk.c \
)
# Safe string routines are only built when enhanced security is enabled.
ifeq ($(CONFIG_ENHANCED_SECURITY),y)
nkernel_SRC += $(strip \
${vBASE}/kernel/common/string_s.c \
)
endif
# Pick up every C and assembly source under the nkernel_PATH directories.
nkernel_SRC += $(foreach path,${nkernel_PATH},$(wildcard ${path}/*.c))
nkernel_SRC += $(foreach path,${nkernel_PATH},$(wildcard ${path}/*.s))
nkernel_SRC += ${vKLIB_DIR}/configs.c
# Register the nanokernel as a kernel library with the build system.
KLIBS += nkernel

287
arch/arc/include/nanok.h Normal file
View File

@ -0,0 +1,287 @@
/* nanok.h - private nanokernel definitions */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This file contains private nanokernel structures definitions and various other
definitions for the ARCv2 processor architecture.
This file is also included by assembly language files which must #define
_ASMLANGUAGE before including this header file. Note that nanokernel assembly
source files obtains structure offset values via "absolute symbols" in the
offsets.o module.
*/
#ifndef _NANOK__H_
#define _NANOK__H_
#ifdef __cplusplus
extern "C" {
#endif
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
#ifndef _ASMLANGUAGE
#include <stdint.h>
#endif
#ifndef _ASMLANGUAGE
/*
 * Cooperative register context. Empty on ARCv2: all registers that would
 * need cooperative saving are handled via the stack frames below.
 * NOTE: struct layouts here must stay in sync with the offsets generated
 * into offsets.o (see file description above).
 */
struct coop {
/*
 * Saved on the stack as part of handling a regular IRQ or by the kernel
 * when calling the FIRQ return code.
 */
};
/*
 * Interrupt stack frame: the registers pushed when taking a regular IRQ
 * (caller-saved GPRs, blink, zero-overhead loop registers, then pc and
 * status32).
 */
struct irq_stack_frame {
uint32_t r0;
uint32_t r1;
uint32_t r2;
uint32_t r3;
uint32_t r4;
uint32_t r5;
uint32_t r6;
uint32_t r7;
uint32_t r8;
uint32_t r9;
uint32_t r10;
uint32_t r11;
uint32_t r12;
uint32_t r13;
uint32_t blink;
uint32_t lp_end;
uint32_t lp_start;
uint32_t lp_count;
#ifdef CONFIG_CODE_DENSITY
/*
 * Currently unsupported. This is where those registers are automatically
 * pushed on the stack by the CPU when taking a regular IRQ.
 */
uint32_t ei_base;
uint32_t ldi_base;
uint32_t jli_base;
#endif
uint32_t pc;
uint32_t status32;
};
typedef struct irq_stack_frame tISF;
/* Preemption context: only the stack pointer needs a dedicated save slot. */
struct preempt {
uint32_t sp; /* r28 */
};
typedef struct preempt tPreempt;
/* Callee-saved registers pushed by the _save_callee_saved_regs macro. */
struct callee_saved {
uint32_t r13;
uint32_t r14;
uint32_t r15;
uint32_t r16;
uint32_t r17;
uint32_t r18;
uint32_t r19;
uint32_t r20;
uint32_t r21;
uint32_t r22;
uint32_t r23;
uint32_t r24;
uint32_t r25;
uint32_t r26;
uint32_t fp; /* r27 */
/* r28 is the stack pointer and saved separately */
/* r29 is ILINK and does not need to be saved */
uint32_t r30;
/*
 * No need to save r31 (blink), it's either already pushed as the pc or
 * blink on an irq stack frame.
 */
};
typedef struct callee_saved tCalleeSaved;
/* registers saved by software when taking a FIRQ */
struct firq_regs {
uint32_t lp_count;
uint32_t lp_start;
uint32_t lp_end;
};
typedef struct firq_regs tFirqRegs;
#endif /* _ASMLANGUAGE */
/* Bitmask definitions for the tCCS->flags bit field */
#define FIBER 0x000
#define TASK 0x001 /* 1 = task context, 0 = fiber context */
#define INT_ACTIVE 0x002 /* 1 = context is executing interrupt handler */
#define EXC_ACTIVE 0x004 /* 1 = context is executing exception handler */
#define USE_FP 0x010 /* 1 = context uses floating point unit */
#define PREEMPTIBLE 0x020 /* 1 = preemptible context */
#define ESSENTIAL 0x200 /* 1 = system context that must not abort */
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
/* stacks */
#define STACK_GROWS_DOWN 0
#define STACK_GROWS_UP 1
#define STACK_ALIGN_SIZE 4
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
/*
 * Reason a context has relinquished control: fibers can only be in the NONE
 * or COOP state, tasks can be one in the four.
 */
#define _CAUSE_NONE 0
#define _CAUSE_COOP 1
#define _CAUSE_RIRQ 2
#define _CAUSE_FIRQ 3
#ifndef _ASMLANGUAGE
/*
 * Per-context control structure (tCCS): one per fiber/task, holding
 * scheduling state plus the saved register contexts defined above.
 */
struct s_CCS {
struct s_CCS *link; /* node in singly-linked list
* _NanoKernel.fibers */
uint32_t flags; /* bitmask of flags above */
uint32_t intlock_key; /* interrupt key when relinquishing control */
int relinquish_cause; /* one of the _CAUSE_xxxx definitions above */
unsigned int return_value; /* return value from _Swap */
int prio; /* fiber priority, -1 for a task */
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
void *custom_data; /* available for custom use */
#endif
struct coop coopReg; /* saved cooperative context */
struct preempt preempReg; /* saved preemptive context (sp) */
#ifdef CONFIG_HOST_TOOLS_SUPPORT
struct s_CCS *activeLink; /* link to next context in the active list */
#endif
};
/*
 * Nanokernel state (tNANO): a single global instance, _NanoKernel, tracks
 * the runnable fibers, the background task and the current context.
 */
struct s_NANO {
tCCS *fiber; /* singly linked list of runnable fiber contexts */
tCCS *task; /* current task the nanokernel knows about */
tCCS *current; /* currently scheduled context (fiber or task) */
#ifdef CONFIG_HOST_TOOLS_SUPPORT
tCCS *contexts; /* singly linked list of ALL fiber+tasks */
#endif
#ifdef CONFIG_FP_SHARING
tCCS *current_fp; /* context (fiber or task) that owns the FP regs */
#endif
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
int32_t idle; /* Number of ticks for kernel idling */
#endif
char *rirq_sp; /* regular IRQ stack pointer base */
/*
 * FIRQ stack pointer is installed once in the second bank's SP, so
 * there is no need to track it in _NanoKernel.
 */
struct firq_regs firq_regs; /* loop registers saved by FIRQ software */
};
typedef struct s_NANO tNANO;
/* the single, global nanokernel state instance */
extern tNANO _NanoKernel;
#ifdef CONFIG_CPU_ARCV2
#include <v2/cache.h>
#include <v2/irq.h>
#endif
/*
 * nanoArchInit - architecture-specific nanokernel initialization
 *
 * Sets up the instruction cache (_icache_setup) and the interrupt handling
 * machinery (_irq_setup) declared in <v2/cache.h> and <v2/irq.h>.
 */
static ALWAYS_INLINE void nanoArchInit(void)
{
_icache_setup();
_irq_setup();
}
/*******************************************************************************
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* Records <value> as the value the fiber's pending _Swap() invocation will
* return. It is assumed that the specified <fiber> is pending, and thus
* the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void fiberRtnValueSet(tCCS *fiber, unsigned int value)
{
fiber->return_value = value;
}
/*******************************************************************************
*
* _IS_IN_ISR - indicates if kernel is handling interrupt
*
* Reads the ARCv2 AUX_IRQ_ACT auxiliary register: a set bit in its low 16
* bits means an interrupt is active at the corresponding priority level.
*
* RETURNS: 1 if interrupt handler is executed, 0 otherwise
*
* \NOMANUAL
*/
static ALWAYS_INLINE int _IS_IN_ISR(void)
{
	uint32_t active = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);

	return (active & 0xffff) ? 1 : 0;
}
/* context/scheduler primitives implemented elsewhere in the kernel */
extern void _InsertCCS(tCCS **, tCCS *);
extern void *_NewContext(char *, unsigned, _ContextEntry,
_ContextArg, _ContextArg, _ContextArg,
int, unsigned);
extern unsigned int _Swap(unsigned int);
extern void nanoCpuAtomicIdle(unsigned int);
extern void _ContextEntryWrapper(void);
/*
 * _IntLibInit - interrupt library initialization stub
 *
 * Intentionally empty on ARCv2; the portable kernel code calls it
 * unconditionally, so a definition must exist.
 */
static inline void _IntLibInit(void)
{
/* nothing needed, here because the kernel requires it */
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _NANOK__H_ */

View File

@ -0,0 +1,66 @@
/* cache.h - cache helper functions (ARC) */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* This file contains private nanokernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*/
#ifndef _ARCV2_CACHE__H_
#define _ARCV2_CACHE__H_
#include <nanokernel/cpu.h>
#ifndef _ASMLANGUAGE
/* bit values for the _ARC_V2_IC_CTRL aux register */
#define CACHE_ENABLE 0x00
#define CACHE_DISABLE 0x01
#define CACHE_DIRECT 0x00
#define CACHE_CACHE_CONTROLLED 0x20
/*
 * _icache_setup - configure the I-cache
 *
 * Enables the instruction cache and sets it to direct access mode by
 * writing the _ARC_V2_IC_CTRL auxiliary register.
 */
static ALWAYS_INLINE void _icache_setup(void)
{
uint32_t icache_config = (
CACHE_DIRECT | /* direct mapping (one-way assoc.) */
CACHE_ENABLE /* i-cache enabled */
);
_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
}
#endif /* _ASMLANGUAGE */
#endif /* _ARCV2_CACHE__H_ */

77
arch/arc/include/v2/irq.h Normal file
View File

@ -0,0 +1,77 @@
/* irq.h - interrupt helper functions (ARC) */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* This file contains private nanokernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*/
#ifndef _ARCV2_IRQ__H_
#define _ARCV2_IRQ__H_
#define _ARC_V2_AUX_IRQ_CTRL_BLINK (1 << 9)
#define _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS (1 << 10)
#define _ARC_V2_AUX_IRQ_CTRL_14_REGS 7
#define _ARC_V2_AUX_IRQ_CTRL_16_REGS 8
#define _ARC_V2_AUX_IRQ_CTRL_32_REGS 16
#define _ARC_V2_DEF_IRQ_LEVEL 15
#define _ARC_V2_WAKE_IRQ_LEVEL 15
#ifndef _ASMLANGUAGE
/* installs the FIRQ stack pointer into the second register bank's SP */
extern void _firq_stack_setup(void);
/* base of the interrupt stack, allocated elsewhere in the kernel */
extern char _InterruptStack[];
/*
 * _irq_setup
 *
 * Configures interrupt handling parameters: which registers the CPU
 * auto-saves on a regular IRQ (via AUX_IRQ_CTRL), the sleep/wake interrupt
 * level, the regular IRQ stack pointer base, and the FIRQ stack.
 */
static ALWAYS_INLINE void _irq_setup(void)
{
uint32_t aux_irq_ctrl_value = (
_ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */
_ARC_V2_AUX_IRQ_CTRL_BLINK | /* save blink */
_ARC_V2_AUX_IRQ_CTRL_14_REGS /* save r0 -> r13 (caller-saved) */
);
nano_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
/* regular IRQ stack grows down from the top of _InterruptStack */
_NanoKernel.rirq_sp = _InterruptStack + CONFIG_ISR_STACK_SIZE;
_firq_stack_setup();
}
#endif /* _ASMLANGUAGE */
#endif /* _ARCV2_IRQ__H_ */

View File

@ -0,0 +1,103 @@
/* arcv2_irq_unit.c - ARCv2 Interrupt Unit device driver */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* The ARCv2 interrupt unit has 16 allocated exceptions associated with
* vectors 0 to 15 and 240 interrupts associated with vectors 16 to 255.
* The interrupt unit is optional in the ARCv2-based processors. When
* building a processor, you can configure the processor to include an
* interrupt unit. The ARCv2 interrupt unit is highly programmable.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <board.h>
/*
 * _arc_v2_irq_unit_init - initialize the interrupt unit device driver
 *
 * Initializes the interrupt unit device driver and the device itself:
 * every interrupt vector (16 through 255) is selected in turn, given the
 * default priority 1, disabled, and configured as level-triggered.
 *
 * Interrupts are still locked at this point, so there is no need to protect
 * the window between a write to IRQ_SELECT and subsequent writes to the
 * selected IRQ's registers.
 *
 * RETURNS: N/A
 */
void _arc_v2_irq_unit_init(void)
{
	int vector; /* interrupt vector being programmed */

	for (vector = 16; vector < 256; vector++) {
		_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, vector);
		_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, 1);
		_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, _ARC_V2_INT_DISABLE);
		_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, _ARC_V2_INT_LEVEL);
	}
}
/*
 * _arc_v2_irq_unit_int_eoi - send EOI signal to interrupt unit
 *
 * This routine sends an EOI (End Of Interrupt) signal to the interrupt unit
 * to clear a pulse-triggered interrupt: it selects <irq> via IRQ_SELECT,
 * then writes 1 to IRQ_PULSE_CANCEL.
 *
 * Interrupts must be locked or the ISR operating at P0 when invoking this
 * function.
 *
 * RETURNS: N/A
 */
void _arc_v2_irq_unit_int_eoi(int irq)
{
_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
_arc_v2_aux_reg_write(_ARC_V2_IRQ_PULSE_CANCEL, 1);
}
/*
 * _arc_v2_irq_unit_trigger_set - sets an IRQ line to level/pulse trigger
 *
 * Sets the IRQ line <irq> to trigger an interrupt based on the level or the
 * edge of the signal. Valid values for <trigger> are _ARC_V2_INT_LEVEL and
 * _ARC_V2_INT_PULSE.
 *
 * RETURNS: N/A
 */
void _arc_v2_irq_unit_trigger_set(int irq, unsigned int trigger)
{
_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, trigger);
}

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2014-2015, Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <sections.h>
#include <misc/__assert.h>
#include <stdint.h>
#include <misc/util.h>
#include <string.h>
#include <board.h>
#include <drivers/uart.h>
#define NSIM_UART_DATA 0
#define NSIM_UART_STATUS 1
#define DATA_REG(n) (uart[n].regs + NSIM_UART_DATA)
#define STATUS_REG(n) (uart[n].regs + NSIM_UART_STATUS)
#define TXEMPTY 0x80 /* Transmit FIFO empty and next character can be sent */
/* per-port driver state for the nSIM simulated UART */
struct uart {
uint32_t regs; /* MM base address */
};
/* one state slot per configured system UART port; left uninitialized
 * (__noinit) until uart_init() fills it in */
static struct uart __noinit uart[CONFIG_UART_NUM_SYSTEM_PORTS];
/*
 * uart_init - initialize fake serial port
 * @which: port number
 * @init_info: pointer to initialization information
 *
 * Only records the register base address; the simulated device needs no
 * further programming. Interrupts are locked around the update so the
 * state slot is never observed half-written.
 */
void uart_init(int which, const struct uart_init_info * const init_info)
{
int key = irq_lock();
uart[which].regs = init_info->regs;
irq_unlock(key);
}
/*
 * uart_poll_out - output a character to serial port
 * @port: port number
 * @c: character to output
 *
 * Busy-waits on the status register's TXEMPTY bit, then writes the
 * character to the data register. Returns the character sent.
 */
unsigned char uart_poll_out(int port, unsigned char c)
{
/* wait for transmitter to be ready to accept a character */
while ((_arc_v2_aux_reg_read(STATUS_REG(port)) & TXEMPTY) == 0)
;
_arc_v2_aux_reg_write(DATA_REG(port), c);
return c;
}

View File

@ -0,0 +1,247 @@
/* arcv2_timer0.c - ARC timer 0 device driver */
/*
* Copyright (c) 2014-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements a VxMicro device driver for the ARCv2 processor timer 0
and provides the standard "system clock driver" interfaces.
\INTERNAL IMPLEMENTATION DETAILS
The ARCv2 processor timer provides a 32-bit incrementing, wrap-to-zero counter.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
#include <nanokernel/arc/v2/aux_regs.h>
#include <clock_vars.h>
#include <drivers/system_timer.h>
/*
* A board support package's board.h header must provide definitions for the
* following constants:
*
* CONFIG_ARCV2_TIMER0_CLOCK_FREQ
*
* This is the sysTick input clock frequency.
*/
#include <board.h>
#define _ARC_V2_TMR_CTRL_IE 0x1 /* interrupt enable */
#define _ARC_V2_TMR_CTRL_NH 0x2 /* count only while not halted */
#define _ARC_V2_TMR_CTRL_W 0x4 /* watchdog mode enable */
#define _ARC_V2_TMR_CTRL_IP 0x8 /* interrupt pending flag */
/* running total of timer count; advanced by one tick's worth of cycles in
 * _timer_int_handler and combined with the live count in timer_read() */
static uint32_t accumulatedCount = 0;
/*******************************************************************************
*
* enable - enable the timer with the given limit/countup value
*
* This routine sets up the timer for operation by:
* - setting value to which the timer will count up to;
* - setting the timer's start value to zero; and
* - enabling interrupt generation.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void enable(
uint32_t count /* interrupt triggers when up-counter reaches this value */
)
{
_arc_v2_aux_reg_write(_ARC_V2_TMR0_LIMIT, count); /* write limit value */
/* count only when not halted for debug and enable interrupts */
_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL,
_ARC_V2_TMR_CTRL_NH | _ARC_V2_TMR_CTRL_IE);
_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, 0); /* write the start value */
}
/*******************************************************************************
*
* count_get - get the current counter value
*
* This routine gets the value from the timer's count register (TMR0_COUNT).
* This value is the 'time' elapsed from the starting count (assumed to be 0).
*
* RETURNS: the current counter value
*
* \NOMANUAL
*/
static ALWAYS_INLINE uint32_t count_get(void)
{
return _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
}
/*******************************************************************************
*
* limit_get - get the limit/countup value
*
* This routine gets the value from the timer's limit register (TMR0_LIMIT),
* which is the value to which the timer will count up to.
*
* RETURNS: the limit value
*
* \NOMANUAL
*/
static ALWAYS_INLINE uint32_t limit_get(void)
{
return _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
}
/*******************************************************************************
*
* _timer_int_handler - system clock periodic tick handler
*
* This routine handles the system clock periodic tick interrupt: it clears
* the timer's interrupt-pending flag, advances the cycle accumulator and
* nanoTicks, and expires any nanokernel timers whose delay has elapsed.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _timer_int_handler(void *unused)
{
/* NH and IE stay set; writing the control register with IP clear acks
 * the interrupt */
uint32_t zero_ip_bit = _ARC_V2_TMR_CTRL_NH | _ARC_V2_TMR_CTRL_IE;
ARG_UNUSED(unused);
/* clear the interrupt by writing 0 to IP bit of the control register */
_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL, zero_ip_bit);
accumulatedCount += sys_clock_hw_cycles_per_tick;
nanoTicks++;
if (nanoTimerList) {
/* list is delta-queued: only the head's tick count is decremented */
nanoTimerList->ticks--;
/* expire every timer at the head that has reached zero ticks */
while (nanoTimerList && (!nanoTimerList->ticks)) {
struct nano_timer *expired = nanoTimerList;
struct nano_lifo *lifo = &expired->lifo;
nanoTimerList = expired->link;
/* wake the waiter by handing its user data back via the lifo */
nano_fiber_lifo_put(lifo, expired->userData);
}
}
}
/*******************************************************************************
*
* timer_driver - initialize and enable the system clock
*
* This routine is used to program the ARCv2 timer to deliver interrupts at
* the rate specified via the 'sys_clock_hw_cycles_per_tick' reload value:
* the timer is first silenced, the handler is connected, the limit/control
* registers are programmed, and finally the interrupt line is unmasked.
*
* RETURNS: N/A
*/
void timer_driver(
	int priority /* priority parameter ignored by this driver */
	)
{
	int irq = CONFIG_ARCV2_TIMER0_INT_LVL;
	int prio = CONFIG_ARCV2_TIMER0_INT_PRI;

	ARG_UNUSED(priority);

	/* ensure that the timer will not generate interrupts */
	_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL, 0);
	_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, 0); /* clear the count value */

	(void)irq_connect(irq, prio, _timer_int_handler, 0);

	/*
	 * Set the reload value to achieve the configured tick rate, enable the
	 * counter and interrupt generation.
	 */
	enable(sys_clock_hw_cycles_per_tick - 1);

	/* everything has been configured: safe to enable the interrupt */
	irq_enable(irq); /* same line irq_connect() used above */
}
/*******************************************************************************
*
* timer_read - read the BSP timer hardware
*
* This routine returns the current time in terms of timer hardware clock
* cycles: the cycles accumulated over completed ticks plus the timer's
* live count within the current tick.
*
* RETURNS: up counter of elapsed clock cycles
*/
uint32_t timer_read(void)
{
return (accumulatedCount + count_get());
}
#if defined(CONFIG_SYSTEM_TIMER_DISABLE)
/*******************************************************************************
*
* timer_disable - stop announcing ticks into the kernel
*
* This routine disables timer interrupt generation and delivery.
* Note that the timer's counting cannot be stopped by software.
*
* RETURNS: N/A
*/
void timer_disable(void)
{
unsigned int key; /* interrupt lock level */
uint32_t ctrl_val; /* timer control register value */
key = irq_lock();
/* disable interrupt generation at the timer (clear the IE control bit) */
ctrl_val = _arc_v2_aux_reg_read(_ARC_V2_TMR0_CONTROL);
_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL, ctrl_val & ~_ARC_V2_TMR_CTRL_IE);
irq_unlock(key);
/* disable interrupt in the interrupt controller */
irq_disable(CONFIG_ARCV2_TIMER0_INT_LVL);
}
#endif /* CONFIG_SYSTEM_TIMER_DISABLE */

117
arch/arm/bsp/CortexM/nmi.c Normal file
View File

@ -0,0 +1,117 @@
/* nmi.c - NMI handler infrastructure */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Provides a boot time handler that simply hangs in a sleep loop, and a run time
handler that resets the CPU. Also provides a mechanism for hooking a custom
run time handler.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <misc/printk.h>
#include <toolchain.h>
#include <sections.h>
extern void _SysNmiOnReset(void);
#if !defined(CONFIG_RUNTIME_NMI)
#define handler _SysNmiOnReset
#endif
#ifdef CONFIG_RUNTIME_NMI
typedef void (*_NmiHandler_t)(void);

/*
 * Currently installed runtime NMI handler; starts out as the reset-time
 * handler and is replaced via _NmiInit() or _NmiHandlerSet().
 *
 * Fix: the original 'static handler = _SysNmiOnReset;' relied on C89
 * implicit int — invalid since C99, and an int cannot legally hold a
 * function pointer. Give the object its proper function-pointer type.
 */
static _NmiHandler_t handler = _SysNmiOnReset;
/*******************************************************************************
 *
 * _DefaultHandler - default NMI handler installed when kernel is up
 *
 * Installed by _NmiInit(): reports the NMI on the console, then reboots
 * the target via the SCB system-reset request.
 *
 * RETURNS: N/A
 */

static void _DefaultHandler(void)
{
	/* let the user know what happened before the target goes down */
	printk("NMI received! Rebooting...\n");
	_ScbSystemReset();
}
/*******************************************************************************
 *
 * _NmiInit - install default runtime NMI handler
 *
 * Meant to be called by BSP code that wants a simple NMI handler which
 * reboots the target. Should be called after the console is initialized,
 * since the default handler prints a message.
 *
 * RETURNS: N/A
 */

void _NmiInit(void)
{
	/* from now on an NMI reports itself and resets the board */
	handler = _DefaultHandler;
}
/*******************************************************************************
 *
 * _NmiHandlerSet - install a custom runtime NMI handler
 *
 * Meant to be called by BSP code that wants its own NMI behavior. If the
 * custom handler outputs to the console, install it only after the console
 * is initialized.
 *
 * RETURNS: N/A
 */

void _NmiHandlerSet(void (*pHandler)(void))
{
	/* replace whatever handler is currently installed */
	handler = pHandler;
}
#endif /* CONFIG_RUNTIME_NMI */
/*******************************************************************************
 *
 * __nmi - handler installed in the vector table
 *
 * Delegates to whatever is currently installed in 'handler' (either the
 * static function pointer, or the compile-time _SysNmiOnReset alias when
 * CONFIG_RUNTIME_NMI is off), then performs the common exception exit.
 *
 * RETURNS: N/A
 */

void __nmi(void)
{
	handler();
	_ExcExit();
}

View File

@ -0,0 +1,108 @@
/* prep_c.c - full C support initialization */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Initialization of full C support: zero the .bss, copy the .data if XIP,
call _Cstart().
Stack is available in this module, but not the global data/bss until their
initialization is performed.
*/
#include <stdint.h>
#include <toolchain.h>
#include <linker-defs.h>
/*******************************************************************************
 *
 * bssZero - clear BSS
 *
 * This routine clears the BSS region word by word, so all bytes are 0.
 *
 * RETURNS: N/A
 */

static void bssZero(void)
{
	/* volatile: force real stores; runs before normal C environment is up */
	volatile uint32_t *dst = (uint32_t *)&__bss_start;
	unsigned int wordsLeft = (unsigned int)&__bss_num_words;

	while (wordsLeft-- != 0) {
		*dst++ = 0;
	}
}
/*******************************************************************************
 *
 * dataCopy - copy the data section from ROM to RAM
 *
 * This routine copies the data section, word by word, from its ROM load
 * address to its RAM run address (XIP images only).
 *
 * RETURNS: N/A
 */

#ifdef CONFIG_XIP
static void dataCopy(void)
{
	/* volatile: force real loads/stores during early boot */
	volatile uint32_t *src = (uint32_t *)&__data_rom_start;
	volatile uint32_t *dst = (uint32_t *)&__data_ram_start;
	unsigned int wordsLeft = (unsigned int)&__data_num_words;

	while (wordsLeft-- != 0) {
		*dst++ = *src++;
	}
}
#else
static void dataCopy(void)
{
	/* non-XIP image: .data is already in RAM, nothing to copy */
}
#endif
extern FUNC_NORETURN void _Cstart(void);
/*******************************************************************************
 *
 * _PrepC - prepare to and run C code
 *
 * This routine prepares for the execution of and runs C code: it zeroes
 * .bss, copies .data from ROM if the image is XIP, then enters the kernel.
 * The call order is significant; only the stack is usable before the first
 * two steps complete.
 *
 * RETURNS: N/A (does not return)
 */
void _PrepC(void)
{
	bssZero();	/* globals must read as 0 before any C code uses them */
	dataCopy();	/* no-op unless CONFIG_XIP (see dataCopy above) */
	_Cstart();	/* hand control to the kernel: declared FUNC_NORETURN */
	CODE_UNREACHABLE;
}

View File

@ -0,0 +1,93 @@
/* reset_s.s - reset handler */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Reset handler that prepares the system for running C code.
*/
#define _ASMLANGUAGE
#include <board.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
#include "vector_table.h"
_ASM_FILE_PROLOGUE
GTEXT(__reset)
/*******************************************************************************
*
* __reset - reset vector
*
* Ran when the system comes out of reset. The processor is in thread mode with
* privileged level. At this point, the main stack pointer (MSP) is already
* pointing to a valid area in SRAM.
*
* Locking interrupts prevents anything but NMIs and hard faults from
* interrupting the CPU. A default NMI handler is already in place in the
* vector table, and the boot code should not generate hard fault, or we're in
* deep trouble.
*
* We want to use the process stack pointer (PSP) instead of the MSP, since the
* MSP is to be set up to point to the one-and-only interrupt stack during later
* boot. That would not be possible if in use for running C code.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,__reset)
/* lock interrupts: will get unlocked when switch to main task */
movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
msr BASEPRI, r0		/* mask all configurable-priority exceptions */
/*
 * Set PSP and use it to boot without using MSP, so that it
 * gets set to _InterruptStack during nanoInit().
 */
ldr r0, =__CORTEXM_BOOT_PSP
msr PSP, r0
movs.n r0, #2 /* switch to using PSP (bit1 of CONTROL reg) */
msr CONTROL, r0
#ifdef CONFIG_WDOG_INIT
/* board-specific watchdog initialization is necessary */
bl _WdogInit
#endif
/* tail-jump: _PrepC never returns */
b _PrepC

109
arch/arm/bsp/CortexM/scb.c Normal file
View File

@ -0,0 +1,109 @@
/* scb.h - ARM CORTEX-M3 System Control Block interface */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Most of the SCB interface consists of simple bit-flipping methods, and is
implemented as inline functions in scb.h. This module thus contains only data
definitions and more complex routines, if needed.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <misc/util.h>
#define SCB_AIRCR_VECTKEY_EN_W 0x05FA
/*******************************************************************************
*
* _ScbSystemReset - reset the system
*
* This routine resets the processor.
*
* RETURNS: N/A
*/
void _ScbSystemReset(void)
{
union __aircr reg;
reg.val = __scs.scb.aircr.val;
reg.bit.vectkey = SCB_AIRCR_VECTKEY_EN_W;
reg.bit.sysresetreq = 1;
__scs.scb.aircr.val = reg.val;
}
/*******************************************************************************
 *
 * _ScbNumPriGroupSet - set the number of priority groups based on the number
 * of exception priorities desired
 *
 * Exception priorities can be divided in priority groups, inside which there
 * is no preemption. The priorities inside a group are only used to decide
 * which exception will run when more than one is ready to be handled.
 *
 * The number of priorities has to be a power of two, from 1 to 128.
 *
 * RETURNS: N/A
 */

void _ScbNumPriGroupSet(unsigned int n /* number of priorities */
			)
{
	unsigned int msbPos;
	union __aircr reg;

	__ASSERT(_IsPowerOfTwo(n) && (n <= 128),
		 "invalid number of priorities");

	/* find_first_set() is 1-based: for n == 2^k it yields k + 1 */
	msbPos = find_first_set(n);

	reg.val = __scs.scb.aircr.val;

	/* num pri   bit set   prigroup
	 * ---------------------------------
	 *       1         1          7
	 *       2         2          6
	 *       4         3          5
	 *       8         4          4
	 *      16         5          3
	 *      32         6          2
	 *      64         7          1
	 *     128         8          0
	 */
	reg.bit.prigroup = 8 - msbPos;

	/* the AIRCR write is only accepted with the VECTKEY unlock value */
	reg.bit.vectkey = SCB_AIRCR_VECTKEY_EN_W;
	__scs.scb.aircr.val = reg.val;
}

View File

@ -0,0 +1,46 @@
/* scs.c - ARM CORTEX-M Series System Control Space */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Most of the SCS interface consists of simple bit-flipping methods, and is
implemented as inline functions in scs.h. This module thus contains only data
definitions and more complex routines, if needed.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <sections.h>
/*
 * Single overlay object for the System Control Space: the __scs_section
 * attribute makes the linker always put this object at 0xe000e000, so
 * accesses to its members hit the memory-mapped SCS registers directly.
 */
volatile struct __scs __scs_section __scs;

View File

@ -0,0 +1,78 @@
/* vector_table.h - definitions for the boot vector table */
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Definitions for the boot vector table.
System exception handler names all have the same format:
__<exception name with underscores>
No other symbol has the same format, so they are easy to spot.
*/
#ifndef _VECTOR_TABLE__H_
#define _VECTOR_TABLE__H_
#ifdef _ASMLANGUAGE
#include <board.h>
#include <toolchain.h>
#include <sections.h>
/* location of MSP and PSP upon boot: at the end of SRAM */
/* MSP sits 8 bytes below the top of SRAM; SRAM base assumed 0x20000000 */
.equ __CORTEXM_BOOT_MSP, (0x20000000 + SRAM_SIZE - 8)
/* PSP 0x100 below the MSP — presumably reserves boot MSP stack space;
 * NOTE(review): confirm 0x100 is enough for the deepest pre-kernel MSP use */
.equ __CORTEXM_BOOT_PSP, (__CORTEXM_BOOT_MSP - 0x100)
/* entry points and exception handlers referenced by the ROM vector table */
GTEXT(__start)
GTEXT(_VxMicroStart)
GTEXT(_VectorTableROM)
GTEXT(__reset)
GTEXT(__nmi)
GTEXT(__hard_fault)
GTEXT(__mpu_fault)
GTEXT(__bus_fault)
GTEXT(__usage_fault)
GTEXT(__svc)
GTEXT(__debug_monitor)
GTEXT(__pendsv)
GTEXT(__reserved)
GTEXT(_PrepC)
GTEXT(_IsrWrapper)
#endif /* _ASMLANGUAGE */
#endif /* _VECTOR_TABLE__H_ */

View File

@ -0,0 +1,74 @@
/* vector_table.s - populated vector table in ROM */
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Vector table in ROM for starting system. The reset vector is the system entry
point, ie. the first instruction executed.
The table is populated with all the system exception handlers. The NMI vector
must be populated with a valid handler since it can happen at any time. The
rest should not be triggered until the kernel is ready to handle them.
*/
#define _ASMLANGUAGE
#include <board.h>
#include <toolchain.h>
#include <sections.h>
#include <drivers/system_timer.h>
#include "vector_table.h"
_ASM_FILE_PROLOGUE
/* Diab requires a __start symbol */
SECTION_SUBSEC_FUNC(exc_vector_table,_Start,__start)
SECTION_SUBSEC_FUNC(exc_vector_table,_Start,_VxMicroStart)
SECTION_SUBSEC_FUNC(exc_vector_table,_Start,_VectorTableROM)
/* entry order is fixed by the Cortex-M exception model */
.word __CORTEXM_BOOT_MSP	/* 0: initial main stack pointer */
.word __reset			/* 1: reset */
.word __nmi			/* 2: NMI */
.word __hard_fault		/* 3: hard fault */
.word __mpu_fault		/* 4: MPU (memory management) fault */
.word __bus_fault		/* 5: bus fault */
.word __usage_fault		/* 6: usage fault */
.word __reserved		/* 7: reserved */
.word __reserved		/* 8: reserved */
.word __reserved		/* 9: reserved */
.word __reserved		/* 10: reserved */
.word __svc			/* 11: SVCall */
.word __debug_monitor		/* 12: debug monitor */
.word __reserved		/* 13: reserved */
.word __pendsv			/* 14: PendSV */
.word _timer_int_handler	/* 15: SysTick -> system clock driver */

78
arch/arm/bsp/rand32.c Normal file
View File

@ -0,0 +1,78 @@
/* rand32.c - random number generator */
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides a non-random implementation of _Rand32Get(), which is not
meant to be used in a final product as a truly random number generator. It
was provided to allow testing of kernel stack canaries on a BSP that does not
(yet) provide a random number generator.
*/
#include <drivers/rand32.h>
#include <drivers/system_timer.h>
#if defined(CONFIG_TEST_RANDOM_GENERATOR)
#if defined(__GNUC__)
/*******************************************************************************
 *
 * _Rand32Init - initialize the random number generator
 *
 * The non-random number generator is stateless and needs no setup; this
 * routine exists only to satisfy the rand32 driver interface.
 *
 * RETURNS: N/A
 */

void _Rand32Init(void)
{
	/* intentionally empty */
}
/*******************************************************************************
 *
 * _Rand32Get - get a 32 bit random number
 *
 * The non-random number generator returns values based off the target's
 * clock counter, so successive calls normally yield ever-increasing values.
 * Not suitable as a real RNG; for test/canary use only.
 *
 * RETURNS: a 32-bit number
 */

uint32_t _Rand32Get(void)
{
	uint32_t clockBased = timer_read();

	return clockBased;
}
#endif /* __GNUC__ */
#endif /* CONFIG_TEST_RANDOM_GENERATOR */

View File

@ -0,0 +1,115 @@
/* sysFatalErrorHandler - ARM Cortex-M system fatal error handler */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides the _SysFatalErrorHandler() routine for Cortex-M BSPs.
*/
/* includes */
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <sections.h>
#include "board.h"
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PRINTK(...) printk(__VA_ARGS__)
#else
#define PRINTK(...)
#endif
#ifdef CONFIG_MICROKERNEL
extern void _TaskAbort(void);

/* microkernel build: report and abort the offending task */
static inline void abortNonEssentialTask(void)
{
	PRINTK("Fatal fault in task ! Aborting task.\n");
	_TaskAbort();
}
#define NON_ESSENTIAL_TASK_ABORT() abortNonEssentialTask()
#else
/* nanokernel-only build: there is no task to abort */
#define NON_ESSENTIAL_TASK_ABORT() \
	do { /* nothing */          \
	} while (0)
#endif
/*******************************************************************************
 *
 * _SysFatalErrorHandler - fatal error handler
 *
 * This routine implements the corrective action to be taken when the system
 * detects a fatal error: an ISR or essential context spins forever, a
 * non-essential fiber is aborted, and a non-essential task is handled per
 * NON_ESSENTIAL_TASK_ABORT(). This may let the system keep running with
 * degraded capabilities.
 *
 * System designers may wish to enhance or substitute this sample
 * implementation to take other actions, such as logging error (or debug)
 * information to a persistent repository and/or rebooting the system.
 *
 * RETURNS: N/A
 *
 * \NOMANUAL
 */

void _SysFatalErrorHandler(
	unsigned int reason, /* fatal error reason */
	const NANO_ESF * pEsf /* pointer to exception stack frame */
	)
{
	nano_context_type_t curCtx = context_type_get();

	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);

	/* unrecoverable: fault in an ISR or in an essential context */
	if ((curCtx == NANO_CTX_ISR) || _context_essential_check(NULL)) {
		const char *where;

		if (curCtx == NANO_CTX_ISR) {
			where = "ISR";
		} else if (curCtx == NANO_CTX_FIBER) {
			where = "essential fiber";
		} else {
			where = "essential task";
		}
		PRINTK("Fatal fault in %s ! Spinning...\n", where);
		for (;;) {
			/* spin forever */
		}
	}

	/* a non-essential fiber can simply be aborted */
	if (curCtx == NANO_CTX_FIBER) {
		PRINTK("Fatal fault in fiber ! Aborting fiber.\n");
		fiber_abort();
		return;
	}

	/* non-essential task: no-op unless CONFIG_MICROKERNEL */
	NON_ESSENTIAL_TASK_ABORT();
}

429
arch/arm/core/atomic.s Normal file
View File

@ -0,0 +1,429 @@
/* armAtomic.s - ARM atomic operations library */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This library provides routines to perform a number of atomic operations
on a memory location: add, subtract, increment, decrement, bitwise OR,
bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
/* exports */
GTEXT(atomic_set)
GTEXT(atomic_get)
GTEXT(atomic_add)
GTEXT(atomic_nand)
GTEXT(atomic_and)
GTEXT(atomic_or)
GTEXT(atomic_xor)
GTEXT(atomic_clear)
GTEXT(atomic_dec)
GTEXT(atomic_inc)
GTEXT(atomic_sub)
GTEXT(atomic_cas)
/*******************************************************************************
*
* atomic_clear - atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
/*
 * atomic_clear(target) == atomic_set(target, 0): load 0 into r1 (the
 * 'value' argument register) and reuse the atomic_set sequence below.
 * r0 (target) is passed straight through.
 * NOTE(review): relies on atomic_set being assembled immediately after
 * this code in the same subsection -- do not separate them.
 */
MOV r1, #0
/* fall through into atomic_set */
/*******************************************************************************
*
* atomic_set - atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
/*
 * In: r0 = target address, r1 = new value.
 * Out: r0 = previous contents of *target.
 * Classic load-exclusive/store-exclusive loop: if another agent touches
 * the location between LDREX and STREX, STREX writes non-zero to r12 and
 * the whole sequence restarts from the LDREX.
 */
LDREX r2, [r0] /* load old value and mark exclusive access */
STREX r12, r1, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_set /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_get - Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* long atomic_get
* (
* atomic_t * target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
SECTION_FUNC(TEXT, atomic_get)
/*
 * In: r0 = target address.  Out: r0 = *target.
 * A single aligned 32-bit LDR is sufficient here; no exclusive-access
 * loop is needed for a plain read.
 */
LDR r0, [r0]
MOV pc, lr
/*******************************************************************************
*
* atomic_inc - atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
/*
 * atomic_inc(target) == atomic_add(target, 1): load the addend into r1
 * and fall into atomic_add, which must immediately follow in this
 * subsection.
 */
MOV r1, #1
/* fall through into atomic_add */
/*******************************************************************************
*
* atomic_add - atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
/*
 * In: r0 = target address, r1 = value to add.
 * Out: r0 = contents of *target before the addition.
 * LDREX/STREX retry loop; restarts from LDREX if the exclusive store
 * is interfered with (r12 != 0).
 */
LDREX r2, [r0] /* load old value and mark exclusive access */
ADD r3, r2, r1 /* add word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_add /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_dec - atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec)
/*
 * atomic_dec(target) == atomic_sub(target, 1): load the subtrahend into
 * r1 and fall into atomic_sub, which must immediately follow in this
 * subsection.
 */
MOV r1, #1
/* fall through into atomic_sub */
/*******************************************************************************
*
* atomic_sub - atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
/*
 * In: r0 = target address, r1 = value to subtract.
 * Out: r0 = contents of *target before the subtraction.
 * LDREX/STREX retry loop; restarts from LDREX on exclusive-store failure.
 */
LDREX r2, [r0] /* load old value and mark exclusive access */
SUB r3, r2, r1 /* subtract word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_sub /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_nand - atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_nand)
/*
 * In: r0 = target address, r1 = operand.
 * Out: r0 = contents of *target before the operation.
 * Computes *target = ~(*target & value) via AND then MVN inside the
 * LDREX/STREX retry loop.
 */
LDREX r2, [r0] /* load old value and mark exclusive access */
AND r3, r2, r1 /* AND word */
MVN r3, r3 /* invert */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_nand /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_and - atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_and)
/*
 * In: r0 = target address, r1 = operand.
 * Out: r0 = contents of *target before the operation.
 * *target &= value, performed in an LDREX/STREX retry loop.
 */
LDREX r2, [r0] /* load old value and mark exclusive access */
AND r3, r2, r1 /* AND word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_and /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_or - atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_or)
/*
 * In: r0 = target address, r1 = operand.
 * Out: r0 = contents of *target before the operation.
 * *target |= value, performed in an LDREX/STREX retry loop.
 */
LDREX r2, [r0] /* load old value and mark exclusive access */
ORR r3, r2, r1 /* OR word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_or /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_xor - atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_xor)
/*
 * In: r0 = target address, r1 = operand.
 * Out: r0 = contents of *target before the operation.
 * *target ^= value, performed in an LDREX/STREX retry loop.
 */
LDREX r2, [r0] /* load old value and mark exclusive access */
EOR r3, r2, r1 /* XOR word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_xor /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_cas - atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap. testing that the contents of
* <target> contains <oldValue>, and if it does, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_cas)
/*
 * In: r0 = target address, r1 = expected old value, r2 = new value.
 * Out: r0 = 1 if the swap occurred, 0 otherwise.
 * The ITT NE block makes the two MOVNE instructions conditional so the
 * mismatch path returns 0 without attempting the store.
 * NOTE(review): the mismatch path returns without executing CLREX, so the
 * exclusive monitor stays armed until the next STREX/context event --
 * confirm this is acceptable on the target implementations.
 */
LDREX r3, [r0] /* load the value and mark exclusive access */
CMP r3, r1 /* if (*target != oldValue) */
ITT NE
MOVNE r0, #0 /* return FALSE */
MOVNE pc, lr
STREX r12, r2, [r0] /* try to store if equal */
TEQ r12, #0 /* store successful? */
BNE atomic_cas /* if not, retry */
MOV r0, #1 /* return TRUE if swap occurred */
MOV pc, lr

95
arch/arm/core/basepri.s Normal file
View File

@ -0,0 +1,95 @@
/* basepri.s - ARM Cortex-M interrupt locking via BASEPRI */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Provide irq_lock() and irq_unlock() via the BASEPRI register. This
allows locking up to a certain interrupt priority. VxMicro locks out priorities
2 and lower (higher numbered priorities), in essence leaving priorities 0 and 1
unlocked. This achieves two purposes:
1. The service call exception is installed at priority 0, allowing it to be
invoked with interrupts locked. This is needed since 'svc #0' is the
implementation of _Swap(), which is invoked with interrupts locked in the
common implementation of nanokernel objects.
2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain
interrupts to set their priority to 1, thus being allowed in when interrupts
are locked for regular interrupts.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(irq_lock)
GTEXT(irq_unlock)
/*******************************************************************************
*
* irq_lock - lock interrupts
*
* Prevent exceptions of priority lower than to the two highest priorities from
* interrupting the CPU.
*
* This function can be called recursively: it will return a key to return the
* state of interrupt locking to the previous level.
*
* RETURNS: a key to return to the previous interrupt locking level
*/
SECTION_FUNC(TEXT,irq_lock)
/*
 * Out: r0 = previous BASEPRI value (the "key" handed back to
 * irq_unlock() to restore the prior lock level).
 * Reads the old BASEPRI before raising it to _EXC_IRQ_DEFAULT_PRIO,
 * which masks all priorities at or below that level.
 */
movs.n r1, #_EXC_IRQ_DEFAULT_PRIO
mrs r0, BASEPRI
msr BASEPRI, r1
bx lr
/*******************************************************************************
*
* irq_unlock - unlock interrupts
*
* Return the state of interrupt locking to a previous level, passed in via the
* <key> parameter, obtained from a previous call to irq_lock().
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,irq_unlock)
/*
 * In: r0 = key previously returned by irq_lock().
 * Simply writes the saved value back to BASEPRI, restoring the
 * interrupt locking level in effect before the matching irq_lock().
 */
msr BASEPRI, r0
bx lr
.end

198
arch/arm/core/cpu_idle.s Normal file
View File

@ -0,0 +1,198 @@
/* cpu_idle.s - ARM CORTEX-M3 power management */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
*/
#define _ASMLANGUAGE
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
#ifdef CONFIG_TICKLESS_IDLE
#include <nanok.h>
#endif
_ASM_FILE_PROLOGUE
GTEXT(_CpuIdleInit)
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
GTEXT(_NanoIdleValGet)
GTEXT(_NanoIdleValClear)
#endif
GTEXT(nano_cpu_idle)
GTEXT(nano_cpu_atomic_idle)
#define _SCR_INIT_BITS _SCB_SCR_SEVONPEND
/*******************************************************************************
*
* _CpuIdleInit - initialization of CPU idle
*
* Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's
* duration.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _CpuIdleInit (void);
*/
SECTION_FUNC(TEXT, _CpuIdleInit)
/*
 * One-time idle setup: writes _SCR_INIT_BITS (SEVONPEND) into the
 * System Control Register so that pended interrupts generate an event
 * and wake a 'wfe' (see nano_cpu_atomic_idle). Clobbers r1, r2.
 */
ldr r1, =_SCB_SCR
movs.n r2, #_SCR_INIT_BITS
str r2, [r1]
bx lr
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
/*******************************************************************************
*
* _NanoIdleValGet - get the kernel idle setting
*
* Returns the nanokernel idle setting, in ticks. Only called by __systick().
*
* RETURNS: the requested number of ticks for the kernel to be idle
*
* C function prototype:
*
* int32_t _NanoIdleValGet (void);
*/
SECTION_FUNC(TEXT, _NanoIdleValGet)
/*
 * Out: r0 = _NanoKernel.idle (requested idle duration, in ticks).
 * Simple field load from the nanokernel control structure.
 */
ldr r0, =_NanoKernel
ldr r0, [r0, #__tNANO_idle_OFFSET]
bx lr
/*******************************************************************************
*
* _NanoIdleValClear - clear the kernel idle setting
*
* Sets the nanokernel idle setting to 0. Only called by __systick().
*
* RETURNS: N/A
*
* C function prototype:
*
* void _NanoIdleValClear (void);
*/
SECTION_FUNC(TEXT, _NanoIdleValClear)
/*
 * Zeroes _NanoKernel.idle. 'eors.n r1, r1' is a compact way to
 * produce 0 in r1 without a literal load. Clobbers r0, r1.
 */
ldr r0, =_NanoKernel
eors.n r1, r1
str r1, [r0, #__tNANO_idle_OFFSET]
bx lr
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/*******************************************************************************
*
* nano_cpu_idle - power save idle routine for ARM Cortex-M
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _SysPowerSaveIdle in the microkernel when the
* '_SysPowerSaveFlag' variable is non-zero. The ARM 'wfi' instruction
* will be issued, causing a low-power consumption sleep mode.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
/*
 * Sleep until any interrupt: BASEPRI is cleared first so that no
 * priority is masked while waiting, then 'wfi' stops the core until an
 * interrupt arrives and is serviced normally.
 */
/* clear BASEPRI so wfi is awakened by incoming interrupts */
eors.n r0, r0
msr BASEPRI, r0
wfi
bx lr
/*******************************************************************************
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, of the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
/*
 * r0: interrupt mask from caller
 * r1: zero, for setting BASEPRI (needs a register)
 */
eors.n r1, r1
/*
 * Lock PRIMASK while sleeping: wfe will still get interrupted by incoming
 * interrupts but the CPU will not service them right away.
 */
cpsid i
/*
 * No need to set SEVONPEND, it's set once in _CpuIdleInit() and never
 * touched again.
 */
/* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
msr BASEPRI, r1
wfe
/* restore the caller's interrupt lockout level before letting the
 * pended interrupt (if any) be serviced when PRIMASK is released below.
 */
msr BASEPRI, r0
cpsie i
bx lr

128
arch/arm/core/exc_exit.s Normal file
View File

@ -0,0 +1,128 @@
/* exc_exit.s - ARM CORTEX-M3 exception/interrupt exit API */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Provides functions for performing kernel handling when exiting exceptions or
interrupts that are installed directly in the vector table (i.e. that are not
wrapped around by _IsrWrapper()).
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_NanoKernel)
#if CONFIG_GDB_INFO
/* With GDB support the early-exit must still run the GDB-stub epilogue,
 * so it branches to _ExcExitWithGdbStub instead of returning directly.
 */
#define _EXIT_EXC_IF_FIBER_PREEMPTED beq _ExcExitWithGdbStub
#else
/* Without GDB support, return straight to the caller when the Z flag is
 * set (condition computed by the caller in _ExcExit).
 * NOTE(review): 'name: .macro' is unusual -- GNU as normally spells this
 * '.macro name'; confirm the toolchain in use accepts this form.
 */
_EXIT_EXC_IF_FIBER_PREEMPTED: .macro
it eq
bxeq lr
.endm
#endif
/* same early-exit sequence, reused under a name that documents the
 * second condition checked in _ExcExit (no fiber ready).
 */
#define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED
/*******************************************************************************
*
* _IntExit - kernel housekeeping when exiting interrupt handler installed
* directly in vector table
*
* VxMicro allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to be
* invoked directly without going through a software interrupt table. However,
* upon exiting the ISR, some kernel work must still be performed, namely
* possible context switching. While ISRs connected in the software interrupt
* table do this automatically via a wrapper, ISRs connected directly in the
* vector table must invoke _IntExit() as the *very last* action before
* returning.
*
* e.g.
*
* void myISR(void)
* {
* printk("in %s\n", __FUNCTION__);
* doStuff();
* _IntExit();
* }
*
* RETURNS: N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
/* _IntExit falls through to _ExcExit (they are aliases of each other) */
/*******************************************************************************
*
* _ExcExit - kernel housekeeping when exiting exception handler installed
* directly in vector table
*
* See _IntExit().
*
* RETURNS: N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
/*
 * Decide whether a context switch is needed on exception return:
 * 1. if the interrupted thread is a fiber (not PREEMPTIBLE), return;
 * 2. if no fiber is ready to run, return;
 * 3. otherwise pend PendSV, which performs the actual switch after this
 *    handler returns.
 * The _EXIT_EXC_* macros consume the Z flag set by ands.w / cmp below.
 */
ldr r1, =_NanoKernel
/* is the current thread preemptible (task) ? */
ldr r2, [r1, #__tNANO_flags_OFFSET]
ands.w r2, #PREEMPTIBLE
_EXIT_EXC_IF_FIBER_PREEMPTED
/* is there a fiber ready ? */
ldr r2, [r1, #__tNANO_fiber_OFFSET]
cmp r2, #0
_EXIT_EXC_IF_FIBER_NOT_READY
/* context switch required, pend the PendSV exception */
ldr r1, =_SCS_ICSR
ldr r2, =_SCS_ICSR_PENDSV
str r2, [r1]
_ExcExitWithGdbStub:
/* expands to nothing unless GDB-stub support is configured */
_GDB_STUB_EXC_EXIT
bx lr

414
arch/arm/core/fault.c Normal file
View File

@ -0,0 +1,414 @@
/* fault.c - common fault handler for ARM Cortex-M */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Common fault handler for ARM Cortex-M processors.
*/
#include <toolchain.h>
#include <sections.h>
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <nanok.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
#if (CONFIG_FAULT_DUMP > 0)
#define FAULT_DUMP(esf, fault) _FaultDump(esf, fault)
#else
#define FAULT_DUMP(esf, fault) \
do { \
(void) esf; \
(void) fault; \
} while ((0))
#endif
#if (CONFIG_FAULT_DUMP == 1)
/*******************************************************************************
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 1)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form).
*
* eg. (precise bus error escalated to hard fault):
*
* Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3
* HARD FAULT: Escalation (see below)!
* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
* BFAR: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/*
 * Short-form fault dump (CONFIG_FAULT_DUMP == 1).
 *
 * esf:   exception stack frame captured at fault time (esf->pc is the
 *        faulting instruction address pushed by hardware)
 * fault: active exception vector number (3 == hard fault)
 */
void _FaultDump(const NANO_ESF *esf, int fault)
{
	int escalation = 0;

	PR_EXC("Fault! EXC #%d, Thread: %x, instr @ %x\n",
	       fault,
	       context_self_get(),
	       esf->pc);

	if (3 == fault) { /* hard fault */
		escalation = _ScbHardFaultIsForced();
		/*
		 * BUGFIX: the "Bus fault on vector table read" alternative
		 * carried its own trailing '\n' even though the format string
		 * already ends in '\n', producing a spurious blank line.
		 */
		PR_EXC("HARD FAULT: %s\n",
		       escalation ? "Escalation (see below)!"
				  : "Bus fault on vector table read");
	}

	/* raw configurable-fault status registers, always shown */
	PR_EXC("MMFSR: %x, BFSR: %x, UFSR: %x\n",
	       __scs.scb.cfsr.byte.mmfsr.val,
	       __scs.scb.cfsr.byte.bfsr.val,
	       __scs.scb.cfsr.byte.ufsr.val);

	if (_ScbMemFaultIsMmfarValid()) {
		PR_EXC("MMFAR: %x\n", _ScbMemFaultAddrGet());
		if (escalation) {
			/* escalated fault handled here: reset MMFAR state */
			_ScbMemFaultMmfarReset();
		}
	}

	if (_ScbBusFaultIsBfarValid()) {
		PR_EXC("BFAR: %x\n", _ScbBusFaultAddrGet());
		if (escalation) {
			/* escalated fault handled here: reset BFAR state */
			_ScbBusFaultBfarReset();
		}
	}

	/* clear USFR sticky bits */
	_ScbUsageFaultAllFaultsReset();
}
#endif
#if (CONFIG_FAULT_DUMP == 2)
/*******************************************************************************
*
* _FaultContextShow - dump context information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/* Print the executing thread ID and the faulting PC (esf->pc is the
 * program counter pushed by hardware on exception entry).
 */
static void _FaultContextShow(const NANO_ESF *esf)
{
PR_EXC(" Executing context ID (thread): 0x%x\n"
" Faulting instruction address: 0x%x\n",
context_self_get(),
esf->pc);
}
/*******************************************************************************
*
* _MpuFault - dump MPU fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/* Decode and print an MPU (MemManage) fault from the MMFSR bits.
 * fromHardFault is non-zero when invoked via _HardFault() for an
 * escalated fault; in that case the MMFAR state is reset here because
 * this handler, not the original one, consumed it.
 */
static void _MpuFault(const NANO_ESF *esf,
int fromHardFault)
{
PR_EXC("***** MPU FAULT *****\n");
_FaultContextShow(esf);
/* the causes below are mutually exclusive per the else-if chain */
if (_ScbMemFaultIsStacking()) {
PR_EXC(" Stacking error\n");
} else if (_ScbMemFaultIsUnstacking()) {
PR_EXC(" Unstacking error\n");
} else if (_ScbMemFaultIsDataAccessViolation()) {
PR_EXC(" Data Access Violation\n");
if (_ScbMemFaultIsMmfarValid()) {
PR_EXC(" Address: 0x%x\n", _ScbMemFaultAddrGet());
if (fromHardFault) {
_ScbMemFaultMmfarReset();
}
}
} else if (_ScbMemFaultIsInstrAccessViolation()) {
PR_EXC(" Instruction Access Violation\n");
}
}
/*******************************************************************************
*
* _BusFault - dump bus fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/* Decode and print a bus fault from the BFSR bits.
 * fromHardFault is non-zero when invoked via _HardFault() for an
 * escalated fault; in that case the BFAR state is reset here.
 */
static void _BusFault(const NANO_ESF *esf,
int fromHardFault)
{
PR_EXC("***** BUS FAULT *****\n");
_FaultContextShow(esf);
if (_ScbBusFaultIsStacking()) {
PR_EXC(" Stacking error\n");
} else if (_ScbBusFaultIsUnstacking()) {
PR_EXC(" Unstacking error\n");
} else if (_ScbBusFaultIsPrecise()) {
PR_EXC(" Precise data bus error\n");
if (_ScbBusFaultIsBfarValid()) {
PR_EXC(" Address: 0x%x\n", _ScbBusFaultAddrGet());
if (fromHardFault) {
_ScbBusFaultBfarReset();
}
}
/* it's possible to have both a precise and imprecise fault */
if (_ScbBusFaultIsImprecise()) {
PR_EXC(" Imprecise data bus error\n");
}
} else if (_ScbBusFaultIsImprecise()) {
PR_EXC(" Imprecise data bus error\n");
} else if (_ScbBusFaultIsInstrBusErr()) {
PR_EXC(" Instruction bus error\n");
}
}
/*******************************************************************************
*
* _UsageFault - dump usage fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/* Decode and print a usage fault. Unlike the MPU/bus decoders, these
 * checks are independent 'if's (not else-if): several UFSR bits can be
 * set at once, and all of them are reported before being cleared.
 */
static void _UsageFault(const NANO_ESF *esf)
{
PR_EXC("***** USAGE FAULT *****\n");
_FaultContextShow(esf);
/* bits are sticky: they stack and must be reset */
if (_ScbUsageFaultIsDivByZero()) {
PR_EXC(" Division by zero\n");
}
if (_ScbUsageFaultIsUnaligned()) {
PR_EXC(" Unaligned memory access\n");
}
if (_ScbUsageFaultIsNoCp()) {
PR_EXC(" No coprocessor instructions\n");
}
if (_ScbUsageFaultIsInvalidPcLoad()) {
PR_EXC(" Illegal load of EXC_RETURN into PC\n");
}
if (_ScbUsageFaultIsInvalidState()) {
PR_EXC(" Illegal use of the EPSR\n");
}
if (_ScbUsageFaultIsUndefinedInstr()) {
PR_EXC(" Attempt to execute undefined instruction\n");
}
/* acknowledge everything reported above */
_ScbUsageFaultAllFaultsReset();
}
/*******************************************************************************
*
* _HardFault - dump hard fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/* Decode and print a hard fault. If the hard fault is an escalated
 * configurable fault (FORCED), delegate to the matching decoder with
 * fromHardFault=1 so it also resets the MMFAR/BFAR state.
 */
static void _HardFault(const NANO_ESF *esf)
{
PR_EXC("***** HARD FAULT *****\n");
if (_ScbHardFaultIsBusErrOnVectorRead()) {
PR_EXC(" Bus fault on vector table read\n");
} else if (_ScbHardFaultIsForced()) {
PR_EXC(" Fault escalation (see below)\n");
if (_ScbIsMemFault()) {
_MpuFault(esf, 1);
} else if (_ScbIsBusFault()) {
_BusFault(esf, 1);
} else if (_ScbIsUsageFault()) {
_UsageFault(esf);
}
}
}
/*******************************************************************************
*
* _DebugMonitor - dump debug monitor exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/*
 * Report a debug monitor exception. Decoding is not implemented yet, so
 * the ESF is not examined.
 *
 * esf: exception stack frame (unused until decoding is implemented)
 */
static void _DebugMonitor(const NANO_ESF *esf)
{
	(void) esf; /* silence unused-parameter warning */

	PR_EXC("***** Debug monitor exception (not implemented) *****\n");
}
/*******************************************************************************
*
* _ReservedException - dump reserved exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/*
 * Report a reserved exception or a spurious interrupt.
 *
 * esf:   exception stack frame (unused; reserved vectors carry no extra info)
 * fault: active vector number; < 16 means an architecturally reserved
 *        exception, >= 16 means external interrupt (IRQ = fault - 16)
 *
 * BUGFIX: the original always printed 'fault - 16', so a reserved
 * exception (fault < 16) was reported with a bogus negative number;
 * print the raw vector number for reserved exceptions instead.
 */
static void _ReservedException(const NANO_ESF *esf,
			       int fault)
{
	(void) esf; /* silence unused-parameter warning */

	if (fault < 16) {
		PR_EXC("***** Reserved Exception (%d) *****\n", fault);
	} else {
		PR_EXC("***** Spurious interrupt (IRQ %d) *****\n",
		       fault - 16);
	}
}
/*******************************************************************************
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 2)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
* (long form).
*
* eg. (precise bus error escalated to hard fault):
*
* Executing context ID (thread): 0x200000dc
* Faulting instruction address: 0x000011d3
* ***** HARD FAULT *****
* Fault escalation (see below)
* ***** BUS FAULT *****
* Precise data bus error
* Address: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _FaultDump(const NANO_ESF *esf, int fault)
{
	/* dispatch on the active exception number */
	switch (fault) {
	case 3: /* hard fault (escalation target for the other faults) */
		_HardFault(esf);
		break;
	case 4: /* MPU (memory management) fault */
		_MpuFault(esf, 0);
		break;
	case 5: /* bus fault */
		_BusFault(esf, 0);
		break;
	case 6: /* usage fault */
		_UsageFault(esf);
		break;
	case 12: /* debug monitor exception */
		_DebugMonitor(esf);
		break;
	default: /* reserved exception or spurious interrupt */
		_ReservedException(esf, fault);
		break;
	}
}
#endif /* FAULT_DUMP == 2 */
/*******************************************************************************
*
* _Fault - fault handler
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* Since the ESF can be either on the MSP or PSP depending if an exception or
* interrupt was already being handled, it is passed a pointer to both and has
* to find out on which the ESP is present.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
void _Fault(
	const NANO_ESF *msp, /* pointer to potential ESF on MSP */
	const NANO_ESF *psp  /* pointer to potential ESF on PSP */
	)
{
	/* a nested exception means the ESF was pushed onto the MSP */
	const NANO_ESF *esf = _ScbIsNestedExc() ? msp : psp;
	int fault = _ScbActiveVectorGet(); /* number of the active exception */

	FAULT_DUMP(esf, fault);

	/* does not return: the user-provided handler implements the policy */
	_SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
}
/*******************************************************************************
*
* _FaultInit - initialization of fault handling
*
* Turns on the desired hardware faults.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _FaultInit(void)
{
	/* trap divide-by-zero and unaligned accesses */
	_ScbDivByZeroFaultEnable();
	_ScbUnalignedFaultEnable();
}

101
arch/arm/core/fault_s.s Normal file
View File

@ -0,0 +1,101 @@
/* fault_s.s - fault handlers for ARM Cortex-M */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Fault handlers for ARM Cortex-M processors.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_Fault)
GTEXT(__hard_fault)
GTEXT(__mpu_fault)
GTEXT(__bus_fault)
GTEXT(__usage_fault)
GTEXT(__debug_monitor)
GTEXT(__reserved)
/*******************************************************************************
*
* __fault - fault handler installed in the fault and reserved vectors
*
* Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
* monitor and reserved exceptions.
*
* Save the values of the MSP and PSP in r0 and r1 respectively, so the first
* and second parameters to the _Fault() C function that will handle the rest.
* This has to be done because at this point we do not know if the fault
* happened while handling an exception or not, and thus the ESF could be on
* either stack. _Fault() will find out where the ESF resides.
*
* Provides these symbols:
*
* __hard_fault
* __mpu_fault
* __bus_fault
* __usage_fault
* __debug_monitor
* __reserved
*/
SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__bus_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__usage_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor)
SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)

	/* save current task's registers for the debugger (GDB_INFO only) */
	_GDB_STUB_EXC_ENTRY

	/* force unlock interrupts */
	eors.n r0, r0
	msr BASEPRI, r0

	/* pass both stack pointers: _Fault() determines where the ESF lives */
	mrs r0, MSP
	mrs r1, PSP
	push {lr}
	bl _Fault

	_GDB_STUB_EXC_EXIT

	/* exception return */
	pop {pc}

	.end

93
arch/arm/core/ffs.s Normal file
View File

@ -0,0 +1,93 @@
/* ffs.s - ARM find first set assembly routines */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This library implements find_last_set() and find_first_set() which returns the
most and least significant bit set respectively.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
/* Exports */
GTEXT(find_last_set)
GTEXT(find_first_set)
/*******************************************************************************
*
* find_last_set - find first set bit (searching from the most significant bit)
*
* This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: most significant bit set
*/
SECTION_FUNC(TEXT, find_last_set)

	cmp r0, #0		/* input of 0 returns 0 unchanged */
	itt ne
	clzne r0, r0		/* count leading zeroes */
	rsbne r0, r0, #32	/* 32 - clz = 1-based index of MSB set */
	mov pc, lr
/*******************************************************************************
*
* find_first_set - find first set bit (searching from the least significant bit)
*
* This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: least significant bit set
*/
SECTION_FUNC(TEXT, find_first_set)

	rsb r1, r0, #0		/* r1 = -r0 (two's complement negate) */
	ands r0, r1, r0		/* r0 = x & (-x): only LSB set */
	itt ne
	clzne r0, r0		/* count leading zeroes */
	rsbne r0, r0, #32	/* 1-based index of LSB set; 0 if input was 0 */
	mov pc, lr

163
arch/arm/core/gdb_stub.s Normal file
View File

@ -0,0 +1,163 @@
/* gdb_stub.s - extra work performed upon exception entry/exit for GDB */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Prep work done when entering exceptions consists of saving the callee-saved
registers before they get used by exception handlers, and recording the fact
that we are running in an exception.
Upon exception exit, it must be recorded that the task is not in an exception
anymore.
*/
#define _ASMLANGUAGE
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <nanok.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
/*******************************************************************************
*
* _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled
*
* During normal system operation, the callee-saved registers are saved lazily
* only when a context switch is required. To allow looking at the current
* threads registers while debugging an exception/interrupt, they must be saved
* upon entry since the handler could be using them: thus, looking at the CPU
* registers would show the current system state and not the current *thread*'s
* state.
*
* Also, record the fact that the thread is currently interrupted so that VQEMU
* looks into the CCS and not the CPU registers to obtain the current thread's
* register values.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubExcEntry)

	ldr r1, =_NanoKernel
	ldr r2, [r1, #__tNANO_flags_OFFSET]

	/* already in an exception, do not update the registers */
	ands r3, r2, #EXC_ACTIVE
	it ne
	bxne lr

	/* set EXC_ACTIVE in both the kernel and the current context's flags */
	orrs r2, #EXC_ACTIVE
	str r2, [r1, #__tNANO_flags_OFFSET]
	ldr r1, [r1, #__tNANO_current_OFFSET]
	str r2, [r1, #__tCCS_flags_OFFSET]

	/* save callee-saved + psp in CCS */
	adds r1, #__tCCS_preempReg_OFFSET
	mrs ip, PSP
	stmia r1, {v1-v8, ip}

	bx lr
/*******************************************************************************
*
* _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled
*
* Record the fact that the thread is not interrupted anymore so that VQEMU
* looks at the CPU registers and not into the CCS to obtain the current
* thread's register values. Only do this if this is not a nested exception.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubExcExit)

	/* if we're nested (ie. !RETTOBASE), do not reset EXC_ACTIVE */
	ldr r1, =_SCS_ICSR
	ldr r1, [r1]
	ands r1, #_SCS_ICSR_RETTOBASE
	it eq
	bxeq lr

	/* clear EXC_ACTIVE in both the kernel and the current context's flags */
	ldr r1, =_NanoKernel
	ldr r2, [r1, #__tNANO_flags_OFFSET]
	bic r2, #EXC_ACTIVE
	str r2, [r1, #__tNANO_flags_OFFSET]
	ldr r1, [r1, #__tNANO_current_OFFSET]
	str r2, [r1, #__tCCS_flags_OFFSET]

	bx lr
/*******************************************************************************
*
* _GdbStubIrqVectorTableEntry - stub for ISRs installed directly in
* vector table
*
* VxMicro on Cortex-M3/4 allows users to configure the kernel such that
* ISRs are installed directly in the vector table for maximum efficiency.
*
* When OS-awareness is enabled, a stub must be inserted to invoke
* _GdbStubExcEntry() before the user ISR runs, to save the current task's
* registers. This stub thus gets inserted in the vector table instead of the
* user's ISR. The user's IRQ vector table gets pushed after the vector table
* automatically by the linker script: this is all transparent to the user.
* This stub must also act as a demuxer that find the running exception and
* invoke the user's real ISR.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubIrqVectorTableEntry)

	/* save the current task's registers before the user ISR runs */
	_GDB_STUB_EXC_ENTRY

	mrs r0, IPSR	/* get exception number */
	sub r0, r0, #16	/* get IRQ number */
	ldr r1, =_IrqVectorTable

	/* grab real ISR at address: r1 + (r0 << 2) (table is 4-byte wide) */
	ldr r1, [r1, r0, LSL #2]

	/* jump to ISR, no return: ISR is responsible for calling _IntExit */
	bx r1

View File

@ -0,0 +1,52 @@
/* gdb_stub_irq_vector_table.c - stubs for IRQ part of vector table */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
When GDB is enabled, the static IRQ vector table needs to install the
_GdbStubIrqVectorTableEntry stub to do some work before calling the
user-installed ISRs.
*/
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
typedef void (*vth)(void); /* Vector Table Handler */

#if defined(CONFIG_GDB_INFO) && !defined(CONFIG_SW_ISR_TABLE)
/*
 * Every IRQ slot gets the GDB stub demuxer, which saves the current task's
 * registers and then forwards to the user's real ISR.
 */
vth __gdb_stub_irq_vector_table _GdbStubIrqVectorTable[CONFIG_NUM_IRQS] = {
	[0 ...(CONFIG_NUM_IRQS - 1)] = _GdbStubIrqVectorTableEntry
};
#endif /* CONFIG_GDB_INFO && !CONFIG_SW_ISR_TABLE */

65
arch/arm/core/irq_init.c Normal file
View File

@ -0,0 +1,65 @@
/* irq_init.c - ARM Cortex-M interrupt initialization */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
ARM Cortex-M interrupt initialization. Out of reset, every external
interrupt has priority 0, which the kernel reserves for itself;
_IntLibInit() lowers them all to _EXC_IRQ_DEFAULT_PRIO so that interrupt
locking via BASEPRI works as expected.
*/
#include <toolchain.h>
#include <sections.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
/*******************************************************************************
*
* _IntLibInit - initialize interrupts
*
* Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and
* not 0, which they have it set to when coming out of reset. This ensures that
* interrupt locking via BASEPRI works as expected.
*
* RETURNS: N/A
*/
void _IntLibInit(void)
{
	int irq;

	/* out of reset all priorities are 0: lower them to the default */
	for (irq = 0; irq < CONFIG_NUM_IRQS; irq++) {
		_NvicIrqPrioSet(irq, _EXC_IRQ_DEFAULT_PRIO);
	}
}

189
arch/arm/core/irq_manage.c Normal file
View File

@ -0,0 +1,189 @@
/* irq_manage.c - ARM CORTEX-M3 interrupt management */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Interrupt management: enabling/disabling and dynamic ISR connecting/replacing.
SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <misc/__assert.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
extern void __reserved(void);
/*******************************************************************************
*
* irq_handler_set - replace an interrupt handler by another
*
* An interrupt's ISR can be replaced at runtime. Care must be taken that the
* interrupt is disabled before doing this.
*
* This routine will hang if <old> is not found in the table and ASSERT_ON is
* enabled.
*
* RETURNS: N/A
*/
void irq_handler_set(unsigned int irq,
		     void (*old)(void *arg),
		     void (*new)(void *arg),
		     void *arg)
{
	int lock_key = irq_lock_inline();

	__ASSERT(old == _IsrTable[irq].isr, "expected ISR not found in table");

	if (_IsrTable[irq].isr == old) {
		/* swap in the new handler and its parameter while locked */
		_IsrTable[irq].arg = arg;
		_IsrTable[irq].isr = new;
	}

	irq_unlock_inline(lock_key);
}
/*******************************************************************************
*
* irq_enable - enable an interrupt line
*
* Clear possible pending interrupts on the line, and enable the interrupt
* line. After this call, the CPU will receive interrupts for the specified
* <irq>.
*
* RETURNS: N/A
*/
void irq_enable(unsigned int irq)
{
	/* before enabling interrupts, ensure that interrupt is cleared */
	_NvicIrqUnpend(irq);
	_NvicIrqEnable(irq);
}
/*******************************************************************************
*
* irq_disable - disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* RETURNS: N/A
*/
void irq_disable(unsigned int irq)
{
	/* mask the interrupt line at the NVIC */
	_NvicIrqDisable(irq);
}
/*******************************************************************************
*
* irq_priority_set - set an interrupt's priority
*
* Valid values are from 1 to 255. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them. ISR
* installed with priority 1 interrupts cannot make kernel calls.
*
* Priority 0 is reserved for kernel usage and cannot be used.
*
* The priority is verified if ASSERT_ON is enabled.
*
* RETURNS: N/A
*/
void irq_priority_set(unsigned int irq,
		      unsigned int prio)
{
	/* priority 0 is reserved for kernel usage (see function header) */
	__ASSERT(prio > 0 && prio < 256, "invalid priority!");
	_NvicIrqPrioSet(irq, _EXC_PRIO(prio));
}
/*******************************************************************************
*
* _SpuriousIRQ - spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* See __reserved().
*
* RETURNS: N/A
*/
void _SpuriousIRQ(void *unused)
{
	ARG_UNUSED(unused);
	/* treat an unexpected interrupt like a reserved exception */
	__reserved();
}
/*******************************************************************************
*
* irq_connect - connect an ISR to an interrupt line
*
* <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior
* ISR can have been connected on <irq> interrupt line since the system booted.
*
* This routine will hang if another ISR was connected for interrupt line <irq>
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
*
* RETURNS: the interrupt line number
*/
int irq_connect(unsigned int irq,
		unsigned int prio,
		void (*isr)(void *arg),
		void *arg)
{
	/* the slot must still hold _SpuriousIRQ, i.e. never been connected */
	irq_handler_set(irq, _SpuriousIRQ, isr, arg);
	irq_priority_set(irq, prio);
	return irq;
}
/*******************************************************************************
*
* irq_disconnect - disconnect an ISR from an interrupt line
*
* Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and
* the latter is replaced by _SpuriousIRQ(). irq_disable() should have
* been called before invoking this routine.
*
* RETURNS: N/A
*/
void irq_disconnect(unsigned int irq)
{
	/* restore the spurious-interrupt handler in the slot */
	irq_handler_set(irq, _IsrTable[irq].isr, _SpuriousIRQ, NULL);
}

111
arch/arm/core/isr_wrapper.s Normal file
View File

@ -0,0 +1,111 @@
/* isr_wrapper.s - ARM CORTEX-M3 wrapper for ISRs with parameter */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Wrapper installed in vector table for handling dynamic interrupts that accept
a parameter.
*/
#define _ASMLANGUAGE
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
#include <nanok.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GDATA(_IsrTable)
GTEXT(_IsrWrapper)
GTEXT(_IntExit)
/*******************************************************************************
*
* _IsrWrapper - wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _IsrWrapper() demuxes the ISR table using
* the running interrupt number as the index, and invokes the registered ISR
* with its correspoding argument. When returning from the ISR, it determines
* if a context switch needs to happen (see documentation for __pendsv()) and
* pends the PendSV exception if so: the latter will perform the context switch
* itself.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _IsrWrapper)

	_GDB_STUB_EXC_ENTRY

	push {lr}	/* lr is now the first item on the stack */

#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
	/*
	 * All interrupts are disabled when handling idle wakeup.
	 * For tickless idle, this ensures that the calculation and programming of
	 * the device for the next timer deadline is not interrupted.
	 * For non-tickless idle, this ensures that the clearing of the kernel idle
	 * state is not interrupted.
	 * In each case, _SysPowerSaveIdleExit is called with interrupts disabled.
	 */
	cpsid i	/* PRIMASK = 1 */

	/* is this a wakeup from idle ? */
	ldr r2, =_NanoKernel
	ldr r0, [r2, #__tNANO_idle_OFFSET]	/* requested idle duration, in ticks */
	cmp r0, #0
	ittt ne
	movne r1, #0
	strne r1, [r2, #__tNANO_idle_OFFSET]	/* clear kernel idle state */
	blxne _SysPowerSaveIdleExit
	cpsie i	/* re-enable interrupts (PRIMASK = 0) */
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */

	/* demux the software ISR table using the running IRQ number */
	mrs r0, IPSR	/* get exception number */
	sub r0, r0, #16	/* get IRQ number */
	lsl r0, r0, #3	/* table is 8-byte wide */
	ldr r1, =_IsrTable
	add r1, r1, r0	/* table entry: ISRs must have their MSB set to stay
			 * in thumb mode */

	ldmia r1,{r0,r3}	/* arg in r0, ISR in r3 */
	blx r3		/* call ISR */

	pop {lr}

	/* exception return is done in _IntExit(), including _GDB_STUB_EXC_EXIT */
	b _IntExit

View File

@ -0,0 +1,77 @@
/* nano_fiber_abort.c - ARM Cortex-M fiber_abort() routine */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
The ARM Cortex-M architecture provides its own fiber_abort() to deal with
different CPU modes (handler vs thread) when a fiber aborts. When its entry
point returns or when it aborts itself, the CPU is in thread mode and must
call _Swap() (which triggers a service call), but when in handler mode, the
CPU must exit handler mode to cause the context switch, and thus must queue
the PendSV exception.
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel/k_struct.h>
#include <microkernel.h>
#endif
#include <nanok.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
extern void _NanoFiberSwap(void);
/*******************************************************************************
*
* fiber_abort - abort the currently executing fiber
*
* Possible reasons for a fiber aborting:
*
* - the fiber explicitly aborts itself by calling this routine
* - the fiber implicitly aborts by returning from its entry point
* - the fiber encounters a fatal exception
*
* RETURNS: N/A
*/
void fiber_abort(void)
{
	/* let the kernel do its bookkeeping for the exiting context */
	_ContextExitRtn(_NanoKernel.current);

	if (!_ScbIsInThreadMode()) {
		/* handler mode: queue PendSV to perform the context switch */
		_ScbPendsvSet();
	} else {
		/* thread mode: swap out directly */
		_NanoFiberSwap();
	}
}

151
arch/arm/core/nanocontext.c Normal file
View File

@ -0,0 +1,151 @@
/* nanocontext.c - new context creation for ARM Cortex-M */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Core nanokernel fiber related primitives for the ARM Cortex-M processor
architecture.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <nanok.h>
#include <nanocontextentry.h>
tNANO _NanoKernel = {0};
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
/* register a newly created context with the host-tools context list */
#define TOOLS_SUPPORT_INIT(pCcs) toolsSupportInit(pCcs)
#else
/*
 * Host-tools support disabled: expand to a statement-safe no-op.
 * The do/while (0) wrapper keeps the macro usable wherever a single
 * statement is expected (idiomatic form; the extra parentheses around
 * the 0 in the original were redundant).
 */
#define TOOLS_SUPPORT_INIT(pCcs) \
	do {/* do nothing */ \
	} while (0)
#endif
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
/*******************************************************************************
*
* toolsSupportInit - initialize host-tools support when needed
*
* Currently only inserts the new context in the list of active contexts.
*
* RETURNS: N/A
*/
static ALWAYS_INLINE void toolsSupportInit(struct s_CCS *pCcs /* context */
					   )
{
	unsigned int key; /* interrupt-lock key returned by irq_lock() */

	/*
	 * Add the newly initialized context to head of the list of contexts.
	 * This singly linked list of contexts maintains ALL the contexts in the
	 * system: both tasks and fibers regardless of whether they are
	 * runnable.
	 */
	/* interrupts locked: the head splice must not race other list users */
	key = irq_lock();
	pCcs->activeLink = _NanoKernel.contexts;
	_NanoKernel.contexts = pCcs;
	irq_unlock(key);
}
#endif /* CONFIG_HOST_TOOLS_SUPPORT */
/*******************************************************************************
*
 * _NewContext - initialize a new context (thread) from its stack space
*
* The control structure (CCS) is put at the lower address of the stack. An
* initial context, to be "restored" by __pendsv(), is put at the other end of
* the stack, and thus reusable by the stack when not needed anymore.
*
* The initial context is an exception stack frame (ESF) since exiting the
* PendSV exception will want to pop an ESF. Interestingly, even if the lsb of
* an instruction address to jump to must always be set since the CPU always
* runs in thumb mode, the ESF expects the real address of the instruction,
* with the lsb *not* set (instructions are always aligned on 16 bit halfwords).
* Since the compiler automatically sets the lsb of function addresses, we have
* to unset it manually before storing it in the 'pc' field of the ESF.
*
* <options> is currently unused.
*
* RETURNS: N/A
*/
void *_NewContext(
	char *pStackMem,      /* stack memory */
	unsigned stackSize,   /* stack size in bytes */
	_ContextEntry pEntry, /* entry point */
	void *parameter1,     /* entry point first param */
	void *parameter2,     /* entry point second param */
	void *parameter3,     /* entry point third param */
	int priority,         /* context priority (-1 for tasks) */
	unsigned options      /* misc options (future) */
	)
{
	char *stackEnd = pStackMem + stackSize;
	struct __esf *pInitCtx;

	/* CCS lives at the (aligned) low end of the stack memory */
	tCCS *pCcs = (void *)ROUND_UP(pStackMem, sizeof(uint32_t));

	/* carve the context entry struct from the "base" of the stack */
	/*
	 * NOTE(review): no check that stackSize is large enough to hold
	 * both the tCCS and the initial ESF -- assumed validated by the
	 * caller; TODO confirm.
	 */
	pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd) -
				    sizeof(struct __esf));

	/* clear lsb: the ESF wants the real (halfword-aligned) address,
	 * even though the compiler sets the thumb bit on function addresses
	 */
	pInitCtx->pc = ((uint32_t)_ContextEntryRtn) & 0xfffffffe;
	/* r0-r3 of the ESF carry the args for _ContextEntryRtn() */
	pInitCtx->a1 = (uint32_t)pEntry;
	pInitCtx->a2 = (uint32_t)parameter1;
	pInitCtx->a3 = (uint32_t)parameter2;
	pInitCtx->a4 = (uint32_t)parameter3;
	pInitCtx->xpsr =
		0x01000000UL; /* clear all, thumb bit is 1, even if RO */

	pCcs->link = NULL;
	/* priority of -1 marks a preemptible task; anything else is a fiber */
	pCcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
	pCcs->prio = priority;

#ifdef CONFIG_CONTEXT_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	pCcs->custom_data = NULL;
#endif

	/* PSP points at the initial ESF, to be "restored" by __pendsv() */
	pCcs->preempReg.psp = (uint32_t)pInitCtx;
	pCcs->basepri = 0;

	/* initial values in all other registers/CCS entries are irrelevant */

	TOOLS_SUPPORT_INIT(pCcs);

	return pCcs;
}

132
arch/arm/core/nanofatal.c Normal file
View File

@ -0,0 +1,132 @@
/* nanofatal.c - nanokernel fatal error handler for ARM Cortex-M */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M.
*/
/* includes */
#include <toolchain.h>
#include <sections.h>
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <nanok.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
/* globals */
/*
* Define a default ESF for use with _NanoFatalErrorHandler() in the event
* the caller does not have a NANO_ESF to pass
*/
const NANO_ESF __defaultEsf = {0xdeaddead, /* a1 */
0xdeaddead, /* a2 */
0xdeaddead, /* a3 */
0xdeaddead, /* a4 */
0xdeaddead, /* ip */
0xdeaddead, /* lr */
0xdeaddead, /* pc */
0xdeaddead, /* xpsr */
};
/*******************************************************************************
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <__defaultEsf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(
	unsigned int reason, /* reason that handler was called */
	const NANO_ESF *pEsf /* pointer to exception stack frame */
	)
{
	/* report the error condition; output is a no-op without CONFIG_PRINTK */
	switch (reason) {
	case _NANO_ERR_INVALID_TASK_EXIT:
		PR_EXC("***** Invalid Exit Software Error! *****\n");
		break;

#if defined(CONFIG_STACK_CANARIES)
	case _NANO_ERR_STACK_CHK_FAIL:
		PR_EXC("***** Stack Check Fail! *****\n");
		break;
#endif /* CONFIG_STACK_CANARIES */

#ifdef CONFIG_ENHANCED_SECURITY
	case _NANO_ERR_INVALID_STRING_OP:
		PR_EXC("**** Invalid string operation! ****\n");
		break;
#endif /* CONFIG_ENHANCED_SECURITY */

	default:
		/* 'reason' is unsigned: %u, not %d, matches its type */
		PR_EXC("**** Unknown Fatal Error %u! ****\n", reason);
		break;
	}

	PR_EXC("Current context ID = 0x%x\n"
	       "Faulting instruction address = 0x%x\n",
	       context_self_get(),
	       pEsf->pc);

	/*
	 * Now that the error has been reported, call the user implemented
	 * policy
	 * to respond to the error. The decisions as to what responses are
	 * appropriate to the various errors are something the customer must
	 * decide.
	 */

	_SysFatalErrorHandler(reason, pEsf);

	/* _SysFatalErrorHandler() should not return; spin as a last resort */
	for (;;)
		;
}

View File

@ -0,0 +1,104 @@
/* offsets.c - ARM nano kernel structure member offset definition file */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module is responsible for the generation of the absolute symbols whose
value represents the member offsets for various ARM nanokernel
structures.
All of the absolute symbols defined by this module will be present in the
final microkernel or nanokernel ELF image (due to the linker's reference to
the _OffsetAbsSyms symbol).
INTERNAL
It is NOT necessary to define the offset for every member of a structure.
Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of
completeness.
*/
#include <genOffset.h>
#include <nanok.h>
#include <offsets/common.h>
/* ARM-specific tNANO structure member offsets */

GEN_OFFSET_SYM(tNANO, flags);
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
GEN_OFFSET_SYM(tNANO, idle);
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */

/* ARM-specific tCCS structure member offsets */

GEN_OFFSET_SYM(tCCS, basepri); /* saved BASEPRI (interrupt-lock key) */
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
GEN_OFFSET_SYM(tCCS, custom_data); /* opaque per-context user data */
#endif

/* ARM-specific ESF structure member offsets */

GEN_OFFSET_SYM(tESF, a1);
GEN_OFFSET_SYM(tESF, a2);
GEN_OFFSET_SYM(tESF, a3);
GEN_OFFSET_SYM(tESF, a4);
GEN_OFFSET_SYM(tESF, ip);
GEN_OFFSET_SYM(tESF, lr);
GEN_OFFSET_SYM(tESF, pc);
GEN_OFFSET_SYM(tESF, xpsr);

/* size of the entire tESF structure */

GEN_ABSOLUTE_SYM(__tESF_SIZEOF, sizeof(tESF));

/* ARM-specific preempt registers structure member offsets */

GEN_OFFSET_SYM(tPreempt, v1);
GEN_OFFSET_SYM(tPreempt, v2);
GEN_OFFSET_SYM(tPreempt, v3);
GEN_OFFSET_SYM(tPreempt, v4);
GEN_OFFSET_SYM(tPreempt, v5);
GEN_OFFSET_SYM(tPreempt, v6);
GEN_OFFSET_SYM(tPreempt, v7);
GEN_OFFSET_SYM(tPreempt, v8);
GEN_OFFSET_SYM(tPreempt, psp); /* process stack pointer, saved on swap */

/* size of the entire preempt registers structure */

GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt));

/* size of the tCCS structure sans save area for floating point regs */

GEN_ABSOLUTE_SYM(__tCCS_NOFLOAT_SIZEOF, sizeof(tCCS));

/* must be last: terminates the generated absolute-symbol table */
GEN_ABS_SYM_END

223
arch/arm/core/swap.s Normal file
View File

@ -0,0 +1,223 @@
/* swap.s - thread context switching for ARM Cortex-M */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the routines necessary for thread context switching
on ARM Cortex-M3/M4 CPUs.
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_Swap)
GTEXT(__svc)
GTEXT(__pendsv)
GDATA(_NanoKernel)
/*******************************************************************************
*
* __pendsv - PendSV exception handler, handling context switches
*
* The PendSV exception is the only context in the system that can perform
* context switching. When an execution context finds out it has to switch
* contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
*
* The scheduling algorithm is simple: schedule the head of the runnable FIBER
* context list, which is represented by _NanoKernel.fiber. If there are no
* runnable FIBER contexts, then schedule the TASK context represented by
* _NanoKernel.task. The _NanoKernel.task field will never be NULL.
*/
SECTION_FUNC(TEXT, __pendsv)

	_GDB_STUB_EXC_ENTRY

	/* load _NanoKernel into r1 and current tCCS into r2 */
	ldr r1, =_NanoKernel
	ldr r2, [r1, #__tNANO_current_OFFSET]

	/* addr of callee-saved regs in CCS in r0 */
	add r0, r2, #__tCCS_preempReg_OFFSET

	/* save callee-saved + psp in CCS (caller-saved are already in the
	 * hardware-pushed exception stack frame)
	 */
	mrs ip, PSP
	stmia r0, {v1-v8, ip}

	/*
	 * Prepare to clear PendSV with interrupts unlocked, but
	 * don't clear it yet. PendSV must not be cleared until
	 * the new thread is context-switched in since all decisions
	 * to pend PendSV have been taken with the current kernel
	 * state and this is what we're handling currently.
	 */
	ldr ip, =_SCS_ICSR
	ldr r3, =_SCS_ICSR_UNPENDSV

	/* protect the kernel state while we play with the thread lists */
	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
	msr BASEPRI, r0

	/* find out incoming context (fiber or task) */

	/* is there a fiber ready ? */
	ldr r2, [r1, #__tNANO_fiber_OFFSET]
	cmp r2, #0

	/*
	 * if so, remove fiber from list
	 * else, the task is the thread we're switching in
	 */
	itte ne
	ldrne.w r0, [r2, #__tCCS_link_OFFSET] /* then */
	strne.w r0, [r1, #__tNANO_fiber_OFFSET] /* then */
	ldreq.w r2, [r1, #__tNANO_task_OFFSET] /* else */

	/* r2 contains the new thread: publish it as 'current' */
	ldr r0, [r2, #__tCCS_flags_OFFSET]
	str r0, [r1, #__tNANO_flags_OFFSET]
	str r2, [r1, #__tNANO_current_OFFSET]

	/*
	 * Clear PendSV so that if another interrupt comes in and
	 * decides, with the new kernel state based on the new thread
	 * being context-switched in, that it needs to reschedule, it
	 * will take, but that previously pended PendSVs do not take,
	 * since they were based on the previous kernel state and this
	 * has been handled.
	 */

	/* _SCS_ICSR is still in ip and _SCS_ICSR_UNPENDSV in r3 */
	str r3, [ip, #0]

	/* restore BASEPRI for the incoming thread, consuming (zeroing)
	 * the saved value in its CCS
	 */
	ldr r0, [r2, #__tCCS_basepri_OFFSET]
	mov ip, #0
	str ip, [r2, #__tCCS_basepri_OFFSET]
	msr BASEPRI, r0

	/* load callee-saved + psp from CCS */
	add r0, r2, #__tCCS_preempReg_OFFSET
	ldmia r0, {v1-v8, ip}
	msr PSP, ip

	_GDB_STUB_EXC_EXIT

	/* exc return: hardware pops the incoming thread's ESF */
	bx lr
/*******************************************************************************
*
* __svc - service call handler
*
* The service call (svc) is only used in _Swap() to enter handler mode so we
* can go through the PendSV exception to perform a context switch.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, __svc)

	_GDB_STUB_EXC_ENTRY

	/*
	 * Unlock interrupts:
	 * - in a SVC call, so protected against context switches
	 * - allow PendSV, since it's running at prio 0xff
	 */
	eors.n r0, r0
	msr BASEPRI, r0

	/* set PENDSV bit, pending the PendSV exception */
	ldr r1, =_SCS_ICSR
	ldr r2, =_SCS_ICSR_PENDSV
	str r2, [r1, #0]

	_GDB_STUB_EXC_EXIT

	/* handler mode exit, to PendSV (which performs the actual switch) */
	bx lr
/*******************************************************************************
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
 * a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via irq_lock() and the return 'key' is passed as a
* parameter to _Swap(). The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism.
*
* _Swap() itself does not do much.
*
* It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a service call exception (svc) to setup
* the PendSV exception, which does the heavy lifting of context switching.
* This is the only place we have to save BASEPRI since the other paths to
* __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
 * Given that _Swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the tCCS of the
* outgoing context. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the svc exception.
*
* RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int basepri);
*
*/
SECTION_FUNC(TEXT, _Swap)

	/* save the interrupt-lock key (BASEPRI value, in r0) into the
	 * outgoing context's CCS; __pendsv() restores it on swap-in
	 */
	ldr r1, =_NanoKernel
	ldr r2, [r1, #__tNANO_current_OFFSET]
	str r0, [r2, #__tCCS_basepri_OFFSET]

	/* enter handler mode: __svc pends PendSV, which does the switch */
	svc #0

	/* r0 contains the return value if needed */
	bx lr

View File

@ -0,0 +1,86 @@
/* task_abort.c - ARM Cortex-M _TaskAbort() routine */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
The ARM Cortex-M architecture provides its own _TaskAbort() to deal with
different CPU modes (handler vs thread) when a task aborts. When its entry
point returns or when it aborts itself, the CPU is in thread mode and must
call the equivalent of task_abort(<self>), but when in handler mode, the
CPU must queue a packet to K_swapper(), then exit handler mode to queue the
PendSV exception and cause the immediate context switch to K_swapper.
*/
#ifdef CONFIG_MICROKERNEL
#include <toolchain.h>
#include <sections.h>
#include <minik.h>
#include <nanok.h>
#include <microkernel.h>
#include <nanokernel.h>
#include <misc/__assert.h>
static struct k_args cmdpacket;
/*******************************************************************************
*
* _TaskAbort - abort the current task
*
* Possible reasons for a task aborting:
*
* - the task explicitly aborts itself by calling this routine
* - the task implicitly aborts by returning from its entry point
* - the task encounters a fatal exception
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _TaskAbort(void)
{
	/* opt code passed to the task-operation service to request abort */
	const int taskAbortCode = 1;

	if (_ScbIsInThreadMode()) {
		/* thread mode: issue the abort as a regular task operation */
		_task_ioctl(K_Task->Ident, taskAbortCode);
	} else {
		/*
		 * handler mode: build a TSKOP command packet for the current
		 * task and queue it for K_swapper(), then pend PendSV to
		 * force the context switch once the handler exits
		 */
		cmdpacket.Comm = TSKOP;
		cmdpacket.Args.g1.task = K_Task->Ident;
		cmdpacket.Args.g1.opt = taskAbortCode;
		cmdpacket.Srce = 0;
		K_Task->Args = &cmdpacket;
		/* NOTE(review): pushes onto &K_Args, while the packet's own
		 * Args field was set via K_Task->Args above -- confirm
		 * &K_Args is the intended kernel command stack
		 */
		nano_isr_stack_push(&K_Args, (uint32_t) &cmdpacket);
		_ScbPendsvSet();
	}
}
#endif /* CONFIG_MICROKERNEL */

73
arch/arm/defs.objs Normal file
View File

@ -0,0 +1,73 @@
# defs.objs - build system
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3) Neither the name of Wind River Systems nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# base directory for the nanokernel kernel library
nkernel_BASE = ${vBASE}/kernel/arch/

# include search paths for all nkernel sources
nkernel_INCLUDE_DIR = $(strip \
	${vBASE}/arch/${vARCH}/include \
	${vBASE}/kernel/nanokernel/include \
	${vBASE}/kernel/microkernel/include \
	${vBASE}/target/src/bsp/arch/${vARCH} \
	)

# directories scanned below for .$c/.$s sources
nkernel_PATH = $(strip \
	${vBASE}/arch/${vARCH}/core \
	${vBASE}/kernel/nanokernel/core \
	)

nkernel_SRC :=

# sources pulled in only for a nanokernel-only build (CONFIG_NANOKERNEL=y)
nkernel_SRC_NANOKERNEL_y = $(strip \
	${vBASE}/kernel/common/version.c \
	${vBASE}/misc/printk.c \
	)

# sources pulled in only for a microkernel build (CONFIG_MICROKERNEL=y)
nkernel_SRC_MICROKERNEL_y = $(strip \
	${vBASE}/kernel/nanokernel/task/start_task.c \
	)

# select one of the two groups above; the non-matching CONFIG_* expands
# empty, so its _SRC_* variable reference resolves to nothing
nkernel_SRC += $(strip \
	${nkernel_SRC_NANOKERNEL_${CONFIG_NANOKERNEL}} \
	${nkernel_SRC_MICROKERNEL_${CONFIG_MICROKERNEL}} \
	)

ifeq ($(CONFIG_ENHANCED_SECURITY),y)
nkernel_SRC += $(strip \
	${vBASE}/kernel/common/string_s.c \
	)
endif

# gather every C and assembly source under nkernel_PATH
# NOTE(review): $c and $s are single-character make variables assumed to
# expand to the "c"/"s" suffixes -- defined by the wider build system;
# confirm against the top-level make fragments
nkernel_SRC += $(foreach path,${nkernel_PATH},$(wildcard ${path}/*.$c))
nkernel_SRC += $(foreach path,${nkernel_PATH},$(wildcard ${path}/*.$s))
nkernel_SRC += ${vKLIB_DIR}/configs.$c

KLIBS += nkernel

View File

@ -0,0 +1,217 @@
/* board.h - board configuration macros for the fsl_frdm_k64f BSP */
/*
* Copyright (c) 2014-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This header file is used to specify and describe board-level aspects for the
'fsl_frdm_k64f' BSP.
*/
#ifndef _BOARD__H_
#define _BOARD__H_
#include <misc/util.h>
#define SRAM_SIZE KB(192) /* 64K in code space (ignored) + 192K in SRAM space \
*/
/* default system clock */
#define SYSCLK_DEFAULT_IOSC_HZ MHZ(120)
/* address bases */
#define PERIPH_ADDR_BASE_MPU 0x4000D000 /* Memory Protection Unit */
#define PERIPH_ADDR_BASE_PCR 0x40049000 /* Port and pin Configuration */
#define PERIPH_ADDR_BASE_SIM 0x40047000 /* System Integration module */
#define PERIPH_ADDR_BASE_WDOG 0x40052000 /* Watchdog Timer module */
#define PERIPH_ADDR_BASE_MCG 0x40064000 /* Multipurpose Clock Generator */
#define PERIPH_ADDR_BASE_OSC 0x40065000 /* Oscillator module */
#define PERIPH_ADDR_BASE_UART0 0x4006A000 /* UART0 connected to SDA USB */
#define PERIPH_ADDR_BASE_UART1 0x4006B000 /* not supported */
#define PERIPH_ADDR_BASE_UART2 0x4006C000 /* not supported */
#define PERIPH_ADDR_BASE_UART3 0x4006D000 /* not supported */
#define PERIPH_ADDR_BASE_UART4 0x400EA000 /* not supported - for Bluetooth */
#define PERIPH_ADDR_BASE_PMC 0x4007D000 /* Power Mgt Controller module */
/* IRQs */
#define IRQ_DMA_CHAN0 0
#define IRQ_DMA_CHAN1 1
#define IRQ_DMA_CHAN2 2
#define IRQ_DMA_CHAN3 3
#define IRQ_DMA_CHAN4 4
#define IRQ_DMA_CHAN5 5
#define IRQ_DMA_CHAN6 6
#define IRQ_DMA_CHAN7 7
#define IRQ_DMA_CHAN8 8
#define IRQ_DMA_CHAN9 9
#define IRQ_DMA_CHAN10 10
#define IRQ_DMA_CHAN11 11
#define IRQ_DMA_CHAN12 12
#define IRQ_DMA_CHAN13 13
#define IRQ_DMA_CHAN14 14
#define IRQ_DMA_CHAN15 15
#define IRQ_DMA_ERR 16
#define IRQ_MCM 17
#define IRQ_FLASH_CMD 18
#define IRQ_FLASH_COLLISION 19
#define IRQ_LOW_VOLTAGE 20
#define IRQ_LOW_LEAKAGE 21
#define IRQ_WDOG_OR_EVM 22
#define IRQ_RAND_NUM_GEN 23
#define IRQ_I2C0 24
#define IRQ_I2C1 25
#define IRQ_SPI0 26
#define IRQ_SPI1 27
#define IRQ_I2S0_TX 28
#define IRQ_I2S0_RX 29
#define IRQ_RESERVED0 30
#define IRQ_UART0_STATUS 31
#define IRQ_UART0_ERROR 32
#define IRQ_UART1_STATUS 33
#define IRQ_UART1_ERROR 34
#define IRQ_UART2_STATUS 35
#define IRQ_UART2_ERROR 36
#define IRQ_UART3_STATUS 37
#define IRQ_UART3_ERROR 38
#define IRQ_ADC0 39
#define IRQ_CMP0 40
#define IRQ_CMP1 41
#define IRQ_FTM0 42
#define IRQ_FTM1 43
#define IRQ_FTM2 44
#define IRQ_CMT 45
#define IRQ_RTC_ALARM 46
#define IRQ_RTC_SEC 47
#define IRQ_TIMER0 48
#define IRQ_TIMER1 49
#define IRQ_TIMER2 50
#define IRQ_TIMER3 51
#define IRQ_PDB 52
#define IRQ_USB_OTG 53
#define IRQ_USB_CHARGE 54
#define IRQ_RESERVED1 55
#define IRQ_DAC0 56
#define IRQ_MCG 57
#define IRQ_LOW_PWR_TIMER 58
#define IRQ_GPIO_PORTA 59
#define IRQ_GPIO_PORTB 60
#define IRQ_GPIO_PORTC 61
#define IRQ_GPIO_PORTD 62
#define IRQ_GPIO_PORTE 63
#define IRQ_SOFTWARE 64
#define IRQ_SPI2 65
#define IRQ_UART4_STATUS 66
#define IRQ_UART4_ERROR 67
#define IRQ_RESERVED2 68 /* IRQ_UART5_STATUS - UART5 not implemented */
#define IRQ_RESERVED3 69 /* IRQ_UART5_ERROR - UART5 not implemented */
#define IRQ_CMP2 70
#define IRQ_FTM3 71
#define IRQ_DAC1 72
#define IRQ_ADC1 73
#define IRQ_I2C2 74
#define IRQ_CAN0_MSG_BUF 75
#define IRQ_CAN0_BUS_OFF 76
#define IRQ_CAN0_ERROR 77
#define IRQ_CAN0_TX_WARN 78
#define IRQ_CAN0_RX_WARN 79
#define IRQ_CAN0_WAKEUP 80
#define IRQ_SDHC 81
#define IRQ_ETH_IEEE1588_TMR 82
#define IRQ_ETH_TX 83
#define IRQ_ETH_RX 84
#define IRQ_ETH_ERR_MISC 85
#ifndef _ASMLANGUAGE
#include <misc/util.h>
#include <drivers/rand32.h>
/*
* UART configuration settings
*/
#include <drivers/k20Pcr.h>
#define CONFIG_UART_NUM_SYSTEM_PORTS 1
#define CONFIG_UART_NUM_EXTRA_PORTS 0
#define CONFIG_UART_NUM_PORTS \
(CONFIG_UART_NUM_SYSTEM_PORTS + CONFIG_UART_NUM_EXTRA_PORTS)
#define CONFIG_UART_CONSOLE_INDEX 0
#define CONFIG_UART_CONSOLE_PORT PCR_PORT_B
#define CONFIG_UART_CONSOLE_PORT_RX_PIN 16
#define CONFIG_UART_CONSOLE_PORT_TX_PIN 17
#define CONFIG_UART_CONSOLE_PORT_MUX_FUNC PCR_MUX_ALT3
#define CONFIG_UART_CONSOLE_CLK_FREQ SYSCLK_DEFAULT_IOSC_HZ
#define CONFIG_UART_CONSOLE_BAUDRATE 115200
#define CONFIG_UART_CONSOLE_REGS PERIPH_ADDR_BASE_UART0
#define CONFIG_UART_CONSOLE_IRQ IRQ_UART0_STATUS
#define CONFIG_UART_CONSOLE_INT_PRI 3
#define EXC_FROM_IRQ(irq) ((irq) + 16)
#define VECTOR_FROM_IRQ(irq) EXC_FROM_IRQ(irq)
#define VECTOR_ADDR(vector) ((uint32_t *)((int)vector << 2))
/*
* Device drivers utilize the macros PLB_BYTE_REG_WRITE() and
* PLB_BYTE_REG_READ() to access byte-wide registers on the processor
* local bus (PLB), as opposed to a PCI bus, for example. Boards are
* expected to provide implementations of these macros.
*/
/*
 * __plbByteRegWrite - write a byte-wide register on the processor local bus
 *
 * <data>: value to write; <pAddr>: address of the byte-wide register.
 *
 * The store goes through a volatile-qualified pointer so the compiler
 * cannot elide, coalesce, or reorder it relative to other volatile
 * accesses -- required for memory-mapped device registers.
 *
 * RETURNS: N/A
 */
static inline void __plbByteRegWrite(unsigned char data, unsigned char *pAddr)
{
	*(volatile unsigned char *)pAddr = data;
}
#define PLB_BYTE_REG_WRITE(data, address) \
__plbByteRegWrite((unsigned char)data, (unsigned char *)address)
/*
 * __plbByteRegRead - read a byte-wide register on the processor local bus
 *
 * <pAddr>: address of the byte-wide register to read.
 *
 * The load goes through a volatile-qualified pointer so the compiler
 * performs a fresh read on every call instead of caching or eliding it --
 * required for memory-mapped device registers whose value can change
 * outside the program's control.
 *
 * RETURNS: the byte read from the register
 */
static inline unsigned char __plbByteRegRead(unsigned char *pAddr)
{
	return *(volatile unsigned char *)pAddr;
}
#define PLB_BYTE_REG_READ(address) __plbByteRegRead((unsigned char *)address)
#endif /* !_ASMLANGUAGE */
#endif /* _BOARD__H_ */

View File

@ -0,0 +1,103 @@
# defs.objs - build system
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3) Neither the name of Wind River Systems nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# header search paths used when compiling the bsp library sources
bsp_INCLUDE_DIR = $(strip \
	${vBASE}/kernel/common/bsp \
	${vBASE}/include/bsp \
	${vBASE}/include \
	${vBASE}/drivers \
	${vBASE}/arch/${vARCH} \
	${vBSP_BASE_DIR}/${vBSP} \
	)
# initialization: Cortex-M vector table, reset entry, pre-C setup, and
# common kernel bring-up (init + constructors)
bsp_init_SRC = $(strip \
	arch/arm/bsp/CortexM/vector_table.s \
	arch/arm/bsp/CortexM/reset.s \
	arch/arm/bsp/CortexM/prep_c.c \
	kernel/common/bsp/init.c \
	kernel/common/bsp/ctors.c \
	)
# arch-specific: Cortex-M system control space/block and NMI support
bsp_arch_SRC = $(strip \
	arch/arm/bsp/CortexM/scs.c \
	arch/arm/bsp/CortexM/scb.c \
	arch/arm/bsp/CortexM/nmi.c \
	)
# peripherals: console, system timer and UART driver; the systick GDB stub
# is only pulled in when CONFIG_GDB_INFO=y (empty expansion otherwise)
bsp_drivers_SRC_GDB_INFO_y = arch/${vARCH}/timer/systick_gdb.s
bsp_drivers_SRC = $(strip \
	drivers/console/uart_console.c \
	arch/${vARCH}/timer/systick.c \
	drivers/serial/k20UartDrv.c \
	${bsp_drivers_SRC_GDB_INFO_${CONFIG_GDB_INFO}} \
	)
# configurable: sources selected via kernel configuration options
bsp_module_SRC_MICROKERNEL_y = $(strip \
	)
bsp_modules_SRC = $(strip \
	${bsp_module_SRC_MICROKERNEL_${CONFIG_MICROKERNEL}} \
	${bsp_module_SRC_RUNTIME_NMI_${CONFIG_RUNTIME_NMI}} \
	)
# miscellaneous needed by kernel
bsp_misc_SRC = $(strip \
	arch/arm/bsp/sysFatalErrorHandler.c \
	arch/arm/bsp/rand32.c \
	)
# aggregate of all kernel-side bsp sources (paths relative to ${vBASE})
bsp_kernel_SRC = $(strip \
	${bsp_arch_SRC} \
	${bsp_init_SRC} \
	${bsp_modules_SRC} \
	${bsp_misc_SRC} \
	)
# IRQ vector / software ISR tables, included only when the BSP provides them
bsp_SRC_IRQ_VECTOR_TABLE_y = ${vBSP_BASE_DIR}/${vBSP}/irq_vector_table.c
bsp_SRC_SW_ISR_TABLE_y = ${vBSP_BASE_DIR}/${vBSP}/sw_isr_table.c
# full source list for the bsp library: BSP-private files, plus the kernel
# and driver sources above with the ${vBASE} prefix applied
bsp_SRC = $(strip \
	${vBSP_BASE_DIR}/${vBSP}/system.c \
	${vBSP_BASE_DIR}/${vBSP}/nmi_on_reset.s \
	${vBSP_BASE_DIR}/${vBSP}/wdog.s \
	${bsp_SRC_IRQ_VECTOR_TABLE_${CONFIG_IRQ_VECTOR_TABLE_BSP}} \
	${bsp_SRC_SW_ISR_TABLE_${CONFIG_SW_ISR_TABLE_BSP}} \
	$(foreach src,${bsp_kernel_SRC},$(addprefix ${vBASE}/,${src})) \
	$(foreach src,${bsp_drivers_SRC},$(addprefix ${vBASE}/,${src})) \
	)
# register the bsp library with the build system
KLIBS += bsp

View File

@ -0,0 +1,65 @@
/* irq_vector_table.c - IRQ part of vector table */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This file contains the IRQ part of the vector table. It is meant to be used
for one of two cases:
a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it
binds _IsrWrapper() to all the IRQ entries in the vector table.
b) When the BSP is written so that device ISRs are installed directly in the
vector table, they are enumerated here.
*/
#include <toolchain.h>
#include <sections.h>
/* common wrapper that dispatches IRQs through the software ISR table */
extern void _IsrWrapper(void);
typedef void (*vth)(void); /* Vector Table Handler */
#if defined(CONFIG_SW_ISR_TABLE)
/* Software-managed ISRs: every IRQ slot funnels into _IsrWrapper(), which
 * looks up the actual handler and its argument at runtime. */
vth __irq_vector_table _IrqVectorTable[CONFIG_NUM_IRQS] = {
	[0 ...(CONFIG_NUM_IRQS - 1)] = _IsrWrapper};
#elif !defined(CONFIG_IRQ_VECTOR_TABLE_CUSTOM)
extern void _SpuriousIRQ(void);
/* placeholders: fill with real ISRs; any unfilled slot traps via _SpuriousIRQ */
vth __irq_vector_table _IrqVectorTable[CONFIG_NUM_IRQS] = {
	[0 ...(CONFIG_NUM_IRQS - 1)] = _SpuriousIRQ};
#endif /* CONFIG_SW_ISR_TABLE */

View File

@ -0,0 +1,60 @@
/* linker.cmd - Linker command/script file */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This is the linker script for both standard images and XIP images.
*/
/* Flash base address and size */
#define FLASH_START 0x00000000
#define FLASH_SIZE 1M
/*
* K64F Flash configuration fields
* These are 16 bytes, which must be loaded to address 0x400, and include
* default protection and security settings.
* They are loaded at reset to various Flash Memory module (FTFE) registers.
*/
#define SKIP_TO_SECURITY_FRDM_K64F . = 0x400;
/*
* SRAM base address and size
*
* Although the K64F CPU has a 64 KB region of SRAM at 0x1FFF0000, it is not
* used by this BSP. Only the 192 KB region based at the standard ARMv7-M
* SRAM base address of 0x20000000 is supported.
*/
#define SRAM_START 0x20000000 /* 192K in SRAM space */
#define SRAM_SIZE 192K
#include <nanokernel/arm/CortexM/scripts/linker.cmd>

View File

@ -0,0 +1,53 @@
/* nmi_on_reset.s - default basic NMI handler before the kernel is up */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Provide a default handler for NMI before the system is up. The default action
is to hard hang, sleeping.
This might be preferable than rebooting to help debugging, or because
rebooting might trigger the exact same problem over and over.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
GTEXT(_SysNmiOnReset)
/* _SysNmiOnReset - pre-kernel NMI handler: hard hang, sleeping, instead of
 * rebooting (see DESCRIPTION above: preserves state for debugging) */
SECTION_FUNC(TEXT, _SysNmiOnReset)
	wfi /* sleep until the next interrupt/event wakes the core */
	b _SysNmiOnReset /* then go right back to sleep, forever */

View File

@ -0,0 +1,64 @@
/* sw_isr_table.c - Software ISR table for fsl_frdm_k64f BSP */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This contains the ISR table meant to be used for ISRs that take a parameter.
It is also used when ISRs are to be connected at runtime, and in this case
provides a table that is filled with _SpuriousIRQ bindings.
*/
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
/* default handler for any IRQ not (yet) connected to a real ISR */
extern void _SpuriousIRQ(void *arg);
#if defined(CONFIG_SW_ISR_TABLE_DYNAMIC)
/* Runtime-connectable table: every slot starts as _SpuriousIRQ with a
 * recognizable poison argument (0xABAD1DEA) so an unconnected IRQ is easy
 * to spot in a debugger. */
_IsrTableEntry_t __isr_table_section _IsrTable[CONFIG_NUM_IRQS] = {
	[0 ...(CONFIG_NUM_IRQS - 1)].arg = (void *)0xABAD1DEA,
	[0 ...(CONFIG_NUM_IRQS - 1)].isr = _SpuriousIRQ};
#else
#if defined(CONFIG_SW_ISR_TABLE)
#if !defined(CONFIG_SW_ISR_TABLE_STATIC_CUSTOM)
/* placeholders: fill with real ISRs */
_IsrTableEntry_t __isr_table_section _IsrTable[CONFIG_NUM_IRQS] = {
	[0 ...(CONFIG_NUM_IRQS - 1)].arg = (void *)0xABAD1DEA,
	[0 ...(CONFIG_NUM_IRQS - 1)].isr = _SpuriousIRQ};
#endif
#endif
#endif

View File

@ -0,0 +1,381 @@
/* system.c - system/hardware module for fsl_frdm_k64f BSP */
/*
* Copyright (c) 2014-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides routines to initialize and support board-level hardware
for the fsl_frdm_k64f BSP.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <cputype.h>
#include <board.h>
#include <drivers/k20Mcg.h>
#include <drivers/uart.h>
#include <drivers/k20Pcr.h>
#include <drivers/k20Sim.h>
#include <drivers/k6xMpu.h>
#include <drivers/k6xPmc.h>
#include <sections.h>
#if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE)
#define DO_CONSOLE_INIT
#endif
/* board's setting for PLL multipler (PRDIV0) */
#define FRDM_K64F_PLL_DIV_20 (20 - 1)
/* board's setting for PLL multipler (VDIV0) */
#define FRDM_K64F_PLL_MULT_48 (48 - 24)
#ifdef CONFIG_RUNTIME_NMI
extern void _NmiInit(void);
#define NMI_INIT() _NmiInit()
#else
#define NMI_INIT()
#endif
/*
* K64F Flash configuration fields
* These 16 bytes, which must be loaded to address 0x400, include default
* protection and security settings.
* They are loaded at reset to various Flash Memory module (FTFE) registers.
*
* The structure is:
* -Backdoor Comparison Key for unsecuring the MCU - 8 bytes
* -Program flash protection bytes, 4 bytes, written to FPROT0-3
* -Flash security byte, 1 byte, written to FSEC
* -Flash nonvolatile option byte, 1 byte, written to FOPT
* -Reserved, 1 byte, (Data flash protection byte for FlexNVM)
* -Reserved, 1 byte, (EEPROM protection byte for FlexNVM)
*
*/
/* Flash configuration field: placed in its own linker section so it lands
 * at flash offset 0x400 (see SKIP_TO_SECURITY_FRDM_K64F in linker.cmd);
 * layout is documented in the comment block above. */
uint8_t __security_frdm_k64f_section __security_frdm_k64f[] = {
	/* Backdoor Comparison Key (unused) */
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	/* Program flash protection; 1 bit/region - 0=protected, 1=unprotected
	 */
	0xFF, 0xFF, 0xFF, 0xFF,
	/*
	 * Flash security: Backdoor key disabled, Mass erase enabled,
	 * Factory access enabled, MCU is unsecure
	 */
	0xFE,
	/* Flash nonvolatile option: NMI enabled, EzPort enabled, Normal boot */
	0xFF,
	/* Reserved for FlexNVM feature (unsupported by this MCU) */
	0xFF, 0xFF};
/*******************************************************************************
*
* clkInit - initialize the system clock
*
* This routine will configure the multipurpose clock generator (MCG) to
* set up the system clock.
* The MCG has nine possible modes, including Stop mode. This routine assumes
* that the current MCG mode is FLL Engaged Internal (FEI), as from reset.
* It transitions through the FLL Bypassed External (FBE) and
* PLL Bypassed External (PBE) modes to get to the desired
* PLL Engaged External (PEE) mode and generate the maximum 120 MHz system
* clock.
*
* RETURNS: N/A
*
*/
static void clkInit(void)
{
	uint8_t temp_reg;
	K20_MCG_t *mcg_p = (K20_MCG_t *)PERIPH_ADDR_BASE_MCG; /* clk gen. ctl */

	/*
	 * Select the 50 MHz external clock as the MCG OSC clock.
	 * MCG Control 7 register:
	 * - Select OSCCLK0 / XTAL
	 */
	temp_reg = mcg_p->c7 & ~MCG_C7_OSCSEL_MASK;
	temp_reg |= MCG_C7_OSCSEL_OSC0;
	mcg_p->c7 = temp_reg;

	/*
	 * Transition MCG from FEI mode (at reset) to FBE mode.
	 */

	/*
	 * MCG Control 2 register:
	 * - Set oscillator frequency range = very high for 50 MHz external
	 *   clock
	 * - Set oscillator mode = low power
	 * - Select external reference clock as the oscillator source
	 */
	temp_reg = mcg_p->c2 &
		~(MCG_C2_RANGE_MASK | MCG_C2_HGO_MASK | MCG_C2_EREFS_MASK);
	temp_reg |=
		(MCG_C2_RANGE_VHIGH | MCG_C2_HGO_LO_PWR | MCG_C2_EREFS_EXT_CLK);
	mcg_p->c2 = temp_reg;

	/*
	 * MCG Control 1 register:
	 * - Set system clock source (MCGOUTCLK) = external reference clock
	 * - Set FLL external reference divider = 1024 (MCG_C1_FRDIV_32_1024)
	 *   to get the FLL frequency of 50 MHz/1024 = 48.828 KHz
	 *   (Note: if the FLL frequency must be in the 31.25KHz-39.0625KHz
	 *   range, the FLL external reference divider = 1280
	 *   (MCG_C1_FRDIV_64_1280) to get 50 MHz/1280 = 39.0625KHz)
	 * - Select the external reference clock as the FLL reference source
	 */
	temp_reg = mcg_p->c1 &
		~(MCG_C1_CLKS_MASK | MCG_C1_FRDIV_MASK | MCG_C1_IREFS_MASK);
	temp_reg |=
		(MCG_C1_CLKS_EXT_REF | MCG_C1_FRDIV_32_1024 | MCG_C1_IREFS_EXT);
	mcg_p->c1 = temp_reg;

	/* confirm the external reference clock is the FLL reference source */
	while ((mcg_p->s & MCG_S_IREFST_MASK) != 0)
		;

	/* confirm the external ref. clock is the system clock (MCGOUTCLK) */
	while ((mcg_p->s & MCG_S_CLKST_MASK) != MCG_S_CLKST_EXT_REF)
		;

	/*
	 * Transition to PBE mode.
	 * Configure the PLL frequency in preparation for PEE mode.
	 * The goal is PEE mode with a 120 MHz system clock source (MCGOUTCLK),
	 * which is calculated as (oscillator clock / PLL divider) * PLL
	 * multiplier, where oscillator clock = 50 MHz, PLL divider = 20 and
	 * PLL multiplier = 48.
	 */

	/*
	 * MCG Control 5 register:
	 * - Set the PLL divider
	 */
	temp_reg = mcg_p->c5 & ~MCG_C5_PRDIV0_MASK;
	temp_reg |= FRDM_K64F_PLL_DIV_20;
	mcg_p->c5 = temp_reg;

	/*
	 * MCG Control 6 register:
	 * - Select PLL as output for PEE mode
	 * - Set the PLL multiplier
	 */
	temp_reg = mcg_p->c6 & ~(MCG_C6_PLLS_MASK | MCG_C6_VDIV0_MASK);
	temp_reg |= (MCG_C6_PLLS_PLL | FRDM_K64F_PLL_MULT_48);
	mcg_p->c6 = temp_reg;

	/* confirm that the PLL clock is selected as the PLL output */
	while ((mcg_p->s & MCG_S_PLLST_MASK) == 0)
		;

	/* confirm that the PLL has acquired lock */
	while ((mcg_p->s & MCG_S_LOCK0_MASK) == 0)
		;

	/*
	 * Transition to PEE mode.
	 * MCG Control 1 register:
	 * - Select PLL as the system clock source (MCGOUTCLK)
	 */
	temp_reg = mcg_p->c1 & ~MCG_C1_CLKS_MASK;
	temp_reg |= MCG_C1_CLKS_FLL_PLL;
	mcg_p->c1 = temp_reg;

	/* confirm the PLL output is the system clock source (MCGOUTCLK) */
	while ((mcg_p->s & MCG_S_CLKST_MASK) != MCG_S_CLKST_PLL)
		;
}
#if defined(DO_CONSOLE_INIT)

#include "console/uart_console.h"

/*******************************************************************************
*
* consoleInit - initialize target-only console
*
* Routes the console UART's Rx/Tx pins through the pin control registers,
* then initializes the UART driver and the UART console. Only used for
* debugging.
*
* RETURNS: N/A
*
*/
static void consoleInit(void)
{
	K20_PCR_t pinCfg = {0}; /* Pin Control Register template */
	/* port/pin control module */
	K20_PORT_PCR_t *pcr_p = (K20_PORT_PCR_t *)PERIPH_ADDR_BASE_PCR;
	/* UART0 Rx and Tx pin assignments */
	uint32_t uartPort = CONFIG_UART_CONSOLE_PORT;
	uint32_t uartRxPin = CONFIG_UART_CONSOLE_PORT_RX_PIN;
	uint32_t uartTxPin = CONFIG_UART_CONSOLE_PORT_TX_PIN;
	struct uart_init_info info = {
		.baud_rate = CONFIG_UART_CONSOLE_BAUDRATE,
		.regs = CONFIG_UART_CONSOLE_REGS,
		.sys_clk_freq = CONFIG_UART_CONSOLE_CLK_FREQ,
		/* Only supported in polling mode, but init all info fields */
		.irq = CONFIG_UART_CONSOLE_IRQ,
		.int_pri = CONFIG_UART_CONSOLE_INT_PRI
	};

	/* enable the UART Rx and Tx pins via the mux function */
	pinCfg.field.mux = CONFIG_UART_CONSOLE_PORT_MUX_FUNC;
	pcr_p->port[uartPort].pcr[uartRxPin] = pinCfg;
	pcr_p->port[uartPort].pcr[uartTxPin] = pinCfg;

	uart_init(CONFIG_UART_CONSOLE_INDEX, &info);
	uartConsoleInit();
}
#else
#define consoleInit() \
	do {/* nothing */ \
	} while ((0))
#endif /* DO_CONSOLE_INIT */
/*******************************************************************************
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the interrupt controller device drivers and the
* Kinetis UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
void _InitHardware(void)
{
	/* System Integration module */
	K20_SIM_t *sim_p = (K20_SIM_t *)PERIPH_ADDR_BASE_SIM;
	/* Power Mgt Control module */
	K6x_PMC_t *pmc_p = (K6x_PMC_t *)PERIPH_ADDR_BASE_PMC;
	/* Memory Protection Unit module (Kinetis-specific, see note below) */
	K6x_MPU_t *mpu_p = (K6x_MPU_t *)PERIPH_ADDR_BASE_MPU;
	int oldLevel; /* old interrupt lock level */
	uint32_t temp_reg;
	/* disable interrupts: the remainder must run without preemption */
	oldLevel = irq_lock();
	/* enable the port clocks (needed before any pin mux configuration) */
	sim_p->scgc5.value |= (SIM_SCGC5_PORTA_CLK_EN | SIM_SCGC5_PORTB_CLK_EN |
			       SIM_SCGC5_PORTC_CLK_EN | SIM_SCGC5_PORTD_CLK_EN |
			       SIM_SCGC5_PORTE_CLK_EN);
	/* release I/O power hold to allow normal run state */
	pmc_p->regsc.value |= PMC_REGSC_ACKISO_MASK;
	/*
	 * Disable memory protection and clear slave port errors.
	 * Note that the K64F does not implement the optional ARMv7-M memory
	 * protection unit (MPU), specified by the architecture (PMSAv7), in the
	 * Cortex-M4 core. Instead, the processor includes its own MPU module.
	 */
	temp_reg = mpu_p->ctrlErrStatus.value;
	temp_reg &= ~MPU_VALID_MASK;
	temp_reg |= MPU_SLV_PORT_ERR_MASK;
	mpu_p->ctrlErrStatus.value = temp_reg;
	/* clear all faults left over from before reset */
	_ScbMemFaultAllFaultsReset();
	_ScbBusFaultAllFaultsReset();
	_ScbUsageFaultAllFaultsReset();
	_ScbHardFaultAllFaultsReset();
	/*
	 * Initialize the clock dividers for:
	 * core and system clocks = 120 MHz (PLL/OUTDIV1)
	 * bus clock = 60 MHz (PLL/OUTDIV2)
	 * FlexBus clock = 40 MHz (PLL/OUTDIV3)
	 * Flash clock = 24 MHz (PLL/OUTDIV4)
	 * (must be set before clkInit() raises the PLL to 120 MHz)
	 */
	sim_p->clkdiv1.value = ((SIM_CLKDIV(1) << SIM_CLKDIV1_OUTDIV1_SHIFT) |
				(SIM_CLKDIV(2) << SIM_CLKDIV1_OUTDIV2_SHIFT) |
				(SIM_CLKDIV(3) << SIM_CLKDIV1_OUTDIV3_SHIFT) |
				(SIM_CLKDIV(5) << SIM_CLKDIV1_OUTDIV4_SHIFT));
	clkInit(); /* Initialize PLL/system clock to 120 MHz */
	consoleInit(); /* NOP if not needed */
	NMI_INIT(); /* install default handler that simply resets the CPU
		     * if configured in the kernel, NOP otherwise */
	/* restore interrupt state */
	irq_unlock(oldLevel);
}

View File

@ -0,0 +1,104 @@
/* wdog.s - watchdog initialization for fsl_frdm_k64f BSP */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module initializes the watchdog for the fsl_frdm_k64f BSP.
*/
#define _ASMLANGUAGE
#include <board.h>
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
GTEXT(_WdogInit)
/* watchdog register offsets */
#define WDOG_SCTRL_HI_OFFSET 0x0
#define WDOG_UNLOCK_OFFSET 0xE
/* watchdog command words */
#define WDOG_UNLOCK_1_CMD 0xC520
#define WDOG_UNLOCK_2_CMD 0xD928
/*******************************************************************************
*
* _WdogInit - Watchdog timer disable routine
*
* This routine will disable the watchdog timer.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,_WdogInit)
	/*
	 * NOTE: DO NOT SINGLE STEP THROUGH THIS FUNCTION!!!
	 * There are timing requirements for the execution of the unlock
	 * process. Single stepping through the code will cause the CPU to
	 * reset.
	 */
	/*
	 * First unlock the watchdog so that we can write to registers.
	 *
	 * This sequence must execute within 20 clock cycles, so disable
	 * interrupts to keep the code atomic and ensure the timing.
	 */
	cpsid i
	ldr r0, =PERIPH_ADDR_BASE_WDOG
	movw r1, #WDOG_UNLOCK_1_CMD
	strh r1, [r0, #WDOG_UNLOCK_OFFSET] /* first half of unlock sequence */
	movw r1, #WDOG_UNLOCK_2_CMD
	strh r1, [r0, #WDOG_UNLOCK_OFFSET] /* second half: now unlocked */
	/*
	 * Disable the watchdog.
	 *
	 * Writes to control/configuration registers must execute within
	 * 256 clock cycles after unlocking.
	 */
	ldrh r1, [r0, #WDOG_SCTRL_HI_OFFSET]
	mov r2, #1
	bics r1, r2 /* clear bit 0 of status/control high = disable watchdog */
	strh r1, [r0, #WDOG_SCTRL_HI_OFFSET]
	cpsie i /* timing-critical section done: re-enable interrupts */
	bx lr

View File

@ -0,0 +1,123 @@
/* exc.h - exception/interrupt context helpers for Cortex-M CPUs */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Exception/interrupt context helpers.
*/
#ifndef _ARM_CORTEXM_ISR__H_
#define _ARM_CORTEXM_ISR__H_
#include <nanokernel/cpu.h>
#ifdef _ASMLANGUAGE
/* nothing */
#else
/*******************************************************************************
*
* _IpsrGet - obtain value of IPSR register
*
* Obtain and return current value of IPSR register.
*
* RETURNS: the contents of the IPSR register
*
* \NOMANUAL
*/
#if defined(__GNUC__)
static ALWAYS_INLINE uint32_t _IpsrGet(void)
{
	uint32_t vector;
	/* IPSR holds the number of the exception currently being serviced
	 * (0 when executing in thread mode) */
	__asm__ volatile("mrs %0, IPSR\n\t" : "=r"(vector));
	return vector;
}
#elif defined(__DCC__)
/* Diab compiler: embedded-assembly flavor of the same IPSR read */
__asm volatile uint32_t _IpsrGet(void)
{
	% !"r0" mrs r0, IPSR
}
#endif
/*******************************************************************************
*
* _IsInIsr - find out if running in an ISR context
*
* The current executing vector is found in the IPSR register. We consider the
* IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions, to be
* interrupts. Taking a fault within an exception is also considered in
* interrupt context.
*
* RETURNS: 1 if in ISR, 0 if not.
*
* \NOMANUAL
*/
static ALWAYS_INLINE int _IsInIsr(void)
{
	uint32_t active = _IpsrGet();

	/* exception numbers 14 (PendSV), 15 (SYSTICK) and 16+ (IRQs) are
	 * considered interrupts */
	if (active > 13) {
		return 1;
	}

	/* a fault taken while already servicing an exception also counts */
	return (active != 0) && _ScbIsNestedExc();
}
/*******************************************************************************
* _ExcSetup - setup system exceptions
*
* Set exception priorities to conform with the BASEPRI locking mechanism.
* Set PendSV priority to lowest possible.
*
* Enable fault exceptions.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void _ExcSetup(void)
{
	/* PendSV gets the lowest possible priority (0xff) */
	_ScbExcPrioSet(_EXC_PENDSV, _EXC_PRIO(0xff));
	/* SVC and fault handlers at priority 1, per the BASEPRI locking
	 * scheme described in the header comment above */
	_ScbExcPrioSet(_EXC_SVC, _EXC_PRIO(0x01));
	_ScbExcPrioSet(_EXC_MPU_FAULT, _EXC_PRIO(0x01));
	_ScbExcPrioSet(_EXC_BUS_FAULT, _EXC_PRIO(0x01));
	_ScbExcPrioSet(_EXC_USAGE_FAULT, _EXC_PRIO(0x01));
	/* enable the configurable fault exceptions */
	_ScbUsageFaultEnable();
	_ScbBusFaultEnable();
	_ScbMemFaultEnable();
}
#endif /* _ASMLANGUAGE */
#endif /* _ARM_CORTEXM_ISR__H_ */

View File

@ -0,0 +1,114 @@
/* stack.h - stack helpers for Cortex-M CPUs */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Stack helper functions.
*/
#ifndef _ARM_CORTEXM_STACK__H_
#define _ARM_CORTEXM_STACK__H_
#include <nanok.h>
/* Stack growth direction for this architecture, selected via kconfig. */
#ifdef CONFIG_STACK_GROWS_DOWN
#define STACK_DIR STACK_GROWS_DOWN
#else
#define STACK_DIR STACK_GROWS_UP
#endif

/* Required stack alignment in bytes: 8 when double-word alignment is
 * configured, 4 otherwise.
 */
#ifdef CONFIG_STACK_ALIGN_DOUBLE_WORD
#define STACK_ALIGN_SIZE 8
#else
#define STACK_ALIGN_SIZE 4
#endif
#ifdef _ASMLANGUAGE
/* nothing */
#else
/*
 * __GET_MSP(): compute the initial Main Stack Pointer value from the
 * interrupt stack buffer, aligned to STACK_ALIGN_SIZE.
 *
 * NOTE(review): the downward-growing case rounds down from
 * &_InterruptStack[CONFIG_ISR_STACK_SIZE - 1], i.e. the address of the
 * LAST byte, so up to STACK_ALIGN_SIZE bytes at the top of the buffer are
 * never used as stack space -- presumably intentional slack, but confirm
 * it should not be &_InterruptStack[CONFIG_ISR_STACK_SIZE] (one past the
 * end) instead.
 */
#if (STACK_DIR == STACK_GROWS_DOWN)
#define __GET_MSP() \
STACK_ROUND_DOWN(&_InterruptStack[CONFIG_ISR_STACK_SIZE - 1])
#else
#define __GET_MSP() STACK_ROUND_UP(&_InterruptStack[0])
#endif
/* Interrupt stack buffer, defined elsewhere; sized via kconfig. */
extern char _InterruptStack[CONFIG_ISR_STACK_SIZE];
/*******************************************************************************
*
* _MspSet - set the value of the Main Stack Pointer register
*
* Store the value of <msp> in MSP register.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
#if defined(__GNUC__)
/* GCC flavor: write <msp> into the MSP special register via inline asm. */
static ALWAYS_INLINE void _MspSet(uint32_t msp /* value to store in MSP */
)
{
__asm__ volatile("msr MSP, %0\n\t" : : "r"(msp));
}
#elif defined(__DCC__)
/* Diab compiler flavor: embedded-assembly equivalent; <msp> arrives in r0. */
__asm volatile void _MspSet(uint32_t msp)
{
% reg msp !"r0" msr MSP, msp
}
#endif
/*******************************************************************************
*
* _InterruptStackSetup - setup interrupt stack
*
* On Cortex-M, the interrupt stack is registered in the MSP (main stack
* pointer) register, and switched to automatically when taking an exception.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void _InterruptStackSetup(void)
{
/* aligned address within _InterruptStack, per the __GET_MSP() macro */
uint32_t msp = __GET_MSP();
_MspSet(msp);
}
#endif /* _ASMLANGUAGE */
#endif /* _ARM_CORTEXM_STACK__H_ */

227
arch/arm/include/nanok.h Normal file
View File

@ -0,0 +1,227 @@
/* nanok.h - private nanokernel definitions (ARM) */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This file contains private nanokernel structures definitions and various other
definitions for the ARM Cortex-M3 processor architecture.
This file is also included by assembly language files which must #define
_ASMLANGUAGE before including this header file. Note that nanokernel assembly
source files obtains structure offset values via "absolute symbols" in the
offsets.o module.
*/
#ifndef _NANOK__H_
#define _NANOK__H_
#ifdef __cplusplus
extern "C" {
#endif
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
#ifndef _ASMLANGUAGE
#include <stdint.h>
#endif
#ifndef _ASMLANGUAGE
/*
 * Cooperative (callee-saved) register context.
 *
 * Empty for Cortex-M, which automatically saves the necessary registers
 * in its exception stack frame, so nothing needs to be stored here.
 * NOTE(review): an empty struct is a GNU C extension, not ISO C.
 *
 * For Cortex-A, this may be:
 *
 * uint32_t a1; // r0
 * uint32_t a2; // r1
 * uint32_t a3; // r2
 * uint32_t a4; // r3
 * uint32_t ip; // r12
 * uint32_t lr; // r14
 * uint32_t pc; // r15
 * uint32_t xpsr;
 */
struct coop {
};
/* Exception stack frame; struct __esf is defined elsewhere. */
typedef struct __esf tESF;
/*
 * Preemptive register context: the callee-saved registers that the
 * Cortex-M exception entry does NOT stack automatically, saved by
 * software on context switch.
 */
struct preempt {
uint32_t v1; /* r4 */
uint32_t v2; /* r5 */
uint32_t v3; /* r6 */
uint32_t v4; /* r7 */
uint32_t v5; /* r8 */
uint32_t v6; /* r9 */
uint32_t v7; /* r10 */
uint32_t v8; /* r11 */
uint32_t psp; /* r13: saved process stack pointer */
};
typedef struct preempt tPreempt;
#endif /* _ASMLANGUAGE */
/* Bitmask definitions for the tCCS->flags bit field */
#define FIBER 0x000
#define TASK 0x001 /* 1 = task context, 0 = fiber context */
#define INT_ACTIVE 0x002 /* 1 = context is executing interrupt handler */
#define EXC_ACTIVE 0x004 /* 1 = context is executing exception handler */
#define USE_FP 0x010 /* 1 = context uses floating point unit */
#define PREEMPTIBLE \
0x020 /* 1 = preemptible context \
* NOTE: the value must be < 0x100 to be able to \
* use a small thumb instr with immediate \
* when loading PREEMPTIBLE in a GPR \
*/
#define ESSENTIAL 0x200 /* 1 = system context that must not abort */
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
/* stacks */
#define STACK_GROWS_DOWN 0
#define STACK_GROWS_UP 1
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#ifdef CONFIG_CPU_CORTEXM
#include <CortexM/stack.h>
#include <CortexM/exc.h>
#endif
#ifndef _ASMLANGUAGE
/*
 * Per-context control structure (one per fiber or task).
 * NOTE(review): field order is likely consumed as absolute-symbol offsets
 * by assembly code (see file description) -- do not reorder.
 */
struct s_CCS {
struct s_CCS *link; /* singly-linked list in _NanoKernel.fiber */
uint32_t flags; /* bitmask of FIBER/TASK/INT_ACTIVE/... (see above) */
uint32_t basepri; /* saved interrupt-mask value; presumably the ARM
			   * BASEPRI register -- confirm against _Swap() */
int prio; /* context priority */
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
void *custom_data; /* available for custom use */
#endif
struct coop coopReg; /* cooperative (callee-saved) context, empty on M */
struct preempt preempReg; /* preemptive register context */
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
struct s_CCS *activeLink; /* link to next context in the active list */
#endif
};
/*
 * Nanokernel global state: the runnable contexts and the one currently
 * scheduled.
 */
struct s_NANO {
tCCS *fiber; /* singly linked list of runnable fiber contexts */
tCCS *task; /* pointer to runnable task context */
tCCS *current; /* currently scheduled context (fiber or task) */
int flags; /* tCCS->flags of 'current' context */
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
tCCS *contexts; /* singly linked list of ALL fiber+tasks */
#endif
#ifdef CONFIG_FP_SHARING
tCCS *current_fp; /* context (fiber or task) that owns the FP regs */
#endif /* CONFIG_FP_SHARING */
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
int32_t idle; /* Number of ticks for kernel idling */
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
#if 0 /* not needed, MSP should only be loaded once at startup */
char *common_isp; /* interrupt stack pointer base */
#endif
};
typedef struct s_NANO tNANO;
/* The single global nanokernel state instance, defined elsewhere. */
extern tNANO _NanoKernel;
#endif /* _ASMLANGUAGE */
#ifndef _ASMLANGUAGE
extern void _FaultInit(void);
extern void _CpuIdleInit(void);
/*******************************************************************************
 *
 * nanoArchInit - architecture-specific nanokernel initialization
 *
 * Marks the kernel as running in fiber mode, then initializes the
 * interrupt stack (MSP), exceptions, fault handling and the CPU idle
 * mechanism, in that order.
 *
 * RETURNS: N/A
 */
static ALWAYS_INLINE void nanoArchInit(void)
{
_NanoKernel.flags = FIBER;
_InterruptStackSetup(); /* loads MSP with the interrupt stack address */
_ExcSetup(); /* exception setup; defined elsewhere */
_FaultInit();
_CpuIdleInit();
}
/*******************************************************************************
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation
* to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void fiberRtnValueSet(
tCCS *fiber, /* pointer to fiber */
unsigned int value /* value to set as return value */
)
{
/*
 * The fiber is pending, so its saved PSP points at a full exception
 * stack frame; patch the frame's a1 slot (r0, the return-value
 * register) so <value> is what the fiber sees returned on resume.
 */
tESF *pEsf = (void *)fiber->preempReg.psp;
pEsf->a1 = value;
}
extern void _InsertCCS(tCCS **, tCCS *);
extern void *_NewContext(char *,
unsigned,
_ContextEntry,
_ContextArg,
_ContextArg,
_ContextArg,
int,
unsigned);
extern unsigned int _Swap(unsigned int);
extern void nano_cpu_atomic_idle(unsigned int);
#define _IS_IN_ISR() _IsInIsr()
extern void _IntLibInit(void);
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _NANOK__H_ */

View File

@ -0,0 +1,56 @@
/* start_task.h - ARM nanokernel declarations to start a task */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
ARM-specific parts of start_task().
Currently empty, only here for abstraction.
*/
#ifndef _START_TASK_ARCH__H_
#define _START_TASK_ARCH__H_
#include <toolchain.h>
#include <sections.h>
#include <microkernel/k_struct.h>
#include <minik.h>
#include <nanok.h>
#include <cputype.h>
#include <microkernel/task.h>
#define _START_TASK_ARCH(pKproc, pOpt) \
do {/* nothing */ \
} while ((0))
#endif /* _START_TASK_ARCH__H_ */

View File

@ -0,0 +1,149 @@
/* board.h - board configuration macros for the ti_lm3s6965 BSP */
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This header file is used to specify and describe board-level aspects for
the 'ti_lm3s6965' BSP.
*/
#ifndef _BOARD__H_
#define _BOARD__H_
#include <misc/util.h>
#define SRAM_SIZE KB(64)
/* default system clock */
#define SYSCLK_DEFAULT_IOSC_HZ MHZ(12)
/* address bases */
#define PERIPH_ADDR_BASE_UART0 0x4000C000
#define PERIPH_ADDR_BASE_UART1 0x4000D000
#define PERIPH_ADDR_BASE_UART2 0x4000E000
/* IRQs */
#define IRQ_GPIO_PORTA 0
#define IRQ_GPIO_PORTB 1
#define IRQ_GPIO_PORTC 2
#define IRQ_GPIO_PORTD 3
#define IRQ_GPIO_PORTE 4
#define IRQ_UART0 5
#define IRQ_UART1 6
#define IRQ_SSI0 7
#define IRQ_I2C0 8
#define IRQ_PWM_FAULT 9
#define IRQ_PWM_GEN0 10
#define IRQ_PWM_GEN1 11
#define IRQ_PWM_GEN2 12
#define IRQ_QEI0 13
#define IRQ_ADC0_SEQ0 14
#define IRQ_ADC0_SEQ1 15
#define IRQ_ADC0_SEQ2 16
#define IRQ_ADC0_SEQ3 17
#define IRQ_WDOG0 18
#define IRQ_TIMER0A 19
#define IRQ_TIMER0B 20
#define IRQ_TIMER1A 21
#define IRQ_TIMER1B 22
#define IRQ_TIMER2A 23
#define IRQ_TIMER2B 24
#define IRQ_ANALOG_COMP0 25
#define IRQ_ANALOG_COMP1 26
#define IRQ_RESERVED0 27
#define IRQ_SYS_CONTROL 28
#define IRQ_FLASH_MEM_CTRL 29
#define IRQ_GPIO_PORTF 30
#define IRQ_GPIO_PORTG 31
#define IRQ_RESERVED1 32
#define IRQ_UART2 33
#define IRQ_RESERVED2 34
#define IRQ_TIMER3A 35
#define IRQ_TIMER3B 36
#define IRQ_I2C1 37
#define IRQ_QEI1 38
#define IRQ_RESERVED3 39
#define IRQ_RESERVED4 40
#define IRQ_RESERVED5 41
#define IRQ_ETH 42
#define IRQ_HIBERNATION 43
#ifndef _ASMLANGUAGE
#include <misc/util.h>
#include <drivers/rand32.h>
/* uart configuration settings */
#define CONFIG_UART_NUM_SYSTEM_PORTS 2
#define CONFIG_UART_NUM_EXTRA_PORTS 1
#define CONFIG_UART_NUM_PORTS \
(CONFIG_UART_NUM_SYSTEM_PORTS + CONFIG_UART_NUM_EXTRA_PORTS)
#define CONFIG_UART_CONSOLE_INDEX 0
#define CONFIG_UART_CONSOLE_BAUDRATE 115200
#define CONFIG_UART_CONSOLE_IRQ IRQ_UART0
#define CONFIG_UART_CONSOLE_INT_PRI 3
#define CONFIG_UART_CONSOLE_REGS PERIPH_ADDR_BASE_UART0
#define EXC_FROM_IRQ(irq) ((irq) + 16)
#define VECTOR_FROM_IRQ(irq) EXC_FROM_IRQ(irq)
#define VECTOR_ADDR(vector) ((uint32_t *)((int)vector << 2))
/*
* Device drivers utilize the macros PLB_BYTE_REG_WRITE() and
* PLB_BYTE_REG_READ() to access byte-wide registers on the processor
* local bus (PLB), as opposed to a PCI bus, for example. Boards are
* expected to provide implementations of these macros.
*/
/*******************************************************************************
 *
 * __plbByteRegWrite - write a byte-wide register on the processor local bus
 *
 * RETURNS: N/A
 */
static inline void __plbByteRegWrite(unsigned char data, unsigned char *pAddr)
{
	*pAddr = data;
}

/* Fully parenthesize the macro arguments: a cast binds tighter than
 * arithmetic operators, so without the parentheses an expression argument
 * such as `base + offset` would have only its first operand cast.
 */
#define PLB_BYTE_REG_WRITE(data, address) \
	__plbByteRegWrite((unsigned char)(data), (unsigned char *)(address))
/*******************************************************************************
 *
 * __plbByteRegRead - read a byte-wide register on the processor local bus
 *
 * RETURNS: the byte stored at <pAddr>
 */
static inline unsigned char __plbByteRegRead(unsigned char *pAddr)
{
	return *pAddr;
}

/* Parenthesize <address> so expression arguments are evaluated before the
 * pointer cast (a bare cast would bind only to the first operand).
 */
#define PLB_BYTE_REG_READ(address) __plbByteRegRead((unsigned char *)(address))
#endif /* !_ASMLANGUAGE */
#endif /* _BOARD__H_ */

View File

@ -0,0 +1,102 @@
# defs.objs - build system
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3) Neither the name of Wind River Systems nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
bsp_INCLUDE_DIR = $(strip \
${vBASE}/kernel/common/bsp \
${vBASE}/include/bsp \
${vBASE}/include \
${vBASE}/arch/${vARCH} \
${vBASE}/drivers \
${vBSP_BASE_DIR}/${vBSP} \
)
# initialization
bsp_init_SRC = $(strip \
arch/arm/bsp/CortexM/vector_table.s \
arch/arm/bsp/CortexM/reset.s \
arch/arm/bsp/CortexM/prep_c.c \
kernel/common/bsp/init.c \
kernel/common/bsp/ctors.c \
)
# peripherals
bsp_arch_SRC = $(strip \
arch/arm/bsp/CortexM/scs.c \
arch/arm/bsp/CortexM/scb.c \
arch/arm/bsp/CortexM/nmi.c \
)
bsp_drivers_SRC_GDB_INFO_y = arch/${vARCH}/timer/systick_gdb.s
bsp_drivers_SRC = $(strip \
arch/${vARCH}/timer/systick.c \
drivers/serial/stellarisUartDrv.c \
drivers/console/uart_console.c \
${bsp_drivers_SRC_GDB_INFO_${CONFIG_GDB_INFO}} \
)
# configurable
bsp_module_SRC_MICROKERNEL_y = $(strip \
)
bsp_modules_SRC = $(strip \
${bsp_module_SRC_MICROKERNEL_${CONFIG_MICROKERNEL}} \
${bsp_module_SRC_RUNTIME_NMI_${CONFIG_RUNTIME_NMI}} \
)
# miscellaneous needed by kernel
bsp_misc_SRC = $(strip \
arch/arm/bsp/sysFatalErrorHandler.c \
arch/arm/bsp/rand32.c \
)
bsp_kernel_SRC = $(strip \
${bsp_init_SRC} \
${bsp_arch_SRC} \
${bsp_modules_SRC} \
${bsp_misc_SRC} \
)
bsp_SRC_IRQ_VECTOR_TABLE_y = ${vBSP_BASE_DIR}/${vBSP}/irq_vector_table.c
bsp_SRC_SW_ISR_TABLE_y = ${vBSP_BASE_DIR}/${vBSP}/sw_isr_table.c
bsp_SRC = $(strip \
${vBSP_BASE_DIR}/${vBSP}/system.c \
${vBSP_BASE_DIR}/${vBSP}/nmi_on_reset.s \
${vBSP_BASE_DIR}/${vBSP}/scp.c \
${bsp_SRC_IRQ_VECTOR_TABLE_${CONFIG_IRQ_VECTOR_TABLE_BSP}} \
${bsp_SRC_SW_ISR_TABLE_${CONFIG_SW_ISR_TABLE_BSP}} \
$(foreach src,${bsp_kernel_SRC},$(addprefix ${vBASE}/,${src})) \
$(foreach src,${bsp_drivers_SRC},$(addprefix ${vBASE}/,${src})) \
)
KLIBS += bsp

View File

@ -0,0 +1,67 @@
/* irq_vector_table.c - IRQ part of vector table */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This file contains the IRQ part of the vector table. It is meant to be used
for one of two cases:
a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it
binds _IsrWrapper() to all the IRQ entries in the vector table.
b) When the BSP is written so that device ISRs are installed directly in the
vector table, they are enumerated here.
*/
#include <toolchain.h>
#include <sections.h>
extern void _IsrWrapper(void);
typedef void (*vth)(void); /* Vector Table Handler */
#if defined(CONFIG_SW_ISR_TABLE)
/*
 * Software-managed ISRs: every IRQ slot vectors to _IsrWrapper().
 * [0 ... N-1] is the GNU C case-range designated-initializer extension;
 * __irq_vector_table places the table in its dedicated linker section.
 */
vth __irq_vector_table _IrqVectorTable[CONFIG_NUM_IRQS] = {
[0 ...(CONFIG_NUM_IRQS - 1)] = _IsrWrapper
};
#elif !defined(CONFIG_IRQ_VECTOR_TABLE_CUSTOM)
extern void _SpuriousIRQ(void);
/* placeholders: replace entries with real ISRs when installing them
 * directly in the vector table */
vth __irq_vector_table _IrqVectorTable[CONFIG_NUM_IRQS] = {
[0 ...(CONFIG_NUM_IRQS - 1)] = _SpuriousIRQ
};
#endif /* CONFIG_SW_ISR_TABLE */

View File

@ -0,0 +1,38 @@
/* linker.cmd - Linker command/script file */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#define FLASH_START 0x00000000
#define SRAM_START 0x20000000
#define FLASH_SIZE 256K
#define SRAM_SIZE 64K
#include <nanokernel/arm/CortexM/scripts/linker.cmd>

View File

@ -0,0 +1,53 @@
/* nmi_on_reset.s - default basic NMI handler before the kernel is up */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Provide a default handler for NMI before the system is up. The default action
is to hard hang, sleeping.
This might be preferable than rebooting to help debugging, or because
rebooting might trigger the exact same problem over and over.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
GTEXT(_SysNmiOnReset)
SECTION_FUNC(TEXT, _SysNmiOnReset)
wfi
b _SysNmiOnReset

View File

@ -0,0 +1,68 @@
/* scp.c - TI LM3S6965 System Control Peripherals interface */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Library for controlling target-specific devices present in the 0x400fe000
peripherals memory region.
Currently, only enabling the main OSC with default value is implemented.
*/
#include <stdint.h>
#include <toolchain.h>
#include <sections.h>
#include "scp.h"
/* System Control Peripheral (SCP) Registers */
volatile struct __scp __scp_section __scp;
/*******************************************************************************
*
* _ScpMainOscEnable - enable main oscillator with default frequency of 6MHz
*
* RETURNS: N/A
*/
void _ScpMainOscEnable(void)
{
union __rcc reg;
/* Read-modify-write RCC through a local copy so the volatile register
 * is read once and written once, with the final value. */
reg.value = __scp.clock.rcc.value;
reg.bit.moscdis = 0; /* clear "main oscillator disable" */
reg.bit.oscsrc = _SCP_OSC_SOURCE_MAIN;
reg.bit.xtal = _SCP_CRYSTAL_6MHZ; /* 6 MHz crystal (the reset value) */
__scp.clock.rcc.value = reg.value;
}

179
arch/arm/ti_lm3s6965/scp.h Normal file
View File

@ -0,0 +1,179 @@
/* scp.h - TI LM3S6965 System Control Peripherals interface */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module defines the System Control Peripheral Registers for TI LM3S6965
processor. The registers defined are in region 0x400fe000.
System Control 0x400fe000
These modules are not defined:
Hibernation Module 0x400fc000
Internal Memory 0x400fd000
Hibernation Module 0x400fc000
The registers and bit field names are taken from the 'Stellaris LM3S6965
Microcontroller DATA SHEET (DS-LM3S6965-12746.2515) revision H' document,
section 5.4/5.5, pp .184-200.
*/
#ifndef _SCP_H_
#define _SCP_H_
#include <stdint.h>
#define _SCP_OSC_SOURCE_MAIN 0
#define _SCP_OSC_SOURCE_INTERNAL 1
#define _SCP_OSC_SOURCE_INTERNAL_DIV4 2
#define _SCP_OSC_SOURCE_INTERNAL_20KHZ 3
#define _SCP_OSC_SOURCE_EXTERNAL_32KHZ 7 /* Valid with RCC2 only */
#define _SCP_CRYSTAL_1MHZ_NOPLL 0
#define _SCP_CRYSTAL_1_8432MHZ_NOPLL 1
#define _SCP_CRYSTAL_2MHZ_NOPLL 2
#define _SCP_CRYSTAL_2_4576MHZ_NOPLL 3
#define _SCP_CRYSTAL_3_579545MHZ 4
#define _SCP_CRYSTAL_3_6864MHZ 5
#define _SCP_CRYSTAL_4MHZ 6
#define _SCP_CRYSTAL_4_0964MHZ 7
#define _SCP_CRYSTAL_4_9152MHZ 8
#define _SCP_CRYSTAL_5MHZ 9
#define _SCP_CRYSTAL_5_12MHZ 10
#define _SCP_CRYSTAL_6MHZ 11 /* reset value */
#define _SCP_CRYSTAL_6_144MHZ 12
#define _SCP_CRYSTAL_7_3728MHZ 13
#define _SCP_CRYSTAL_8MHZ 14
#define _SCP_CRYSTAL_8_192MHZ 15
/*
 * RCC (Run-mode Clock Configuration) register, offset 0x060 from the SCP
 * base. Bit positions below follow from the cumulative field widths and
 * match the rsvd__ field names (e.g. rsvd__2_3 covers bits 2-3).
 */
union __rcc {
uint32_t value;
struct {
uint32_t moscdis : 1 __attribute__((packed)); /* bit 0 */
uint32_t ioscdis : 1 __attribute__((packed)); /* bit 1 */
uint32_t rsvd__2_3 : 2 __attribute__((packed));
uint32_t oscsrc : 2 __attribute__((packed)); /* bits 4-5 */
uint32_t xtal : 4 __attribute__((packed)); /* bits 6-9 */
uint32_t rsvd__10 : 1 __attribute__((packed));
uint32_t bypass : 1 __attribute__((packed)); /* bit 11 */
uint32_t rsvd__12 : 1 __attribute__((packed));
uint32_t pwrdn : 1 __attribute__((packed)); /* bit 13 */
uint32_t rsvd__14_16 : 3 __attribute__((packed));
uint32_t pwmdiv : 3 __attribute__((packed)); /* bits 17-19: 2**(n+1) */
uint32_t usepwmdiv : 1 __attribute__((packed)); /* bit 20 */
uint32_t rsvd__21 : 1 __attribute__((packed));
uint32_t usesysdiv : 1 __attribute__((packed)); /* bit 22 */
uint32_t sysdiv : 4 __attribute__((packed)); /* bits 23-26 */
uint32_t acg : 1 __attribute__((packed)); /* bit 27 */
uint32_t rsvd__28_31 : 4 __attribute__((packed));
} bit;
};
/*
 * RCC2 (Run-mode Clock Configuration 2) register, offset 0x070 from the
 * SCP base. Field widths sum to 32 bits.
 *
 * NOTE(review): these bit-fields mix uint8_t/uint16_t base types; the
 * layout of packed bit-fields across differing base types is
 * compiler-specific -- confirm the mapping before building with a
 * toolchain other than the one this tree targets.
 */
union __rcc2 {
uint32_t value;
struct {
uint8_t rsvd__0_3 : 4 __attribute__((packed));
uint8_t oscsrc2 : 3 __attribute__((packed)); /* bits 4-6 */
uint16_t rsvd__7_10 : 4 __attribute__((packed));
uint8_t bypass2 : 1 __attribute__((packed)); /* bit 11 */
uint8_t rsvd__12 : 1 __attribute__((packed));
uint8_t pwrdn2 : 1 __attribute__((packed)); /* bit 13 */
uint16_t rsvd__14_22 : 9 __attribute__((packed));
uint16_t sysdiv2 : 6 __attribute__((packed)); /* bits 23-28 */
uint8_t rsvd__29_30 : 2 __attribute__((packed));
uint8_t usercc2 : 1 __attribute__((packed)); /* bit 31 */
} bit;
};
/*
 * System Control Peripheral register map (base 0x400fe000). The offset,
 * access-type (RO/RW/RW1C) and purpose annotations are per register;
 * the rsvd__ arrays pad the gaps between documented registers so field
 * offsets match the hardware.
 */
struct __scp {
uint32_t did0; /* 0x000 RO Device ID*/
uint32_t did1; /* 0x004 RO Device ID*/
uint32_t dc0; /* 0x008 RO Device Capabilities */
uint32_t dc1; /* 0x00c RO Device Capabilities */
uint32_t dc2; /* 0x010 RO Device Capabilities */
uint32_t dc3; /* 0x014 RO Device Capabilities */
uint32_t dc4; /* 0x018 RO Device capabilities */
uint32_t rsvd__01c_02f[(0x30 - 0x1c) / 4];
uint32_t pborctl; /* 0x030 RW Brown-Out Reset ConTroL */
uint32_t ldopctl; /* 0x034 RW LDO Power ConTroL */
uint32_t rsvd__038_03f[(0x40 - 0x38) / 4];
uint32_t srcr0; /* 0x040 RW Software Reset Control Register */
uint32_t srcr1; /* 0x044 RW Software Reset Control Register */
uint32_t srcr2; /* 0x048 RW Software Reset Control Register */
uint32_t rsvd__04c_04f;
uint32_t ris; /* 0x050 RO Raw Interrupt Status */
uint32_t imc; /* 0x054 RW Interrupt Mask Control */
uint32_t misc; /* 0x058 RW1C Masked Int. Status & Clear */
uint32_t resc; /* 0x05C RW RESet Cause */
/* clock-control registers, grouped: 0x060-0x147 */
struct {
union __rcc rcc; /* 0x060 RW Run-mode Clock Configuration */
uint32_t pllcfg; /* 0x064 RW xtal-to-pll translation */
uint32_t rsvd__068_06f[(0x70 - 0x068) / 4];
union __rcc2 rcc2; /* 0x070 RW Run-mode Clock Configuration */
uint32_t rsvd__074_0ff[(0x100 - 0x074) / 4];
uint32_t rcgc0; /* 0x100 RW Run-mode Clock Gating */
uint32_t rcgc1; /* 0x104 RW Run-mode Clock Gating */
uint32_t rcgc2; /* 0x108 RW Run-mode Clock Gating */
uint32_t rsvd__10c_10f;
uint32_t scgc0; /* 0x110 RW Sleep-mode Clock Gating */
uint32_t scgc1; /* 0x114 RW Sleep-mode Clock Gating */
uint32_t scgc2; /* 0x118 RW Sleep-mode Clock Gating */
uint32_t rsvd__11c_11f;
uint32_t dcgc0; /* 0x120 RW Deep sleep mode Clock Gating */
uint32_t dcgc1; /* 0x124 RW Deep sleep mode Clock Gating */
uint32_t dcgc2; /* 0x128 RW Deep sleep mode Clock Gating */
uint32_t rsvd__12c_143[(0x144 - 0x12c) / 4];
uint32_t
dslpclkcfg; /* 0x144 RW Deep SLeeP CLocK ConFiGuration
*/
} clock;
};
/* Single instance mapped over the SCP region; defined in scp.c. */
extern volatile struct __scp __scp;
#endif /* _SCP_H_ */

View File

@ -0,0 +1,66 @@
/* sw_isr_table.c - Software ISR table for ti_lm3s6965 BSP */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This contains the ISR table meant to be used for ISRs that take a parameter.
It is also used when ISRs are to be connected at runtime, and in this case
provides a table that is filled with _SpuriousIRQ bindings.
*/
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
extern void _SpuriousIRQ(void *arg);
#if defined(CONFIG_SW_ISR_TABLE_DYNAMIC)
/*
 * Dynamic ISR table: every entry is initially bound to _SpuriousIRQ with a
 * recognizable poison argument (0xABAD1DEA), so an IRQ taken before a real
 * handler is installed at runtime is trapped instead of jumping to garbage.
 * The "[first ... last]" range designator is a GNU C extension.
 */
_IsrTableEntry_t __isr_table_section _IsrTable[CONFIG_NUM_IRQS] = {
	[0 ...(CONFIG_NUM_IRQS - 1)].arg = (void *)0xABAD1DEA,
	[0 ...(CONFIG_NUM_IRQS - 1)].isr = _SpuriousIRQ
};
#else
#if defined(CONFIG_SW_ISR_TABLE)
#if !defined(CONFIG_SW_ISR_TABLE_STATIC_CUSTOM)
/*
 * Static ISR table variant: same spurious-handler defaults as above.
 * placeholders: fill with real ISRs
 */
_IsrTableEntry_t __isr_table_section _IsrTable[CONFIG_NUM_IRQS] = {
	[0 ...(CONFIG_NUM_IRQS - 1)].arg = (void *)0xABAD1DEA,
	[0 ...(CONFIG_NUM_IRQS - 1)].isr = _SpuriousIRQ
};
#endif
#endif
#endif

View File

@ -0,0 +1,135 @@
/* system.c - system/hardware module for ti_lm3s6965 BSP */
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides routines to initialize and support board-level hardware
for the ti_lm3s6965 BSP.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <cputype.h>
#include <board.h>
#include <drivers/uart.h>
#if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE)
#define DO_CONSOLE_INIT
#endif
#define RCGC1 *((volatile uint32_t *)0x400FE104)
#define RCGC1_UART0_EN 0x00000001
#define RCGC1_UART1_EN 0x00000002
#define RCGC1_UART2_EN 0x00000004
#ifdef CONFIG_RUNTIME_NMI
extern void _NmiInit(void);
#define NMI_INIT() _NmiInit()
#else
#define NMI_INIT()
#endif
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* uartGenericInfoInit - initialize initialization information for one UART
*
* RETURNS: N/A
*
*/
/* Fill in the UART settings shared by every port on this board: console
 * baud rate from kconfig and the default internal-oscillator clock rate.
 */
static inline void uartGenericInfoInit(struct uart_init_info *pInfo)
{
	pInfo->baud_rate = CONFIG_UART_CONSOLE_BAUDRATE;
	pInfo->sys_clk_freq = SYSCLK_DEFAULT_IOSC_HZ;
}
#endif /* DO_CONSOLE_INIT */
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* Enables the UART0 clock, fills in a uart_init_info block (polling mode
* only, but all fields are initialized), then hands it to the UART driver
* and the UART console layer.
*
* RETURNS: N/A
*
*/
#include <console/uart_console.h>
static void consoleInit(void)
{
	struct uart_init_info info;
	/* enable clock to UART0 */
	RCGC1 |= RCGC1_UART0_EN;
	/* board-common settings: clock frequency and baud rate */
	uartGenericInfoInit(&info);
	info.regs = CONFIG_UART_CONSOLE_REGS;
	/* Only supported in polling mode, but init all info fields */
	info.irq = CONFIG_UART_CONSOLE_IRQ;
	info.int_pri = CONFIG_UART_CONSOLE_INT_PRI;
	uart_init(CONFIG_UART_CONSOLE_INDEX, &info);
	uartConsoleInit();
}
#else
/* console disabled: compile away to a no-op statement */
#define consoleInit() \
	do {/* nothing */ \
	} while ((0))
#endif /* DO_CONSOLE_INIT */
/*******************************************************************************
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the debug console (when configured) and install the default
* NMI handler (when CONFIG_RUNTIME_NMI is enabled).  Both expand to no-ops
* when the corresponding kernel option is off.
*
* RETURNS: N/A
*/
void _InitHardware(void)
{
	consoleInit(); /* NOP if not needed */
	NMI_INIT();    /* install default handler that simply resets the CPU
			* if configured in the kernel, NOP otherwise */
}

726
arch/arm/timer/systick.c Normal file
View File

@ -0,0 +1,726 @@
/* systick.c - ARM systick device driver */
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the VxMicro's CORTEX-M3 ARM's systick device driver.
It provides the standard VxMicro "system clock driver" interfaces.
The driver utilizes systick to provide kernel ticks.
\INTERNAL IMPLEMENTATION DETAILS
The systick device provides a 24-bit clear-on-write, decrementing,
wrap-on-zero counter. Only edge sensitive triggered interrupt is supported.
\INTERNAL PACKAGING DETAILS
The systick device driver is part of the microkernel in both a monolithic kernel
system and a split kernel system; it is not included in the nanokernel portion
of a split kernel.
The device driver is also part of a nanokernel-only system, but omits more
complex capabilities (such as tickless idle support) that are only used in
conjunction with a microkernel.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
#include <clock_vars.h>
#include <drivers/system_timer.h>
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
#include <cputype.h>
extern struct nano_stack K_Args;
#endif /* CONFIG_MICROKERNEL */
/* running total of timer count */
static uint32_t accumulatedCount = 0;
/*
* A board support package's board.h header must provide definitions for the
* following constants:
*
* CONFIG_SYSTICK_CLOCK_FREQ
*
* This is the sysTick input clock frequency.
*/
#include <board.h>
/* defines */
/*
* When GDB_INFO is enabled, the handler installed in the vector table
* (__systick), can be found in systick_gdb.s. In this case, the handler
* in this file becomes _Systick() and will be called by __systick.
*/
#ifdef CONFIG_GDB_INFO
#define _TIMER_INT_HANDLER _real_timer_int_handler
#else
#define _TIMER_INT_HANDLER _timer_int_handler
#endif
#ifdef CONFIG_TICKLESS_IDLE
#define TIMER_MODE_PERIODIC 0 /* normal running mode */
#define TIMER_MODE_ONE_SHOT 1 /* emulated, since sysTick has 1 mode */
#define IDLE_NOT_TICKLESS 0 /* non-tickless idle mode */
#define IDLE_TICKLESS 1 /* tickless idle mode */
#endif /* CONFIG_TICKLESS_IDLE */
/* globals */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
extern uint32_t _HwIntToCHandlerLatency;
#endif
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
extern int32_t _NanoIdleValGet(void);
extern void _NanoIdleValClear(void);
extern void _SysPowerSaveIdleExit(int32_t ticks);
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
#ifdef CONFIG_TICKLESS_IDLE
extern int32_t _SysIdleElapsedTicks;
#endif /* CONFIG_TICKLESS_IDLE */
/* locals */
#ifdef CONFIG_TICKLESS_IDLE
static uint32_t __noinit defaultLoadVal; /* default count */
static uint32_t idleOrigCount = 0;
static uint32_t __noinit maxSysTicks;
static uint32_t idleOrigTicks = 0;
static uint32_t __noinit maxLoadValue;
static uint32_t __noinit timerIdleSkew;
static unsigned char timerMode = TIMER_MODE_PERIODIC;
static unsigned char idleMode = IDLE_NOT_TICKLESS;
#endif /* CONFIG_TICKLESS_IDLE */
#if defined(CONFIG_TICKLESS_IDLE) || \
defined(CONFIG_SYSTEM_TIMER_DISABLE)
/*******************************************************************************
*
* sysTickStop - stop the timer
*
* This routine disables the systick counter.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void sysTickStop(void)
{
union __stcsr reg;
/*
* Disable the counter and its interrupt while preserving the
* remaining bits.
*/
reg.val = __scs.systick.stcsr.val;
reg.bit.enable = 0;
reg.bit.tickint = 0;
__scs.systick.stcsr.val = reg.val;
}
#endif /* CONFIG_TICKLESS_IDLE || CONFIG_SYSTEM_TIMER_DISABLE */
#ifdef CONFIG_TICKLESS_IDLE
/*******************************************************************************
*
* sysTickStart - start the timer
*
* This routine enables the systick counter.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void sysTickStart(void)
{
union __stcsr reg;
/*
* Enable the counter, its interrupt and set the clock source to be
* the system clock while preserving the remaining bits.
*/
reg.val =
__scs.systick.stcsr.val; /* countflag is cleared by this read */
reg.bit.enable = 1;
reg.bit.tickint = 1;
reg.bit.clksource = 1;
__scs.systick.stcsr.val = reg.val;
}
/*******************************************************************************
*
* sysTickCurrentGet - get the current counter value
*
* This routine gets the value from the timer's current value register
* (stcvr). This value is the 'time' remaining to decrement before the timer
* triggers an interrupt (systick is a down counter).
*
* RETURNS: the current counter value
*
* \NOMANUAL
*/
static ALWAYS_INLINE uint32_t sysTickCurrentGet(void)
{
	return __scs.systick.stcvr;
}
/*******************************************************************************
*
* sysTickReloadGet - get the reload/countdown value
*
* This routine returns the value from the reload value register (strvr):
* the count the hardware restarts from each time it decrements to zero.
*
* RETURNS: the counter's initial count/wraparound value
*
* \NOMANUAL
*/
static ALWAYS_INLINE uint32_t sysTickReloadGet(void)
{
	return __scs.systick.strvr;
}
#endif /* CONFIG_TICKLESS_IDLE */
/*******************************************************************************
*
* sysTickReloadSet - set the reload/countdown value
*
* This routine sets value from which the timer will count down and also
* sets the timer's current value register to zero.
* Note that the value given is assumed to be valid (i.e., count < (1<<24)),
* since the systick counter is only 24 bits wide.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void sysTickReloadSet(
	uint32_t count /* count from which timer is to count down */
	)
{
	/*
	 * Write the reload value and clear the current value in preparation
	 * for enabling the timer.
	 * The countflag in the control/status register is also cleared by
	 * this operation.
	 */
	__scs.systick.strvr = count;
	__scs.systick.stcvr = 0; /* also clears the countflag */
}
/*******************************************************************************
*
* _TIMER_INT_HANDLER - system clock tick handler
*
* This routine handles the system clock tick interrupt.  In a microkernel
* system a TICK_EVENT event is pushed onto the microkernel stack; in a
* nanokernel-only system the tick count and nanokernel timer list are
* updated directly.  With tickless idle enabled it also converts a
* completed "one shot" idle period back into periodic ticking.
*
* The symbol for this routine is either _timer_int_handler (for normal
* system operation) or _real_timer_int_handler (when GDB_INFO is enabled).
*
* RETURNS: N/A
*
* \NOMANUAL
*/

void _TIMER_INT_HANDLER(void *unused)
{
	ARG_UNUSED(unused);

#ifdef CONFIG_INT_LATENCY_BENCHMARK
	/*
	 * Systick counts down from the reload value, so the number of cycles
	 * consumed between the hardware interrupt firing and entry into this
	 * C handler is (reload - current).  Track the minimum observed.
	 *
	 * NOTE(review): this block originally read __scs.systick.val and
	 * __scs.systick.reload -- field names used nowhere else in this
	 * driver.  Every other access uses stcvr (current value) and strvr
	 * (reload value), so those names are used here; the original could
	 * not have compiled with CONFIG_INT_LATENCY_BENCHMARK enabled.
	 */
	uint32_t value = __scs.systick.stcvr;
	uint32_t delta = __scs.systick.strvr - value;

	if (_HwIntToCHandlerLatency > delta) {
		/* keep the lowest value observed */
		_HwIntToCHandlerLatency = delta;
	}
#endif

#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
	int32_t numIdleTicks;

	/*
	 * All interrupts are disabled when handling idle wakeup.
	 * For tickless idle, this ensures that the calculation and
	 * programming of the device for the next timer deadline is not
	 * interrupted.  For non-tickless idle, this ensures that the
	 * clearing of the kernel idle state is not interrupted.
	 * In each case, _SysPowerSaveIdleExit is called with interrupts
	 * disabled.
	 */
	__asm__(" cpsid i"); /* PRIMASK = 1 */

#ifdef CONFIG_TICKLESS_IDLE
	/*
	 * If this a wakeup from a completed tickless idle or after
	 * _timer_idle_exit has processed a partial idle, return
	 * to the normal tick cycle.
	 */
	if (timerMode == TIMER_MODE_ONE_SHOT) {
		sysTickStop();
		sysTickReloadSet(defaultLoadVal);
		sysTickStart();
		timerMode = TIMER_MODE_PERIODIC;
	}

	/* set the number of elapsed ticks and announce them to the kernel */

	if (idleMode == IDLE_TICKLESS) {
		/* tickless idle completed without interruption */
		idleMode = IDLE_NOT_TICKLESS;
		_SysIdleElapsedTicks =
			idleOrigTicks + 1; /* actual # of idle ticks */
		nano_isr_stack_push(&K_Args, TICK_EVENT);
	} else {
		/*
		 * Increment the tick because _timer_idle_exit does not
		 * account for the tick due to the timer interrupt itself.
		 * Also, if not in tickless mode, _SysIdleElpasedTicks will be
		 * 0.
		 */
		_SysIdleElapsedTicks++;

		/*
		 * If we transition from 0 elapsed ticks to 1 we need to
		 * announce the tick event to the microkernel. Other cases
		 * will be covered by _timer_idle_exit.
		 */
		if (_SysIdleElapsedTicks == 1) {
			nano_isr_stack_push(&K_Args, TICK_EVENT);
		}
	}

	/* accumulate total counter value */
	accumulatedCount += defaultLoadVal * _SysIdleElapsedTicks;
#else /* !CONFIG_TICKLESS_IDLE */
	/*
	 * No tickless idle:
	 * Update the total tick count and announce this tick to the kernel.
	 */
	accumulatedCount += sys_clock_hw_cycles_per_tick;
	nano_isr_stack_push(&K_Args, TICK_EVENT);
#endif /* CONFIG_TICKLESS_IDLE */

	numIdleTicks = _NanoIdleValGet(); /* get # of idle ticks requested */

	if (numIdleTicks) {
		_NanoIdleValClear(); /* clear kernel idle setting */

		/*
		 * Complete idle processing.
		 * Note that for tickless idle, nothing will be done in
		 * _timer_idle_exit.
		 */
		_SysPowerSaveIdleExit(numIdleTicks);
	}

	__asm__(" cpsie i"); /* re-enable interrupts (PRIMASK = 0) */

#else /* !CONFIG_ADVANCED_POWER_MANAGEMENT */

	/* accumulate total counter value */
	accumulatedCount += sys_clock_hw_cycles_per_tick;

#ifdef CONFIG_MICROKERNEL
	/*
	 * one more tick has occurred -- don't need to do anything special
	 * since timer is already configured to interrupt on the following
	 * tick
	 */
	nano_isr_stack_push(&K_Args, TICK_EVENT);
#else
	/*
	 * Nanokernel-only: bump the tick count and expire any nanokernel
	 * timers whose delay has elapsed, returning each expired timer's
	 * user data through its LIFO.
	 */
	nanoTicks++;

	if (nanoTimerList) {
		nanoTimerList->ticks--;

		while (nanoTimerList && (!nanoTimerList->ticks)) {
			struct nano_timer *expired = nanoTimerList;
			struct nano_lifo *chan = &expired->lifo;
			nanoTimerList = expired->link;
			nano_isr_lifo_put(chan, expired->userData);
		}
	}
#endif /* CONFIG_MICROKERNEL */
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */

	extern void _ExcExit(void);
	_ExcExit();
}
#ifdef CONFIG_TICKLESS_IDLE
/*******************************************************************************
*
* sysTickTicklessIdleInit - initialize the tickless idle feature
*
* This routine initializes the tickless idle feature by calculating the
* necessary hardware-specific parameters.
*
* Note that the maximum number of ticks that can elapse during a "tickless idle"
* is limited by <defaultLoadVal>. The larger the value (the lower the
* tick frequency), the fewer elapsed ticks during a "tickless idle".
* Conversely, the smaller the value (the higher the tick frequency), the
* more elapsed ticks during a "tickless idle".
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void sysTickTicklessIdleInit(void)
{
	/* enable counter, disable interrupt and set clock src to system clock
	 */
	union __stcsr stcsr = {.bit = {1, 0, 1, 0, 0, 0}};
	volatile uint32_t dummy; /* used to help determine the 'skew time' */
	/* store the default reload value (which has already been set) */
	defaultLoadVal = sysTickReloadGet();
	/* calculate the max number of ticks with this 24-bit H/W counter */
	maxSysTicks = 0x00ffffff / defaultLoadVal;
	/* determine the associated load value */
	maxLoadValue = maxSysTicks * defaultLoadVal;
	/*
	 * Calculate the skew from switching the timer in and out of idle mode.
	 * The following sequence is emulated:
	 * 1. Stop the timer.
	 * 2. Read the current counter value.
	 * 3. Calculate the new/remaining counter reload value.
	 * 4. Load the new counter value.
	 * 5. Set the timer mode to periodic/one-shot.
	 * 6. Start the timer.
	 *
	 * The timer must be running for this to work, so enable the
	 * systick counter without generating interrupts, using the processor
	 *clock.
	 * Note that the reload value has already been set by the caller.
	 *
	 * Each register write below deliberately mimics only the *cost* of
	 * the corresponding real operation: the computed values are thrown
	 * away, all we want is the cycle count the sequence consumes.
	 */
	__scs.systick.stcsr.val |= stcsr.val;
	__asm__(" isb"); /* ensure the timer is started before reading */
	timerIdleSkew = sysTickCurrentGet(); /* start of skew time */
	__scs.systick.stcsr.val |= stcsr.val; /* normally sysTickStop() */
	dummy = sysTickCurrentGet(); /* emulate sysTickReloadSet() */
	/* emulate calculation of the new counter reload value */
	if ((dummy == 1) || (dummy == defaultLoadVal)) {
		dummy = maxSysTicks - 1;
		dummy += maxLoadValue - defaultLoadVal;
	} else {
		dummy = dummy - 1;
		dummy += dummy * defaultLoadVal;
	}
	/* _sysTickStart() without interrupts */
	__scs.systick.stcsr.val |= stcsr.val;
	timerMode = TIMER_MODE_PERIODIC;
	/* skew time calculation for down counter (assumes no rollover) */
	timerIdleSkew -= sysTickCurrentGet();
	/* restore the previous sysTick state */
	sysTickStop();
	sysTickReloadSet(defaultLoadVal);
}
/*******************************************************************************
*
* _timer_idle_enter - Place the system timer into idle state
*
* Re-program the timer to enter into the idle state for the given number of
* ticks. It is set to a "one shot" mode where it will fire in the number of
* ticks supplied or the maximum number of ticks that can be programmed into
* hardware. A value of -1 will result in the maximum number of ticks.
*
* RETURNS: N/A
*/
void _timer_idle_enter(int32_t ticks /* system ticks */
		       )
{
	sysTickStop();
	/*
	 * We're being asked to have the timer fire in "ticks" from now. To
	 * maintain accuracy we must account for the remaining time left in the
	 * timer. So we read the count out of it and add it to the requested
	 * time out.  The skew measured at init time is subtracted to
	 * compensate for the stop/reprogram/start sequence itself.
	 */
	idleOrigCount = sysTickCurrentGet() - timerIdleSkew;
	if ((ticks == -1) || (ticks > maxSysTicks)) {
		/*
		 * We've been asked to fire the timer so far in the future that
		 * the required count value would not fit in the 24-bit reload
		 * register.
		 * Instead, we program for the maximum programmable interval
		 * minus one system tick to prevent overflow when the left
		 * over count read earlier is added.
		 */
		idleOrigCount += maxLoadValue - defaultLoadVal;
		idleOrigTicks = maxSysTicks - 1;
	} else {
		/* leave one tick of buffer to have to time react when coming
		 * back */
		idleOrigTicks = ticks - 1;
		idleOrigCount += idleOrigTicks * defaultLoadVal;
	}
	/*
	 * Set timer to virtual "one shot" mode - sysTick does not have multiple
	 * modes, so the reload value is simply changed.
	 */
	timerMode = TIMER_MODE_ONE_SHOT;
	idleMode = IDLE_TICKLESS;
	sysTickReloadSet(idleOrigCount);
	sysTickStart();
}
/*******************************************************************************
*
* _timer_idle_exit - handling of tickless idle when interrupted
*
* The routine, called by _SysPowerSaveIdleExit, is responsible for taking
* the timer out of idle mode and generating an interrupt at the next
* tick interval. It is expected that interrupts have been disabled.
*
* Note that in this routine, _SysIdleElapsedTicks must be zero because the
* ticker has done its work and consumed all the ticks. This has to be true
* otherwise idle mode wouldn't have been entered in the first place.
*
* RETURNS: N/A
*/
void _timer_idle_exit(void)
{
	uint32_t count; /* timer's current count register value */
	if (timerMode == TIMER_MODE_PERIODIC) {
		/*
		 * The timer interrupt handler is handling a completed tickless
		 * idle or this has been called by mistake; there's nothing to
		 * do here.
		 */
		return;
	}
	sysTickStop();
	/* timer is in idle mode, adjust the ticks expired */
	count = sysTickCurrentGet();
	if ((count == 0) || (__scs.systick.stcsr.bit.countflag)) {
		/*
		 * The timer expired and/or wrapped around. Re-set the timer to
		 * its default value and mode.
		 */
		sysTickReloadSet(defaultLoadVal);
		timerMode = TIMER_MODE_PERIODIC;
		/*
		 * Announce elapsed ticks to the microkernel. Note we are
		 * guaranteed that the timer ISR will execute before the tick
		 * event is serviced, so _SysIdleElapsedTicks is adjusted to
		 * account for it.
		 */
		_SysIdleElapsedTicks = idleOrigTicks - 1;
		nano_isr_stack_push(&K_Args, TICK_EVENT);
	} else {
		uint32_t elapsed;   /* elapsed "counter time" */
		uint32_t remaining; /* remaining "counter time" */
		elapsed = idleOrigCount - count;
		remaining = elapsed % defaultLoadVal;
		/* ensure that the timer will interrupt at the next tick */
		if (remaining == 0) {
			/*
			 * Idle was interrupted on a tick boundary. Re-set the
			 * timer to its default value and mode.
			 */
			sysTickReloadSet(defaultLoadVal);
			timerMode = TIMER_MODE_PERIODIC;
		} else if (count > remaining) {
			/*
			 * There is less time remaining to the next tick
			 * boundary than time left for idle. Leave in "one
			 * shot" mode.
			 */
			sysTickReloadSet(remaining);
		}
		_SysIdleElapsedTicks = elapsed / defaultLoadVal;
		if (_SysIdleElapsedTicks) {
			/* Announce elapsed ticks to the microkernel */
			nano_isr_stack_push(&K_Args, TICK_EVENT);
		}
	}
	idleMode = IDLE_NOT_TICKLESS;
	sysTickStart();
}
#endif /* CONFIG_TICKLESS_IDLE */
/*******************************************************************************
*
* timer_driver - initialize and enable the system clock
*
* This routine is used to program the systick to deliver interrupts at the
* rate specified via the 'sys_clock_us_per_tick' global variable.
*
* RETURNS: N/A
*/
void timer_driver(int priority /* priority parameter is ignored by this driver
				*/
		  )
{
	/* enable counter, interrupt and set clock src to system clock */
	union __stcsr stcsr = {.bit = {1, 1, 1, 0, 0, 0}};
	ARG_UNUSED(priority);
	/*
	 * Determine the reload value to achieve the configured tick rate.
	 */
	/* systick supports 24-bit H/W counter */
	__ASSERT(sys_clock_hw_cycles_per_tick <= (1 << 24),
		 "sys_clock_hw_cycles_per_tick too large");
	/* counter counts down from N-1 to 0, so reload with cycles - 1 */
	sysTickReloadSet(sys_clock_hw_cycles_per_tick - 1);
#ifdef CONFIG_TICKLESS_IDLE
	/* calculate hardware-specific parameters for tickless idle */
	sysTickTicklessIdleInit();
#endif /* CONFIG_TICKLESS_IDLE */
#ifdef CONFIG_MICROKERNEL
	/* specify the kernel routine that will handle the TICK_EVENT event */
	task_event_set_handler(TICK_EVENT, K_ticker);
#endif /* CONFIG_MICROKERNEL */
	/* set exception priority, then start the counter via one reg write */
	_ScbExcPrioSet(_EXC_SYSTICK, _EXC_IRQ_DEFAULT_PRIO);
	__scs.systick.stcsr.val = stcsr.val;
}
/*******************************************************************************
*
* timer_read - read the BSP timer hardware
*
* This routine returns the current time in terms of timer hardware clock cycles.
* Some VxMicro facilities (e.g. benchmarking code) directly call timer_read()
* instead of utilizing the 'timer_read_fptr' function pointer.
*
* RETURNS: up counter of elapsed clock cycles
*
* \INTERNAL WARNING
* systick counter is a 24-bit down counter which is reset to "reload" value
* once it reaches 0.
*/
uint32_t timer_read(void)
{
	/*
	 * accumulatedCount holds cycles of completed ticks (updated by the
	 * ISR); (strvr - stcvr) is the cycles elapsed in the current period,
	 * since the hardware counts down from the reload value.
	 */
	return accumulatedCount + (__scs.systick.strvr - __scs.systick.stcvr);
}
#ifdef CONFIG_SYSTEM_TIMER_DISABLE
/*******************************************************************************
*
* timer_disable - stop announcing ticks into the kernel
*
* This routine disables the systick so that timer interrupts are no
* longer delivered.  The stop is performed with interrupts locked so the
* tick ISR cannot run mid-sequence.
*
* RETURNS: N/A
*/
void timer_disable(void)
{
	unsigned int key; /* interrupt lock level */

	/*
	 * Note: the original declared a 'union __stcsr reg' local that was
	 * never used (sysTickStop() performs the register update); the
	 * unused variable has been removed.
	 */
	key = irq_lock();
	/* disable the systick counter and systick interrupt */
	sysTickStop();
	irq_unlock(key);
}
#endif /* CONFIG_SYSTEM_TIMER_DISABLE */

View File

@ -0,0 +1,57 @@
/* systick_gdb.c - ARM systick handler stub for GDB debugging */
/*
* Copyright (c) 2014-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
GDB stub needed before the real systick handler runs to be able to display the
correct state of the thread that was interrupted.
*/
#ifdef CONFIG_GDB_INFO
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_timer_int_handler)
GTEXT(_real_timer_int_handler)
/*
 * When GDB support is built in, the vector table points here instead of at
 * the C handler.  _GDB_STUB_EXC_ENTRY records the interrupted thread's
 * state so GDB can display it, then control branches to the real handler
 * (_real_timer_int_handler, defined in systick.c).
 */
SECTION_FUNC(TEXT, _timer_int_handler)
    _GDB_STUB_EXC_ENTRY
    b _real_timer_int_handler
#endif /* CONFIG_GDB_INFO */

75
arch/x86/bsp/cache.c Normal file
View File

@ -0,0 +1,75 @@
/* cache.c - cache manipulation */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module contains functions for manipulation caches.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <misc/util.h>
#ifdef CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED
#if (CONFIG_CACHE_LINE_SIZE == 0)
#error Cannot use this implementation with a cache line size of 0
#endif
/*******************************************************************************
*
* _SysCacheFlush - flush a page to main memory
*
* No alignment is required for either <virt> or <size>, but since
* _SysCacheFlush() iterates on the cache lines, a cache line alignment for both
* is optimal.
*
* The cache line size is specified via the CONFIG_CACHE_LINE_SIZE kconfig
* option.
*
* RETURNS: N/A
*/
void _SysCacheFlush(VIRT_ADDR virt, size_t size)
{
	/* use the address type for the bound to avoid signed truncation */
	VIRT_ADDR end;

	size = ROUND_UP(size, CONFIG_CACHE_LINE_SIZE);
	end = virt + size;

	for (; virt < end; virt += CONFIG_CACHE_LINE_SIZE) {
		/*
		 * The "m" operand must dereference the target address:
		 * passing "m"(virt) would make clflush operate on the stack
		 * slot holding the variable itself, not the addressed line.
		 */
		__asm__ volatile("clflush %0;\n\t"
				 :
				 : "m"(*(volatile char *)virt));
	}

	/* serialize the flushes before returning */
	__asm__ volatile("mfence;\n\t");
}
#endif /* CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED */

64
arch/x86/bsp/cache_s.s Normal file
View File

@ -0,0 +1,64 @@
/* cache_s.s - cache manipulation */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module contains functions for manipulating caches.
*/
#ifndef CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED
#define _ASMLANGUAGE
#include <nanokernel/x86/asm.h>
/* externs (internal APIs) */
GTEXT(_SysCacheFlush)
/*******************************************************************************
*
* _SysCacheFlush - flush a page to main memory
*
* This implementation flushes the whole cache.
*
* C signature:
*
* void _SysCacheFlush (VIRT_ADDR virt, size_t size)
*
* Both parameters are ignored in this implementation.
*
* RETURNS: N/A
*/
/*
 * WBINVD-based fallback: both arguments (virt, size) are ignored because
 * WBINVD writes back every modified line and invalidates the internal
 * caches, so the requested range is flushed as a side effect of flushing
 * everything.
 */
SECTION_FUNC(TEXT, _SysCacheFlush)
wbinvd	/* write back and invalidate the entire cache hierarchy */
ret
#endif /* !CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED */

662
arch/x86/bsp/crt0.s Normal file
View File

@ -0,0 +1,662 @@
/* crt0.s - crt0 module for the IA-32 boards */
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module contains the initial code executed by the VxMicro ELF image
after having been loaded into RAM.
INTERNAL
The CONFIG_PROT_MODE_SWITCH configuration option was once required when
booting/loading an image into a Simics simulation environment, since the
invocation technique involved halting the BIOS code (via a breakpoint) before
it attempted to load a bootloader from disk, and then dumping the VxMicro ELF
image into memory. This meant that the IA-32 processor was executing in 16-bit
real mode (aka real mode) when __start() was called, thus requiring __start()
to perform the 16-bit to 32-bit transition code and other sundry PC-related
initialization steps (eg. enabling address line A20).
Later advances now allow a VxMicro image to be loaded into the Simics environment
after the system is already in 32-bit protected mode and address line A20 is
enabled. Consequently, the CONFIG_PROT_MODE_SWITCH configuration option
is no longer used by this BSP, and *all* booting scenarios for the BSP (e.g.
via GRUB or any other multiboot compliant bootloader) now assume that the
system is already in 32-bit protected mode and address line A20 is enabled.
However, the code associated with CONFIG_PROT_MODE_SWITCH has been left
in place in case future booting scenarios arise which require its use.
POMS memory model
When a kernel configured for POMS starts, it runs at a physical address that
is different from the logical address it has been linked at. So, symbols are not
accessible without doing some translation. It is best then to run at a logical
address ASAP to give access to symbols, especially to be able to run C
code. To do that, we start using segment selectors for CS/DS/SS that are mapping
the physical location to the logical location at which the kernel is linked, ie.
0x1000. Those segment selectors are 0x18 for CS and 0x20 for DS/SS.
Before these are used, the startup code uses physical address locations of
some key symbols (eg. the gdt) provided by the linker script.
*/
#define _ASMLANGUAGE
#include <nanokernel/x86/asm.h>
/* exports (private APIs) */
GTEXT(__start)
/* externs */
GTEXT(_Cstart)
GDATA(_IdtBaseAddress)
GDATA(_InterruptStack)
#if defined(CONFIG_SSE)
GDATA(_Mxcsr)
#endif /* CONFIG_SSE */
#if defined(CONFIG_BOOT_TIME_MEASUREMENT)
GDATA(__start_tsc)
#endif
#ifdef CONFIG_ADVANCED_IDLE
GDATA(_AdvIdleCheckSleep)
GDATA(_AdvIdleStart)
#endif /* CONFIG_ADVANCED_IDLE */
#ifdef CONFIG_PROT_MODE_SWITCH
/* cold start code in 16-bit real mode */
/*
* Switch 'gas' into 16-bit mode, i.e. use a default operand and
* address size of 16 bits since the processor is executing in
* 16-bit real mode. This prevents the assembler from inserting
* 0x66 or 0x67 instruction prefixes when using 16-bit data values
* or 16-bit pointers.
*/
.code16
.section ".xreset", "ax"
.balign 16,0x90
__start:
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
/*
* Record BootTime from start of Kernel.
* Store value temporarily in Register EDI & ESI and
* write to memory once memory access is allowed.
* That is, once the data segment register has been setup to access
* the .data/.rodata/.bss section of the linked image.
*/
rdtsc
mov %eax, %esi /* low value */
mov %edx, %edi /* high value */
#endif
/*
* Ensure interrupts are disabled. Interrupts are enabled when
* the first VxMicro thread context switch occurs.
*/
cli
/*
* BSPs that need to enable the A20 line to boot properly will enable this
* option.
*/
#ifdef CONFIG_BOOT_A20_ENABLE
/*
* Set the stack pointer to just before the start of the .text
* section. This needs to be performed before any BIOS invocations.
*
* Note that attempting to set the stack pointer to beyond the end of
* the static ELF image (indicated via the _end symbol) may result
* in pointer wrap-around for images with large BSS sections
* (since the processor is still in "real" mode and thus the stack
* pointer is only a 16 bits).
*/
movl $__start, %esp
/*
* Before switching to 32-bit protected mode, Gate-A20 must be
* enabled (legacy PC hardware issue).
*
* The classical method of enabling address line A20 via the 8042
* keyboard controller will not be performed. Instead, it will be
* assumed that the board has a relatively modern BIOS which supports
* the following int 15h function:
*
* - int 15h: ax=2400 -> disable A20
* - int 15h: ax=2401 -> enable A20
* If successful: CF clear, AH = 00h
* On error: CF set, AH = status
* Status: 01h keyboard controller is in secure mode
* 86h function not supported
* - int 15h: ax=2402 -> query status A20
* status (0: disabled, 1: enabled) is returned in AL
* - int 15h: ax=2403 -> query A20 support (kbd or port 92)
* status (bit 0: kbd, bit 1: port 92) is returned in BX
*
* Return values:
* If successful: carry flag (CF) clear, AH = 00h
* On error: carry flag (CF) set, AH = status
* Status: 01h keyboard controller is in secure mode
* 86h function not supported
*
* If the int15h method fails, then the system control port A
* (I/O port 0x92) method will be used.
*/
movw $0x2401, %ax /* ax=2401 -> enable A20 */
int $0x15
jnc A20Enabled /* CF clear -> BIOS succeeded; CF set -> fall through to I/O port 0x92 method */
/* try enabling Gate-A20 via system control port A (I/O port 0x92) */
movb $0x02, %al
outb %al, $0x92
xorl %ecx, %ecx
A20EnableWait:
/* poll until bit 1 of port 0x92 reads back set (or ECX wraps: bounded wait) */
inb $0x92, %al
andb $0x02, %al
loopz A20EnableWait
A20Enabled:
#endif /* CONFIG_BOOT_A20_ENABLE */
/* load Interrupt Descriptor and Global Descriptor tables (IDT & GDT) */
lgdt %cs:_GdtRom /* load 32-bit operand size GDT */
movl %cr0, %eax /* move CR0 to EAX */
orl $0x1, %eax /* set the Protection Enable (PE) bit */
andl $0x9fffffff, %eax /* Enable write-back caching */
movl %eax, %cr0 /* move EAX to CR0 */
jmp %cs:vstartProtMode /* flush prefetch input queue */
vstartProtMode:
/*
* At this point, the processor is executing in 32-bit protected
* mode, but the CS selector hasn't been loaded yet, i.e. the
* processor is still effectively executing instructions from a
* 16-bit code segment (thus the .code16 assembler directive still
* needs to be in effect.
*/
movw $0x10,%ax /* data segment selector (entry = 3) */
movw %ax,%ds /* set DS */
movw %ax,%es /* set ES */
movw %ax,%fs /* set FS */
movw %ax,%gs /* set GS */
movw %ax,%ss /* set SS */
/* this is 'ljmp $0x08:$vstart32BitCode': diab 5.9.1.0 does not encode
* it correctly */
.byte 0xea
.long vstart32BitCode
.byte 0x08
.byte 0x00
/*
* Switch 'gas' back to 32-bit data and address defaults. This
* can only be performed after loading the CS segment register
* (via an lcall instruction, for example) with a segment selector
* that describes a 32-bit code segment.
*/
.code32
SECTION_FUNC(TEXT_START, vstart32BitCode)
#else /* !CONFIG_PROT_MODE_SWITCH */
/* processor is executing in 32-bit protected mode */
.balign 16,0x90
SECTION_FUNC(TEXT_START, __start)
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
/*
* Record BootTime from start of Kernel.
* Store value temporarily in Register edi & esi and
* write to memory once memory access is allowed.
* That is, once the data segment register has been setup to access
* the .data/.rodata/.bss section of the linked image.
*/
rdtsc
mov %eax, %esi /* low value */
mov %edx, %edi /* high value */
#endif
/* Enable write-back caching by clearing the NW and CD bits */
movl %cr0, %eax
andl $0x9fffffff, %eax
movl %eax, %cr0
/*
* Ensure interrupts are disabled. Interrupts are enabled when
* the first VxMicro thread context switch occurs.
*/
cli
/*
* Although the bootloader sets up an Interrupt Descriptor Table (IDT)
* and a Global Descriptor Table (GDT), the specification encourages
* booted operating systems to setup their own IDT and GDT.
*/
lgdt _GdtRom /* load 32-bit operand size GDT */
#endif /* !CONFIG_PROT_MODE_SWITCH */
lidt _Idt /* load 32-bit operand size IDT */
#ifndef CONFIG_PROT_MODE_SWITCH
#ifdef CONFIG_BOOTLOADER_UNKNOWN
/*
* Where we do not do the protected mode switch and the
* bootloader is unknown, do not make the assumption that the segment
* registers are set correctly.
*
* This is a special case for the generic_pc BSP, which must work for
* multiple platforms (QEMU, SIMICS, generic PC board, etc.). With other
* BSPs the bootloader is well known so assumptions can be made.
*/
movw $0x10, %ax /* data segment selector (entry = 3) */
movw %ax, %ds /* set DS */
movw %ax, %es /* set ES */
movw %ax, %fs /* set FS */
movw %ax, %gs /* set GS */
movw %ax, %ss /* set SS */
/* this is 'ljmp $0x08:$_csSet': diab 5.9.1.0 does not encode
* it correctly */
.byte 0xea
.long __csSet
.byte 0x08
.byte 0x00
__csSet:
#endif /* CONFIG_BOOTLOADER_UNKNOWN */
#endif /* CONFIG_PROT_MODE_SWITCH */
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
/*
* Store rdtsc result from temporary registers ESI & EDI into memory.
*/
mov %esi, __start_tsc /* low value */
mov %edi, __start_tsc+4 /* high value */
#endif
#ifdef CONFIG_ADVANCED_IDLE
/*
* Set up the temporary stack to call the _AdvIdleCheckSleep routine
* We use the separate stack here in order to avoid the memory
* corruption if the system recovers from deep sleep
*/
movl $_AdvIdleStack, %esp
addl $CONFIG_ADV_IDLE_STACK_SIZE, %esp
/* align to stack boundary: ROUND_DOWN (%esp, 4) */
andl $0xfffffffc, %esp
/*
* Invoke _AdvIdleCheckSleep() routine that checks if we are restoring
* from deep sleep or not. The routine returns non-zero if the kernel
* is recovering from deep sleep and to 0 if a cold boot is needed. The
* kernel can skip floating point initialization, BSS initialization,
* and data initialization if recovering from deep sleep.
*/
call _AdvIdleCheckSleep
cmpl $0, %eax
jne memInitDone
#endif /* CONFIG_ADVANCED_IDLE */
#if !defined(CONFIG_FLOAT)
/*
* Force an #NM exception for floating point instructions
* since FP support hasn't been configured
*/
movl %cr0, %eax /* move CR0 to EAX */
orl $0x2e, %eax /* CR0[NE+TS+EM+MP]=1 */
movl %eax, %cr0 /* move EAX to CR0 */
#else
/*
* Permit use of x87 FPU instructions
*
* Note that all floating point exceptions are masked by default,
* and that _no_ handler for x87 FPU exceptions (#MF) is provided.
*/
movl %cr0, %eax /* move CR0 to EAX */
orl $0x22, %eax /* CR0[NE+MP]=1 */
andl $~0xc, %eax /* CR0[TS+EM]=0 */
movl %eax, %cr0 /* move EAX to CR0 */
fninit /* set x87 FPU to its default state */
#if defined(CONFIG_SSE)
/*
* Permit use of SSE instructions
*
* Note that all SSE exceptions are masked by default,
* and that _no_ handler for SSE exceptions (#XM) is provided.
*/
movl %cr4, %eax /* move CR4 to EAX */
orl $0x200, %eax /* CR4[OSFXSR] = 1 */
andl $~0x400, %eax /* CR4[OSXMMEXCPT] = 0 */
movl %eax, %cr4 /* move EAX to CR4 */
ldmxcsr _Mxcsr /* initialize SSE control/status reg */
#endif /* CONFIG_SSE */
#endif /* !CONFIG_FLOAT */
#ifdef CONFIG_XIP
	/*
	 * Copy the DATA section from its ROM (load) address to its RAM
	 * (link) address.  DATA is followed by the BSS section; since BSS
	 * is initialized after this copy, over-writing into the next
	 * section is safe.
	 * Note: __data_num_words is the size in 4-byte words, rounded up
	 * to the next 4 bytes.
	 * Note: the sections might not be 4 byte aligned.
	 */
	movl	$__data_ram_start, %edi	/* DATA in RAM (dest) */
	movl	$__data_rom_start, %esi	/* DATA in ROM (src) */
	movl	$__data_num_words, %ecx	/* size of DATA in quad bytes */

	/*
	 * BUG FIX: MOV does not modify EFLAGS, so the original bare
	 * 'je copyDataDone' branched on stale flags left over from the
	 * earlier CR0/CR4 manipulation.  Test ECX explicitly.
	 */
	cmpl	$0, %ecx		/* empty DATA section? */
	je	copyDataDone

	cld				/* 'rep movsl' below must copy forward;
					 * DF state at boot is not guaranteed */

#ifdef CONFIG_SSE
	/* copy 16 bytes at a time using XMM until < 16 bytes remain */
	movl	%ecx, %edx		/* save number of quad bytes */
	shrl	$2, %ecx		/* how many 16-byte chunks? */
	je	dataWords		/* ZF set by 'shrl' (valid here) */
dataDQ:
	movdqu	(%esi), %xmm0
	movdqu	%xmm0, (%edi)
	addl	$16, %esi
	addl	$16, %edi
	loop	dataDQ
dataWords:
	movl	%edx, %ecx		/* restore # quad bytes */
	andl	$0x3, %ecx		/* at most 3 quad bytes remain */
#endif /* CONFIG_SSE */

	rep
	movsl				/* copy data 4 bytes at a time */
copyDataDone:
#endif /* CONFIG_XIP */
/*
 * Clear BSS: bzero (__bss_start, __bss_num_words*4)
 *
 * It's assumed that BSS size will be a multiple of a long (4 bytes),
 * and aligned on a double word (32-bit) boundary
 */
#ifdef CONFIG_SSE
/* use XMM register to clear 16 bytes at a time */
pxor %xmm0, %xmm0 /* zero out xmm0 register */
movl $__bss_start, %edi /* load BSS start address */
movl $__bss_num_words, %ecx /* number of quad bytes in .bss */
movl %ecx, %edx /* make a copy of # quad bytes */
shrl $2, %ecx /* How many multiples of 16 byte ? */
je bssWords /* ZF set by 'shrl': skip XMM loop if < 16 bytes */
bssDQ:
movdqu %xmm0, (%edi) /* zero 16 bytes... */
addl $16, %edi
loop bssDQ
/* fall through to handle the remaining double words (32-bit chunks) */
bssWords:
xorl %eax, %eax /* fill memory with 0 */
movl %edx, %ecx /* move # quad bytes into ECX (for rep) */
andl $0x3, %ecx /* only need to zero at most 3 quad bytes */
cld /* 'rep stosl' must advance EDI upward */
rep
stosl /* zero memory per 4 bytes */
#else /* !CONFIG_SSE */
/* clear out BSS double words (32-bits at a time) */
xorl %eax, %eax /* fill memory with 0 */
movl $__bss_start, %edi /* load BSS start address */
movl $__bss_num_words, %ecx /* number of quad bytes */
cld /* 'rep stosl' must advance EDI upward */
rep
stosl /* zero memory per 4 bytes */
#endif /* CONFIG_SSE */
memInitDone:
/*
 * Set the stack pointer to the area used for the interrupt stack.
 * Note this stack is only used during the execution of __start() and
 * _Cstart(), i.e. only until the multi-tasking VxMicro kernel is
 * initialized. The dual-purposing of this area of memory is safe since
 * interrupts are disabled until the first context switch.
 */
movl $_InterruptStack, %esp
addl $CONFIG_ISR_STACK_SIZE, %esp /* ESP = top (highest address) of the area */
/* align to stack boundary: ROUND_DOWN (%esp, 4) */
andl $0xfffffffc, %esp
/* activate RAM-based Global Descriptor Table (GDT) */
lgdt %ds:_Gdt
#if defined (CONFIG_ADVANCED_IDLE)
/*
* Invoke _AdvIdleStart(_Cstart, _Gdt, _GlobalTss) by jumping to it.
* If it's a cold boot, this routine jumps to _Cstart and the normal
* kernel boot sequence continues; otherwise, it uses the TSS info
* saved in the GDT to resumes kernel processing at the point it was
* when the system went into deep sleep; that is, _AdvIdleFunc()
* completes and returns a non-zero value.
*/
pushl $_GlobalTss
pushl $_Gdt
pushl $_Cstart
call _AdvIdleStart
#else
/* Jump to C portion of VxMicro kernel initialization and never return */
jmp _Cstart
#endif /* CONFIG_ADVANCED_IDLE */
#if defined(CONFIG_SSE)
/* SSE control & status register initial value */
_Mxcsr:
.long 0x1f80 /* all SSE exceptions clear & masked */
#endif /* CONFIG_SSE */
/* Interrupt Descriptor Table (IDT) definition */
_Idt:
.word (CONFIG_IDT_NUM_VECTORS * 8) - 1 /* limit: size of IDT-1 */
/*
* Physical start address = 0. When executing natively, this
* will be placed at the same location as the interrupt vector table
* setup by the BIOS (or GRUB?).
*/
.long _IdtBaseAddress /* physical start address */
#ifdef CONFIG_BOOTLOADER_UNKNOWN
/* Multiboot header definition is needed for some bootloaders */
/*
* The multiboot header must be in the first 8 Kb of the kernel image
* (not including the ELF section header(s)) and be aligned on a
* 4 byte boundary.
*/
.balign 4,0x90
.long 0x1BADB002 /* multiboot magic number */
/*
* Flags = no bits are being set, specifically bit 16 is not being
* set since the supplied kernel image is an ELF file, and the
* multiboot loader shall use the information from the program and
* section header to load and boot the kernel image.
*/
.long 0x00000000
/*
* checksum = 32-bit unsigned value which, when added to the other
* magic fields (i.e. "magic" and "flags"), must have a 32-bit
* unsigned sum of zero.
*/
.long -(0x1BADB002 + 0)
#endif /* CONFIG_BOOTLOADER_UNKNOWN */
#ifdef CONFIG_PROT_MODE_SWITCH
/* Global Descriptor Table (GDT) definition */
.section ".xreset", "ax"
.align 8
#endif /* CONFIG_PROT_MODE_SWITCH */
_GdtRom:
.word _GdtRomEnd - _GdtRomEntries - 1 /* Limit on GDT */
.long _GdtRomEntries /* table address: _GdtRomEntries */
.balign 16,0x90
/*
* The following 3 GDT entries implement the so-called "basic
* flat model", i.e. a single code segment descriptor and a single
* data segment descriptor such that VxMicro has access to a continuous,
* unsegmented address space. Both segment descriptors map the entire
* linear address space (i.e. 0 to 4 GB-1), thus the segmentation
* mechanism will never generate "out of limit memory reference"
* exceptions even if physical memory does not reside at the referenced
* address.
*
* The 'A' (accessed) bit in the type field is _not_ set for all the
* data/code segment descriptors to accomodate placing these entries
* in ROM, since such use is not planned for this platform.
*/
_GdtRomEntries:
/* Entry 0 (selector=0x0000): The "NULL descriptor" */
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0x00
.byte 0x00
.byte 0x00
/* Entry 1 (selector=0x0008): Code descriptor: DPL0 */
.word 0xffff /* limit: xffff */
.word 0x0000 /* base : xxxx0000 */
.byte 0x00 /* base : xx00xxxx */
.byte 0x9a /* Code e/r, Present, DPL0 */
.byte 0xcf /* limit: fxxxx, Page Gra, 32bit */
.byte 0x00 /* base : 00xxxxxx */
/* Entry 2 (selector=0x0010): Data descriptor: DPL0 */
.word 0xffff /* limit: xffff */
.word 0x0000 /* base : xxxx0000 */
.byte 0x00 /* base : xx00xxxx */
.byte 0x92 /* Data r/w, Present, DPL0 */
.byte 0xcf /* limit: fxxxx, Page Gra, 32bit */
.byte 0x00 /* base : 00xxxxxx */
#ifdef CONFIG_PROT_MODE_SWITCH
/* Reset vector */
.code16
.section ".xresetv", "ax"
_ResetVector:
jmp __start
#endif /* CONFIG_PROT_MODE_SWITCH */
_GdtRomEnd:
#ifdef CONFIG_ADVANCED_IDLE
.section .NOINIT
.balign 4,0x90
_AdvIdleStack:
.fill CONFIG_ADV_IDLE_STACK_SIZE
#endif

View File

@ -0,0 +1,145 @@
/* driver_static_irq_stubs.s - interrupt stubs */
/*
* Copyright (c) 2012-2015, Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module contains the static interrupt stubs for the various drivers employed
by x86 BSPs.
*/
#define _ASMLANGUAGE
#ifndef CONFIG_DYNAMIC_INT_STUBS
#include <nanokernel/x86/asm.h>
#include <drivers/ioapic.h>
#include <drivers/loapic.h>
#include <drivers/pic.h>
#include <drivers/system_timer.h>
/* exports (internal APIs) */
#if defined(CONFIG_LOAPIC_TIMER)
GTEXT(_loApicTimerIntStub)
#endif
#if defined(CONFIG_HPET_TIMER)
GTEXT(_hpetIntStub)
#endif
#if defined (CONFIG_PIC)
GTEXT(_masterStrayIntStub)
GTEXT(_slaveStrayIntStub)
#endif
#if defined (CONFIG_PIT)
GTEXT(_i8253IntStub)
#endif
/* externs (internal APIs) */
GTEXT(_IntEnt)
GTEXT(_IntExit)
#if defined(CONFIG_LOAPIC_TIMER)
/*
 * _loApicTimerIntStub - static interrupt stub for the local APIC timer
 *
 * Brackets the C-level _timer_int_handler() with the kernel's interrupt
 * entry/exit bookkeeping and the local APIC end-of-interrupt.
 */
SECTION_FUNC (TEXT, _loApicTimerIntStub)
call _IntEnt /* Inform kernel interrupt has begun */
pushl $0 /* Push dummy parameter (handler takes one arg) */
call _timer_int_handler /* Call actual interrupt handler */
call _loapic_eoi /* Inform loapic interrupt is done */
addl $4, %esp /* Clean-up stack from push above */
jmp _IntExit /* Inform kernel interrupt is done */
#endif /* CONFIG_LOAPIC_TIMER */
#if defined(CONFIG_HPET_TIMER)
/*
 * _hpetIntStub - static interrupt stub for the HPET timer
 *
 * Same shape as the other timer stubs, but the HPET is routed through the
 * I/O APIC, so the EOI goes to _ioapic_eoi.
 */
SECTION_FUNC(TEXT, _hpetIntStub)
call _IntEnt /* Inform kernel interrupt has begun */
pushl $0 /* Push dummy parameter (handler takes one arg) */
call _timer_int_handler /* Call actual interrupt handler */
call _ioapic_eoi /* Inform ioapic interrupt is done */
addl $4, %esp /* Clean-up stack from push above */
jmp _IntExit /* Inform kernel interrupt is done */
#endif /* CONFIG_HPET_TIMER */
#if defined(CONFIG_PIC)
SECTION_FUNC(TEXT, _masterStrayIntStub)
/*
* Handle possible spurious (stray) interrupts on IRQ 7. Since on this
* particular BSP, no device is hooked up to IRQ 7, a C level ISR is
* not called as the call to the BOI routine will not return.
*/
call _IntEnt /* Inform kernel interrupt has begun */
call _i8259_boi_master /* Call the BOI routine (won't return) */
/*
* If an actual device was installed on IRQ 7, then the BOI may return,
* indicating a real interrupt was asserted on IRQ 7.
* The following code should be invoked in this case to invoke the ISR:
*
* pushl $param /+ Push argument to ISR +/
* call ISR /+ Call 'C' level ISR +/
* addl $4, %esp /+ pop arg to ISR +/
* jmp _IntExit /+ Inform kernel interrupt is done +/
*/
SECTION_FUNC(TEXT, _slaveStrayIntStub)
/*
* Handle possible spurious (stray) interrupts on IRQ 15 (slave PIC
* IRQ 7). Since on this particular BSP, no device is hooked up to
* IRQ 15, a C level ISR is not called as the call the BOI routine
* will not return.
*/
call _IntEnt /* Inform kernel interrupt has begun */
call _i8259_boi_slave /* Call the BOI routine (won't return) */
/*
* If an actual device was installed on IRQ 15, then the BOI may return,
* indicating a real interrupt was asserted on IRQ 15.
* The following code should be invoked in this case to invoke the ISR:
*
* pushl $param /+ Push argument to ISR +/
* call ISR /+ Call 'C' level ISR +/
* addl $4, %esp /+ pop arg to ISR +/
* jmp _IntExit /+ Inform kernel interrupt is done +/
*/
#endif /* CONFIG_PIC */
#if defined(CONFIG_PIT)
/*
 * _i8253IntStub - static interrupt stub for the i8253 PIT timer
 *
 * The PIT is wired through the master 8259A PIC, so the EOI is delivered
 * via _i8259_eoi_master.
 */
SECTION_FUNC(TEXT, _i8253IntStub)
call _IntEnt /* Inform kernel interrupt has begun */
pushl $0 /* Push dummy parameter (handler takes one arg) */
call _timer_int_handler /* Call actual interrupt handler */
call _i8259_eoi_master /* Inform the PIC interrupt is done */
addl $4, %esp /* Clean-up stack from push above */
jmp _IntExit /* Inform kernel interrupt is done */
#endif /* CONFIG_PIT */
#endif /* !CONFIG_DYNAMIC_INT_STUBS */

151
arch/x86/bsp/i8259Boi.s Normal file
View File

@ -0,0 +1,151 @@
/* i8259Boi.s - Intel 8259A PIC BOI Handler */
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
The PIC BOI handler determines if the IRQ in question is a spurious or real
interrupt. The IRQ inputs must remain high until after the falling edge of the
first INTA. A spurious interrupt on IRQ 7 can occur if the IRQ input goes low
before this time when the CPU acknowledges the interrupt. In this case, the
interrupt handler should simply return without sending an EOI command.
The distinction between a spurious interrupt and a real one is detected by
looking at the in service register (ISR). The bit (bit 7) will be 1 indicating
a real IRQ has been inserted.
*/
/* includes */
#define _ASMLANGUAGE
#include <nanokernel/cpu.h>
#include <nanokernel/x86/asm.h>
#include <drivers/pic.h>
#include <board.h>
/* externs */
GTEXT(_IntExit)
GDATA(_I8259SpuriousIntCount)
/*******************************************************************************
*
* _i8259_boi_master - detect whether it is spurious interrupt or not
*
* This routine is called before the user's interrupt handler to detect the
* spurious interrupt on the master PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context
* occurs.
*
* void _i8259_boi_master (void)
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _i8259_boi_master)
/* disable interrupts while talking to the PIC */
pushfl
cli
/* Master PIC: write OCW3 'read ISR' command (0x0b), then read the
 * in service register back from the same port */
PLB_BYTE_REG_WRITE (0x0b, PIC_PORT1(PIC_MASTER_BASE_ADRS))
PLB_BYTE_REG_READ (PIC_PORT1(PIC_MASTER_BASE_ADRS))
/* restore previous interrupt-enable state */
popfl
/* Contents of ISR in %AL; bit 7 set means IRQ 7 is genuinely in service */
andb $0x80, %al
je spur_isr /* bit 7 clear -> spurious; 'spur_isr' is defined further below */
ret
/*******************************************************************************
*
* _i8259_boi_slave - detect whether it is spurious interrupt or not
*
* This routine is called before the user's interrupt handler to detect the
* spurious interrupt on the slave PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context
* occurs.
*
* void _i8259_boi_slave (void)
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _i8259_boi_slave)
/* disable interrupts while talking to the PICs */
pushfl
cli
/* Slave PIC: write OCW3 'read ISR' command (0x0b), then read the
 * in service register back from the same port */
PLB_BYTE_REG_WRITE (0x0b, PIC_PORT1 (PIC_SLAVE_BASE_ADRS))
PLB_BYTE_REG_READ (PIC_PORT1 (PIC_SLAVE_BASE_ADRS))
/* Contents of slave ISR in %AL: nonzero -> go test bit 7 directly */
testb %al, %al
jne check_isr
/* Slave ISR is empty: check the master PIC's in service register to see
 * whether the cascade input (IRQ2) is what fired */
PLB_BYTE_REG_WRITE (0x0b, PIC_PORT1(PIC_MASTER_BASE_ADRS))
PLB_BYTE_REG_READ (PIC_PORT1(PIC_MASTER_BASE_ADRS))
/* Slave connected to IRQ2 on master (bit 2) */
testb $0x4, %al
je check_isr
/* Cascade was raised but slave has nothing in service:
 * send non-specific EOI to the master PIC for IRQ2 */
PLB_BYTE_REG_WRITE (I8259_EOI, PIC_IACK (PIC_MASTER_BASE_ADRS));
BRANCH_LABEL(check_isr)
/* restore previous interrupt-enable state */
popfl
/* Contents of ISR for either PIC in %AL; bit 7 = IRQ in service */
andb $0x80, %al
je spur_isr
ret
BRANCH_LABEL(spur_isr)
/* An actual spurious interrupt. Increment counter and short circuit,
 * i.e. return straight to the interrupted context without an EOI */
incl _I8259SpuriousIntCount
/* Pop the return address pushed by the stub's 'call' of this BOI routine */
addl $4, %esp
jmp _IntExit

84
arch/x86/bsp/rand32.c Normal file
View File

@ -0,0 +1,84 @@
/* rand32.c - non-random number generator */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides a non-random implementation of _Rand32Get(), which is not
meant to be used in a final product as a truly random number generator. It
was provided to allow testing of kernel stack canaries on a BSP that does not
(yet) provide a random number generator.
*/
#include <drivers/rand32.h>
#if defined(CONFIG_TEST_RANDOM_GENERATOR)
/*******************************************************************************
*
* _Rand32Init - initialize the random number generator
*
* The non-random number generator does not require any initialization.
*
* RETURNS: N/A
*/
void _Rand32Init(void)
{
	/* nothing to initialize: _Rand32Get() reads the free-running CPU
	 * timestamp counter directly */
}
/*******************************************************************************
*
* _Rand32Get - get a 32 bit random number
*
* The non-random number generator returns values that are based off the
* CPU's timestamp counter, which means that successive calls will normally
* display ever-increasing values.
*
* RETURNS: a 32-bit number
*/
#if defined(__GNUC__)
/*
 * _Rand32Get - get a 32 bit "random" number
 *
 * Returns the low 32 bits of the CPU timestamp counter; NOT a real RNG,
 * successive calls normally yield ever-increasing values (test use only).
 *
 * RETURNS: a 32-bit number
 */
uint32_t _Rand32Get(void)
{
	uint32_t ticks;

	/* RDTSC places the counter in EDX:EAX; only EAX is kept, so EDX is
	 * declared as clobbered */
	__asm__ volatile("rdtsc" : "=a"(ticks) : : "%edx");

	return ticks;
}
#elif defined(__DCC__)
__asm volatile uint32_t _Rand32Get(void)
{
% !"ax", "dx" rdtsc
}
#endif /* __GNUCC__/__DCC__ */
#endif /* CONFIG_TEST_RANDOM_GENERATOR */

View File

@ -0,0 +1,112 @@
/* sysFatalErrorHandler - common system fatal error handler */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides the _SysFatalErrorHandler() routine which is common to
supported BSPs.
*/
/* includes */
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <sections.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PRINTK(...) printk(__VA_ARGS__)
#else
#define PRINTK(...)
#endif /* CONFIG_PRINTK */
/*******************************************************************************
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _SysFatalErrorHandler(
	unsigned int reason, /* fatal error reason */
	const NANO_ESF * pEsf /* pointer to exception stack frame */
	)
{
	nano_context_type_t ctxType = context_type_get();

	/* neither diagnostic input is consulted by this sample handler */
	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);

	if ((ctxType == NANO_CTX_ISR) || _context_essential_check(NULL)) {
		/* an ISR or an essential context cannot be aborted: spin */
#ifdef CONFIG_PRINTK
		/*
		 * ctxText[] is defined only when PRINTK is configured, to
		 * avoid an "unused variable" warning otherwise.
		 */
		static char *ctxText[] = {"ISR", "essential fiber",
					  "essential task"};

		PRINTK("Fatal %s error! Spinning...\n", ctxText[ctxType]);
#endif /* CONFIG_PRINTK */
	} else {
		/*
		 * A non-essential context can be aborted, which may allow the
		 * rest of the system to keep running in a degraded mode.
		 */
#ifdef CONFIG_MICROKERNEL
		if (ctxType == NANO_CTX_TASK) {
			extern FUNC_NORETURN void _TaskAbort(void);
			PRINTK("Fatal task error! Aborting task.\n");
			_TaskAbort();
		} else
#endif /* CONFIG_MICROKERNEL */
		{
			PRINTK("Fatal fiber error! Aborting fiber.\n");
			fiber_abort();
		}
	}

	/* this handler never returns to the faulting context */
	for (;;) {
	}
}

252
arch/x86/bsp/systemApic.c Normal file
View File

@ -0,0 +1,252 @@
/* systemApic.c - system module for variants with LOAPIC */
/*
* Copyright (c) 2013-2015, Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides routines to initialize and support board-level hardware
for the atom_n28xx variant of generic_pc BSP.
*/
#include <misc/__assert.h>
#include "board.h"
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <drivers/ioapic.h>
#include <drivers/loapic.h>
/*******************************************************************************
*
* _SysIntVecAlloc - allocate interrupt vector
*
* This BSP provided routine supports the irq_connect() API. This
* routine is required to perform the following 3 functions:
*
* a) Allocate a vector satisfying the requested priority. The utility routine
* _IntVecAlloc() provided by the nanokernel will be used to perform the
* the allocation since the local APIC prioritizes interrupts as assumed
* by _IntVecAlloc().
* b) Return End of Interrupt (EOI) and Beginning of Interrupt (BOI) related
* information to be used when generating the interrupt stub code, and
* c) If an interrupt vector can be allocated, and the <irq> argument is not
* equal to NANO_SOFT_IRQ, the IOAPIC redirection table (RED) or the
* LOAPIC local vector table (LVT) will be updated with the allocated
* interrupt vector.
*
* The board virtualizes IRQs as follows:
*
* - The first IOAPIC_NUM_RTES IRQs are provided by the IOAPIC
* - The remaining IRQs are provided by the LOAPIC.
*
* Thus, for example, if the IOAPIC supports 24 IRQs:
*
* - IRQ0 to IRQ23 map to IOAPIC IRQ0 to IRQ23
* - IRQ24 to IRQ29 map to LOAPIC LVT entries as follows:
*
* IRQ24 -> LOAPIC_TIMER
* IRQ25 -> LOAPIC_THERMAL
* IRQ26 -> LOAPIC_PMC
* IRQ27 -> LOAPIC_LINT0
* IRQ28 -> LOAPIC_LINT1
* IRQ29 -> LOAPIC_ERROR
*
* The IOAPIC_NUM_RTES macro is provided by board.h, and it specifies the number
* of IRQs supported by the on-board I/O APIC device.
*
* RETURNS: the allocated interrupt vector
*
* INTERNAL
* For debug kernels, this routine will return -1 if there are no vectors
* remaining in the specified <priority> level, or if the <priority> or <irq>
* parameters are invalid.
*/
int _SysIntVecAlloc(
	unsigned int irq,                /* virtualized IRQ */
	unsigned int priority,           /* get vector from <priority> group */
	NANO_EOI_GET_FUNC * boiRtn,      /* ptr to BOI routine; NULL if none */
	NANO_EOI_GET_FUNC * eoiRtn,      /* ptr to EOI routine; NULL if none */
	void **boiRtnParm,               /* BOI routine parameter, if any */
	void **eoiRtnParm,               /* EOI routine parameter, if any */
	unsigned char *boiParamRequired, /* BOI routine parameter req? */
	unsigned char *eoiParamRequired  /* EOI routine parameter req? */
	)
{
	int vector;

	/* this board never installs a BOI handler (see below), so the
	 * BOI-related output parameters are never written
	 */
	ARG_UNUSED(boiRtnParm);
	ARG_UNUSED(boiParamRequired);

#if defined(DEBUG)
	/*
	 * Validate <priority> and the virtualized <irq> range: the IOAPIC
	 * RTEs plus the 6 LOAPIC LVT entries (see IRQ map in the header
	 * comment).  Software interrupts (NANO_SOFT_IRQ) bypass the range
	 * check.
	 */
	if ((priority > 15) ||
		((irq > (IOAPIC_NUM_RTES + 5)) && (irq != NANO_SOFT_IRQ)))
		return -1;
#endif

	/*
	 * Use the nanokernel utility function _IntVecAlloc(). A value of
	 * -1 will be returned if there are no free vectors in the requested
	 * priority.
	 */
	vector = _IntVecAlloc(priority);
	__ASSERT(vector != -1, "No free vectors in the requested priority");

	/*
	 * Set up the appropriate interrupt controller to generate the
	 * allocated interrupt vector for the specified IRQ, and provide the
	 * required EOI and BOI information for the interrupt stub code
	 * generation step.
	 *
	 * For software interrupts (NANO_SOFT_IRQ), skip the interrupt
	 * controller programming step, and indicate that a BOI and EOI
	 * handler is not required.
	 *
	 * Skip both steps if a vector could not be allocated.
	 */
	*boiRtn = (NANO_EOI_GET_FUNC)NULL; /* a BOI handler is never required */
	*eoiRtn = (NANO_EOI_GET_FUNC)NULL; /* assume NANO_SOFT_IRQ */

#if defined(DEBUG)
	if ((vector != -1) && (irq != NANO_SOFT_IRQ))
#else
	if (irq != NANO_SOFT_IRQ)
#endif
	{
		if (irq < IOAPIC_NUM_RTES) {
			/* IRQ is owned by the IOAPIC */
			_ioapic_int_vec_set(irq, vector);

			/*
			 * query IOAPIC driver to obtain EOI handler
			 * information for the interrupt vector that was just
			 * assigned to the specified IRQ
			 */
			*eoiRtn = (NANO_EOI_GET_FUNC)_ioapic_eoi_get(
				irq, (char *)eoiParamRequired, eoiRtnParm);
		} else {
			/* IRQ is owned by the LOAPIC: rebase to LVT index */
			_loapic_int_vec_set(irq - IOAPIC_NUM_RTES, vector);

			/* specify that the EOI handler in loApicIntr.c driver
			 * be invoked */
			*eoiRtn = (NANO_EOI_GET_FUNC)_loapic_eoi;
			*eoiParamRequired = 0;
		}
	}

	return vector;
}
/*******************************************************************************
*
* _SysIntVecProgram - program interrupt controller
*
* This BSP provided routine programs the appropriate interrupt controller
* with the given vector based on the given IRQ parameter.
*
* Drivers call this routine instead of irq_connect() when interrupts are
* configured statically.
*
* The Clanton board virtualizes IRQs as follows:
*
* - The first IOAPIC_NUM_RTES IRQs are provided by the IOAPIC so the IOAPIC
* is programmed for these IRQs
* - The remaining IRQs are provided by the LOAPIC and hence the LOAPIC is
* programmed.
*
* The IOAPIC_NUM_RTES macro is provided by board.h, and it specifies the number
* of IRQs supported by the on-board I/O APIC device.
*
*/
void _SysIntVecProgram(unsigned int vector, /* vector number */
		       unsigned int irq /* virtualized IRQ */
		       )
{
	/* program the interrupt controller that owns this virtualized IRQ */
	if (irq >= IOAPIC_NUM_RTES) {
		/* LOAPIC-owned IRQ: rebase to the LVT entry index */
		_loapic_int_vec_set(irq - IOAPIC_NUM_RTES, vector);
		return;
	}

	/* IOAPIC-owned IRQ */
	_ioapic_int_vec_set(irq, vector);
}
/*******************************************************************************
*
* irq_enable - enable an individual interrupt (IRQ)
*
* The public interface for enabling/disabling a specific IRQ for the IA-32
* architecture is defined as follows in include/nanokernel/x86/arch.h
*
* extern void irq_enable (unsigned int irq);
* extern void irq_disable (unsigned int irq);
*
* The irq_enable() routine is provided by the BSP due to the
* IRQ virtualization that is performed by this BSP. See the comments
* in _SysIntVecAlloc() for more information regarding IRQ virtualization.
*
* RETURNS: N/A
*/
void irq_enable(unsigned int irq)
{
	/* dispatch to the interrupt controller that owns this virtual IRQ */
	if (irq >= IOAPIC_NUM_RTES) {
		/* LOAPIC-owned IRQ: rebase to the LVT entry index */
		_loapic_irq_enable(irq - IOAPIC_NUM_RTES);
		return;
	}

	/* IOAPIC-owned IRQ */
	_ioapic_irq_enable(irq);
}
/*******************************************************************************
*
* irq_disable - disable an individual interrupt (IRQ)
*
* The irq_disable() routine is provided by the BSP due to the
* IRQ virtualization that is performed by this BSP. See the comments
* in _SysIntVecAlloc() for more information regarding IRQ virtualization.
*
* RETURNS: N/A
*/
void irq_disable(unsigned int irq)
{
	/* dispatch to the interrupt controller that owns this virtual IRQ */
	if (irq >= IOAPIC_NUM_RTES) {
		/* LOAPIC-owned IRQ: rebase to the LVT entry index */
		_loapic_irq_disable(irq - IOAPIC_NUM_RTES);
		return;
	}

	/* IOAPIC-owned IRQ */
	_ioapic_irq_disable(irq);
}

150
arch/x86/bsp/systemPic.c Normal file
View File

@ -0,0 +1,150 @@
/* systemPic.c - system module for variants with PIC */
/*
* Copyright (c) 2013-2015, Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides routines to initialize and support board-level hardware
for the pentium4 and minuteia variants of the generic_pc BSP.
*/
#include "board.h"
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <drivers/pic.h>
/* Handle possible stray or spurious interrupts on the master and slave PICs */
extern void _masterStrayIntStub(void);
extern void _slaveStrayIntStub(void);
SYS_INT_REGISTER(_masterStrayIntStub, PIC_MASTER_STRAY_INT_LVL, 0);
SYS_INT_REGISTER(_slaveStrayIntStub, PIC_SLAVE_STRAY_INT_LVL, 0);
/*******************************************************************************
*
* _SysIntVecAlloc - allocate interrupt vector
*
* This BSP provided routine supports the irq_connect() API. This
* routine performs the following functions:
*
* a) Allocates a vector satisfying the requested priority, where possible.
* When the <irq> argument is not equal to NANO_SOFT_IRQ, the vector assigned
* to the <irq> during interrupt controller initialization is returned,
* which may or may not have the desired prioritization. (Prioritization of
* such vectors is fixed by the 8259 interrupt controllers, and cannot be
* programmed on an IRQ basis; for example, IRQ0 is always the highest
* priority interrupt no matter which interrupt vector was assigned to IRQ0.)
* b) Provides End of Interrupt (EOI) and Beginning of Interrupt (BOI) related
* information to be used when generating the interrupt stub code.
*
* The pcPentium4 board virtualizes IRQs as follows:
*
* - IRQ0 to IRQ7 are provided by the master i8259 PIC
* - IRQ8 to IRQ15 are provided by the slave i8259 PIC
*
* RETURNS: the allocated interrupt vector
*
* INTERNAL
* For debug kernels, this routine will return -1 for invalid <priority> or
* <irq> parameter values.
*/
int _SysIntVecAlloc(
	unsigned int irq,                /* virtualized IRQ */
	unsigned int priority,           /* get vector from <priority> group */
	NANO_EOI_GET_FUNC * boiRtn,      /* ptr to BOI routine; NULL if none */
	NANO_EOI_GET_FUNC * eoiRtn,      /* ptr to EOI routine; NULL if none */
	void **boiRtnParm,               /* BOI routine parameter, if any */
	void **eoiRtnParm,               /* EOI routine parameter, if any */
	unsigned char *boiParamRequired, /* BOI routine parameter req? */
	unsigned char *eoiParamRequired  /* EOI routine parameter req? */
	)
{
	int vector;

	/* the i8259 BOI/EOI handlers never take a parameter */
	ARG_UNUSED(boiRtnParm);
	ARG_UNUSED(eoiRtnParm);

#if defined(DEBUG)
	/*
	 * Explicit parentheses document the intended grouping (matching the
	 * equivalent check in the APIC system module) and avoid the
	 * -Wparentheses ambiguity of mixing || and &&: reject an invalid
	 * priority, or a hardware IRQ above 15; NANO_SOFT_IRQ bypasses the
	 * IRQ range check.
	 */
	if ((priority > 15) || ((irq > 15) && (irq != NANO_SOFT_IRQ)))
		return -1;
#endif

	/* The PIC BOI does not require a parameter */
	*boiParamRequired = 0;

	/* Assume BOI is not required */
	*boiRtn = (NANO_EOI_GET_FUNC)NULL;

	if (irq != NANO_SOFT_IRQ) {
		/*
		 * Hardware IRQ: vectors are fixed by the 8259 initialization,
		 * so simply convert the IRQ number to its interrupt vector.
		 */
		vector = INT_VEC_IRQ0 + irq;

		/* mark vector as allocated */
		_IntVecMarkAllocated(vector);

		/*
		 * vector not handled by PIC, thus don't specify an EOI
		 * handler.
		 * NOTE(review): *eoiParamRequired is left unwritten on this
		 * path (and on the NANO_SOFT_IRQ path); callers presumably
		 * ignore it when *eoiRtn is NULL -- confirm against the stub
		 * generator.
		 */
		if (irq >= N_PIC_IRQS) {
			*eoiRtn = (NANO_EOI_GET_FUNC)NULL;
			return vector;
		}

		/*
		 * The stray interrupt levels get a BOI handler so spurious
		 * interrupts from the corresponding i8259 can be detected.
		 */
		if (irq == PIC_MASTER_STRAY_INT_LVL) {
			*boiRtn = (NANO_EOI_GET_FUNC)_i8259_boi_master;
		} else if (irq == PIC_SLAVE_STRAY_INT_LVL) {
			*boiRtn = (NANO_EOI_GET_FUNC)_i8259_boi_slave;
		}

		/* master PIC services IRQ0-7; the slave services the rest */
		if (irq <= PIC_MASTER_STRAY_INT_LVL)
			*eoiRtn = (NANO_EOI_GET_FUNC)_i8259_eoi_master;
		else
			*eoiRtn = (NANO_EOI_GET_FUNC)_i8259_eoi_slave;

		*eoiParamRequired = 0;
	} else {
		/*
		 * Use the nanokernel utility function _IntVecAlloc() to
		 * allocate a vector for software generated interrupts.
		 */
		vector = _IntVecAlloc(priority);
		*eoiRtn = (NANO_EOI_GET_FUNC)NULL;
	}

	return vector;
}

169
arch/x86/core/cpuhalt.s Normal file
View File

@ -0,0 +1,169 @@
/* cpuhalt.s - VxMicro CPU power management code for IA-32 */
/*
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides an implementation of the architecture-specific
nano_cpu_idle() primitive required by the nanokernel idle loop component.
It can be called within an implementation of _SysPowerSaveIdle(),
which is provided for the microkernel by the BSP.
The module also provides an implementation of nano_cpu_atomic_idle(), which
atomically re-enables interrupts and enters low power mode.
INTERNAL
These implementations of nano_cpu_idle() and nano_cpu_atomic_idle() could be used
when operating as a Hypervisor guest. More specifically, the Hypervisor
supports the execution of the 'hlt' instruction from a guest (results in a
VM exit), and more importantly, the Hypervisor will respect the
single instruction delay slot after the 'sti' instruction as required
by nano_cpu_atomic_idle().
*/
#define _ASMLANGUAGE
#include <nanokernel/x86/asm.h>
/* exports (external APIs) */
GTEXT(nano_cpu_idle)
GTEXT(nano_cpu_atomic_idle)
#if defined(CONFIG_BOOT_TIME_MEASUREMENT)
GDATA(__idle_tsc)
#endif
#ifndef CONFIG_NO_ISRS
/*******************************************************************************
*
* nano_cpu_idle - power save idle routine for IA-32
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _SysPowerSaveIdle in the microkernel when the
* '_SysPowerSaveFlag' variable is non-zero. The IA-32 'hlt' instruction
* will be issued causing a low-power consumption sleep mode.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call _IntLatencyStop	/* interrupts are about to be re-enabled */
#endif
#if defined(CONFIG_BOOT_TIME_MEASUREMENT)
	rdtsc			/* record idle timestamp ... */
	mov %eax, __idle_tsc	/* ... low 32 bits */
	mov %edx, __idle_tsc+4	/* ... high 32 bits */
#endif
	sti			/* make sure interrupts are enabled */
	hlt			/* sleep until the next interrupt */
	ret			/* return after processing ISR */
/*******************************************************************************
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, of the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call _IntLatencyStop	/* interrupts are about to be re-enabled */
#endif
	sti	/* make sure interrupts are enabled */
	/*
	 * The following statement appears in "Intel 64 and IA-32 Architectures
	 * Software Developer's Manual", regarding the 'sti' instruction:
	 *
	 * "After the IF flag is set, the processor begins responding to
	 *  external, maskable interrupts after the next instruction is
	 *  executed."
	 *
	 * Thus the IA-32 implementation of nano_cpu_atomic_idle() will
	 * atomically re-enable interrupts and enter a low-power mode: no
	 * interrupt can slip in between the 'sti' and the 'hlt'.
	 */
	hlt
	/* restore interrupt lockout state before returning to caller */
	testl $0x200, SP_ARG1(%esp)	/* 0x200 = EFLAGS IF bit in 'imask' */
	jnz skipIntDisable		/* IF was set: leave interrupts on */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call _IntLatencyStart
#endif
	cli	/* caller had interrupts locked out: re-lock */
BRANCH_LABEL(skipIntDisable)
	ret
#else
/*
 * When no interrupt support is configured, both nano_cpu_idle() and
 * nano_cpu_atomic_idle() are "no op" routines that simply return immediately
 * without entering low-power mode.
 */
SECTION_FUNC(TEXT, nano_cpu_idle)
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
	ret
#endif /* !CONFIG_NO_ISRS */

246
arch/x86/core/excconnect.c Normal file
View File

@ -0,0 +1,246 @@
/* excconnect.c - VxMicro exception management support for IA-32 arch */
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides routines to manage exceptions (synchronous interrupts)
in VxMicro on the IA-32 architecture.
This module provides the public routine nanoCpuExcConnect().
INTERNAL
An exception is defined as a synchronous interrupt, i.e. an interrupt
asserted as a direct result of program execution as opposed to a
hardware device asserting an interrupt.
Many (but not all) exceptions are handled by an "exception stub" whose code
is generated by the system itself. The stub performs various actions before
and after invoking the application (or operating system) specific exception
handler; for example, a thread or ISR context save is performed prior to
invoking the exception handler.
The IA-32 code that makes up a "full" exception stub is shown below. A full
exception stub is one that pushes a dummy error code at the start of
exception processing. Exception types where the processor automatically
pushes an error code when handling an exception utilize similar exception
stubs, however the first instruction is omitted. The use of the dummy error
code means that _ExcEnt() and _ExcExit() do not have to worry about whether
an error code is present on the stack or not.
0x00 pushl $0 /@ push dummy error code @/
Machine code: 0x68, 0x00, 0x00, 0x00, 0x00
0x05 call _ExcEnt /@ inform kernel of exception @/
Machine code: 0xe8, 0x00, 0x00, 0x00, 0x00
0x0a call ExcHandler /@ invoke exception handler @/
Machine code: 0xe8, 0x00, 0x00, 0x00, 0x00
/@ _ExcExit() will adjust the stack to discard the error code @/
0x0f jmp _ExcExit /@ restore context context @/
Machine code: 0xe9, 0x00, 0x00, 0x00, 0x00
NOTE: Be sure to update the arch specific definition of the _EXC_STUB_SIZE
macro to reflect the size of the full exception stub (as shown above).
The _EXC_STUB_SIZE macro is defined in kernel/arch/Intel/include/nanok.h
and include/Intel/nanokernel.h.
*/
#include <nanokernel.h>
#include <nanok.h>
/* forward declarations */
void _NanoCpuExcConnectAtDpl(unsigned int vector,
void (*routine)(NANO_ESF * pEsf),
NANO_EXC_STUB pExcStubMem,
unsigned int dpl);
/*******************************************************************************
*
* nanoCpuExcConnect - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having a
* descriptor privilege level (DPL) equal to zero.
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for nanoCpuExcConnect() only exists in nanok.h,
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on
					       IA-32 */
		       void (*routine)(NANO_ESF * pEsf), /* C exception handler */
		       NANO_EXC_STUB pExcStubMem /* persistent stub memory */
		       )
{
	/* delegate with a fixed descriptor privilege level (DPL) of zero */
	_NanoCpuExcConnectAtDpl(vector, routine, pExcStubMem, 0);
}
/*******************************************************************************
*
* _NanoCpuExcConnectAtDpl - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having the supplied
* descriptor privilege level (DPL).
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for nanoCpuExcConnect() only exists in nanok.h,
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
void _NanoCpuExcConnectAtDpl(
	unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */
	void (*routine)(NANO_ESF * pEsf), /* C exception handler to invoke */
	NANO_EXC_STUB pExcStubMem, /* persistent memory for synthesized stub */
	unsigned int dpl /* priv level for interrupt-gate descriptor */
	)
{
	extern void _ExcEnt(void);
	extern void _ExcExit(void);
	unsigned int offsetAdjust = 0; /* becomes 5 if a dummy error code
					* instruction is emitted first */

/* NOTE(review): this macro is not #undef'd at the end of the function, so it
 * stays defined for the rest of the translation unit -- confirm intentional
 */
#define STUB_PTR pExcStubMem

	/*
	 * The <vector> parameter must be less than IV_INTEL_RESERVED_END,
	 * however, explicit validation will not be performed in this primitive.
	 */

	/*
	 * If the specified <vector> represents an exception type where the CPU
	 * does not push an error code onto the stack, then generate a stub that
	 * pushes a dummy code. This results in a single implementation of
	 * _ExcEnt and _ExcExit which expects an error code to be pushed onto
	 * the stack (along with the faulting CS:EIP and EFLAGS).
	 */
	if (((1 << vector) & _EXC_ERROR_CODE_FAULTS) == 0) {
		/* emit a 5-byte "pushl $imm32": opcode + 32-bit immediate */
		STUB_PTR[0] = IA32_PUSH_OPCODE;
		UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1],
				(unsigned int)0 /* value of dummy error code */
				);
		offsetAdjust = 5;
	}

	/*
	 * generate code that invokes _ExcEnt(); the 32-bit operand of the
	 * 'call' is a displacement relative to the end of the 5-byte
	 * instruction, i.e. relative to &pExcStubMem[5 + offsetAdjust]
	 */
	STUB_PTR[offsetAdjust] = IA32_CALL_OPCODE;
	UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1 + offsetAdjust],
			(unsigned int)&_ExcEnt -
			(unsigned int)&pExcStubMem[5 + offsetAdjust]);
	offsetAdjust += 5;

	/* generate code that invokes the exception handler (same relative
	 * displacement encoding as above)
	 */
	STUB_PTR[offsetAdjust] = IA32_CALL_OPCODE;
	UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1 + offsetAdjust],
			(unsigned int)routine -
			(unsigned int)&pExcStubMem[5 + offsetAdjust]);
	offsetAdjust += 5;

	/*
	 * generate code that invokes _ExcExit(); note that a jump is used,
	 * since _ExcExit() takes care of popping the error code and returning
	 * back to the context that triggered the exception
	 */
	STUB_PTR[offsetAdjust] = IA32_JMP_OPCODE;
	UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1 + offsetAdjust],
			(unsigned int)&_ExcExit -
			(unsigned int)&pExcStubMem[5 + offsetAdjust]);

	/*
	 * There is no need to explicitly synchronize or flush the instruction
	 * cache due to the above code synthesis. See the Intel 64 and IA-32
	 * Architectures Software Developer's Manual: Volume 3A: System
	 * Programming Guide; specifically the section titled "Self Modifying
	 * Code".
	 *
	 * Cache synchronization/flushing is not required for the i386 as it
	 * does not contain any on-chip I-cache; likewise, post-i486 processors
	 * invalidate the I-cache automatically. An i486 requires the CPU
	 * to perform a 'jmp' instruction before executing the synthesized code;
	 * however, the call and return that follows meets this requirement.
	 */

	/* install the synthesized stub as the gate handler at the given DPL */
	_IntVecSet(vector, (void (*)(void *))pExcStubMem, dpl);
}

322
arch/x86/core/excstub.s Normal file
View File

@ -0,0 +1,322 @@
/* excstub.s - VxMicro exception management support for IA-32 architecture */
/*
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements assembly routines to manage exceptions (synchronous
interrupts) in VxMicro on the Intel IA-32 architecture. More specifically,
exceptions are implemented in this module. The stubs are invoked when entering
and exiting a C exception handler.
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <nanokernel/x86/asm.h>
#include <nanokernel/x86/arch.h> /* For MK_ISR_NAME */
#include <offsets.h> /* nanokernel structure offset definitions */
#include <asmPrv.h>
/* exports (internal APIs) */
GTEXT(_ExcEnt)
GTEXT(_ExcExit)
/* externs (internal APIs) */
/*******************************************************************************
*
* _ExcEnt - inform the VxMicro kernel of an exception
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the VxMicro kernel of an exception. This routine currently does
* _not_ increment a context/interrupt specific exception count. Also,
* execution of the exception handler occurs on the current stack, i.e.
* _ExcEnt() does not switch to another stack. The volatile integer
* registers are saved on the stack, and control is returned back to the
* exception stub.
*
* WARNINGS
*
* Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcEnt (void);
*
*/
SECTION_FUNC(TEXT, _ExcEnt)
/*
* The _IntVecSet() routine creates an interrupt-gate descriptor for
* all connections. The processor will automatically clear the IF
* bit in the EFLAGS register upon execution of the handler, thus
* _ExcEnt() (and _IntEnt) need not issue a 'cli' as the first
* instruction.
*/
/*
* Note that the processor has pushed both the EFLAGS register
* and the linear return address (cs:eip) onto the stack prior
* to invoking the handler specified in the IDT.
*
* Clear the direction flag. It is automatically restored when the
* exception exits.
*/
cld
/*
* Swap eax and return address on the current stack;
* this saves eax on the stack without losing knowledge
* of how to get back to the exception stub.
*/
#ifdef CONFIG_LOCK_INSTRUCTION_UNSUPPORTED
/*
* Three-instruction emulation of the 'xchgl' below, for BSPs that
* cannot use the implicitly bus-locked memory exchange (see also the
* related comment in _ExcExit).
*/
pushl (%esp)
movl %eax, 4(%esp)
popl %eax
#else
xchgl %eax, (%esp)
#endif /* CONFIG_LOCK_INSTRUCTION_UNSUPPORTED*/
/*
* Push the remaining volatile registers on the existing stack.
* Note that eax has already been saved on the context stack.
*/
pushl %ecx
pushl %edx
#ifdef CONFIG_GDB_INFO
/*
* Push the cooperative registers on the existing stack as they are
* required by debug tools.
*/
pushl %edi
pushl %esi
pushl %ebx
pushl %ebp
#endif /* CONFIG_GDB_INFO */
/*
* Save possible faulting address: this has to be done before
* interrupts are re-enabled.
*/
movl %cr2, %ecx
pushl %ecx
/* ESP is pointing to the ESF at this point */
#if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
movl _NanoKernel + __tNANO_current_OFFSET, %ecx
incl __tCCS_excNestCount_OFFSET(%ecx) /* inc exception nest count */
#ifdef CONFIG_GDB_INFO
/*
* Save the pointer to the stack frame (NANO_ESF *) in
* the current context if this is the outermost exception.
* The ESF pointer is used by debug tools to locate the volatile
* registers and the stack of the preempted context.
*/
testl $EXC_ACTIVE, __tCCS_flags_OFFSET (%ecx)
jne alreadyInException
movl %esp, __tCCS_esfPtr_OFFSET(%ecx)
BRANCH_LABEL(alreadyInException)
#endif /* CONFIG_GDB_INFO */
/*
* Set the EXC_ACTIVE bit in the tCCS of the current context.
* This enables _Swap() to preserve the context's FP registers
* (where needed) if the exception handler causes a context switch.
* It also indicates to debug tools that an exception is being
* handled in the event of a context switch.
*/
orl $EXC_ACTIVE, __tCCS_flags_OFFSET(%ecx)
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
/*
* restore interrupt enable state, then "return" back to exception stub
*
* interrupts are enabled only if they were allowed at the time
* the exception was triggered -- this protects kernel level code
* that mustn't be interrupted
*
* Test IF bit of saved EFLAGS and re-enable interrupts if IF=1.
*/
/* ESP is still pointing to the ESF at this point */
testl $0x200, __NANO_ESF_eflags_OFFSET(%esp) /* 0x200 = IF bit of EFLAGS */
je allDone
sti
BRANCH_LABEL(allDone)
pushl %esp /* push NANO_ESF * parameter */
jmp *%eax /* "return" back to stub */
/*******************************************************************************
*
* _ExcExit - inform the VxMicro kernel of an exception exit
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the VxMicro kernel that the processing of an exception has
* completed. This routine restores the volatile integer registers and
* then control is returned back to the interrupted context or ISR.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcExit (void);
*
*/
SECTION_FUNC(TEXT, _ExcExit)
/* On entry, interrupts may or may not be enabled. */
/* discard the NANO_ESF * parameter and CR2 */
addl $8, %esp
/*
* NOTE(review): this outer guard previously tested
* CONFIG_SUPPORT_FP_SHARING, whereas _ExcEnt() and the nested guard
* below test CONFIG_FP_SHARING. With only CONFIG_FP_SHARING defined,
* _ExcEnt() would increment the exception nest count but _ExcExit()
* would never decrement it. Test CONFIG_FP_SHARING here so that entry
* and exit processing are symmetric.
*/
#if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
movl _NanoKernel + __tNANO_current_OFFSET, %ecx
/*
* Must lock interrupts to prevent outside interference.
* (Using "lock" prefix would be nicer, but this won't work
* on BSPs that don't respect the CPU's bus lock signal.)
*/
cli
#if ( defined(CONFIG_FP_SHARING) || \
defined(CONFIG_GDB_INFO) )
/*
* Determine whether exiting from a nested interrupt.
*/
decl __tCCS_excNestCount_OFFSET(%ecx) /* dec exception nest count */
cmpl $0, __tCCS_excNestCount_OFFSET(%ecx)
jne nestedException
/*
* Clear the EXC_ACTIVE bit in the tCCS of the current context
* if we are not in a nested exception (ie, when we exit the outermost
* exception).
*/
andl $~EXC_ACTIVE, __tCCS_flags_OFFSET (%ecx)
BRANCH_LABEL(nestedException)
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
#ifdef CONFIG_GDB_INFO
/*
* Pop the non-volatile registers from the stack.
* Note that debug tools may have altered the saved register values while
* the task was stopped, and we want to pick up the altered values.
*/
popl %ebp
popl %ebx
popl %esi
popl %edi
#endif /* CONFIG_GDB_INFO */
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
/* restore edx and ecx which are always saved on the stack */
popl %edx
popl %ecx
popl %eax
addl $4, %esp /* "pop" error code */
/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
iret
/* Static exception handler stubs */
/*
* Statically register the handler for the device-not-available exception
* (IV_DEVICE_NOT_AVAILABLE) when CONFIG_AUTOMATIC_FP_ENABLING is enabled;
* the stub is generated differently per toolchain (__GNUC__ vs __DCC__).
*/
#ifdef CONFIG_AUTOMATIC_FP_ENABLING
#if defined(__GNUC__)
NANO_CPU_EXC_CONNECT_NO_ERR(_FpNotAvailableExcHandler,IV_DEVICE_NOT_AVAILABLE,0)
#elif defined(__DCC__)
NANO_CPU_INT_REGISTER_ASM(_FpNotAvailableExcHandler,IV_DEVICE_NOT_AVAILABLE,0)
GTEXT(MK_STUB_NAME(_FpNotAvailableExcHandler))
SECTION_FUNC(TEXT, MK_STUB_NAME(_FpNotAvailableExcHandler))
NANO_CPU_EXC_CONNECT_NO_ERR_CODE(_FpNotAvailableExcHandler)
#endif
#endif /* CONFIG_AUTOMATIC_FP_ENABLING */

134
arch/x86/core/ffs.s Normal file
View File

@ -0,0 +1,134 @@
/* ffs.s - find first set bit APIs for IA-32 architecture */
/*
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the find_last_set() and find_first_set() functions
for the IA-32 architecture.
INTERNAL
Inline versions of these APIs, find_last_set_inline() and find_first_set_inline(),
are defined in arch.h.
*/
#define _ASMLANGUAGE
#include <nanokernel/x86/asm.h>
#include <offsets.h> /* nanokernel structure offset definitions */
/* exports (public APIs) */
GTEXT(find_last_set)
GTEXT(find_first_set)
/*******************************************************************************
*
* find_first_set - find first set bit searching from the LSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
* For Intel64 (x86_64) architectures, the 'cmovz' can be removed
* and leverage the fact that the 'bsfl' doesn't modify the destination operand
* when the source operand is zero. The "bitpos" variable can be preloaded
* into the destination register, and given the unconditional ++bitpos that
* is performed after the 'cmovz', the correct results are yielded.
*/
SECTION_FUNC(TEXT, find_first_set)
#if !defined(CONFIG_CMOV_UNSUPPORTED)
movl $0xffffffff, %ecx /* preload for 0 return value */
bsfl 0x4(%esp), %eax /* bit scan "forward" */
cmovz %ecx, %eax /* if operand == 0 then %ecx -> eax */
addl $1, %eax /* 0xffffffff + 1 = 0; else 0-based index -> 1-based */
ret
#else
bsfl 0x4(%esp), %eax /* bit scan "forward"; sets ZF when arg=0 */
jnz ffsLsb_argNotZero
xorl %eax, %eax /* return 0 when arg=0 */
ret
BRANCH_LABEL(ffsLsb_argNotZero) /* this label serves find_first_set() & find_last_set() */
addl $1, %eax /* convert 0-based bit index to 1-based */
ret
#endif /* !CONFIG_CMOV_UNSUPPORTED */
/*******************************************************************************
*
* find_last_set - find first set bit searching from the MSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
* For Intel64 (x86_64) architectures, the 'cmovz' can be removed
* and leverage the fact that the 'bsrl' doesn't modify the destination operand
* when the source operand is zero. The "bitpos" variable can be preloaded
* into the destination register, and given the unconditional ++bitpos that
* is performed after the 'cmovz', the correct results are yielded.
*/
SECTION_FUNC(TEXT, find_last_set)
#if !defined(CONFIG_CMOV_UNSUPPORTED)
movl $0xffffffff, %ecx /* preload for 0 return value */
bsrl 0x4(%esp), %eax /* bit scan "reverse" */
cmovz %ecx, %eax /* if operand == 0 then %ecx -> eax */
addl $1, %eax
ret
#else
bsrl 0x4(%esp), %eax /* bit scan "reverse"; sets ZF when arg=0 */
jnz ffsLsb_argNotZero /* NOTE: label is defined in find_first_set()'s
* CONFIG_CMOV_UNSUPPORTED path above; it performs
* the shared "addl $1; ret" epilogue */
xorl %eax, %eax /* return 0 when arg=0 */
ret
#endif /* !CONFIG_CMOV_UNSUPPORTED */

103
arch/x86/core/gdt.c Normal file
View File

@ -0,0 +1,103 @@
/* gdt.c - Global Descriptor Table support */
/*
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module contains routines for updating the global descriptor table (GDT)
for the IA-32 architecture.
*/
/* includes */
#include <linker-defs.h>
#include <toolchain.h>
#include <sections.h>
#include <nanok.h>
#include <nanokernel/cpu.h>
#include <gdt.h>
/* defines */
#if (CONFIG_NUM_GDT_SPARE_ENTRIES < 0)
#error "**** CONFIG_NUM_GDT_SPARE_ENTRIES must be at least 0\n\n"
#endif
#define NUM_BASE_GDT_ENTRIES 3
#define MAX_GDT_ENTRIES \
(NUM_BASE_GDT_ENTRIES + CONFIG_NUM_GDT_SPARE_ENTRIES)
/* locals */
/*
* The RAM based global descriptor table. It is aligned on an 8 byte boundary
* as the Intel manuals recommend this for best performance.
*
* Layout: entry 0 is the mandatory "NULL descriptor", entry 1 is a flat
* (base 0, page-granular 0xfffff limit) DPL0 code segment, and entry 2 is a
* matching flat DPL0 data segment. Any CONFIG_NUM_GDT_SPARE_ENTRIES entries
* that follow are left zero-initialized (static storage).
*/
/*
* For MM_POMS, _GdtEntries must be global so the linker script can
* generate a _GdtEntriesP for crt0.s
*/
static
tGdtDesc
_GdtEntries[MAX_GDT_ENTRIES]
__attribute__((aligned(8))) = {
{/* Entry 0 (selector=0x0000): The "NULL descriptor" */
0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00},
{ /* Entry 1 (selector=0x0008): Code descriptor: DPL0 */
0xffff, /* limit: xffff */
0x0000, /* base : xxxx0000 */
0x00, /* base : xx00xxxx */
0x9b, /* Code e/r, Present, DPL0, Accessed=1 */
0xcf, /* limit: fxxxx, Page Gra, 32 bit */
0x00 /* base : 00xxxxxx */
},
{ /* Entry 2 (selector=0x0010): Data descriptor: DPL0 */
0xffff, /* limit: xffff */
0x0000, /* base : xxxx0000 */
0x00, /* base : xx00xxxx */
0x93, /* Data r/w, Present, DPL0, Accessed=1 */
0xcf, /* limit: fxxxx, Page Gra, 32 bit */
0x00 /* base : 00xxxxxx */
},
};
/* globals */
/*
* Header describing the GDT: a limit (offset of the last valid byte, hence
* the "- 1") and a pointer to the descriptor array. Note that the initial
* limit covers only the NUM_BASE_GDT_ENTRIES base descriptors and excludes
* the spare entries -- presumably the limit is extended when spare entries
* are actually allocated (NOTE(review): confirm against the GDT update
* routines this module provides).
*/
tGdtHeader _Gdt = {
sizeof(tGdtDesc[MAX_GDT_ENTRIES - CONFIG_NUM_GDT_SPARE_ENTRIES]) -
1,
&_GdtEntries[0]};

View File

@ -0,0 +1,82 @@
/* intboiexit.s - VxMicro spurious interrupt support for IA-32 architecture */
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the single interrupt handling routine _IntBoiExit().
This routine is used by some interrupt controller drivers (e.g. the Intel
8259A driver) to short circuit the execution of normal interrupt stub
processing when a spurious interrupt is detected.
When a spurious interrupt condition is detected by the interrupt controller's
"beginning of interrupt" (BOI) handler it forces a return to _IntBoiExit()
rather than returning back to the interrupt stub. The _IntBoiExit() routine
then pops the parameter passed to the BOI handler and branches to _IntExit(),
thereby circumventing execution of the "application" ISR and the interrupt
controller driver's "end of interrupt" (EOI) handler (if present).
\INTERNAL
The _IntBoiExit() routine is provided in a separate module so that it gets
included in the final image only if an interrupt controller driver utilizing
_IntBoiExit() is present.
*/
#define _ASMLANGUAGE
#include <nanokernel/x86/asm.h>
#include <offsets.h> /* nanokernel structure offset definitions */
/* exports (internal APIs) */
GTEXT(_IntBoiExit)
/* externs */
GTEXT(_IntExit)
/*******************************************************************************
*
* _IntBoiExit - exit interrupt handler stub without invoking ISR
*
* This routine exits an interrupt handler stub without invoking the associated
* ISR handler (or the EOI handler, if present). It should only be jumped to
* by an interrupt controller driver's BOI routine, and only if the BOI routine
* is passed a single parameter by the interrupt stub.
*
* \INTERNAL
* A BOI routine that has no parameters can jump directly to _IntExit().
*/
SECTION_FUNC(TEXT, _IntBoiExit)
/* the stub pushed $BoiParameter just before calling the BOI routine */
addl $4, %esp /* pop off the $BoiParameter */
jmp FUNC(_IntExit) /* exit via kernel */

600
arch/x86/core/intconnect.c Normal file
View File

@ -0,0 +1,600 @@
/* intconnect.c - VxMicro interrupt management support for IA-32 arch */
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides routines to manage asynchronous interrupts in VxMicro
on the IA-32 architecture.
This module provides the public routine irq_connect(), the private
routine _IntVecSet(), and the BSP support routines _IntVecAlloc(),
_IntVecMarkAllocated() and _IntVecMarkFree().
INTERNAL
The _IdtBaseAddress symbol is used to determine the base address of the IDT.
(It is generated by the linker script, and doesn't correspond to an actual
global variable.)
Interrupts are handled by an "interrupt stub" whose code is generated by the
system itself. The stub performs various actions before and after invoking
the application (or operating system) specific interrupt handler; for example,
a thread context save is performed prior to invoking the interrupt handler.
The IA-32 code that makes up a "full" interrupt stub is shown below. A full
interrupt stub is one that is associated with an interrupt vector that requires
a "beginning of interrupt" (BOI) callout and an "end of interrupt" (EOI) callout
(both of which require a parameter).
0x00 call _IntEnt /@ inform kernel of interrupt @/
Machine code: 0xe8, 0x00, 0x00, 0x00, 0x00
0x05 pushl $BoiParameter /@ optional: push BOI handler parameter @/
Machine code: 0x68, 0x00, 0x00, 0x00, 0x00
0x0a call BoiRoutine /@ optional: callout to BOI rtn @/
Machine code: 0xe8, 0x00, 0x00, 0x00, 0x00
0x0f pushl $IsrParameter /@ push ISR parameter @/
Machine code: 0x68, 0x00, 0x00, 0x00, 0x00
0x14 call IsrRoutine /@ invoke ISR @/
Machine code: 0xe8, 0x00, 0x00, 0x00, 0x00
0x19 pushl $EoiParameter /@ optional: push EOI handler parameter @/
Machine code: 0x68, 0x00, 0x00, 0x00, 0x00
0x1e call EoiRoutine /@ optional: callout to EOI rtn @/
Machine code: 0xe8, 0x00, 0x00, 0x00, 0x00
0x23 addl $(4 * numParams), %esp /@ pop parameters @/
Machine code: 0x83, 0xc4, (4 * numParams)
0x26 jmp _IntExit /@ restore context or reschedule @/
Machine code: 0xe9, 0x00, 0x00, 0x00, 0x00
NOTE: Be sure to update the arch specific definition of the _INT_STUB_SIZE macro
to reflect the maximum potential size of the interrupt stub (as shown above).
The _INT_STUB_SIZE macro is defined in kernel/arch/Intel/include/nanok.h
and include/Intel/nanokernel.h
*/
#ifndef CONFIG_NO_ISRS
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <nanok.h>
/* the _IdtBaseAddress symbol is generated via a linker script */
extern unsigned char _IdtBaseAddress[];
extern void _SpuriousIntHandler(void *);
extern void _SpuriousIntNoErrCodeHandler(void *);
/*
* These 'dummy' variables are used in nanoArchInit() to force the inclusion of
* the spurious interrupt handlers. They *must* be declared in a module other
* than the one they are used in to get around garbage collection issues and
* warnings issued by some compilers that they aren't used. Therefore care must
* be taken if they are to be moved. See nanok.h for more information.
*/
void *__DummySpur;
void *__DummyExcEnt;
/*
* Place the addresses of the spurious interrupt handlers into the intList
* section. The genIdt tool can then populate any unused vectors with
* these routines.
*/
void *__attribute__((section(".spurIsr"))) MK_ISR_NAME(_SpuriousIntHandler) =
&_SpuriousIntHandler;
void *__attribute__((section(".spurNoErrIsr")))
MK_ISR_NAME(_SpuriousIntNoErrCodeHandler) =
&_SpuriousIntNoErrCodeHandler;
/*
* Bitfield used to track which interrupt vectors are available for allocation.
* The array is initialized to indicate all vectors are currently available
* (all bits set); each 32-bit word tracks 32 vectors, so the array length is
* the rounded-up value of CONFIG_IDT_NUM_VECTORS / 32.
*
* NOTE: For portability reasons, the ROUND_UP() macro can NOT be used to
* perform the rounding up calculation below. Unlike both GCC and ICC, the
* Diab compiler generates an error when a macro that takes a parameter is
* used to define the size of an array.
*/
/*
* Fully parenthesized so the macro expands safely in any expression context
* (the previous definition left the trailing "/ 32" outside the parentheses).
*/
#define VEC_ALLOC_NUM_INTS (((CONFIG_IDT_NUM_VECTORS + 31) & ~31) / 32)
static unsigned int _VectorsAllocated[VEC_ALLOC_NUM_INTS] = {
[0 ...(VEC_ALLOC_NUM_INTS - 1)] = 0xffffffff
};
/*******************************************************************************
*
* _IntVecSet - connect a routine to an interrupt vector
*
* This routine "connects" the specified <routine> to the specified interrupt
* <vector>. On the IA-32 architecture, an interrupt vector is a value from
* 0 to 255. The routine simply fills in the matching entry of the interrupt
* descriptor table (IDT) with an interrupt-gate descriptor so that <routine>
* is invoked when interrupt <vector> is asserted. The <dpl> argument gives
* the privilege level for the interrupt-gate descriptor: (hardware)
* interrupts and exceptions should specify 0, whereas handlers for user-mode
* software generated interrupts should specify 3.
*
* The <vector> parameter must be less than CONFIG_IDT_NUM_VECTORS; no
* explicit validation is performed by this primitive.
*
* RETURNS: N/A
*
* INTERNAL
* Unlike nanoCpuExcConnect() and irq_connect(), _IntVecSet() is a very basic
* API that only updates the appropriate IDT entry; no stub synthesis or
* vector allocation takes place here.
*
*/
void _IntVecSet(
	unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */
	void (*routine)(void *),
	unsigned int dpl /* priv level for interrupt-gate descriptor */
	)
{
	/*
	 * Each IDT entry occupies 8 bytes, so index the base address as an
	 * array of 64-bit descriptors to locate the slot for <vector>.
	 */
	unsigned long long *idtSlot =
		(unsigned long long *)_IdtBaseAddress + vector;

	_IdtEntCreate(idtSlot, routine, dpl);

	/* not required to synchronize the instruction and data caches */
}
/*******************************************************************************
*
* irq_connect - connect a C routine to a hardware interrupt
*
* This routine connects an interrupt service routine (ISR) coded in C to
* the specified hardware <irq>. An interrupt vector will be allocated to
* satisfy the specified <priority>. If the interrupt service routine is being
* connected to a software generated interrupt, then <irq> must be set to
* NANO_SOFT_IRQ.
*
* The specified <irq> represents a virtualized IRQ, i.e. it does not
* necessarily represent a specific IRQ line on a given interrupt controller
* device. The BSP presents a virtualized set of IRQs from 0 to N, where N
* is the total number of IRQs supported by all the interrupt controller devices
* on the board. See the BSP's documentation for the mapping of virtualized
* IRQ to physical IRQ.
*
* When the device asserts an interrupt on the specified <irq>, a switch to
* the interrupt stack is performed (if not already executing on the interrupt
* stack), followed by saving the integer (i.e. non-floating point) context of
* the currently executing task, fiber, or ISR. The ISR specified by <routine>
* will then be invoked with the single <parameter>. When the ISR returns, a
* context switch may occur.
*
* The <pIntStubMem> argument points to memory that the system can use to
* synthesize the interrupt stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_INT_STUB will provide a
* suitable area of the proper size.
*
* RETURNS: the allocated interrupt vector
*
* WARNINGS
* Some boards utilize interrupt controllers where the interrupt vector
* cannot be programmed on an IRQ basis; as a result, the vector assigned
* to the <irq> during interrupt controller initialization will be returned.
* In these cases, the requested <priority> is not honoured since the interrupt
* prioritization is fixed by the interrupt controller (e.g. IRQ0 will always
* be the highest priority interrupt regardless of what interrupt vector
* was assigned to IRQ0).
*
* This routine does not perform range checking on the requested <priority>
* and thus, depending on the underlying interrupt controller, may result
* in the assignment of an interrupt vector located in the reserved range of
* the processor.
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
int irq_connect(
unsigned int irq, /* virtualized IRQ to connect to */
unsigned int priority, /* requested priority of interrupt */
void (*routine)(void *parameter), /* C interrupt handler */
void *parameter, /* parameter passed to C routine */
NANO_INT_STUB pIntStubMem /* memory for synthesized stub code */
)
{
unsigned char offsetAdjust;
unsigned char numParameters = 1; /* stub always pushes ISR parameter */
extern void _IntEnt(void);
extern void _IntExit(void);
int vector;
NANO_EOI_GET_FUNC boiRtn;
NANO_EOI_GET_FUNC eoiRtn;
void *boiRtnParm;
void *eoiRtnParm;
unsigned char boiParamRequired;
unsigned char eoiParamRequired;
#define STUB_PTR pIntStubMem
/*
* Invoke the BSP provided routine _SysIntVecAlloc() which will:
* a) allocate a vector satisfying the requested priority,
* b) return EOI and BOI related information for stub code synthesis,
*and
* c) program the underlying interrupt controller device such that
* when <irq> is asserted, the allocated interrupt vector will be
* presented to the CPU.
*
* The _SysIntVecAlloc() routine will use the "utility" routine
* _IntVecAlloc() provided in this module to scan the
*_VectorsAllocated[]
* array for a suitable vector.
*/
vector = _SysIntVecAlloc(irq,
priority,
&boiRtn,
&eoiRtn,
&boiRtnParm,
&eoiRtnParm,
&boiParamRequired,
&eoiParamRequired);
#if defined(DEBUG)
/*
* The return value from _SysIntVecAlloc() will be -1 if an invalid
* <irq> or <priority> was specified, or if a vector could not be
* allocated to honour the requested priority (for the boards that can
* support programming the interrupt vector for each IRQ).
*/
if (vector == -1)
return (-1);
#endif /* DEBUG */
/*
* A minimal interrupt stub code will be synthesized based on the
* values of <boiRtn>, <eoiRtn>, <boiRtnParm>, <eoiRtnParm>,
* <boiParamRequired>, and <eoiParamRequired>. The invocation of
* _IntEnt() and _IntExit() will always be required.
*/
STUB_PTR[0] = IA32_CALL_OPCODE;
UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1],
(unsigned int)&_IntEnt - (unsigned int)&pIntStubMem[5]);
offsetAdjust = 5;
#ifdef CONFIG_BOI_HANDLER_SUPPORTED
/* poke in the BOI related opcodes */
if (boiRtn == NULL)
/* no need to insert anything */;
else if (boiParamRequired != 0) {
STUB_PTR[offsetAdjust] = IA32_PUSH_OPCODE;
UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1 + offsetAdjust],
(unsigned int)boiRtnParm);
STUB_PTR[5 + offsetAdjust] = IA32_CALL_OPCODE;
UNALIGNED_WRITE(
(unsigned int *)&STUB_PTR[6 + offsetAdjust],
(unsigned int)boiRtn -
(unsigned int)&pIntStubMem[10 + offsetAdjust]);
offsetAdjust += 10;
++numParameters;
} else {
STUB_PTR[offsetAdjust] = IA32_CALL_OPCODE;
UNALIGNED_WRITE(
(unsigned int *)&STUB_PTR[1 + offsetAdjust],
(unsigned int)boiRtn -
(unsigned int)&pIntStubMem[5 + offsetAdjust]);
offsetAdjust += 5;
}
#endif /* CONFIG_BOI_HANDLER_SUPPORTED */
/* IsrParameter and IsrRoutine always required */
STUB_PTR[offsetAdjust] = IA32_PUSH_OPCODE;
UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1 + offsetAdjust],
(unsigned int)parameter);
STUB_PTR[5 + offsetAdjust] = IA32_CALL_OPCODE;
UNALIGNED_WRITE((unsigned int *)&STUB_PTR[6 + offsetAdjust],
(unsigned int)routine -
(unsigned int)&pIntStubMem[10 + offsetAdjust]);
offsetAdjust += 10;
#ifdef CONFIG_EOI_HANDLER_SUPPORTED
/* poke in the EOI related opcodes */
if (eoiRtn == NULL)
/* no need to insert anything */;
else if (eoiParamRequired != 0) {
STUB_PTR[offsetAdjust] = IA32_PUSH_OPCODE;
UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1 + offsetAdjust],
(unsigned int)eoiRtnParm);
STUB_PTR[5 + offsetAdjust] = IA32_CALL_OPCODE;
UNALIGNED_WRITE(
(unsigned int *)&STUB_PTR[6 + offsetAdjust],
(unsigned int)eoiRtn -
(unsigned int)&pIntStubMem[10 + offsetAdjust]);
offsetAdjust += 10;
++numParameters;
} else {
STUB_PTR[offsetAdjust] = IA32_CALL_OPCODE;
UNALIGNED_WRITE(
(unsigned int *)&STUB_PTR[1 + offsetAdjust],
(unsigned int)eoiRtn -
(unsigned int)&pIntStubMem[5 + offsetAdjust]);
offsetAdjust += 5;
}
#endif /* CONFIG_EOI_HANDLER_SUPPORTED */
/*
* Poke in the stack popping related opcode. Do it a byte at a time
* because
* &STUB_PTR[offsetAdjust] may not be aligned which does not work for
* all
* targets.
*/
STUB_PTR[offsetAdjust] = IA32_ADD_OPCODE & 0xFF;
STUB_PTR[1 + offsetAdjust] = IA32_ADD_OPCODE >> 8;
STUB_PTR[2 + offsetAdjust] = (unsigned char)(4 * numParameters);
offsetAdjust += 3;
/*
* generate code that invokes _IntExit(); note that a jump is used,
* since _IntExit() takes care of returning back to the context that
* experienced the interrupt (i.e. branch tail optimization)
*/
STUB_PTR[offsetAdjust] = IA32_JMP_OPCODE;
UNALIGNED_WRITE((unsigned int *)&STUB_PTR[1 + offsetAdjust],
(unsigned int)&_IntExit -
(unsigned int)&pIntStubMem[5 + offsetAdjust]);
/*
* There is no need to explicitly synchronize or flush the instruction
* cache due to the above code synthesis. See the Intel 64 and IA-32
* Architectures Software Developer's Manual: Volume 3A: System
*Programming
* Guide; specifically the section titled "Self Modifying Code".
*
* Cache synchronization/flushing is not required for the i386 as it
* does not contain any on-chip I-cache; likewise, post-i486 processors
* invalidate the I-cache automatically. An i486 requires the CPU
* to perform a 'jmp' instruction before executing the synthesized code;
* however, the call and return that follows meets this requirement.
*/
_IntVecSet(vector, (void (*)(void *))pIntStubMem, 0);
return vector;
}
/*******************************************************************************
*
* _IntVecAlloc - allocate a free interrupt vector given <priority>
*
* This routine scans the _VectorsAllocated[] array for a free vector that
* satisfies the specified <priority>. It is a utility function for use only
* by a BSP's _SysIntVecAlloc() routine.
*
* This routine assumes that the relationship between interrupt priority and
* interrupt vector is :
*
* priority = vector / 16;
*
* Since vectors 0 to 31 are reserved by the IA-32 architecture, the priorities
* of user defined interrupts range from 2 to 15. Each interrupt priority level
* contains 16 vectors, and the prioritization of interrupts within a priority
* level is determined by the vector number; the higher the vector number, the
* higher the priority within that priority level.
*
* It is also assumed that the interrupt controllers are capable of managing
* interrupt requests on a per-vector level as opposed to a per-priority level.
* For example, the local APIC on Pentium4 and later processors, the in-service
* register (ISR) and the interrupt request register (IRR) are 256 bits wide.
*
* RETURNS: allocated interrupt vector
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
int _IntVecAlloc(unsigned int priority)
{
	unsigned int imask;       /* interrupt lock-out key */
	unsigned int entryToScan; /* index into _VectorsAllocated[] */
	unsigned int fsb; /* first set bit in entry */
	int vector;

#if defined(DEBUG)
	/*
	 * check whether the IDT was configured with sufficient vectors to
	 * satisfy the priority request (each priority level spans 16
	 * vectors: priority << 4 is the first, + 15 the last).
	 */

	if (((priority << 4) + 15) > CONFIG_IDT_NUM_VECTORS)
		return (-1);
#endif /* DEBUG */

	/*
	 * Atomically allocate a vector from the _VectorsAllocated[] array
	 * to prevent race conditions with other tasks/fibers attempting to
	 * allocate an interrupt vector.
	 */

	entryToScan = priority >> 1; /* _VectorsAllocated[] entry to scan */

	/*
	 * The _VectorsAllocated[] entry specified by 'entryToScan' is a 32-bit
	 * quantity and thus represents the vectors for a pair of priority
	 * levels: the even priority in the lower 16 bits, the odd priority in
	 * the upper 16 bits.  Use find_last_set() to scan for the upper of
	 * the 2, and find_first_set() to scan for the lower of the 2
	 * priorities.
	 *
	 * Note that find_first_set/find_last_set returns bit position from 1
	 * to 32, or 0 if the argument is zero.
	 */

	imask = irq_lock();

	if ((priority % 2) == 0) {
		/* scan from the LSB for even priorities */

		fsb = find_first_set(_VectorsAllocated[entryToScan]);

#if defined(DEBUG)
		if ((fsb == 0) || (fsb > 16)) {
			/*
			 * No bits are set in the lower 16 bits, thus all
			 * vectors for this priority have been allocated.
			 */

			irq_unlock(imask);
			return (-1);
		}
#endif /* DEBUG */
	} else {
		/* scan from the MSB for odd priorities */

		fsb = find_last_set(_VectorsAllocated[entryToScan]);

#if defined(DEBUG)
		if ((fsb == 0) || (fsb < 17)) {
			/*
			 * No bits are set in the upper 16 bits, thus all
			 * vectors for this priority have been allocated.
			 */

			irq_unlock(imask);
			return (-1);
		}
#endif /* DEBUG */
	}

	/* find_first_set/find_last_set returned bit positions as 1 to 32 */

	--fsb;

	/* mark the vector as allocated (a clear bit means "in use") */

	_VectorsAllocated[entryToScan] &= ~(1 << fsb);

	irq_unlock(imask);

	/* compute vector given allocated bit within the priority level */

	vector = (entryToScan << 5) + fsb;

	return vector;
}
/*******************************************************************************
*
* _IntVecMarkAllocated - mark interrupt vector as allocated
*
* This routine is used to "reserve" an interrupt vector that is allocated
* or assigned by any means other than _IntVecAllocate(). This marks the vector
* as allocated so that any future invocations of _IntVecAllocate() will not
* return that vector.
*
* RETURNS: N/A
*
*/
/*
 * Reserve <vector> in the allocation bitmap: clear its bit so that
 * subsequent _IntVecAlloc() calls can never return it.
 */
void _IntVecMarkAllocated(unsigned int vector)
{
	unsigned int word = vector >> 5;   /* 32 vectors per bitmap word */
	unsigned int bit = vector & 0x1f;  /* position within that word */
	unsigned int lockKey = irq_lock();

	_VectorsAllocated[word] &= ~(1 << bit);
	irq_unlock(lockKey);
}
/*******************************************************************************
*
* _IntVecMarkFree - mark interrupt vector as free
*
* This routine is used to "free" an interrupt vector that is allocated
* or assigned using _IntVecAllocate() or _IntVecMarkAllocated(). This marks the
* vector as available so that any future allocations can return that vector.
 *
 * RETURNS: N/A
 */
/*
 * Release <vector> back to the allocation bitmap: set its bit so that
 * future _IntVecAlloc() calls may hand it out again.
 */
void _IntVecMarkFree(unsigned int vector)
{
	unsigned int word = vector >> 5;   /* 32 vectors per bitmap word */
	unsigned int bit = vector & 0x1f;  /* position within that word */
	unsigned int lockKey = irq_lock();

	_VectorsAllocated[word] |= (1 << bit);
	irq_unlock(lockKey);
}
#endif /* CONFIG_NO_ISRS */

146
arch/x86/core/inthndlset.c Normal file
View File

@ -0,0 +1,146 @@
/* inthndlset.c - VxMicro interrupt management support for IA-32 arch */
/*
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DESCRIPTION
* This module contains the irq_handler_set() API. This routine is closely
* associated with irq_connect(), and any changes to the layout of the
* constructed interrupt stub must be reflected in both places.
*
* INTERNAL
* This routine is defined here, rather than in intconnect.c, so that it can be
* omitted from a system image if it isn't required.
*/
#include <nanok.h>
/* the _IdtBaseAddress symbol is generated via a linker script */
extern unsigned char _IdtBaseAddress[];
/*
* The FIRST_OPT_OPCODE_OFF macro defines the offset of the first optional
* opcode in an interrupt stub. Given that only the "call _IntEnt" is
* mandatory, the subsequent instruction at offset 5 is "optional".
*/
#define FIRST_OPT_OPCODE_OFF 5
/*******************************************************************************
*
* irq_handler_set - set the handler in an already connected stub
*
* This routine is used to modify an already fully constructed interrupt stub
* to specify a new <routine> and/or <parameter>.
*
* WARNINGS:
*
* A fully constructed interrupt stub is generated via irq_connect(), i.e.
* the irq_handler_set() function must only be called after invoking
* irq_connect().
*
* The caller must ensure that the associated interrupt does not occur while
* this routine is executing, otherwise race conditions may arise that could
* cause the interrupt stub to invoke the handler using an incorrect routine
* and/or parameter. If possible, silence the source of the associated interrupt
* only, rather than locking out all interrupts.
*
* RETURNS: N/A
*
*/
void irq_handler_set
	(unsigned int vector,                /* IDT vector of connected stub */
	void (*oldRoutine)(void *parameter), /* C handler currently installed */
	void (*newRoutine)(void *parameter), /* C handler to install instead */
	void *parameter)                     /* new parameter for <newRoutine> */
{
	unsigned int ix =
		FIRST_OPT_OPCODE_OFF; /* call _IntEnt is not optional */
	unsigned int *pIdtEntry;
	unsigned char *pIntStubMem;

	/*
	 * Recover the stub's address from its IDT gate descriptor (8 bytes
	 * per vector): the low 16 bits of the handler offset sit in the
	 * first word of the descriptor, the high 16 bits in the second.
	 */
	pIdtEntry = (unsigned int *)(_IdtBaseAddress + (vector << 3));
	pIntStubMem = (unsigned char *)(((uint16_t)pIdtEntry[0]) |
					(pIdtEntry[1] & 0xffff0000));

	/*
	 * Given the generation of the stub is dynamic, i.e. the invocations
	 * of an EOI routine (with parameter) and/or BOI routine (with
	 * parameter) are optional based on the requirements of the interrupt
	 * controller, the <oldRoutine> parameter is used to quickly find the
	 * correct bytes in the stub code to update.
	 */
	while (ix < _INT_STUB_SIZE) {
		/* locate the call opcode */

		if (pIntStubMem[ix] == IA32_CALL_OPCODE) {
			unsigned int opcodeOffToMatch;
			unsigned int opcodeOff;

			/* read the offset encoded in the call opcode */

			opcodeOff = UNALIGNED_READ(
				(unsigned int *)&pIntStubMem[ix + 1]);

			/* call offsets are relative to the next instruction */

			opcodeOffToMatch = (unsigned int)oldRoutine -
					   (unsigned int)&pIntStubMem[ix + 5];

			/* does the encoded offset match <oldRoutine> ? */

			if (opcodeOff == opcodeOffToMatch) {
				/* match found -> write new routine and parameter */

				UNALIGNED_WRITE(
					(unsigned int *)&pIntStubMem[ix + 1],
					(unsigned int)newRoutine -
						(unsigned int)&pIntStubMem[ix +
									   5]);

				/*
				 * The 4-byte immediate of the preceding
				 * "push <parameter>" instruction sits
				 * directly before this call opcode.
				 */
				UNALIGNED_WRITE(
					(unsigned int *)&pIntStubMem[ix - 4],
					(unsigned int)parameter);
				return; /* done */
			}
		}

		/* all instructions in the stub are 5 bytes long */

		ix += 5;
	}
	/*
	 * NOTE(review): if <oldRoutine> is not found within _INT_STUB_SIZE
	 * bytes the stub is silently left unchanged -- presumably the caller
	 * guarantees <oldRoutine> was previously connected; verify.
	 */
}

525
arch/x86/core/intstub.s Normal file
View File

@ -0,0 +1,525 @@
/* intstub.s - VxMicro interrupt management support for IA-32 architecture */
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements assembly routines to manage interrupts in VxMicro on
the Intel IA-32 architecture. More specifically, the interrupt (asynchronous
exception) stubs are implemented in this module. The stubs are invoked when
entering and exiting a C interrupt handler.
*/
#define _ASMLANGUAGE
#ifndef CONFIG_NO_ISRS
#include <nanok.h>
#include <nanokernel/x86/asm.h>
#include <offsets.h> /* nanokernel structure offset definitions */
#include <nanokernel/cpu.h> /* _NANO_ERR_SPURIOUS_INT */
/* exports (internal APIs) */
GTEXT(_IntEnt)
GTEXT(_IntExit)
GTEXT(_SpuriousIntNoErrCodeHandler)
GTEXT(_SpuriousIntHandler)
/* exports (public APIs) */
GTEXT(irq_lock)
GTEXT(irq_unlock)
/* externs */
GTEXT(_Swap)
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
GTEXT(_SysPowerSaveIdleExit)
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
GTEXT(_IntLatencyStart)
GTEXT(_IntLatencyStop)
#endif
/*******************************************************************************
*
* _IntEnt - inform the VxMicro kernel of an interrupt
*
* This function is called from the interrupt stub created by irq_connect()
* to inform the VxMicro kernel of an interrupt. This routine increments
* _NanoKernel.nested (to support interrupt nesting), switches to the
* base of the interrupt stack, if not already on the interrupt stack, and then
* saves the volatile integer registers onto the stack. Finally, control is
* returned back to the interrupt stub code (which will then invoke the
* "application" interrupt service routine).
*
* Only the volatile integer registers are saved since ISRs are assumed not to
* utilize floating point (or SSE) instructions. If an ISR requires the usage
* of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave()
* (or nanoCpuSseSave()) at the beginning of the ISR. A subsequent
* nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning
* from the ISR. Note that the nanoCpuFpSave(), nanoCpuSseSave(),
* nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been
* implemented yet.
*
* WARNINGS
*
* Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _IntEnt (void);
*
* NOMANUAL
*/
SECTION_FUNC(TEXT, _IntEnt)

	/*
	 * The _IntVecSet() routine creates an interrupt-gate descriptor for
	 * all connections.  The processor will automatically clear the IF
	 * bit in the EFLAGS register upon execution of the handler, thus
	 * _IntEnt() (and _ExcEnt) need not issue an 'cli' as the first
	 * instruction.
	 *
	 * Clear the direction flag.  It is automatically restored when the
	 * interrupt exits via the IRET instruction.
	 */

	cld

	/*
	 * Note that the processor has pushed both the EFLAGS register
	 * and the logical return address (cs:eip) onto the stack prior
	 * to invoking the handler specified in the IDT.
	 */

	/*
	 * swap eax and return address on the current stack;
	 * this saves eax on the stack without losing knowledge
	 * of how to get back to the interrupt stub
	 */
#ifdef CONFIG_LOCK_INSTRUCTION_UNSUPPORTED
	/* emulate xchg (which carries an implicit LOCK) in three steps */
	pushl	(%esp)
	movl	%eax, 4(%esp)
	popl	%eax
#else
	xchgl	%eax, (%esp)
#endif /* CONFIG_LOCK_INSTRUCTION_UNSUPPORTED*/

	/*
	 * The remaining volatile registers are pushed onto the current
	 * stack.
	 */

	pushl	%ecx
	pushl	%edx

#ifdef CONFIG_INT_LATENCY_BENCHMARK
	/*
	 * Volatile registers are now saved; it is safe to start measuring
	 * how long interrupts are disabled.
	 * The interrupt gate created by irq_connect disables the
	 * interrupt.
	 *
	 * Preserve EAX as it contains the stub return address.
	 */
	pushl	%eax
	call	_IntLatencyStart
	popl	%eax
#endif

	/* load %ecx with &_NanoKernel */

	movl	$_NanoKernel, %ecx

	/* switch to the interrupt stack for the non-nested case */

	incl	__tNANO_nested_OFFSET(%ecx)	/* inc interrupt nest count */
	cmpl	$1, __tNANO_nested_OFFSET(%ecx)	/* use int stack if !nested */
	jne	alreadyOnIntStack

	/* switch to base of the interrupt stack */

	movl	%esp, %edx	/* save current context stack pointer */
	movl	__tNANO_common_isp_OFFSET(%ecx), %esp	/* load new sp value */

	/* save context stack pointer onto base of interrupt stack */

	pushl	%edx		/* Save stack pointer */

#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
	cmpl	$0, __tNANO_idle_OFFSET(%ecx)
	jne	_HandleIdle
	/* fast path is !idle, in the pipeline */
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */

	/* fall through to nested case */

BRANCH_LABEL(alreadyOnIntStack)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	/* preserve eax which contains the stub return address */
	pushl	%eax
	call	_IntLatencyStop
	popl	%eax
#endif
	sti			/* re-enable interrupts */
	jmp	*%eax		/* "return" back to stub */

#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
BRANCH_LABEL(_HandleIdle)
	pushl	%eax
	/* pass the saved idle tick count to _SysPowerSaveIdleExit() */
	push	__tNANO_idle_OFFSET(%ecx)
	movl	$0, __tNANO_idle_OFFSET(%ecx)

	/*
	 * Beware that a timer driver's _SysPowerSaveIdleExit() implementation might
	 * expect that interrupts are disabled when invoked.  This ensures that
	 * the calculation and programming of the device for the next timer
	 * deadline is not interrupted.
	 */

	call	_SysPowerSaveIdleExit
	add	$0x4, %esp	/* pop the argument */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_IntLatencyStop
#endif
	sti			/* re-enable interrupts */
	popl	%eax
	jmp	*%eax		/* "return" back to stub */
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/*******************************************************************************
*
* _IntExit - inform the VxMicro kernel of an interrupt exit
*
* This function is called from the interrupt stub created by irq_connect()
* to inform the VxMicro kernel that the processing of an interrupt has
* completed. This routine decrements _NanoKernel.nested (to support interrupt
* nesting), restores the volatile integer registers, and then switches
* back to the interrupted context's stack, if this isn't a nested interrupt.
*
* Finally, control is returned back to the interrupted fiber context or ISR.
* A context switch _may_ occur if the interrupted context was a task context,
* in which case one or more other fiber and task contexts will execute before
* this routine resumes and control gets returned to the interrupted task.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _IntExit (void);
*
* NOMANUAL
*/
SECTION_FUNC(TEXT, _IntExit)

	cli			/* disable interrupts */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_IntLatencyStart
#endif

	/* determine whether exiting from a nested interrupt */

	movl	$_NanoKernel, %ecx
	decl	__tNANO_nested_OFFSET(%ecx)	/* dec interrupt nest count */
	jne	nestedInterrupt			/* 'iret' if nested case */

	/*
	 * Determine whether the execution of the ISR requires a context
	 * switch.  If the interrupted context is PREEMPTIBLE and
	 * _NanoKernel.fiber is non-NULL, a _Swap() needs to occur.
	 */

	movl	__tNANO_current_OFFSET (%ecx), %eax
	testl	$PREEMPTIBLE, __tCCS_flags_OFFSET(%eax)
	je	noReschedule
	cmpl	$0, __tNANO_fiber_OFFSET (%ecx)
	je	noReschedule

	/*
	 * Set the INT_ACTIVE bit in the tCCS to allow the upcoming call to
	 * _Swap() to determine whether non-floating registers need to be
	 * preserved using the lazy save/restore algorithm, or to indicate to
	 * debug tools that a preemptive context switch has occurred.
	 *
	 * Setting the NO_METRICS bit tells _Swap() that the per-context
	 * [totalRunTime] calculation has already been performed and that
	 * there is no need to do it again.
	 */

#if defined(CONFIG_FP_SHARING) ||  defined(CONFIG_GDB_INFO)
	orl	$INT_ACTIVE, __tCCS_flags_OFFSET(%eax)
#endif

	/*
	 * A context reschedule is required: keep the volatile registers of
	 * the interrupted context on the context's stack.  Utilize
	 * the existing _Swap() primitive to save the remaining
	 * thread's registers (including floating point) and perform
	 * a switch to the new context.
	 */

	popl	%esp		/* switch back to kernel stack */
	pushfl			/* push KERNEL_LOCK_KEY argument */
	call	_Swap

	/*
	 * The interrupted context thread has now been scheduled,
	 * as the result of a _later_ invocation of _Swap().
	 *
	 * Now need to restore the interrupted context's environment before
	 * returning control to it at the point where it was interrupted ...
	 */

#if ( defined(CONFIG_FP_SHARING) || \
      defined(CONFIG_GDB_INFO) )
	/*
	 * _Swap() has restored the floating point registers, if needed.
	 * Clear the INT_ACTIVE bit of the interrupted context's tCCS
	 * since it has served its purpose.
	 */

	movl	_NanoKernel + __tNANO_current_OFFSET, %eax
	andl	$~INT_ACTIVE, __tCCS_flags_OFFSET (%eax)
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

	addl	$4, %esp	/* pop KERNEL_LOCK_KEY argument */

	/* Restore volatile registers and return to the interrupted context */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_IntLatencyStop
#endif
	popl	%edx
	popl	%ecx
	popl	%eax

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	iret

BRANCH_LABEL(noReschedule)

	/*
	 * A thread reschedule is not required; switch back to the
	 * interrupted thread's stack and restore volatile registers
	 */

	popl	%esp		/* pop thread stack pointer */

	/* fall through to 'nestedInterrupt' */

	/*
	 * For the nested interrupt case, the interrupt stack must still be
	 * utilized, and more importantly, a rescheduling decision must
	 * not be performed.
	 */

BRANCH_LABEL(nestedInterrupt)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_IntLatencyStop
#endif
	popl	%edx		/* pop volatile registers in reverse order */
	popl	%ecx
	popl	%eax

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	iret
/*******************************************************************************
*
* _SpuriousIntHandler -
* _SpuriousIntNoErrCodeHandler - spurious interrupt handler stubs
*
* Interrupt-gate descriptors are statically created for all slots in the IDT
* that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The
* former stub is connected to exception vectors where the processor pushes an
* error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
* records.
*
* A spurious interrupt is considered a fatal condition, thus this routine
* merely sets up the 'reason' and 'pEsf' parameters to the BSP provided
* routine: _SysFatalHwErrorHandler(). In other words, there is no provision
* to return to the interrupted context and thus the volatile registers
* are not saved.
*
* RETURNS: Never returns
*
* C function prototype:
*
* void _SpuriousIntHandler (void);
*
* INTERNAL
* The _IntVecSet() routine creates an interrupt-gate descriptor for all
* connections. The processor will automatically clear the IF bit
* in the EFLAGS register upon execution of the handler,
* thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be
* invoked with interrupts disabled.
*
* NOMANUAL
*/
SECTION_FUNC(TEXT, _SpuriousIntNoErrCodeHandler)

	/*
	 * For vectors where the processor does NOT push an error code,
	 * push a dummy one so both entry points present the same stack
	 * layout to the fatal-error handler.
	 */
	pushl	$0		/* push dummy err code onto stk */

	/* fall through to _SpuriousIntHandler */

SECTION_FUNC(TEXT, _SpuriousIntHandler)

	cld			/* Clear direction flag */

	/*
	 * The task's regular stack is being used, but push the value of ESP
	 * anyway so that _ExcExit can "recover the stack pointer"
	 * without determining whether the exception occured while CPL=3
	 */

	pushl	%esp		/* push cur stack pointer: pEsf arg */

BRANCH_LABEL(finishSpuriousInt)

	/* re-enable interrupts */

	sti

	/* push the 'unsigned int reason' parameter */

	pushl	$_NANO_ERR_SPURIOUS_INT

BRANCH_LABEL(callFatalHandler)

	/* call the fatal error handler */

	call	_NanoFatalErrorHandler

	/* handler shouldn't return, but call it again if it does */

	jmp	callFatalHandler
/*******************************************************************************
*
* irq_lock - disable interrupts on the local CPU
*
* This routine disables interrupts. It can be called from either interrupt
* or context level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to fiber_enable_ints() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the
* fiber_enable_ints() API. It should never be used to manually re-enable
* interrupts or to inspect or manipulate the contents of the source register.
*
* WARNINGS
* Invoking a VxMicro system routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* context executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a context, i.e. it's part
* of the context context. Thus, if a context disables interrupts and
* subsequently invokes a VxMicro system routine that causes the calling context
* to block, the interrupt disable state will be restored when the context is
* later rescheduled for execution.
*
* RETURNS: An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*/
SECTION_FUNC(TEXT, irq_lock)

	pushfl			/* save EFLAGS (incl. IF) as the lock-out key */
	cli			/* disable interrupts */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_IntLatencyStart
#endif
	popl	%eax		/* return the saved EFLAGS as the key */
	ret
/*******************************************************************************
*
* irq_unlock - enable interrupts on the local CPU
*
* This routine re-enables interrupts on the local CPU. The <key> parameter
* is an architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock().
*
* This routine can be called from either a context or ISR context.
*/
SECTION_FUNC(TEXT, irq_unlock)

	/* only re-enable if IF (EFLAGS bit 9, 0x200) was set in <key> */
	testl	$0x200, SP_ARG1(%esp)
	jz	skipIntEnable
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_IntLatencyStop
#endif
	sti			/* re-enable interrupts */
BRANCH_LABEL(skipIntEnable)
	ret
#endif /* CONFIG_NO_ISRS */

Some files were not shown because too many files have changed in this diff Show More