add __always_inline to trivial super low level inline functions (#379)
commit ec0dc7a88b (parent 53f1915a6b)
@@ -84,7 +84,7 @@ typedef ioptr const const_ioptr;
  * \param addr Address of writable register
  * \param mask Bit-mask specifying bits to set
  */
-inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
+__force_inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
     *(io_rw_32 *) hw_set_alias_untyped((volatile void *) addr) = mask;
 }
 
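For reference, these helpers write through the RP2040's atomic set/clear/xor register aliases, so a read-modify-write of `*addr` becomes a single store. A minimal usage sketch, not part of this commit — the register address and bit below are invented for illustration:

```c
#include "hardware/address_mapped.h"   // hw_set_bits(), hw_clear_bits(), io_rw_32

// Hypothetical memory-mapped control register, used only for illustration.
#define EXAMPLE_CTRL_REG   ((io_rw_32 *) 0x40008000u)
#define EXAMPLE_ENABLE_BIT (1u << 0)

static void example_enable(void) {
    // Single store to the set alias: no read-modify-write, so it cannot race
    // with other code touching different bits of the same register.
    hw_set_bits(EXAMPLE_CTRL_REG, EXAMPLE_ENABLE_BIT);
}

static void example_disable(void) {
    hw_clear_bits(EXAMPLE_CTRL_REG, EXAMPLE_ENABLE_BIT);
}
```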
@@ -94,7 +94,7 @@ inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
  * \param addr Address of writable register
  * \param mask Bit-mask specifying bits to clear
  */
-inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
+__force_inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
     *(io_rw_32 *) hw_clear_alias_untyped((volatile void *) addr) = mask;
 }
 
@@ -104,7 +104,7 @@ inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
  * \param addr Address of writable register
  * \param mask Bit-mask specifying bits to invert
  */
-inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
+__force_inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
     *(io_rw_32 *) hw_xor_alias_untyped((volatile void *) addr) = mask;
 }
 
@@ -120,7 +120,7 @@ inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
  * \param values Bits values
  * \param write_mask Mask of bits to change
  */
-inline static void hw_write_masked(io_rw_32 *addr, uint32_t values, uint32_t write_mask) {
+__force_inline static void hw_write_masked(io_rw_32 *addr, uint32_t values, uint32_t write_mask) {
     hw_xor_bits(addr, (*addr ^ values) & write_mask);
 }
 
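The body of hw_write_masked() relies on the identity reg ^ ((reg ^ values) & write_mask): bits inside write_mask take on values, bits outside it are untouched, and the update goes out through the XOR alias as a single store. A small host-side sketch of the same identity on a plain variable (illustrative only, no hardware involved):

```c
#include <assert.h>
#include <stdint.h>

// Same bit identity hw_write_masked() uses, applied to an ordinary variable:
// masked bits take on 'values', all other bits are preserved.
static uint32_t write_masked_model(uint32_t reg, uint32_t values, uint32_t mask) {
    return reg ^ ((reg ^ values) & mask);
}

int main(void) {
    uint32_t reg = 0xF0F0F0F0u;
    uint32_t out = write_masked_model(reg, 0x0000FFFFu, 0x00FF00FFu);
    assert((out &  0x00FF00FFu) == (0x0000FFFFu & 0x00FF00FFu)); // masked bits updated
    assert((out & ~0x00FF00FFu) == (reg        & ~0x00FF00FFu)); // other bits unchanged
    return 0;
}
```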
@@ -80,7 +80,7 @@ typedef volatile uint32_t spin_lock_t;
 
 * The SEV (send event) instruction sends an event to both cores.
 */
-inline static void __sev(void) {
+__force_inline static void __sev(void) {
     __asm volatile ("sev");
 }
 
@@ -90,7 +90,7 @@ inline static void __sev(void) {
 * The WFE (wait for event) instruction waits until one of a number of
 * events occurs, including events signalled by the SEV instruction on either core.
 */
-inline static void __wfe(void) {
+__force_inline static void __wfe(void) {
     __asm volatile ("wfe");
 }
 
@@ -99,7 +99,7 @@ inline static void __wfe(void) {
 *
 * The WFI (wait for interrupt) instruction waits for a interrupt to wake up the core.
 */
-inline static void __wfi(void) {
+__force_inline static void __wfi(void) {
     __asm volatile ("wfi");
 }
 
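As a usage sketch for the SEV/WFE pair (not from this commit; the flag and function names below are invented, and the flag handling is deliberately simplified):

```c
#include <stdbool.h>
#include "hardware/sync.h"   // __wfe(), __sev()

static volatile bool work_ready;   // illustrative flag shared between the two cores

// Waiting core: sleep until an event arrives, then re-check the condition.
static void wait_for_work(void) {
    while (!work_ready) {
        __wfe();               // low-power wait; woken by SEV or other wake events
    }
}

// Other core: publish work and wake any core sitting in __wfe().
static void post_work(void) {
    work_ready = true;
    __sev();
}
```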
@@ -109,7 +109,7 @@ inline static void __wfi(void) {
 * The DMB (data memory barrier) acts as a memory barrier, all memory accesses prior to this
 * instruction will be observed before any explicit access after the instruction.
 */
-inline static void __dmb(void) {
+__force_inline static void __dmb(void) {
     __asm volatile ("dmb" : : : "memory");
 }
 
@@ -120,7 +120,7 @@ inline static void __dmb(void) {
 * memory barrier (DMB). The DSB operation completes when all explicit memory
 * accesses before this instruction complete.
 */
-inline static void __dsb(void) {
+__force_inline static void __dsb(void) {
     __asm volatile ("dsb" : : : "memory");
 }
 
@@ -131,14 +131,14 @@ inline static void __dsb(void) {
 * so that all instructions following the ISB are fetched from cache or memory again, after
 * the ISB instruction has been completed.
 */
-inline static void __isb(void) {
+__force_inline static void __isb(void) {
     __asm volatile ("isb");
 }
 
 /*! \brief Acquire a memory fence
 * \ingroup hardware_sync
 */
-inline static void __mem_fence_acquire(void) {
+__force_inline static void __mem_fence_acquire(void) {
     // the original code below makes it hard for us to be included from C++ via a header
     // which itself is in an extern "C", so just use __dmb instead, which is what
     // is required on Cortex M0+
@@ -154,7 +154,7 @@ inline static void __mem_fence_acquire(void) {
 * \ingroup hardware_sync
 *
 */
-inline static void __mem_fence_release(void) {
+__force_inline static void __mem_fence_release(void) {
     // the original code below makes it hard for us to be included from C++ via a header
     // which itself is in an extern "C", so just use __dmb instead, which is what
     // is required on Cortex M0+
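A hedged sketch of the acquire/release pairing these fences are meant for (the shared names below are illustrative, not SDK symbols):

```c
#include <stdbool.h>
#include <stdint.h>
#include "hardware/sync.h"   // __mem_fence_acquire(), __mem_fence_release()

static uint32_t shared_payload;       // written by one core, read by the other
static volatile bool payload_valid;

static void producer(uint32_t value) {
    shared_payload = value;
    __mem_fence_release();    // payload store is ordered before the flag store
    payload_valid = true;
}

static uint32_t consumer(void) {
    while (!payload_valid) {
        tight_loop_contents();
    }
    __mem_fence_acquire();    // flag load is ordered before the payload load
    return shared_payload;
}
```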
@@ -171,7 +171,7 @@ inline static void __mem_fence_release(void) {
 *
 * \return The prior interrupt enable status for restoration later via restore_interrupts()
 */
-inline static uint32_t save_and_disable_interrupts(void) {
+__force_inline static uint32_t save_and_disable_interrupts(void) {
     uint32_t status;
     __asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
     __asm volatile ("cpsid i");
@@ -183,7 +183,7 @@ inline static uint32_t save_and_disable_interrupts(void) {
 *
 * \param status Previous interrupt status from save_and_disable_interrupts()
 */
-inline static void restore_interrupts(uint32_t status) {
+__force_inline static void restore_interrupts(uint32_t status) {
     __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
 }
 
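Typical critical-section usage of this pair looks like the sketch below (not from this commit; the counter is illustrative):

```c
#include <stdint.h>
#include "hardware/sync.h"   // save_and_disable_interrupts(), restore_interrupts()

static volatile uint32_t event_count;   // illustrative counter shared with an IRQ handler

static void bump_event_count(void) {
    uint32_t saved = save_and_disable_interrupts();   // PRIMASK saved, IRQs masked
    event_count++;                                    // cannot be interrupted on this core
    restore_interrupts(saved);                        // previous PRIMASK state restored
}
```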
@@ -193,7 +193,7 @@ inline static void restore_interrupts(uint32_t status) {
 * \param lock_num Spinlock ID
 * \return The spinlock instance
 */
-inline static spin_lock_t *spin_lock_instance(uint lock_num) {
+__force_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
     invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
     return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
 }
@@ -204,7 +204,7 @@ inline static spin_lock_t *spin_lock_instance(uint lock_num) {
 * \param lock The Spinlock instance
 * \return The Spinlock ID
 */
-inline static uint spin_lock_get_num(spin_lock_t *lock) {
+__force_inline static uint spin_lock_get_num(spin_lock_t *lock) {
     invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                             (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                             ((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
@@ -216,7 +216,7 @@ inline static uint spin_lock_get_num(spin_lock_t *lock) {
 *
 * \param lock Spinlock instance
 */
-inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
+__force_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
     // Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
     // with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait on another core
     // anyway which should be finished soon
@@ -229,7 +229,7 @@ inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
 *
 * \param lock Spinlock instance
 */
-inline static void spin_unlock_unsafe(spin_lock_t *lock) {
+__force_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
     __mem_fence_release();
     *lock = 0;
 }
@@ -242,7 +242,7 @@ inline static void spin_unlock_unsafe(spin_lock_t *lock) {
 * \param lock Spinlock instance
 * \return interrupt status to be used when unlocking, to restore to original state
 */
-inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
+__force_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
     uint32_t save = save_and_disable_interrupts();
     spin_lock_unsafe_blocking(lock);
     return save;
@@ -270,7 +270,7 @@ inline static bool is_spin_locked(spin_lock_t *lock) {
 *
 * \sa spin_lock_blocking()
 */
-inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
+__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
     spin_unlock_unsafe(lock);
     restore_interrupts(saved_irq);
 }
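A usage sketch of the spin-lock API touched above (it assumes lock number 0 has been reserved for this purpose elsewhere, e.g. via spin_lock_claim(); the counter is illustrative):

```c
#include <stdint.h>
#include "hardware/sync.h"   // spin_lock_instance(), spin_lock_blocking(), spin_unlock()

static uint32_t shared_counter;   // shared between cores, protected by spinlock 0

static void increment_shared_counter(void) {
    spin_lock_t *lock = spin_lock_instance(0);
    uint32_t irq_state = spin_lock_blocking(lock);   // disables IRQs, then spins for the lock
    shared_counter++;                                // held for only a few cycles, per the comments above
    spin_unlock(lock, irq_state);                    // releases the lock, restores IRQ state
}
```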
@@ -280,7 +280,7 @@ inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
 *
 * \return The core number the call was made from
 */
-static inline uint get_core_num(void) {
+__force_inline static uint get_core_num(void) {
     return (*(uint32_t *) (SIO_BASE + SIO_CPUID_OFFSET));
 }
 
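A minimal per-core dispatch sketch using get_core_num() (assuming the header edited above; the branch contents are placeholders):

```c
#include "hardware/sync.h"   // get_core_num() is declared alongside the sync helpers here

static void per_core_init(void) {
    if (get_core_num() == 0) {
        // work that should only run on core 0
    } else {
        // work that should only run on core 1
    }
}
```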
@@ -45,10 +45,15 @@ extern "C" {
 * Decorates a function name, such that the function will execute from RAM, explicitly marking it as
 * noinline to prevent it being inlined into a flash function by the compiler
 */
-#define __no_inline_not_in_flash_func(func_name) __attribute__((noinline)) __not_in_flash_func(func_name)
+#define __no_inline_not_in_flash_func(func_name) __noinline __not_in_flash_func(func_name)
 
 #define __packed_aligned __packed __aligned(4)
 
+#if defined(__GNUC__) && __GNUC__ < 7
+#define __force_inline inline __always_inline
+#else
+#define __force_inline __always_inline
+#endif
 #ifndef count_of
 #define count_of(a) (sizeof(a)/sizeof((a)[0]))
 #endif
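The new __force_inline macro is what every function above now uses; the GCC < 7 branch presumably keeps the explicit `inline` keyword because older GCC is stricter about applying the always_inline attribute to functions not also declared inline. A hedged sketch of using it in application code (the helper below is illustrative, not part of the SDK):

```c
#include <stdint.h>
#include "pico/platform.h"   // __force_inline

// A trivial helper that should be inlined even at -O0 / -Og; illustrative only.
__force_inline static uint32_t example_mask_low_byte(uint32_t v) {
    return v & 0xffu;
}
```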
@@ -71,7 +76,7 @@ static inline void __breakpoint(void) {
 /**
 * Ensure that the compiler does not move memory access across this method call
 */
-static inline void __compiler_memory_barrier(void) {
+__force_inline static void __compiler_memory_barrier(void) {
     __asm__ volatile ("" : : : "memory");
 }
 
@@ -140,7 +145,7 @@ static inline void tight_loop_contents(void) {}
 * \param b the second operand
 * \return a * b
 */
-inline static int32_t __mul_instruction(int32_t a, int32_t b) {
+__force_inline static int32_t __mul_instruction(int32_t a, int32_t b) {
 asm ("mul %0, %1" : "+l" (a) : "l" (b) : );
 return a;
 }
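A small usage sketch of __mul_instruction(), which forces the Cortex-M0+ MULS instruction rather than leaving the choice to the compiler (the wrapper name is illustrative):

```c
#include <stdint.h>
#include "pico/platform.h"   // __mul_instruction()

// Illustrative scaling helper built on the inline MULS wrapper.
static int32_t scale(int32_t sample, int32_t gain) {
    return __mul_instruction(sample, gain);
}
```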
@@ -167,7 +172,7 @@ return a;
 * Get the current exception level on this core
 * \return the exception number if the CPU is handling an exception, or 0 otherwise
 */
-extern uint __get_current_exception(void);
+uint __get_current_exception(void);
 
 #ifdef __cplusplus
 }