diff --git a/src/common/pico_sync/include/pico/lock_core.h b/src/common/pico_sync/include/pico/lock_core.h
index fc676cc..bf8bee7 100644
--- a/src/common/pico_sync/include/pico/lock_core.h
+++ b/src/common/pico_sync/include/pico/lock_core.h
@@ -93,6 +93,13 @@ void lock_init(lock_core_t *core, uint lock_num);
  * By default this returns the calling core number, but may be overridden (e.g. to return an RTOS task id)
  */
 #define lock_get_caller_owner_id() ((lock_owner_id_t)get_core_num())
+#ifndef lock_is_owner_id_valid
+#define lock_is_owner_id_valid(id) ((id)>=0)
+#endif
+#endif
+
+#ifndef lock_is_owner_id_valid
+#define lock_is_owner_id_valid(id) ((id) != LOCK_INVALID_OWNER_ID)
 #endif
 
 #ifndef lock_internal_spin_unlock_with_wait
diff --git a/src/common/pico_sync/include/pico/mutex.h b/src/common/pico_sync/include/pico/mutex.h
index 22dd19d..269caba 100644
--- a/src/common/pico_sync/include/pico/mutex.h
+++ b/src/common/pico_sync/include/pico/mutex.h
@@ -19,25 +19,51 @@ extern "C" {
  * \brief Mutex API for non IRQ mutual exclusion between cores
  *
  * Mutexes are application level locks usually used protecting data structures that might be used by
- * multiple cores. Unlike critical sections, the mutex protected code is not necessarily
- * required/expected to complete quickly, as no other sytemwide locks are held on account of a locked mutex.
+ * multiple threads of execution. Unlike critical sections, the mutex protected code is not necessarily
+ * required/expected to complete quickly, as no other system wide locks are held on account of an acquired mutex.
  *
- * Because they are not re-entrant on the same core, blocking on a mutex should never be done in an IRQ
- * handler. It is valid to call \ref mutex_try_enter from within an IRQ handler, if the operation
- * that would be conducted under lock can be skipped if the mutex is locked (at least by the same core).
+ * When acquired, the mutex has an owner (see \ref lock_get_caller_owner_id) which with the plain SDK is just
+ * the acquiring core, but in an RTOS it could be a task, or an IRQ handler context.
+ *
+ * Two variants of mutex are provided: \ref mutex_t (and associated mutex_ functions) is a regular mutex that cannot
+ * be acquired recursively by the same owner (a deadlock will occur if you try). \ref recursive_mutex_t
+ * (and associated recursive_mutex_ functions) is a recursive mutex that can be recursively obtained by
+ * the same caller, at the expense of some more overhead when acquiring and releasing.
+ *
+ * It is generally a bad idea to call blocking mutex_ or recursive_mutex_ functions from within an IRQ handler.
+ * It is valid to call \ref mutex_try_enter or \ref recursive_mutex_try_enter from within an IRQ handler, if the operation
+ * that would be conducted under lock can be skipped if the mutex is locked (at least by the same owner).
+ *
+ * NOTE: For backwards compatibility with version 1.2.0 of the SDK, if the define
+ * PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY is set to 1, then the regular mutex_ functions
+ * may also be used for recursive mutexes. This flag will be removed in a future version of the SDK.
  *
  * See \ref critical_section.h for protecting access between multiple cores AND IRQ handlers
  */
+
+/*! \brief recursive mutex instance
+ * \ingroup mutex
+ */
+typedef struct __packed_aligned {
+    lock_core_t core;
+    lock_owner_id_t owner;      //! owner id LOCK_INVALID_OWNER_ID for unowned
+    uint8_t enter_count;        //! ownership count
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    bool recursive;
+#endif
+} recursive_mutex_t;
+
+/*! \brief regular (non recursive) mutex instance
+ * \ingroup mutex
+ */
+#if !PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
 typedef struct __packed_aligned mutex {
     lock_core_t core;
     lock_owner_id_t owner;      //! owner id LOCK_INVALID_OWNER_ID for unowned
-    uint8_t recursion_state;    //! 0 means non recursive (owner or unowned)
-                                //! 1 is a maxed out recursive lock
-                                //! 2-254 is an owned lock
-                                //! 255 is an un-owned lock
 } mutex_t;
-
-#define MAX_RECURSION_STATE ((uint8_t)255)
+#else
+typedef recursive_mutex_t mutex_t; // they are one and the same when backwards compatible with SDK1.2.0
+#endif
 
 /*! \brief Initialise a mutex structure
  * \ingroup mutex
@@ -51,74 +77,140 @@ void mutex_init(mutex_t *mtx);
  *
  * A recursive mutex may be entered in a nested fashion by the same owner
  *
- * \param mtx Pointer to mutex structure
+ * \param mtx Pointer to recursive mutex structure
  */
-void recursive_mutex_init(mutex_t *mtx);
+void recursive_mutex_init(recursive_mutex_t *mtx);
 
 /*! \brief Take ownership of a mutex
  * \ingroup mutex
  *
- * This function will block until the calling core can claim ownership of the mutex.
- * On return the caller core owns the mutex
+ * This function will block until the caller can be granted ownership of the mutex.
+ * On return the caller owns the mutex
  *
  * \param mtx Pointer to mutex structure
  */
 void mutex_enter_blocking(mutex_t *mtx);
 
+/*! \brief Take ownership of a recursive mutex
+ * \ingroup mutex
+ *
+ * This function will block until the caller can be granted ownership of the mutex.
+ * On return the caller owns the mutex
+ *
+ * \param mtx Pointer to recursive mutex structure
+ */
+void recursive_mutex_enter_blocking(recursive_mutex_t *mtx);
+
 /*! \brief Attempt to take ownership of a mutex
  * \ingroup mutex
  *
- * If the mutex wasn't owned, this will claim the mutex and return true.
+ * If the mutex wasn't owned, this will claim the mutex for the caller and return true.
  * Otherwise (if the mutex was already owned) this will return false and the
- * calling core will *NOT* own the mutex.
+ * caller will *NOT* own the mutex.
  *
  * \param mtx Pointer to mutex structure
- * \param owner_out If mutex was already owned, and this pointer is non-zero, it will be filled in with the core number of the current owner of the mutex
+ * \param owner_out If mutex was already owned, and this pointer is non-zero, it will be filled in with the owner id of the current owner of the mutex
+ * \return true if mutex now owned, false otherwise
  */
 bool mutex_try_enter(mutex_t *mtx, uint32_t *owner_out);
 
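Editorial aside (not part of the patch): a minimal usage sketch of the two variants the new header text describes. All names below are hypothetical, and `mutex_exit`/`recursive_mutex_exit` are the release functions documented further down in this header.

```c
#include "pico/mutex.h"

static mutex_t counter_mutex;       // plain mutex: one enter per owner
static recursive_mutex_t log_mutex; // recursive mutex: nested enters allowed
static uint32_t counter;

void locks_init(void) {
    mutex_init(&counter_mutex);
    recursive_mutex_init(&log_mutex);
}

void counter_increment(void) {
    mutex_enter_blocking(&counter_mutex); // blocks until this caller is the owner
    counter++;
    mutex_exit(&counter_mutex);
}

void log_line(const char *s) {
    recursive_mutex_enter_blocking(&log_mutex);
    // ... emit s somewhere ...
    recursive_mutex_exit(&log_mutex);
}

void log_pair(const char *a, const char *b) {
    // Nested entry by the same owner is fine for the recursive variant;
    // doing this with a plain mutex_t would deadlock, as the header warns.
    recursive_mutex_enter_blocking(&log_mutex);
    log_line(a);
    log_line(b);
    recursive_mutex_exit(&log_mutex);
}
```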
-/*! \brief Wait for mutex with timeout
+/*! \brief Attempt to take ownership of a recursive mutex
  * \ingroup mutex
  *
- * Wait for up to the specific time to take ownership of the mutex. If the calling
- * core can take ownership of the mutex before the timeout expires, then true will be returned
- * and the calling core will own the mutex, otherwise false will be returned and the calling
- * core will *NOT* own the mutex.
+ * If the mutex wasn't owned or was owned by the caller, this will claim the mutex and return true.
+ * Otherwise (if the mutex was already owned by another owner) this will return false and the
+ * caller will *NOT* own the mutex.
  *
- * \param mtx Pointer to mutex structure
- * \param timeout_ms The timeout in milliseconds.
- * \return true if mutex now owned, false if timeout occurred before mutex became available
+ * \param mtx Pointer to recursive mutex structure
+ * \param owner_out If mutex was already owned by another owner, and this pointer is non-zero,
+ *                  it will be filled in with the owner id of the current owner of the mutex
+ * \return true if the recursive mutex is (now) owned, false otherwise
  */
-bool mutex_enter_timeout_ms(mutex_t *mtx, uint32_t timeout_ms);
+bool recursive_mutex_try_enter(recursive_mutex_t *mtx, uint32_t *owner_out);
 
 /*! \brief Wait for mutex with timeout
  * \ingroup mutex
  *
- * Wait for up to the specific time to take ownership of the mutex. If the calling
- * core can take ownership of the mutex before the timeout expires, then true will be returned
- * and the calling core will own the mutex, otherwise false will be returned and the calling
- * core will *NOT* own the mutex.
+ * Wait for up to the specified time to take ownership of the mutex. If the caller
+ * can be granted ownership of the mutex before the timeout expires, then true will be returned
+ * and the caller will own the mutex, otherwise false will be returned and the caller will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to mutex structure
+ * \param timeout_ms The timeout in milliseconds.
+ * \return true if mutex now owned, false if timeout occurred before ownership could be granted
+ */
+bool mutex_enter_timeout_ms(mutex_t *mtx, uint32_t timeout_ms);
+
+/*! \brief Wait for recursive mutex with timeout
+ * \ingroup mutex
+ *
+ * Wait for up to the specified time to take ownership of the recursive mutex. If the caller
+ * already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
+ * then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to recursive mutex structure
+ * \param timeout_ms The timeout in milliseconds.
+ * \return true if the recursive mutex is (now) owned, false if timeout occurred before ownership could be granted
+ */
+bool recursive_mutex_enter_timeout_ms(recursive_mutex_t *mtx, uint32_t timeout_ms);
+
+/*! \brief Wait for mutex with timeout
+ * \ingroup mutex
+ *
+ * Wait for up to the specified time to take ownership of the mutex. If the caller
+ * can be granted ownership of the mutex before the timeout expires, then true will be returned
+ * and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
  *
  * \param mtx Pointer to mutex structure
  * \param timeout_us The timeout in microseconds.
- * \return true if mutex now owned, false if timeout occurred before mutex became available
+ * \return true if mutex now owned, false if timeout occurred before ownership could be granted
  */
 bool mutex_enter_timeout_us(mutex_t *mtx, uint32_t timeout_us);
 
+/*! \brief Wait for recursive mutex with timeout
+ * \ingroup mutex
+ *
+ * Wait for up to the specified time to take ownership of the recursive mutex. If the caller
+ * already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
+ * then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to recursive mutex structure
+ * \param timeout_us The timeout in microseconds.
+ * \return true if the recursive mutex is (now) owned, false if timeout occurred before ownership could be granted
+ */
+bool recursive_mutex_enter_timeout_us(recursive_mutex_t *mtx, uint32_t timeout_us);
+
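Editorial aside (not part of the patch): the try/timeout variants above are the tools for contexts where blocking is not acceptable. A hedged sketch of both patterns recommended by the header's introduction; `state_mutex` and the handler name are hypothetical.

```c
#include "pico/mutex.h"
#include "pico/platform.h" // for __isr

static mutex_t state_mutex; // assumed initialized elsewhere with mutex_init()

// Bounded wait: give up after 10 ms rather than blocking indefinitely.
bool update_state_with_deadline(void) {
    if (!mutex_enter_timeout_ms(&state_mutex, 10)) {
        return false; // timed out; the caller does NOT own the mutex
    }
    // ... mutate the protected state ...
    mutex_exit(&state_mutex);
    return true;
}

// IRQ context: never block; skip the work if the mutex is held.
void __isr on_some_irq(void) {
    uint32_t owner;
    if (mutex_try_enter(&state_mutex, &owner)) {
        // ... quick update under the lock ...
        mutex_exit(&state_mutex);
    }
    // else: 'owner' holds the mutex; drop or defer this update
}
```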
 /*! \brief Wait for mutex until a specific time
  * \ingroup mutex
  *
- * Wait until the specific time to take ownership of the mutex. If the calling
- * core can take ownership of the mutex before the timeout expires, then true will be returned
- * and the calling core will own the mutex, otherwise false will be returned and the calling
- * core will *NOT* own the mutex.
+ * Wait until the specified time to take ownership of the mutex. If the caller
+ * can be granted ownership of the mutex before the timeout expires, then true will be returned
+ * and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
  *
  * \param mtx Pointer to mutex structure
- * \param until The time after which to return if the core cannot take ownership of the mutex
- * \return true if mutex now owned, false if timeout occurred before mutex became available
+ * \param until The time after which to return if the caller cannot be granted ownership of the mutex
+ * \return true if mutex now owned, false if timeout occurred before ownership could be granted
  */
 bool mutex_enter_block_until(mutex_t *mtx, absolute_time_t until);
 
+/*! \brief Wait for recursive mutex until a specific time
+ * \ingroup mutex
+ *
+ * Wait until the specified time to take ownership of the mutex. If the caller
+ * already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
+ * then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to recursive mutex structure
+ * \param until The time after which to return if the caller cannot be granted ownership of the mutex
+ * \return true if the recursive mutex is (now) owned, false if timeout occurred before ownership could be granted
+ */
+bool recursive_mutex_enter_block_until(recursive_mutex_t *mtx, absolute_time_t until);
+
 /*! \brief Release ownership of a mutex
  * \ingroup mutex
  *
@@ -126,13 +218,30 @@ bool mutex_enter_block_until(mutex_t *mtx, absolute_time_t until);
  */
 void mutex_exit(mutex_t *mtx);
 
-/*! \brief Test for mutex initialised state
+/*! \brief Release ownership of a recursive mutex
+ * \ingroup mutex
+ *
+ * \param mtx Pointer to recursive mutex structure
+ */
+void recursive_mutex_exit(recursive_mutex_t *mtx);
+
+/*! \brief Test for mutex initialized state
  * \ingroup mutex
  *
  * \param mtx Pointer to mutex structure
- * \return true if the mutex is initialised, false otherwise
+ * \return true if the mutex is initialized, false otherwise
  */
-static inline bool mutex_is_initialzed(mutex_t *mtx) {
+static inline bool mutex_is_initialized(mutex_t *mtx) {
+    return mtx->core.spin_lock != 0;
+}
+
+/*! \brief Test for recursive mutex initialized state
+ * \ingroup mutex
+ *
+ * \param mtx Pointer to recursive mutex structure
+ * \return true if the recursive mutex is initialized, false otherwise
+ */
+static inline bool recursive_mutex_is_initialized(recursive_mutex_t *mtx) {
     return mtx->core.spin_lock != 0;
 }
 
@@ -165,22 +274,22 @@ static inline bool mutex_is_initialzed(mutex_t *mtx) {
  * A recursive mutex defined as follows:
  *
  * ```c
- * auto_init_recursive_mutex(my_mutex);
+ * auto_init_recursive_mutex(my_recursive_mutex);
  * ```
  *
  * Is equivalent to doing
  *
  * ```c
- * static mutex_t my_mutex;
+ * static recursive_mutex_t my_recursive_mutex;
  *
  * void my_init_function() {
- *     recursive_mutex_init(&my_mutex);
+ *     recursive_mutex_init(&my_recursive_mutex);
  * }
  * ```
  *
  * But the initialization of the mutex is performed automatically during runtime initialization
  */
-#define auto_init_recursive_mutex(name) static __attribute__((section(".mutex_array"))) mutex_t name = { .recursion_state = MAX_RECURSION_STATE }
+#define auto_init_recursive_mutex(name) static __attribute__((section(".mutex_array"))) recursive_mutex_t name = { .core.spin_lock = (spin_lock_t *)1 /* marker for runtime_init */ }
 
 #ifdef __cplusplus
 }
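Editorial aside (not part of the patch): how the auto-init macros are intended to be used. `auto_init_mutex` is the companion macro for plain mutexes from the same header (not shown in this diff); the `(spin_lock_t *)1` initializer above is what lets the runtime loop in pico_runtime below distinguish the two kinds before they are initialized.

```c
#include "pico/mutex.h"

// Both live in the .mutex_array linker section; runtime_init() initializes
// them before main() runs, so no explicit *_init() call is needed.
auto_init_mutex(config_mutex);         // core.spin_lock starts as 0
auto_init_recursive_mutex(tree_mutex); // core.spin_lock starts as (spin_lock_t *)1

int main(void) {
    mutex_enter_blocking(&config_mutex);
    // ... read or write the configuration ...
    mutex_exit(&config_mutex);

    recursive_mutex_enter_blocking(&tree_mutex);
    recursive_mutex_exit(&tree_mutex);
    return 0;
}
```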
diff --git a/src/common/pico_sync/mutex.c b/src/common/pico_sync/mutex.c
index 45ede4d..3ea81c7 100644
--- a/src/common/pico_sync/mutex.c
+++ b/src/common/pico_sync/mutex.c
@@ -7,53 +7,87 @@
 #include "pico/mutex.h"
 #include "pico/time.h"
 
-static void mutex_init_internal(mutex_t *mtx, uint8_t recursion_state) {
+void mutex_init(mutex_t *mtx) {
     lock_init(&mtx->core, next_striped_spin_lock_num());
     mtx->owner = LOCK_INVALID_OWNER_ID;
-    mtx->recursion_state = recursion_state;
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    mtx->recursive = false;
+#endif
     __mem_fence_release();
 }
 
-void mutex_init(mutex_t *mtx) {
-    mutex_init_internal(mtx, 0);
-}
-
-void recursive_mutex_init(mutex_t *mtx) {
-    mutex_init_internal(mtx, MAX_RECURSION_STATE);
+void recursive_mutex_init(recursive_mutex_t *mtx) {
+    lock_init(&mtx->core, next_striped_spin_lock_num());
+    mtx->owner = LOCK_INVALID_OWNER_ID;
+    mtx->enter_count = 0;
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    mtx->recursive = true;
+#endif
+    __mem_fence_release();
 }
 
 void __time_critical_func(mutex_enter_blocking)(mutex_t *mtx) {
-    assert(mtx->core.spin_lock);
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        recursive_mutex_enter_blocking(mtx);
+        return;
+    }
+#endif
+    lock_owner_id_t caller = lock_get_caller_owner_id();
     do {
         uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-        lock_owner_id_t caller = lock_get_caller_owner_id();
-        if (mtx->owner == LOCK_INVALID_OWNER_ID) {
+        if (!lock_is_owner_id_valid(mtx->owner)) {
             mtx->owner = caller;
-            if (mtx->recursion_state) {
-                assert(mtx->recursion_state == MAX_RECURSION_STATE);
-                mtx->recursion_state--;
-            }
-        } else if (mtx->owner == caller && mtx->recursion_state > 1) {
-            mtx->recursion_state--;
+            spin_unlock(mtx->core.spin_lock, save);
+            break;
+        }
+        lock_internal_spin_unlock_with_wait(&mtx->core, save);
+    } while (true);
+}
+
+void __time_critical_func(recursive_mutex_enter_blocking)(recursive_mutex_t *mtx) {
+    lock_owner_id_t caller = lock_get_caller_owner_id();
+    do {
+        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+        if (mtx->owner == caller || !lock_is_owner_id_valid(mtx->owner)) {
+            mtx->owner = caller;
+            uint __unused total = ++mtx->enter_count;
+            spin_unlock(mtx->core.spin_lock, save);
+            assert(total); // check for overflow
+            return;
         } else {
             lock_internal_spin_unlock_with_wait(&mtx->core, save);
-            // spin lock already unlocked, so loop again
-            continue;
         }
-        spin_unlock(mtx->core.spin_lock, save);
-        break;
     } while (true);
 }
 
 bool __time_critical_func(mutex_try_enter)(mutex_t *mtx, uint32_t *owner_out) {
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        return recursive_mutex_try_enter(mtx, owner_out);
+    }
+#endif
     bool entered;
     uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-    lock_owner_id_t caller = lock_get_caller_owner_id();
-    if (mtx->owner == LOCK_INVALID_OWNER_ID) {
+    if (!lock_is_owner_id_valid(mtx->owner)) {
         mtx->owner = lock_get_caller_owner_id();
         entered = true;
-    } else if (mtx->owner == caller && mtx->recursion_state > 1) {
-        mtx->recursion_state--;
+    } else {
+        if (owner_out) *owner_out = (uint32_t) mtx->owner;
+        entered = false;
+    }
+    spin_unlock(mtx->core.spin_lock, save);
+    return entered;
+}
+
+bool __time_critical_func(recursive_mutex_try_enter)(recursive_mutex_t *mtx, uint32_t *owner_out) {
+    bool entered;
+    lock_owner_id_t caller = lock_get_caller_owner_id();
+    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+    if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
+        mtx->owner = caller;
+        uint __unused total = ++mtx->enter_count;
+        assert(total); // check for overflow
         entered = true;
     } else {
         if (owner_out) *owner_out = (uint32_t) mtx->owner;
@@ -67,47 +101,84 @@ bool __time_critical_func(mutex_enter_timeout_ms)(mutex_t *mtx, uint32_t timeout
     return mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
 }
 
+bool __time_critical_func(recursive_mutex_enter_timeout_ms)(recursive_mutex_t *mtx, uint32_t timeout_ms) {
+    return recursive_mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
+}
+
 bool __time_critical_func(mutex_enter_timeout_us)(mutex_t *mtx, uint32_t timeout_us) {
     return mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
 }
 
+bool __time_critical_func(recursive_mutex_enter_timeout_us)(recursive_mutex_t *mtx, uint32_t timeout_us) {
+    return recursive_mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
+}
+
 bool __time_critical_func(mutex_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        return recursive_mutex_enter_block_until(mtx, until);
+    }
+#endif
     assert(mtx->core.spin_lock);
+    lock_owner_id_t caller = lock_get_caller_owner_id();
     do {
         uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-        lock_owner_id_t caller = lock_get_caller_owner_id();
-        if (mtx->owner == LOCK_INVALID_OWNER_ID) {
+        if (!lock_is_owner_id_valid(mtx->owner)) {
             mtx->owner = caller;
-        } else if (mtx->owner == caller && mtx->recursion_state > 1) {
-            mtx->recursion_state--;
+            spin_unlock(mtx->core.spin_lock, save);
+            return true;
         } else {
             if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
                 // timed out
                 return false;
-            } else {
-                // not timed out; spin lock already unlocked, so loop again
-                continue;
             }
+            // not timed out; spin lock already unlocked, so loop again
+        }
+    } while (true);
+}
+
+bool __time_critical_func(recursive_mutex_enter_block_until)(recursive_mutex_t *mtx, absolute_time_t until) {
+    assert(mtx->core.spin_lock);
+    lock_owner_id_t caller = lock_get_caller_owner_id();
+    do {
+        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+        if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
+            mtx->owner = caller;
+            uint __unused total = ++mtx->enter_count;
+            spin_unlock(mtx->core.spin_lock, save);
+            assert(total); // check for overflow
+            return true;
+        } else {
+            if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
+                // timed out
+                return false;
+            }
+            // not timed out; spin lock already unlocked, so loop again
         }
-        spin_unlock(mtx->core.spin_lock, save);
-        return true;
     } while (true);
 }
 
 void __time_critical_func(mutex_exit)(mutex_t *mtx) {
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        recursive_mutex_exit(mtx);
+        return;
+    }
+#endif
     uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-    assert(mtx->owner != LOCK_INVALID_OWNER_ID);
-    if (!mtx->recursion_state) {
+    assert(lock_is_owner_id_valid(mtx->owner));
+    mtx->owner = LOCK_INVALID_OWNER_ID;
+    lock_internal_spin_unlock_with_notify(&mtx->core, save);
+}
+
+void __time_critical_func(recursive_mutex_exit)(recursive_mutex_t *mtx) {
+    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+    assert(lock_is_owner_id_valid(mtx->owner));
+    assert(mtx->enter_count);
+    if (!--mtx->enter_count) {
         mtx->owner = LOCK_INVALID_OWNER_ID;
         lock_internal_spin_unlock_with_notify(&mtx->core, save);
     } else {
-        mtx->recursion_state++;
-        assert(mtx->recursion_state);
-        if (mtx->recursion_state == MAX_RECURSION_STATE) {
-            mtx->owner = LOCK_INVALID_OWNER_ID;
-            lock_internal_spin_unlock_with_notify(&mtx->core, save);
-        } else {
-            spin_unlock(mtx->core.spin_lock, save);
-        }
+        spin_unlock(mtx->core.spin_lock, save);
     }
 }
\ No newline at end of file
diff --git a/src/rp2_common/pico_multicore/multicore.c b/src/rp2_common/pico_multicore/multicore.c
index 531e294..a6bf8c3 100644
--- a/src/rp2_common/pico_multicore/multicore.c
+++ b/src/rp2_common/pico_multicore/multicore.c
@@ -176,7 +176,7 @@ static void __isr __not_in_flash_func(multicore_lockout_handler)(void) {
 static void check_lockout_mutex_init(void) {
     // use known available lock - we only need it briefly
    uint32_t save = hw_claim_lock();
-    if (!mutex_is_initialzed(&lockout_mutex)) {
+    if (!mutex_is_initialized(&lockout_mutex)) {
         mutex_init(&lockout_mutex);
     }
     hw_claim_unlock(save);
@@ -237,7 +237,7 @@ void multicore_lockout_start_blocking() {
 }
 
 static bool multicore_lockout_end_block_until(absolute_time_t until) {
-    assert(mutex_is_initialzed(&lockout_mutex));
+    assert(mutex_is_initialized(&lockout_mutex));
     if (!mutex_enter_block_until(&lockout_mutex, until)) {
         return false;
     }
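Editorial aside (not part of the patch): what the PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY dispatch in mutex.c above buys. In that mode `mutex_t` aliases `recursive_mutex_t` and every `mutex_` entry point checks the `recursive` flag and forwards, so SDK 1.2.0-era code like the hypothetical sketch below keeps working:

```c
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
#include "pico/mutex.h"

static mutex_t legacy_mutex; // in this mode, actually a recursive_mutex_t

void legacy_init(void) {
    recursive_mutex_init(&legacy_mutex); // sets the 'recursive' flag
}

void legacy_nested_use(void) {
    mutex_enter_blocking(&legacy_mutex); // forwards to recursive_mutex_enter_blocking
    mutex_enter_blocking(&legacy_mutex); // nested entry by the same owner still works
    mutex_exit(&legacy_mutex);           // forwards to recursive_mutex_exit
    mutex_exit(&legacy_mutex);
}
#endif
```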
diff --git a/src/rp2_common/pico_runtime/runtime.c b/src/rp2_common/pico_runtime/runtime.c
index 06526b7..400f6cb 100644
--- a/src/rp2_common/pico_runtime/runtime.c
+++ b/src/rp2_common/pico_runtime/runtime.c
@@ -119,15 +119,27 @@ void runtime_init(void) {
     hw_clear_alias(padsbank0_hw)->io[28] = hw_clear_alias(padsbank0_hw)->io[29] = PADS_BANK0_GPIO0_IE_BITS;
 #endif
 
-    extern mutex_t __mutex_array_start;
-    extern mutex_t __mutex_array_end;
+    // this is an array of either mutex_t or recursive_mutex_t (i.e. not necessarily the same size)
+    // however each starts with a lock_core_t, and the spin_lock is initialized to address 1 for a recursive
+    // mutex and 0 for a regular one.
 
-    // the first function pointer, not the address of it.
-    for (mutex_t *m = &__mutex_array_start; m < &__mutex_array_end; m++) {
-        if (m->recursion_state) {
-            recursive_mutex_init(m);
+    static_assert(!(sizeof(mutex_t)&3), "");
+    static_assert(!(sizeof(recursive_mutex_t)&3), "");
+    static_assert(!offsetof(mutex_t, core), "");
+    static_assert(!offsetof(recursive_mutex_t, core), "");
+    extern lock_core_t __mutex_array_start;
+    extern lock_core_t __mutex_array_end;
+
+    for (lock_core_t *l = &__mutex_array_start; l < &__mutex_array_end; ) {
+        if (l->spin_lock) {
+            assert(1 == (uintptr_t)l->spin_lock); // indicator for a recursive mutex
+            recursive_mutex_t *rm = (recursive_mutex_t *)l;
+            recursive_mutex_init(rm);
+            l = &rm[1].core; // next
         } else {
+            mutex_t *m = (mutex_t *)l;
             mutex_init(m);
+            l = &m[1].core; // next
         }
     }
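Editorial aside (not part of the patch): the shape of the walk above, restated with stand-in types to show why it can traverse a mixed-size array. Every element begins with a `lock_core_t` whose `spin_lock` field doubles as a type tag before initialization (0 = plain mutex, 1 = recursive mutex), and the `static_assert`s guarantee each type starts with its core and has word-multiple size, so stepping via `&rm[1].core` or `&m[1].core` always lands on the next element's tag.

```c
#include <stdint.h>

typedef struct { uintptr_t spin_lock; } core_sk;                  // stand-in for lock_core_t
typedef struct { core_sk core; int owner; } mutex_sk;             // stand-in for mutex_t
typedef struct { core_sk core; int owner; uint8_t n; } rmutex_sk; // stand-in for recursive_mutex_t

static void walk(core_sk *l, core_sk *end) {
    while (l < end) {
        if (l->spin_lock) {                 // tag == 1: recursive mutex
            rmutex_sk *rm = (rmutex_sk *)l;
            // ... recursive_mutex_init(rm) in the real loop ...
            l = &rm[1].core;                // advance by sizeof(rmutex_sk)
        } else {                            // tag == 0: plain mutex
            mutex_sk *m = (mutex_sk *)l;
            // ... mutex_init(m) in the real loop ...
            l = &m[1].core;                 // advance by sizeof(mutex_sk)
        }
    }
}
```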