Split recursive mutex into their own functions (was Reduce performance hit of recursive mutex) (#495)

mutex_t and the mutex_ functions are reverted to their non-recursive versions (pre SDK 1.2.0), and a new recursive_mutex_t type with recursive_mutex_ functions has been added.

A PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY flag has been added to allow SDK 1.2.0 compatible behaviour (i.e. mutex_t can be used recursively or not), but this is slower and will be removed in a future version.
Graham Sanderson 2021-10-20 18:27:59 -05:00 committed by GitHub
parent 9320d192c3
commit 3c72e753b6
5 changed files with 296 additions and 97 deletions
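
As an illustration of the split described in the commit message above, here is a minimal usage sketch (the lock names and the locks_init/log_line/queue_push functions are hypothetical; the mutex_ and recursive_mutex_ calls are the ones introduced by this commit):

```c
#include "pico/mutex.h"

static mutex_t queue_mutex;           // regular mutex: must not be re-entered by its current owner
static recursive_mutex_t log_mutex;   // recursive mutex: the same owner may nest acquisitions

void locks_init(void) {
    mutex_init(&queue_mutex);
    recursive_mutex_init(&log_mutex);
}

void log_line(const char *s) {
    recursive_mutex_enter_blocking(&log_mutex);   // safe even if the caller already holds log_mutex
    // ... write s somewhere ...
    recursive_mutex_exit(&log_mutex);
}

void queue_push(int item) {
    mutex_enter_blocking(&queue_mutex);   // cheaper than the recursive variant, but re-entering here would deadlock
    // ... update the queue with item ...
    mutex_exit(&queue_mutex);
}
```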


@@ -93,6 +93,13 @@ void lock_init(lock_core_t *core, uint lock_num);
  * By default this returns the calling core number, but may be overridden (e.g. to return an RTOS task id)
  */
 #define lock_get_caller_owner_id() ((lock_owner_id_t)get_core_num())
+#ifndef lock_is_owner_id_valid
+#define lock_is_owner_id_valid(id) ((id)>=0)
+#endif
+#endif
+
+#ifndef lock_is_owner_id_valid
+#define lock_is_owner_id_valid(id) ((id) != LOCK_INVALID_OWNER_ID)
 #endif
 
 #ifndef lock_internal_spin_unlock_with_wait
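
The new lock_is_owner_id_valid hook sits alongside the existing lock_owner_id_t / lock_get_caller_owner_id overrides guarded by the #ifndef defaults above. A hedged sketch of how an RTOS port might supply its own definitions (my_rtos_current_task_id() is a placeholder for the RTOS's own call, and the port must arrange for these defines to be visible before pico/lock_core.h is processed):

```c
// Hypothetical RTOS port overrides; with these defined, the #ifndef defaults
// in pico/lock_core.h are skipped and the lock owner becomes a task id.
#define lock_owner_id_t            int32_t
#define LOCK_INVALID_OWNER_ID      ((int32_t)-1)
#define lock_get_caller_owner_id() ((lock_owner_id_t) my_rtos_current_task_id())
#define lock_is_owner_id_valid(id) ((id) != LOCK_INVALID_OWNER_ID)
```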


@@ -19,25 +19,51 @@ extern "C" {
  * \brief Mutex API for non IRQ mutual exclusion between cores
  *
  * Mutexes are application level locks usually used protecting data structures that might be used by
- * multiple cores. Unlike critical sections, the mutex protected code is not necessarily
- * required/expected to complete quickly, as no other sytemwide locks are held on account of a locked mutex.
+ * multiple threads of execution. Unlike critical sections, the mutex protected code is not necessarily
+ * required/expected to complete quickly, as no other sytem wide locks are held on account of an acquired mutex.
  *
- * Because they are not re-entrant on the same core, blocking on a mutex should never be done in an IRQ
- * handler. It is valid to call \ref mutex_try_enter from within an IRQ handler, if the operation
- * that would be conducted under lock can be skipped if the mutex is locked (at least by the same core).
+ * When acquired, the mutex has an owner (see \ref lock_get_caller_owner_id) which with the plain SDK is just
+ * the acquiring core, but in an RTOS it could be a task, or an IRQ handler context.
+ *
+ * Two variants of mutex are provided; \ref mutex_t (and associated mutex_ functions) is a regular mutex that cannot
+ * be acquired recursively by the same owner (a deadlock will occur if you try). \ref recursive_mutex_t
+ * (and associated recursive_mutex_ functions) is a recursive mutex that can be recursively obtained by
+ * the same caller, at the expense of some more overhead when acquiring and releasing.
+ *
+ * It is generally a bad idea to call blocking mutex_ or recursive_mutex_ functions from within an IRQ handler.
+ * It is valid to call \ref mutex_try_enter or \ref recursive_mutex_try_enter from within an IRQ handler, if the operation
+ * that would be conducted under lock can be skipped if the mutex is locked (at least by the same owner).
+ *
+ * NOTE: For backwards compatibility with version 1.2.0 of the SDK, if the define
+ * PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY is set to 1, then the the regular mutex_ functions
+ * may also be used for recursive mutexes. This flag will be removed in a future version of the SDK.
  *
  * See \ref critical_section.h for protecting access between multiple cores AND IRQ handlers
  */
 
+/*! \brief recursive mutex instance
+ * \ingroup mutex
+ */
+typedef struct __packed_aligned {
+    lock_core_t core;
+    lock_owner_id_t owner;      //! owner id LOCK_INVALID_OWNER_ID for unowned
+    uint8_t enter_count;        //! ownership count
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    bool recursive;
+#endif
+} recursive_mutex_t;
+
+/*! \brief regular (non recursive) mutex instance
+ * \ingroup mutex
+ */
+#if !PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
 typedef struct __packed_aligned mutex {
     lock_core_t core;
     lock_owner_id_t owner;      //! owner id LOCK_INVALID_OWNER_ID for unowned
-    uint8_t recursion_state;    //! 0 means non recursive (owner or unowned)
-                                //! 1 is a maxed out recursive lock
-                                //! 2-254 is an owned lock
-                                //! 255 is an un-owned lock
 } mutex_t;
-
-#define MAX_RECURSION_STATE ((uint8_t)255)
+#else
+typedef recursive_mutex_t mutex_t; // they are one and the same when backwards compatible with SDK1.2.0
+#endif
 
 /*! \brief Initialise a mutex structure
  * \ingroup mutex
@@ -51,74 +77,140 @@ void mutex_init(mutex_t *mtx);
  *
  * A recursive mutex may be entered in a nested fashion by the same owner
  *
- * \param mtx Pointer to mutex structure
+ * \param mtx Pointer to recursive mutex structure
  */
-void recursive_mutex_init(mutex_t *mtx);
+void recursive_mutex_init(recursive_mutex_t *mtx);
 
 /*! \brief Take ownership of a mutex
  * \ingroup mutex
  *
- * This function will block until the calling core can claim ownership of the mutex.
- * On return the caller core owns the mutex
+ * This function will block until the caller can be granted ownership of the mutex.
+ * On return the caller owns the mutex
  *
  * \param mtx Pointer to mutex structure
  */
 void mutex_enter_blocking(mutex_t *mtx);
 
+/*! \brief Take ownership of a recursive mutex
+ * \ingroup mutex
+ *
+ * This function will block until the caller can be granted ownership of the mutex.
+ * On return the caller owns the mutex
+ *
+ * \param mtx Pointer to recursive mutex structure
+ */
+void recursive_mutex_enter_blocking(recursive_mutex_t *mtx);
+
 /*! \brief Attempt to take ownership of a mutex
  * \ingroup mutex
  *
- * If the mutex wasn't owned, this will claim the mutex and return true.
+ * If the mutex wasn't owned, this will claim the mutex for the caller and return true.
  * Otherwise (if the mutex was already owned) this will return false and the
- * calling core will *NOT* own the mutex.
+ * caller will *NOT* own the mutex.
  *
  * \param mtx Pointer to mutex structure
- * \param owner_out If mutex was already owned, and this pointer is non-zero, it will be filled in with the core number of the current owner of the mutex
+ * \param owner_out If mutex was already owned, and this pointer is non-zero, it will be filled in with the owner id of the current owner of the mutex
+ * \return true if mutex now owned, false otherwise
  */
 bool mutex_try_enter(mutex_t *mtx, uint32_t *owner_out);
 
-/*! \brief Wait for mutex with timeout
+/*! \brief Attempt to take ownership of a recursive mutex
  * \ingroup mutex
  *
- * Wait for up to the specific time to take ownership of the mutex. If the calling
- * core can take ownership of the mutex before the timeout expires, then true will be returned
- * and the calling core will own the mutex, otherwise false will be returned and the calling
- * core will *NOT* own the mutex.
+ * If the mutex wasn't owned or was owned by the caller, this will claim the mutex and return true.
+ * Otherwise (if the mutex was already owned by another owner) this will return false and the
+ * caller will *NOT* own the mutex.
  *
- * \param mtx Pointer to mutex structure
- * \param timeout_ms The timeout in milliseconds.
- * \return true if mutex now owned, false if timeout occurred before mutex became available
+ * \param mtx Pointer to recursive mutex structure
+ * \param owner_out If mutex was already owned by another owner, and this pointer is non-zero,
+ * it will be filled in with the owner id of the current owner of the mutex
+ * \return true if the recursive mutex (now) owned, false otherwise
  */
-bool mutex_enter_timeout_ms(mutex_t *mtx, uint32_t timeout_ms);
+bool recursive_mutex_try_enter(recursive_mutex_t *mtx, uint32_t *owner_out);
 
 /*! \brief Wait for mutex with timeout
  * \ingroup mutex
  *
- * Wait for up to the specific time to take ownership of the mutex. If the calling
- * core can take ownership of the mutex before the timeout expires, then true will be returned
- * and the calling core will own the mutex, otherwise false will be returned and the calling
- * core will *NOT* own the mutex.
+ * Wait for up to the specific time to take ownership of the mutex. If the caller
+ * can be granted ownership of the mutex before the timeout expires, then true will be returned
+ * and the caller will own the mutex, otherwise false will be returned and the caller will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to mutex structure
+ * \param timeout_ms The timeout in milliseconds.
+ * \return true if mutex now owned, false if timeout occurred before ownership could be granted
+ */
+bool mutex_enter_timeout_ms(mutex_t *mtx, uint32_t timeout_ms);
+
+/*! \brief Wait for recursive mutex with timeout
+ * \ingroup mutex
+ *
+ * Wait for up to the specific time to take ownership of the recursive mutex. If the caller
+ * already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
+ * then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to recursive mutex structure
+ * \param timeout_ms The timeout in milliseconds.
+ * \return true if the recursive mutex (now) owned, false if timeout occurred before ownership could be granted
+ */
+bool recursive_mutex_enter_timeout_ms(recursive_mutex_t *mtx, uint32_t timeout_ms);
+
+/*! \brief Wait for mutex with timeout
+ * \ingroup mutex
+ *
+ * Wait for up to the specific time to take ownership of the mutex. If the caller
+ * can be granted ownership of the mutex before the timeout expires, then true will be returned
+ * and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
  *
  * \param mtx Pointer to mutex structure
  * \param timeout_us The timeout in microseconds.
- * \return true if mutex now owned, false if timeout occurred before mutex became available
+ * \return true if mutex now owned, false if timeout occurred before ownership could be granted
  */
 bool mutex_enter_timeout_us(mutex_t *mtx, uint32_t timeout_us);
 
+/*! \brief Wait for recursive mutex with timeout
+ * \ingroup mutex
+ *
+ * Wait for up to the specific time to take ownership of the recursive mutex. If the caller
+ * already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
+ * then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to mutex structure
+ * \param timeout_us The timeout in microseconds.
+ * \return true if the recursive mutex (now) owned, false if timeout occurred before ownership could be granted
+ */
+bool recursive_mutex_enter_timeout_us(recursive_mutex_t *mtx, uint32_t timeout_us);
+
 /*! \brief Wait for mutex until a specific time
  * \ingroup mutex
  *
- * Wait until the specific time to take ownership of the mutex. If the calling
- * core can take ownership of the mutex before the timeout expires, then true will be returned
- * and the calling core will own the mutex, otherwise false will be returned and the calling
- * core will *NOT* own the mutex.
+ * Wait until the specific time to take ownership of the mutex. If the caller
+ * can be granted ownership of the mutex before the timeout expires, then true will be returned
+ * and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
  *
  * \param mtx Pointer to mutex structure
- * \param until The time after which to return if the core cannot take ownership of the mutex
- * \return true if mutex now owned, false if timeout occurred before mutex became available
+ * \param until The time after which to return if the caller cannot be granted ownership of the mutex
+ * \return true if mutex now owned, false if timeout occurred before ownership could be granted
  */
 bool mutex_enter_block_until(mutex_t *mtx, absolute_time_t until);
 
+/*! \brief Wait for mutex until a specific time
+ * \ingroup mutex
+ *
+ * Wait until the specific time to take ownership of the mutex. If the caller
+ * already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
+ * then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
+ * will *NOT* own the mutex.
+ *
+ * \param mtx Pointer to recursive mutex structure
+ * \param until The time after which to return if the caller cannot be granted ownership of the mutex
+ * \return true if the recursive mutex (now) owned, false if timeout occurred before ownership could be granted
+ */
+bool recursive_mutex_enter_block_until(recursive_mutex_t *mtx, absolute_time_t until);
+
 /*! \brief Release ownership of a mutex
  * \ingroup mutex
  *
@@ -126,13 +218,30 @@ bool mutex_enter_block_until(mutex_t *mtx, absolute_time_t until);
  */
 void mutex_exit(mutex_t *mtx);
 
-/*! \brief Test for mutex initialised state
+/*! \brief Release ownership of a recursive mutex
+ * \ingroup mutex
+ *
+ * \param mtx Pointer to recursive mutex structure
+ */
+void recursive_mutex_exit(recursive_mutex_t *mtx);
+
+/*! \brief Test for mutex initialized state
  * \ingroup mutex
  *
  * \param mtx Pointer to mutex structure
- * \return true if the mutex is initialised, false otherwise
+ * \return true if the mutex is initialized, false otherwise
  */
-static inline bool mutex_is_initialzed(mutex_t *mtx) {
+static inline bool mutex_is_initialized(mutex_t *mtx) {
+    return mtx->core.spin_lock != 0;
+}
+
+/*! \brief Test for recursive mutex initialized state
+ * \ingroup mutex
+ *
+ * \param mtx Pointer to recursive mutex structure
+ * \return true if the recursive mutex is initialized, false otherwise
+ */
+static inline bool recursive_mutex_is_initialized(recursive_mutex_t *mtx) {
     return mtx->core.spin_lock != 0;
 }
@@ -165,22 +274,22 @@ static inline bool mutex_is_initialzed(mutex_t *mtx) {
  * A recursive mutex defined as follows:
  *
  * ```c
- * auto_init_recursive_mutex(my_mutex);
+ * auto_init_recursive_mutex(my_recursive_mutex);
  * ```
  *
  * Is equivalent to doing
  *
  * ```c
- * static mutex_t my_mutex;
+ * static recursive_mutex_t my_recursive_mutex;
  *
  * void my_init_function() {
- *    recursive_mutex_init(&my_mutex);
+ *    recursive_mutex_init(&my_recursive_mutex);
  * }
  * ```
  *
  * But the initialization of the mutex is performed automatically during runtime initialization
  */
-#define auto_init_recursive_mutex(name) static __attribute__((section(".mutex_array"))) mutex_t name = { .recursion_state = MAX_RECURSION_STATE }
+#define auto_init_recursive_mutex(name) static __attribute__((section(".mutex_array"))) recursive_mutex_t name = { .core.spin_lock = (spin_lock_t *)1 /* marker for runtime_init */ }
 
 #ifdef __cplusplus
 }
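
For the *_try_enter variants declared above, a minimal sketch of the pattern the header comment describes, skipping work from an IRQ handler instead of blocking (the handler, the stats_mutex lock and the work inside it are hypothetical):

```c
#include "pico/mutex.h"

static recursive_mutex_t stats_mutex;   // hypothetical lock, set up elsewhere with recursive_mutex_init()

// Hypothetical IRQ handler: per the header comment, *_try_enter may be called
// from an IRQ handler when the work can simply be skipped if the lock is held
// by another owner.
void my_irq_handler(void) {
    uint32_t owner;
    if (recursive_mutex_try_enter(&stats_mutex, &owner)) {
        // ... brief update of the shared stats ...
        recursive_mutex_exit(&stats_mutex);
    }
    // else: lock held by 'owner'; drop or defer the update
}
```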


@@ -7,53 +7,87 @@
 #include "pico/mutex.h"
 #include "pico/time.h"
 
-static void mutex_init_internal(mutex_t *mtx, uint8_t recursion_state) {
+void mutex_init(mutex_t *mtx) {
     lock_init(&mtx->core, next_striped_spin_lock_num());
     mtx->owner = LOCK_INVALID_OWNER_ID;
-    mtx->recursion_state = recursion_state;
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    mtx->recursive = false;
+#endif
     __mem_fence_release();
 }
 
-void mutex_init(mutex_t *mtx) {
-    mutex_init_internal(mtx, 0);
-}
-
-void recursive_mutex_init(mutex_t *mtx) {
-    mutex_init_internal(mtx, MAX_RECURSION_STATE);
+void recursive_mutex_init(recursive_mutex_t *mtx) {
+    lock_init(&mtx->core, next_striped_spin_lock_num());
+    mtx->owner = LOCK_INVALID_OWNER_ID;
+    mtx->enter_count = 0;
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    mtx->recursive = true;
+#endif
+    __mem_fence_release();
 }
 
 void __time_critical_func(mutex_enter_blocking)(mutex_t *mtx) {
-    assert(mtx->core.spin_lock);
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        recursive_mutex_enter_blocking(mtx);
+        return;
+    }
+#endif
+    lock_owner_id_t caller = lock_get_caller_owner_id();
     do {
         uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-        lock_owner_id_t caller = lock_get_caller_owner_id();
-        if (mtx->owner == LOCK_INVALID_OWNER_ID) {
+        if (!lock_is_owner_id_valid(mtx->owner)) {
             mtx->owner = caller;
-            if (mtx->recursion_state) {
-                assert(mtx->recursion_state == MAX_RECURSION_STATE);
-                mtx->recursion_state--;
-            }
-        } else if (mtx->owner == caller && mtx->recursion_state > 1) {
-            mtx->recursion_state--;
-        } else {
-            lock_internal_spin_unlock_with_wait(&mtx->core, save);
-            // spin lock already unlocked, so loop again
-            continue;
+            spin_unlock(mtx->core.spin_lock, save);
+            break;
         }
-        spin_unlock(mtx->core.spin_lock, save);
-        break;
+        lock_internal_spin_unlock_with_wait(&mtx->core, save);
     } while (true);
 }
 
+void __time_critical_func(recursive_mutex_enter_blocking)(recursive_mutex_t *mtx) {
+    lock_owner_id_t caller = lock_get_caller_owner_id();
+    do {
+        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+        if (mtx->owner == caller || !lock_is_owner_id_valid(mtx->owner)) {
+            mtx->owner = caller;
+            uint __unused total = ++mtx->enter_count;
+            spin_unlock(mtx->core.spin_lock, save);
+            assert(total); // check for overflow
+            return;
+        } else {
+            lock_internal_spin_unlock_with_wait(&mtx->core, save);
+        }
+    } while (true);
+}
+
 bool __time_critical_func(mutex_try_enter)(mutex_t *mtx, uint32_t *owner_out) {
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        return recursive_mutex_try_enter(mtx, owner_out);
+    }
+#endif
     bool entered;
     uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-    lock_owner_id_t caller = lock_get_caller_owner_id();
-    if (mtx->owner == LOCK_INVALID_OWNER_ID) {
+    if (!lock_is_owner_id_valid(mtx->owner)) {
         mtx->owner = lock_get_caller_owner_id();
         entered = true;
-    } else if (mtx->owner == caller && mtx->recursion_state > 1) {
-        mtx->recursion_state--;
+    } else {
+        if (owner_out) *owner_out = (uint32_t) mtx->owner;
+        entered = false;
+    }
+    spin_unlock(mtx->core.spin_lock, save);
+    return entered;
+}
+
+bool __time_critical_func(recursive_mutex_try_enter)(recursive_mutex_t *mtx, uint32_t *owner_out) {
+    bool entered;
+    lock_owner_id_t caller = lock_get_caller_owner_id();
+    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+    if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
+        mtx->owner = caller;
+        uint __unused total = ++mtx->enter_count;
+        assert(total); // check for overflow
         entered = true;
     } else {
         if (owner_out) *owner_out = (uint32_t) mtx->owner;
@@ -67,47 +101,84 @@ bool __time_critical_func(mutex_enter_timeout_ms)(mutex_t *mtx, uint32_t timeout
     return mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
 }
 
+bool __time_critical_func(recursive_mutex_enter_timeout_ms)(recursive_mutex_t *mtx, uint32_t timeout_ms) {
+    return recursive_mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
+}
+
 bool __time_critical_func(mutex_enter_timeout_us)(mutex_t *mtx, uint32_t timeout_us) {
     return mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
 }
 
+bool __time_critical_func(recursive_mutex_enter_timeout_us)(recursive_mutex_t *mtx, uint32_t timeout_us) {
+    return recursive_mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
+}
+
 bool __time_critical_func(mutex_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        return recursive_mutex_enter_block_until(mtx, until);
+    }
+#endif
     assert(mtx->core.spin_lock);
+    lock_owner_id_t caller = lock_get_caller_owner_id();
     do {
         uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-        lock_owner_id_t caller = lock_get_caller_owner_id();
-        if (mtx->owner == LOCK_INVALID_OWNER_ID) {
+        if (!lock_is_owner_id_valid(mtx->owner)) {
             mtx->owner = caller;
-        } else if (mtx->owner == caller && mtx->recursion_state > 1) {
-            mtx->recursion_state--;
+            spin_unlock(mtx->core.spin_lock, save);
+            return true;
         } else {
             if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
                 // timed out
                 return false;
-            } else {
-                // not timed out; spin lock already unlocked, so loop again
-                continue;
             }
+            // not timed out; spin lock already unlocked, so loop again
         }
-        spin_unlock(mtx->core.spin_lock, save);
-        return true;
     } while (true);
 }
 
+bool __time_critical_func(recursive_mutex_enter_block_until)(recursive_mutex_t *mtx, absolute_time_t until) {
+    assert(mtx->core.spin_lock);
+    lock_owner_id_t caller = lock_get_caller_owner_id();
+    do {
+        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+        if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
+            mtx->owner = caller;
+            uint __unused total = ++mtx->enter_count;
+            spin_unlock(mtx->core.spin_lock, save);
+            assert(total); // check for overflow
+            return true;
+        } else {
+            if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
+                // timed out
+                return false;
+            }
+            // not timed out; spin lock already unlocked, so loop again
+        }
+    } while (true);
+}
+
 void __time_critical_func(mutex_exit)(mutex_t *mtx) {
+#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
+    if (mtx->recursive) {
+        recursive_mutex_exit(mtx);
+        return;
+    }
+#endif
     uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
-    assert(mtx->owner != LOCK_INVALID_OWNER_ID);
-    if (!mtx->recursion_state) {
-        mtx->owner = LOCK_INVALID_OWNER_ID;
-        lock_internal_spin_unlock_with_notify(&mtx->core, save);
-    } else {
-        mtx->recursion_state++;
-        assert(mtx->recursion_state);
-        if (mtx->recursion_state == MAX_RECURSION_STATE) {
-            mtx->owner = LOCK_INVALID_OWNER_ID;
-            lock_internal_spin_unlock_with_notify(&mtx->core, save);
-        } else {
-            spin_unlock(mtx->core.spin_lock, save);
-        }
-    }
+    assert(lock_is_owner_id_valid(mtx->owner));
+    mtx->owner = LOCK_INVALID_OWNER_ID;
+    lock_internal_spin_unlock_with_notify(&mtx->core, save);
+}
+
+void __time_critical_func(recursive_mutex_exit)(recursive_mutex_t *mtx) {
+    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+    assert(lock_is_owner_id_valid(mtx->owner));
+    assert(mtx->enter_count);
+    if (!--mtx->enter_count) {
+        mtx->owner = LOCK_INVALID_OWNER_ID;
+        lock_internal_spin_unlock_with_notify(&mtx->core, save);
+    } else {
+        spin_unlock(mtx->core.spin_lock, save);
+    }
 }
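
A small sketch of the enter_count behaviour implemented by recursive_mutex_enter_blocking and recursive_mutex_exit above (the lock and function names are hypothetical; the mutex is assumed to have been set up with recursive_mutex_init):

```c
#include "pico/mutex.h"

static recursive_mutex_t rmtx;   // hypothetical lock, initialised elsewhere with recursive_mutex_init(&rmtx)

void leaf(void) {
    recursive_mutex_enter_blocking(&rmtx);   // enter_count 1 -> 2 when called from outer()
    // ... work that also needs the lock ...
    recursive_mutex_exit(&rmtx);             // 2 -> 1, lock still owned by this caller
}

void outer(void) {
    recursive_mutex_enter_blocking(&rmtx);   // 0 -> 1, takes ownership
    leaf();                                  // nested acquisition does not deadlock
    recursive_mutex_exit(&rmtx);             // 1 -> 0, ownership released and waiters notified
}
```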


@@ -176,7 +176,7 @@ static void __isr __not_in_flash_func(multicore_lockout_handler)(void) {
 static void check_lockout_mutex_init(void) {
     // use known available lock - we only need it briefly
     uint32_t save = hw_claim_lock();
-    if (!mutex_is_initialzed(&lockout_mutex)) {
+    if (!mutex_is_initialized(&lockout_mutex)) {
         mutex_init(&lockout_mutex);
     }
     hw_claim_unlock(save);
@@ -237,7 +237,7 @@ void multicore_lockout_start_blocking() {
 }
 
 static bool multicore_lockout_end_block_until(absolute_time_t until) {
-    assert(mutex_is_initialzed(&lockout_mutex));
+    assert(mutex_is_initialized(&lockout_mutex));
     if (!mutex_enter_block_until(&lockout_mutex, until)) {
         return false;
     }


@@ -119,15 +119,27 @@ void runtime_init(void) {
     hw_clear_alias(padsbank0_hw)->io[28] = hw_clear_alias(padsbank0_hw)->io[29] = PADS_BANK0_GPIO0_IE_BITS;
 #endif
 
-    extern mutex_t __mutex_array_start;
-    extern mutex_t __mutex_array_end;
-
-    // the first function pointer, not the address of it.
-    for (mutex_t *m = &__mutex_array_start; m < &__mutex_array_end; m++) {
-        if (m->recursion_state) {
-            recursive_mutex_init(m);
+    // this is an array of either mutex_t or recursive_mutex_t (i.e. not necessarily the same size)
+    // however each starts with a lock_core_t, and the spin_lock is initialized to address 1 for a recursive
+    // spinlock and 0 for a regular one.
+    static_assert(!(sizeof(mutex_t)&3), "");
+    static_assert(!(sizeof(recursive_mutex_t)&3), "");
+    static_assert(!offsetof(mutex_t, core), "");
+    static_assert(!offsetof(recursive_mutex_t, core), "");
+    extern lock_core_t __mutex_array_start;
+    extern lock_core_t __mutex_array_end;
+
+    for (lock_core_t *l = &__mutex_array_start; l < &__mutex_array_end; ) {
+        if (l->spin_lock) {
+            assert(1 == (uintptr_t)l->spin_lock); // indicator for a recursive mutex
+            recursive_mutex_t *rm = (recursive_mutex_t *)l;
+            recursive_mutex_init(rm);
+            l = &rm[1].core; // next
         } else {
+            mutex_t *m = (mutex_t *)l;
             mutex_init(m);
+            l = &m[1].core; // next
         }
     }
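
A brief usage sketch of the .mutex_array mechanism this loop services, assuming the pre-existing auto_init_mutex macro alongside the new auto_init_recursive_mutex (the variable names are hypothetical):

```c
#include "pico/mutex.h"

// Both locks are placed in the .mutex_array section and are initialised by the
// runtime_init() loop above before main() runs, so no explicit *_init() call is needed.
auto_init_mutex(frame_mutex);              // regular mutex: spin_lock field starts as 0
auto_init_recursive_mutex(trace_mutex);    // recursive mutex: spin_lock marker set to 1

int main(void) {
    mutex_enter_blocking(&frame_mutex);
    // ... critical section ...
    mutex_exit(&frame_mutex);

    recursive_mutex_enter_blocking(&trace_mutex);
    recursive_mutex_exit(&trace_mutex);
    return 0;
}
```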