Rework lock_core / timers (#378)
- Add recursive_mutex
- Make all locking primitives and sleep use common overridable wait/notify support to allow RTOS implementations to replace WFE/SEV with something more appropriate
- Add busy_wait_ms
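For context, "overridable wait/notify support" means the blocking primitives no longer hard-code WFE/SEV; they go through hooks that an RTOS port can redefine to block and wake tasks instead. A minimal sketch of that shape, with hypothetical macro names (the SDK's actual hook names may differ):

    #include "hardware/sync.h"

    // Default hooks: plain ARM event instructions. An RTOS port would
    // redefine these macros to suspend the calling task and wake waiters.
    #ifndef lock_internal_wait
    #define lock_internal_wait()   __wfe()   // wait for an event/wakeup
    #endif
    #ifndef lock_internal_notify
    #define lock_internal_notify() __sev()   // signal any waiting cores/tasks
    #endif

    // A blocking acquire then becomes:
    //     while (!try_acquire(lock)) lock_internal_wait();
    // and every release path ends with lock_internal_notify();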
@@ -49,6 +49,7 @@ void spin_lock_claim_mask(uint32_t mask) {
 
 void spin_lock_unclaim(uint lock_num) {
     check_lock_num(lock_num);
+    spin_unlock_unsafe(spin_lock_instance(lock_num));
     hw_claim_clear((uint8_t *) &claimed, lock_num);
 }
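The added spin_unlock_unsafe() call means unclaiming a spin lock now also forces the hardware lock back to the unlocked state. A hedged usage sketch of the claim/use/unclaim cycle, built only on existing hardware_sync functions:

    #include "hardware/sync.h"

    void example_use_spin_lock(void) {
        int num = spin_lock_claim_unused(true);      // claim a free lock, panic if none
        spin_lock_t *lock = spin_lock_instance((uint) num);

        uint32_t save = spin_lock_blocking(lock);    // take the lock, interrupts saved/disabled
        // ... critical section ...
        spin_unlock(lock, save);

        // with this change, unclaim also force-unlocks the hardware lock
        spin_lock_unclaim((uint) num);
    }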
@@ -80,17 +80,24 @@ uint64_t time_us_64(void);
 /*! \brief Busy wait wasting cycles for the given (32 bit) number of microseconds
  * \ingroup hardware_timer
  *
- * \param delay_us delay amount
+ * \param delay_us delay amount in microseconds
  */
 void busy_wait_us_32(uint32_t delay_us);
 
 /*! \brief Busy wait wasting cycles for the given (64 bit) number of microseconds
  * \ingroup hardware_timer
  *
- * \param delay_us delay amount
+ * \param delay_us delay amount in microseconds
  */
 void busy_wait_us(uint64_t delay_us);
 
+/*! \brief Busy wait wasting cycles for the given number of milliseconds
+ * \ingroup hardware_timer
+ *
+ * \param delay_ms delay amount in milliseconds
+ */
+void busy_wait_ms(uint32_t delay_ms);
+
 /*! \brief Busy wait wasting cycles until after the specified timestamp
  * \ingroup hardware_timer
  *
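Illustrative calls to the busy-wait family declared above: the 32-bit variant covers delays up to roughly 71 minutes, the 64-bit variant anything beyond that, and busy_wait_ms is the new millisecond convenience wrapper:

    #include "hardware/timer.h"

    void example_busy_waits(void) {
        busy_wait_us_32(500);             // short delay via the 32-bit variant
        busy_wait_us(10 * 1000000ull);    // 10 s; 64-bit variant handles long delays
        busy_wait_ms(250);                // new wrapper added by this commit
    }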
@@ -73,6 +73,15 @@ void busy_wait_us(uint64_t delay_us) {
     busy_wait_until(t);
 }
 
+void busy_wait_ms(uint32_t delay_ms)
+{
+    if (delay_ms <= 0x7fffffffu / 1000) {
+        busy_wait_us_32(delay_ms * 1000);
+    } else {
+        busy_wait_us(delay_ms * 1000ull);
+    }
+}
+
 void busy_wait_until(absolute_time_t t) {
     uint64_t target = to_us_since_boot(t);
     uint32_t hi_target = (uint32_t)(target >> 32u);
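The 0x7fffffffu / 1000 guard keeps the delay_ms * 1000 product within 31 bits, so the cheaper 32-bit path can never overflow (presumably 31 rather than 32 bits to stay clear of wrap-around in the underlying timer comparison). A quick check of the bound:

    #include <assert.h>
    #include <stdint.h>

    void check_fast_path_bound(void) {
        uint32_t max_fast_ms = 0x7fffffffu / 1000;              // 2,147,483 ms, ~35.8 minutes
        assert((uint64_t) max_fast_ms * 1000u <= 0x7fffffffu);  // product fits; no overflow
    }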
@@ -118,7 +118,11 @@ void runtime_init(void) {
 
     // the first function pointer, not the address of it.
     for (mutex_t *m = &__mutex_array_start; m < &__mutex_array_end; m++) {
-        mutex_init(m);
+        if (m->recursion_state) {
+            recursive_mutex_init(m);
+        } else {
+            mutex_init(m);
+        }
     }
 
 #if !(PICO_NO_RAM_VECTOR_TABLE || PICO_NO_FLASH)
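With this change, statically registered mutexes whose recursion_state is set are initialized as recursive mutexes. A hedged sketch of what recursion buys you: recursive_mutex_init() appears in the diff above, while the enter/exit calls below assume the ordinary mutex API also services recursive mutexes at this point in the SDK:

    #include "pico/mutex.h"

    mutex_t rmtx;

    void nested(void) {
        mutex_enter_blocking(&rmtx);   // same owner may re-enter a recursive mutex
        // ... work ...
        mutex_exit(&rmtx);
    }

    void example_recursive(void) {
        recursive_mutex_init(&rmtx);   // sets recursion_state so re-entry is counted
        mutex_enter_blocking(&rmtx);
        nested();                      // would deadlock with a plain mutex
        mutex_exit(&rmtx);
    }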
@@ -34,13 +34,15 @@ static stdio_driver_t *filter;
 auto_init_mutex(print_mutex);
 
 bool stdout_serialize_begin(void) {
-    uint core_num = get_core_num();
+    lock_owner_id_t caller = lock_get_caller_owner_id();
+    // not using lock_owner_id_t to avoid backwards incompatibility change to mutex_try_enter API
+    static_assert(sizeof(lock_owner_id_t) <= 4, "");
     uint32_t owner;
     if (!mutex_try_enter(&print_mutex, &owner)) {
-        if (owner == core_num) {
+        if (owner == (uint32_t)caller) {
             return false;
         }
-        // other core owns the mutex, so lets wait
+        // we are not a nested call, so lets wait
         mutex_enter_blocking(&print_mutex);
     }
     return true;
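The rewritten check compares the mutex owner against lock_get_caller_owner_id() rather than the core number, so nested calls are detected per owner (for example per RTOS task), not just per core; the static_assert documents that owner ids must still fit the 32-bit out-parameter of mutex_try_enter(). The pattern in isolation, with an illustrative function name:

    #include <stdbool.h>
    #include "pico/mutex.h"

    bool serialize_begin(mutex_t *m) {
        uint32_t owner;
        if (!mutex_try_enter(m, &owner)) {
            if (owner == (uint32_t) lock_get_caller_owner_id()) {
                return false;            // nested call by the current owner: refuse, don't deadlock
            }
            mutex_enter_blocking(m);     // held by someone else: wait for it
        }
        return true;                     // mutex now held by us
    }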