Rework lock_core / timers (#378)
- Add recursive_mutex.
- Make all locking primitives and sleep use common, overridable wait/notify support, so that RTOS implementations can replace WFE/SEV with something more appropriate.
- Add busy_wait_ms.
This commit is contained in:
@ -22,8 +22,10 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "pico/lock_core.h"
|
||||
|
||||
typedef struct {
|
||||
spin_lock_t *lock;
|
||||
lock_core_t core;
|
||||
uint8_t *data;
|
||||
uint16_t wptr;
|
||||
uint16_t rptr;
|
||||
@ -85,9 +87,9 @@ static inline uint queue_get_level_unsafe(queue_t *q) {
|
||||
* \return Number of entries in the queue
|
||||
*/
|
||||
// \brief Safely read the number of entries currently in the queue.
//
// Takes the queue's spin lock around queue_get_level_unsafe() so the
// read is consistent with concurrent producers/consumers.
//
// \param q Pointer to an initialized queue.
// \return Number of entries in the queue.
static inline uint queue_get_level(queue_t *q) {
    // Lock via the embedded lock_core (post-rework: q->core.spin_lock,
    // not the old standalone q->lock field).
    uint32_t save = spin_lock_blocking(q->core.spin_lock);
    uint level = queue_get_level_unsafe(q);
    spin_unlock(q->core.spin_lock, save);
    return level;
}
|
||||
|
||||
|
@ -9,7 +9,7 @@
|
||||
#include "pico/util/queue.h"
|
||||
|
||||
void queue_init_with_spinlock(queue_t *q, uint element_size, uint element_count, uint spinlock_num) {
|
||||
q->lock = spin_lock_instance(spinlock_num);
|
||||
lock_init(&q->core, spinlock_num);
|
||||
q->data = (uint8_t *)calloc(element_count + 1, element_size);
|
||||
q->element_count = (uint16_t)element_count;
|
||||
q->element_size = (uint16_t)element_size;
|
||||
@ -33,66 +33,79 @@ static inline uint16_t inc_index(queue_t *q, uint16_t index) {
|
||||
return index;
|
||||
}
|
||||
|
||||
// \brief Copy one element into the queue.
//
// \param q     Pointer to an initialized queue.
// \param data  Pointer to the element to copy in (q->element_size bytes).
// \param block If true, wait (via the lock_core wait/notify mechanism)
//              until space is available; if false, fail immediately when full.
// \return true if the element was added; false only when !block and the
//         queue was full.
static bool queue_add_internal(queue_t *q, void *data, bool block) {
    for (;;) {
        uint32_t flags = spin_lock_blocking(q->core.spin_lock);
        if (queue_get_level_unsafe(q) != q->element_count) {
            // There is room: store at the write index, advance it, and
            // release the lock with a notify so any waiting consumer wakes.
            memcpy(element_ptr(q, q->wptr), data, q->element_size);
            q->wptr = inc_index(q, q->wptr);
            lock_internal_spin_unlock_with_notify(&q->core, flags);
            return true;
        }
        // Queue is full.
        if (!block) {
            spin_unlock(q->core.spin_lock, flags);
            return false;
        }
        // Release the lock and wait for a notify, then retry the whole
        // check — the slot may have been taken again by the time we wake.
        lock_internal_spin_unlock_with_wait(&q->core, flags);
    }
}
|
||||
|
||||
// \brief Remove one element from the queue, copying it out.
//
// \param q     Pointer to an initialized queue.
// \param data  Destination buffer (q->element_size bytes).
// \param block If true, wait until an element is available; if false,
//              fail immediately when empty.
// \return true if an element was removed; false only when !block and the
//         queue was empty.
static bool queue_remove_internal(queue_t *q, void *data, bool block) {
    for (;;) {
        uint32_t flags = spin_lock_blocking(q->core.spin_lock);
        if (queue_get_level_unsafe(q) != 0) {
            // Element available: copy it out, advance the read index, and
            // release with a notify so any waiting producer wakes.
            memcpy(data, element_ptr(q, q->rptr), q->element_size);
            q->rptr = inc_index(q, q->rptr);
            lock_internal_spin_unlock_with_notify(&q->core, flags);
            return true;
        }
        // Queue is empty.
        if (!block) {
            spin_unlock(q->core.spin_lock, flags);
            return false;
        }
        // Release the lock and wait for a notify, then re-check — another
        // consumer may have drained the queue before we reacquire the lock.
        lock_internal_spin_unlock_with_wait(&q->core, flags);
    }
}
|
||||
|
||||
// \brief Copy the element at the head of the queue without removing it.
//
// \param q     Pointer to an initialized queue.
// \param data  Destination buffer (q->element_size bytes).
// \param block If true, wait until an element is available; if false,
//              fail immediately when empty.
// \return true if an element was copied; false only when !block and the
//         queue was empty.
static bool queue_peek_internal(queue_t *q, void *data, bool block) {
    for (;;) {
        uint32_t flags = spin_lock_blocking(q->core.spin_lock);
        if (queue_get_level_unsafe(q) != 0) {
            // Copy the head element but do NOT advance rptr — peek leaves
            // the queue contents unchanged. Unlock-with-notify matches the
            // original behavior (NOTE(review): notify on a pure peek looks
            // intentional here; preserved as-is).
            memcpy(data, element_ptr(q, q->rptr), q->element_size);
            lock_internal_spin_unlock_with_notify(&q->core, flags);
            return true;
        }
        // Queue is empty.
        if (!block) {
            spin_unlock(q->core.spin_lock, flags);
            return false;
        }
        // Release the lock and wait for a notify, then retry.
        lock_internal_spin_unlock_with_wait(&q->core, flags);
    }
}
|
||||
|
||||
// \brief Non-blocking add: attempt to copy one element into the queue.
//
// Delegates to queue_add_internal() with block=false; the old hand-rolled
// lock/__sev() body from before the lock_core rework is removed.
//
// \param q    Pointer to an initialized queue.
// \param data Pointer to the element to copy in.
// \return true if the element was added; false if the queue was full.
bool queue_try_add(queue_t *q, void *data) {
    return queue_add_internal(q, data, false);
}
|
||||
|
||||
// \brief Non-blocking remove: attempt to copy one element out of the queue.
//
// Delegates to queue_remove_internal() with block=false; the old
// hand-rolled lock/__sev() body from before the lock_core rework is removed.
//
// \param q    Pointer to an initialized queue.
// \param data Destination buffer for the removed element.
// \return true if an element was removed; false if the queue was empty.
bool queue_try_remove(queue_t *q, void *data) {
    return queue_remove_internal(q, data, false);
}
|
||||
|
||||
// \brief Non-blocking peek: attempt to copy the head element without
// removing it.
//
// Delegates to queue_peek_internal() with block=false; the old hand-rolled
// lock body from before the lock_core rework is removed.
//
// \param q    Pointer to an initialized queue.
// \param data Destination buffer for the peeked element.
// \return true if an element was copied; false if the queue was empty.
bool queue_try_peek(queue_t *q, void *data) {
    return queue_peek_internal(q, data, false);
}
|
||||
|
||||
// \brief Add one element to the queue, blocking until space is available.
//
// Delegates to queue_add_internal() with block=true, which uses the
// lock_core wait/notify support instead of the old try/__wfe() polling
// loop (removed here) — this lets RTOS ports substitute their own wait
// primitive for WFE/SEV.
//
// \param q    Pointer to an initialized queue.
// \param data Pointer to the element to copy in.
void queue_add_blocking(queue_t *q, void *data) {
    queue_add_internal(q, data, true);
}
|
||||
|
||||
// \brief Remove one element from the queue, blocking until one is available.
//
// Delegates to queue_remove_internal() with block=true, which uses the
// lock_core wait/notify support instead of the old try/__wfe() polling
// loop (removed here).
//
// \param q    Pointer to an initialized queue.
// \param data Destination buffer for the removed element.
void queue_remove_blocking(queue_t *q, void *data) {
    queue_remove_internal(q, data, true);
}
|
||||
|
||||
// \brief Copy the head element without removing it, blocking until one
// is available.
//
// Delegates to queue_peek_internal() with block=true, which uses the
// lock_core wait/notify support instead of the old try/__wfe() polling
// loop (removed here).
//
// \param q    Pointer to an initialized queue.
// \param data Destination buffer for the peeked element.
void queue_peek_blocking(queue_t *q, void *data) {
    queue_peek_internal(q, data, true);
}
|
||||
|
Reference in New Issue
Block a user