Add new async_context abstraction and refactor cyw43_arch to use it (#1177)
* Extract all poll/threadsafe_background/freertos support from cyw43_arch into a new abstraction, async_context, which:
  * provides support for asynchronous events (timers/IRQ notifications) to be handled in a safe context.
  * now guarantees all callbacks happen on a single core.
  * is reusable by multiple different libraries (stdio_usb can now be ported to this, but hasn't been yet).
  * supports multiple independent instances (independent instances will not block each other).
* cyw43_arch libraries cleaned up to use the new abstraction. Note that each distinct cyw43_arch type is now a very thin layer that creates the right type of context and adds cyw43_driver and lwIP support as appropriate.

Additionally:

* Add new pico_time and hardware_alarm APIs:
  * from_us_since_boot()
  * alarm_pool_create_with_unused_hardware_alarm()
  * alarm_pool_add_alarm_at_force_in_context()
  * hardware_alarm_claim_unused()
  * hardware_alarm_force_irq()
* Add panic_compact() and some minor comment cleanup; move the FIRST_USER_IRQ define to platform_defs.h
parent c578422528
commit a540ca905a
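For orientation before the diff, here is a minimal sketch (not part of this commit) of the worker model the new abstraction introduces, using the polled flavour; the worker callback, its name, and the surrounding main loop are illustrative assumptions only:

    #include "pico/async_context_poll.h"

    // illustrative "when pending" worker; do_work runs inside the context once work is flagged
    static void example_do_work(async_context_t *context, async_when_pending_worker_t *worker) {
        // ... service whatever an IRQ or other event left pending ...
    }

    static async_when_pending_worker_t example_worker = { .do_work = example_do_work };

    int main(void) {
        static async_context_poll_t context;
        async_context_poll_init_with_defaults(&context);
        async_context_add_when_pending_worker(&context.core, &example_worker);

        // an IRQ handler would call:
        //   async_context_set_work_pending(&context.core, &example_worker);

        while (true) {
            async_context_poll(&context.core); // run any workers with pending work or due timeouts
            async_context_wait_for_work_until(&context.core, at_the_end_of_time);
        }
    }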
@ -41,6 +41,7 @@
|
||||
* This group of libraries provide higher level functionality that isn't hardware related or provides a richer
|
||||
* set of functionality above the basic hardware interfaces
|
||||
* @{
|
||||
* \defgroup pico_async_context pico_async_context
|
||||
* \defgroup pico_multicore pico_multicore
|
||||
* \defgroup pico_stdlib pico_stdlib
|
||||
* \defgroup pico_sync pico_sync
|
||||
@ -59,6 +60,7 @@
|
||||
* \defgroup networking Networking Libraries
|
||||
* Functions for implementing networking
|
||||
* @{
|
||||
* \defgroup pico_cyw43_driver pico_cyw43_driver
|
||||
* \defgroup pico_lwip pico_lwip
|
||||
* \defgroup pico_cyw43_arch pico_cyw43_arch
|
||||
* @}
|
||||
|
@ -65,6 +65,18 @@ static inline void update_us_since_boot(absolute_time_t *t, uint64_t us_since_bo
|
||||
#endif
|
||||
}
|
||||
|
||||
/*! fn from_us_since_boot
|
||||
* \brief convert a number of microseconds since boot to an absolute_time_t
|
||||
* \param us_since_boot number of microseconds since boot
|
||||
* \return an absolute time equivalent to us_since_boot
|
||||
* \ingroup timestamp
|
||||
*/
|
||||
static inline absolute_time_t from_us_since_boot(uint64_t us_since_boot) {
|
||||
absolute_time_t t;
|
||||
update_us_since_boot(&t, us_since_boot);
|
||||
return t;
|
||||
}
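A quick usage sketch (illustrative, not part of the diff):

    absolute_time_t t = from_us_since_boot(1500000); // a timestamp 1.5 seconds after boot
    uint64_t us = to_us_since_boot(t);               // round-trips back to 1500000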
#ifdef NDEBUG
|
||||
#define ABSOLUTE_TIME_INITIALIZED_VAR(name, value) name = value
|
||||
#else
|
||||
|
@ -157,6 +157,17 @@ static inline int64_t absolute_time_diff_us(absolute_time_t from, absolute_time_
|
||||
return (int64_t)(to_us_since_boot(to) - to_us_since_boot(from));
|
||||
}
|
||||
|
||||
/*! \brief Return the earlier of two timestamps
|
||||
* \ingroup timestamp
|
||||
*
|
||||
* \param a the first timestamp
|
||||
* \param b the second timestamp
|
||||
* \return the earlier of the two timestamps
|
||||
*/
|
||||
static inline absolute_time_t absolute_time_min(absolute_time_t a, absolute_time_t b) {
|
||||
return to_us_since_boot(a) < to_us_since_boot(b) ? a : b;
|
||||
}
|
||||
|
||||
/*! \brief The timestamp representing the end of time; this is actually not the maximum possible
|
||||
* timestamp, but is set to 0x7fffffff_ffffffff microseconds to avoid sign overflows with time
|
||||
* arithmetic. This is almost 300,000 years, so should be sufficient.
|
||||
@ -397,6 +408,25 @@ alarm_pool_t *alarm_pool_get_default(void);
|
||||
*/
|
||||
alarm_pool_t *alarm_pool_create(uint hardware_alarm_num, uint max_timers);
|
||||
|
||||
/**
* \brief Create an alarm pool, claiming an unused hardware alarm to back it.
*
* The alarm pool will call callbacks from an alarm IRQ Handler on the core this function is called from.
*
* In many situations there is never any need for anything other than the default alarm pool, however you
* might want to create another if you want alarm callbacks on core 1 or require alarm pools of
* different priority (IRQ priority based preemption of callbacks)
*
* \note This method will hard assert if there is no free hardware alarm to claim.
*
* \ingroup alarm
* \param max_timers the maximum number of timers
* \note For implementation reasons this is limited to PICO_PHEAP_MAX_ENTRIES which defaults to 255
* \sa alarm_pool_get_default()
* \sa hardware_claiming
*/
alarm_pool_t *alarm_pool_create_with_unused_hardware_alarm(uint max_timers);
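Illustrative usage (not part of the diff; example_alarm_cb and example_create_pool are hypothetical names, and alarm_pool_add_alarm_in_ms is the pre-existing pico_time scheduling call):

    #include "pico/time.h"

    static int64_t example_alarm_cb(alarm_id_t id, void *user_data) {
        return 0; // do not reschedule
    }

    static alarm_pool_t *example_create_pool(void) {
        // call on the core that should receive the callbacks
        alarm_pool_t *pool = alarm_pool_create_with_unused_hardware_alarm(16);
        alarm_pool_add_alarm_in_ms(pool, 250, example_alarm_cb, NULL, true);
        return pool;
    }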
/**
|
||||
* \brief Return the hardware alarm used by an alarm pool
|
||||
* \ingroup alarm
|
||||
@ -446,6 +476,25 @@ void alarm_pool_destroy(alarm_pool_t *pool);
|
||||
*/
|
||||
alarm_id_t alarm_pool_add_alarm_at(alarm_pool_t *pool, absolute_time_t time, alarm_callback_t callback, void *user_data, bool fire_if_past);
|
||||
|
||||
/*!
* \brief Add an alarm callback to be called at or after a specific time
* \ingroup alarm
*
* The callback is called as soon as possible after the specified time, from an IRQ handler
* on the core the alarm pool was created on. Unlike \ref alarm_pool_add_alarm_at, this method
* guarantees to call the callback from that core even if the time is reached during this method call, or is already in the past.
*
* \note It is safe to call this method from an IRQ handler (including alarm callbacks), and from either core.
*
* @param pool the alarm pool to use for scheduling the callback (this determines which hardware alarm is used, and which core calls the callback)
* @param time the timestamp when (after which) the callback should fire
* @param callback the callback function
* @param user_data user data to pass to the callback function
* @return >0 the alarm id for an active (at the time of return) alarm
* @return -1 if there were no alarm slots available
*/
alarm_id_t alarm_pool_add_alarm_at_force_in_context(alarm_pool_t *pool, absolute_time_t time, alarm_callback_t callback,
void *user_data);
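Illustrative usage (not part of the diff; pool and example_alarm_cb as in the earlier sketch):

    // scheduling for "now" is fine: the callback is still invoked from the pool's IRQ
    // on the pool's core, even though the time has passed by the time this call returns
    alarm_id_t id = alarm_pool_add_alarm_at_force_in_context(pool, get_absolute_time(),
                                                             example_alarm_cb, NULL);
    if (id < 0) {
        // no alarm slots were available
    }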
/*!
|
||||
* \brief Add an alarm callback to be called after a delay specified in microseconds
|
||||
* \ingroup alarm
|
||||
|
@ -81,6 +81,7 @@ void alarm_pool_init_default() {
|
||||
if (!default_alarm_pool_initialized()) {
|
||||
ph_post_alloc_init(default_alarm_pool.heap, PICO_TIME_DEFAULT_ALARM_POOL_MAX_TIMERS,
|
||||
timer_pool_entry_comparator, &default_alarm_pool);
|
||||
hardware_alarm_claim(PICO_TIME_DEFAULT_ALARM_POOL_HARDWARE_ALARM_NUM);
|
||||
alarm_pool_post_alloc_init(&default_alarm_pool,
|
||||
PICO_TIME_DEFAULT_ALARM_POOL_HARDWARE_ALARM_NUM);
|
||||
}
|
||||
@ -181,12 +182,21 @@ alarm_pool_t *alarm_pool_create(uint hardware_alarm_num, uint max_timers) {
|
||||
pool->heap = ph_create(max_timers, timer_pool_entry_comparator, pool);
|
||||
pool->entries = (alarm_pool_entry_t *)calloc(max_timers, sizeof(alarm_pool_entry_t));
|
||||
pool->entry_ids_high = (uint8_t *)calloc(max_timers, sizeof(uint8_t));
|
||||
hardware_alarm_claim(hardware_alarm_num);
|
||||
alarm_pool_post_alloc_init(pool, hardware_alarm_num);
|
||||
return pool;
|
||||
}
|
||||
|
||||
alarm_pool_t *alarm_pool_create_with_unused_hardware_alarm(uint max_timers) {
|
||||
alarm_pool_t *pool = (alarm_pool_t *) malloc(sizeof(alarm_pool_t));
|
||||
pool->heap = ph_create(max_timers, timer_pool_entry_comparator, pool);
|
||||
pool->entries = (alarm_pool_entry_t *)calloc(max_timers, sizeof(alarm_pool_entry_t));
|
||||
pool->entry_ids_high = (uint8_t *)calloc(max_timers, sizeof(uint8_t));
|
||||
alarm_pool_post_alloc_init(pool, (uint)hardware_alarm_claim_unused(true));
|
||||
return pool;
|
||||
}
|
||||
|
||||
void alarm_pool_post_alloc_init(alarm_pool_t *pool, uint hardware_alarm_num) {
|
||||
hardware_alarm_claim(hardware_alarm_num);
|
||||
hardware_alarm_cancel(hardware_alarm_num);
|
||||
hardware_alarm_set_callback(hardware_alarm_num, alarm_pool_alarm_callback);
|
||||
pool->lock = spin_lock_instance(next_striped_spin_lock_num());
|
||||
@ -260,6 +270,26 @@ alarm_id_t alarm_pool_add_alarm_at(alarm_pool_t *pool, absolute_time_t time, ala
|
||||
return public_id;
|
||||
}
|
||||
|
||||
alarm_id_t alarm_pool_add_alarm_at_force_in_context(alarm_pool_t *pool, absolute_time_t time, alarm_callback_t callback,
|
||||
void *user_data) {
|
||||
bool missed = false;
|
||||
|
||||
uint8_t id_high = 0;
|
||||
uint32_t save = spin_lock_blocking(pool->lock);
|
||||
|
||||
pheap_node_id_t id = add_alarm_under_lock(pool, time, callback, user_data, 0, true, &missed);
|
||||
if (id) id_high = *get_entry_id_high(pool, id);
|
||||
spin_unlock(pool->lock, save);
|
||||
if (!id) return -1;
|
||||
if (missed) {
|
||||
// we want to fire the timer forcibly because it is in the past. Note that we do
|
||||
// not care about racing with other timers, as it is harmless to have the IRQ
|
||||
// wake up one time too many, we just need to make sure it does wake up
|
||||
hardware_alarm_force_irq(pool->hardware_alarm_num);
|
||||
}
|
||||
return make_public_id(id_high, id);
|
||||
}
|
||||
|
||||
bool alarm_pool_cancel_alarm(alarm_pool_t *pool, alarm_id_t alarm_id) {
|
||||
bool rc = false;
|
||||
uint32_t save = spin_lock_blocking(pool->lock);
|
||||
|
@ -31,10 +31,11 @@ bool time_reached(absolute_time_t t);
|
||||
typedef void (*hardware_alarm_callback_t)(uint alarm_num);
|
||||
void hardware_alarm_claim(uint alarm_num);
|
||||
void hardware_alarm_unclaim(uint alarm_num);
|
||||
int hardware_alarm_claim_unused(bool required);
|
||||
void hardware_alarm_set_callback(uint alarm_num, hardware_alarm_callback_t callback);
|
||||
bool hardware_alarm_set_target(uint alarm_num, absolute_time_t t);
|
||||
void hardware_alarm_cancel(uint alarm_num);
|
||||
|
||||
void hardware_alarm_force_irq(uint alarm_num);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -87,6 +87,13 @@ void hardware_alarm_unclaim(uint alarm_num) {
|
||||
claimed_alarms &= ~(1u <<alarm_num);
|
||||
}
|
||||
|
||||
int hardware_alarm_claim_unused(bool required) {
|
||||
int alarm_id = claimed_alarms ? __builtin_clz(~claimed_alarms) : 1;
|
||||
if (alarm_id >= NUM_TIMERS) return -1;
|
||||
claimed_alarms |= 1u << alarm_id;
|
||||
return alarm_id;
|
||||
}
|
||||
|
||||
PICO_WEAK_FUNCTION_DEF(hardware_alarm_set_callback)
|
||||
void PICO_WEAK_FUNCTION_IMPL_NAME(hardware_alarm_set_callback)(uint alarm_num, hardware_alarm_callback_t callback) {
|
||||
panic_unsupported();
|
||||
@ -101,3 +108,8 @@ PICO_WEAK_FUNCTION_DEF(hardware_alarm_cancel)
|
||||
void PICO_WEAK_FUNCTION_IMPL_NAME(hardware_alarm_cancel)(uint alarm_num) {
|
||||
panic_unsupported();
|
||||
}
|
||||
|
||||
PICO_WEAK_FUNCTION_DEF(hardware_alarm_force_irq)
|
||||
void PICO_WEAK_FUNCTION_IMPL_NAME(hardware_alarm_force_irq)(uint alarm_num) {
|
||||
panic_unsupported();
|
||||
}
|
||||
|
@ -42,5 +42,7 @@
|
||||
#define XOSC_MHZ _u(12)
|
||||
#endif
|
||||
|
||||
#define FIRST_USER_IRQ (NUM_IRQS - NUM_USER_IRQS)
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -58,7 +58,8 @@ if (NOT PICO_BARE_METAL)
|
||||
pico_add_subdirectory(tinyusb)
|
||||
pico_add_subdirectory(pico_stdio_usb)
|
||||
|
||||
pico_add_subdirectory(cyw43_driver)
|
||||
pico_add_subdirectory(pico_async_context)
|
||||
pico_add_subdirectory(pico_cyw43_driver)
|
||||
pico_add_subdirectory(pico_lwip)
|
||||
pico_add_subdirectory(pico_cyw43_arch)
|
||||
pico_add_subdirectory(pico_mbedtls)
|
||||
|
@ -14,10 +14,11 @@
|
||||
*
|
||||
* Low level flash programming and erase API
|
||||
*
|
||||
* Note these functions are *unsafe* if you have two cores concurrently
|
||||
* executing from flash. In this case you must perform your own
|
||||
* synchronisation to make sure no XIP accesses take place during flash
|
||||
* programming.
|
||||
* Note these functions are *unsafe* if you are using both cores, and the other
|
||||
* is executing from flash concurrently with the operation. If this could be the
|
||||
* case, you must perform your own synchronisation to make sure that no XIP
|
||||
* accesses take place during flash programming. One option is to use the
|
||||
* \ref multicore_lockout functions.
|
||||
*
|
||||
* Likewise they are *unsafe* if you have interrupt handlers or an interrupt
|
||||
* vector table in flash, so you must disable interrupts before calling in
|
||||
|
@ -435,8 +435,6 @@ void irq_init_priorities() {
|
||||
#endif
|
||||
}
|
||||
|
||||
#define FIRST_USER_IRQ (NUM_IRQS - NUM_USER_IRQS)
|
||||
|
||||
static uint get_user_irq_claim_index(uint irq_num) {
|
||||
invalid_params_if(IRQ, irq_num < FIRST_USER_IRQ || irq_num >= NUM_IRQS);
|
||||
// we count backwards from the last, to match the existing hard coded uses of user IRQs in the SDK which were previously using 31
|
||||
|
@ -136,6 +136,16 @@ typedef void (*hardware_alarm_callback_t)(uint alarm_num);
|
||||
*/
|
||||
void hardware_alarm_claim(uint alarm_num);
|
||||
|
||||
/*! \brief cooperatively claim the use of an unused hardware alarm_num
* \ingroup hardware_timer
*
* This method attempts to claim an unused hardware alarm
*
* \param required if true, the method will panic if no hardware alarm is available
* \return alarm_num the hardware alarm claimed, or -1 if required was false and none are available
* \sa hardware_claiming
*/
int hardware_alarm_claim_unused(bool required);
|
||||
|
||||
/*! \brief cooperatively release the claim on use of this hardware alarm_num
|
||||
* \ingroup hardware_timer
|
||||
*
|
||||
@ -187,11 +197,23 @@ bool hardware_alarm_set_target(uint alarm_num, absolute_time_t t);
|
||||
* \brief Cancel an existing target (if any) for a given hardware_alarm
|
||||
* \ingroup hardware_timer
|
||||
*
|
||||
* @param alarm_num
|
||||
* @param alarm_num the hardware alarm number
|
||||
*/
|
||||
|
||||
void hardware_alarm_cancel(uint alarm_num);
|
||||
|
||||
/**
|
||||
* \brief Force an IRQ for a specific hardware alarm
|
||||
* \ingroup hardware_timer
|
||||
*
|
||||
* This method will forcibly make sure the current alarm callback (if present) for the hardware
|
||||
* alarm is called from an IRQ context after this call. If an actual callback is due at the same
|
||||
* time then the callback may only be called once.
|
||||
*
|
||||
* Calling this method does not otherwise interfere with regular callback operations.
|
||||
*
|
||||
* @param alarm_num the hardware alarm number
|
||||
*/
|
||||
void hardware_alarm_force_irq(uint alarm_num);
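Illustrative usage of the two new hardware_alarm calls together (not part of the diff; the callback and wrapper names are hypothetical):

    #include "hardware/timer.h"

    static void example_hw_alarm_cb(uint alarm_num) {
        // runs in IRQ context
    }

    static void example_force_hw_alarm(void) {
        int alarm_num = hardware_alarm_claim_unused(true); // with true, panics rather than returning -1
        hardware_alarm_set_callback((uint) alarm_num, example_hw_alarm_cb);
        hardware_alarm_force_irq((uint) alarm_num);        // example_hw_alarm_cb now runs from the alarm IRQ
    }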
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -33,6 +33,10 @@ bool hardware_alarm_is_claimed(uint alarm_num) {
|
||||
return hw_is_claimed(&claimed, alarm_num);
|
||||
}
|
||||
|
||||
int hardware_alarm_claim_unused(bool required) {
|
||||
return hw_claim_unused_from_range(&claimed, required, 0, NUM_TIMERS - 1, "No timers available");
|
||||
}
|
||||
|
||||
/// tag::time_us_64[]
|
||||
uint64_t time_us_64() {
|
||||
// Need to make sure that the upper 32 bits of the timer
|
||||
@ -108,9 +112,7 @@ static inline uint harware_alarm_irq_number(uint alarm_num) {
|
||||
|
||||
static void hardware_alarm_irq_handler(void) {
|
||||
// Determine which timer this IRQ is for
|
||||
uint32_t ipsr;
|
||||
__asm volatile ("mrs %0, ipsr" : "=r" (ipsr)::);
|
||||
uint alarm_num = (ipsr & 0x3fu) - 16 - TIMER_IRQ_0;
|
||||
uint alarm_num = __get_current_exception() - 16 - TIMER_IRQ_0;
|
||||
check_hardware_alarm_num_param(alarm_num);
|
||||
|
||||
hardware_alarm_callback_t callback = NULL;
|
||||
@ -119,6 +121,8 @@ static void hardware_alarm_irq_handler(void) {
|
||||
uint32_t save = spin_lock_blocking(lock);
|
||||
// Clear the timer IRQ (inside lock, because we check whether we have handled the IRQ yet in alarm_set by looking at the interrupt status)
|
||||
timer_hw->intr = 1u << alarm_num;
|
||||
// Clear any forced IRQ
|
||||
hw_clear_bits(&timer_hw->intf, 1u << alarm_num);
|
||||
|
||||
// make sure the IRQ is still valid
|
||||
if (timer_callbacks_pending & (1u << alarm_num)) {
|
||||
@ -227,4 +231,11 @@ void hardware_alarm_cancel(uint alarm_num) {
|
||||
spin_unlock(lock, save);
|
||||
}
|
||||
|
||||
|
||||
void hardware_alarm_force_irq(uint alarm_num) {
|
||||
check_hardware_alarm_num_param(alarm_num);
|
||||
spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_TIMER);
|
||||
uint32_t save = spin_lock_blocking(lock);
|
||||
timer_callbacks_pending |= (uint8_t)(1u << alarm_num);
|
||||
spin_unlock(lock, save);
|
||||
hw_set_bits(&timer_hw->intf, 1u << alarm_num);
|
||||
}
|
src/rp2_common/pico_async_context/CMakeLists.txt (new file, +24)
@ -0,0 +1,24 @@
|
||||
add_library(pico_async_context_base INTERFACE)
|
||||
target_include_directories(pico_async_context_base INTERFACE ${CMAKE_CURRENT_LIST_DIR}/include)
|
||||
target_sources(pico_async_context_base INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/async_context_base.c
|
||||
)
|
||||
target_link_libraries(pico_async_context_base INTERFACE pico_platform)
|
||||
|
||||
pico_add_impl_library(pico_async_context_poll INTERFACE)
|
||||
target_sources(pico_async_context_poll INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/async_context_poll.c
|
||||
)
|
||||
target_link_libraries(pico_async_context_poll INTERFACE pico_async_context_base)
|
||||
|
||||
pico_add_impl_library(pico_async_context_threadsafe_background INTERFACE)
|
||||
target_sources(pico_async_context_threadsafe_background INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/async_context_threadsafe_background.c
|
||||
)
|
||||
target_link_libraries(pico_async_context_threadsafe_background INTERFACE pico_async_context_base)
|
||||
|
||||
pico_add_impl_library(pico_async_context_freertos INTERFACE)
|
||||
target_sources(pico_async_context_freertos INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/async_context_freertos.c
|
||||
)
|
||||
target_link_libraries(pico_async_context_freertos INTERFACE pico_async_context_base)
|
src/rp2_common/pico_async_context/async_context_base.c (new file, +123)
@ -0,0 +1,123 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include "pico/async_context_base.h"
|
||||
|
||||
bool async_context_base_add_at_time_worker(async_context_t *self, async_at_time_worker_t *worker) {
|
||||
async_at_time_worker_t **prev = &self->at_time_list;
|
||||
while (*prev) {
|
||||
if (worker == *prev) {
|
||||
return false;
|
||||
}
|
||||
prev = &(*prev)->next;
|
||||
}
|
||||
*prev = worker;
|
||||
worker->next = NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool async_context_base_remove_at_time_worker(async_context_t *self, async_at_time_worker_t *worker) {
|
||||
async_at_time_worker_t **prev = &self->at_time_list;
|
||||
while (*prev) {
|
||||
if (worker == *prev) {
|
||||
*prev = worker->next;
|
||||
return true;
|
||||
}
|
||||
prev = &(*prev)->next;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool async_context_base_add_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker) {
|
||||
async_when_pending_worker_t **prev = &self->when_pending_list;
|
||||
while (*prev) {
|
||||
if (worker == *prev) {
|
||||
return false;
|
||||
}
|
||||
prev = &(*prev)->next;
|
||||
}
|
||||
*prev = worker;
|
||||
worker->next = NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool async_context_base_remove_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker) {
|
||||
async_when_pending_worker_t **prev = &self->when_pending_list;
|
||||
while (*prev) {
|
||||
if (worker == *prev) {
|
||||
*prev = worker->next;
|
||||
return true;
|
||||
}
|
||||
prev = &(*prev)->next;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
async_at_time_worker_t *async_context_base_remove_ready_at_time_worker(async_context_t *self) {
|
||||
async_at_time_worker_t **best_prev = NULL;
|
||||
if (self->at_time_list) {
|
||||
absolute_time_t earliest = get_absolute_time();
|
||||
for (async_at_time_worker_t **prev = &self->at_time_list; *prev; prev = &(*prev)->next) {
|
||||
if (absolute_time_diff_us((*prev)->next_time, earliest) >= 0) {
|
||||
earliest = (*prev)->next_time;
|
||||
assert(!is_at_the_end_of_time(earliest)); // should never be less than now
|
||||
best_prev = prev;
|
||||
}
|
||||
}
|
||||
}
|
||||
async_at_time_worker_t *rc;
|
||||
if (best_prev) {
|
||||
rc = *best_prev;
|
||||
*best_prev = rc->next;
|
||||
} else {
|
||||
rc = NULL;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
void async_context_base_refresh_next_timeout(async_context_t *self) {
|
||||
absolute_time_t earliest = at_the_end_of_time;
|
||||
for (async_at_time_worker_t *worker = self->at_time_list; worker; ) {
|
||||
async_at_time_worker_t *next = worker->next;
|
||||
if (absolute_time_diff_us(worker->next_time, earliest) > 0) {
|
||||
earliest = worker->next_time;
|
||||
}
|
||||
worker = next;
|
||||
}
|
||||
self->next_time = earliest;
|
||||
}
|
||||
|
||||
absolute_time_t async_context_base_execute_once(async_context_t *self) {
|
||||
async_at_time_worker_t *at_time_worker;
|
||||
while (NULL != (at_time_worker = async_context_base_remove_ready_at_time_worker(self))) {
|
||||
at_time_worker->do_work(self, at_time_worker);
|
||||
}
|
||||
for(async_when_pending_worker_t *when_pending_worker = self->when_pending_list; when_pending_worker; when_pending_worker = when_pending_worker->next) {
|
||||
if (when_pending_worker->work_pending) {
|
||||
when_pending_worker->work_pending = false;
|
||||
when_pending_worker->do_work(self, when_pending_worker);
|
||||
}
|
||||
}
|
||||
async_context_base_refresh_next_timeout(self);
|
||||
return self->next_time;
|
||||
}
|
||||
|
||||
bool async_context_base_needs_servicing(async_context_t *self) {
|
||||
absolute_time_t now = get_absolute_time();
|
||||
if (self->at_time_list) {
|
||||
for (async_at_time_worker_t *worker = self->at_time_list; worker; worker = worker->next) {
|
||||
if (absolute_time_diff_us(worker->next_time, now) >= 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
for(async_when_pending_worker_t *when_pending_worker = self->when_pending_list; when_pending_worker; when_pending_worker = when_pending_worker->next) {
|
||||
if (when_pending_worker->work_pending) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
src/rp2_common/pico_async_context/async_context_freertos.c (new file, +293)
@ -0,0 +1,293 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include "pico/async_context_freertos.h"
|
||||
#include "pico/async_context_base.h"
|
||||
#include "pico/sync.h"
|
||||
#include "hardware/irq.h"
|
||||
|
||||
#include "semphr.h"
|
||||
|
||||
#if configNUM_CORES > 1 && !defined(configUSE_CORE_AFFINITY)
|
||||
#error async_context_freertos requires configUSE_CORE_AFFINITY under SMP
|
||||
#endif
|
||||
|
||||
static const async_context_type_t template;
|
||||
|
||||
static void async_context_freertos_acquire_lock_blocking(async_context_t *self_base);
|
||||
static void async_context_freertos_release_lock(async_context_t *self_base);
|
||||
static void async_context_freertos_lock_check(async_context_t *self_base);
|
||||
|
||||
static TickType_t sensible_ticks_until(absolute_time_t until) {
|
||||
TickType_t ticks;
|
||||
int64_t delay_us = absolute_time_diff_us(get_absolute_time(), until);
|
||||
if (delay_us <= 0) {
|
||||
ticks = 0;
|
||||
} else {
|
||||
static const uint32_t max_delay = 60000000;
|
||||
uint32_t delay_us_32 = delay_us > max_delay ? max_delay : (uint32_t) delay_us;
|
||||
ticks = pdMS_TO_TICKS((delay_us_32+999)/1000);
|
||||
// we want to round up, as both rounding down to zero is wrong (may produce no delays
|
||||
// where delays are needed), but also we don't want to wake up, and then realize there
|
||||
// is no work to do yet!
|
||||
ticks++;
|
||||
}
|
||||
return ticks;
|
||||
}
|
||||
|
||||
static void process_under_lock(async_context_freertos_t *self) {
|
||||
#ifndef NDEBUG
|
||||
async_context_freertos_lock_check(&self->core);
|
||||
#endif
|
||||
bool repeat;
|
||||
do {
|
||||
repeat = false;
|
||||
absolute_time_t next_time = async_context_base_execute_once(&self->core);
|
||||
TickType_t ticks;
|
||||
if (is_at_the_end_of_time(next_time)) {
|
||||
ticks = portMAX_DELAY;
|
||||
} else {
|
||||
ticks = sensible_ticks_until(next_time);
|
||||
}
|
||||
if (ticks) {
|
||||
// last parameter (timeout) is also 'ticks', since there is no point waiting to change the period
|
||||
// for longer than the period itself!
|
||||
repeat = pdFALSE == xTimerChangePeriod(self->timer_handle, ticks, ticks);
|
||||
} else {
|
||||
repeat = true;
|
||||
}
|
||||
} while (repeat);
|
||||
}
|
||||
|
||||
static void async_context_task(__unused void *vself) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)vself;
|
||||
do {
|
||||
ulTaskNotifyTake(pdFALSE, portMAX_DELAY);
|
||||
if (self->task_should_exit) break;
|
||||
async_context_freertos_acquire_lock_blocking(&self->core);
|
||||
process_under_lock(self);
|
||||
async_context_freertos_release_lock(&self->core);
|
||||
__sev(); // it is possible regular code is waiting on a WFE on the other core
|
||||
} while (!self->task_should_exit);
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
static void async_context_freertos_wake_up(async_context_t *self_base) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
|
||||
if (self->task_handle) {
|
||||
if (portCHECK_IF_IN_ISR()) {
|
||||
vTaskNotifyGiveFromISR(self->task_handle, NULL);
|
||||
xSemaphoreGiveFromISR(self->work_needed_sem, NULL);
|
||||
} else {
|
||||
// we don't want to wake ourselves up (we will only ever be called
|
||||
// from the async_context_task if we own the lock, in which case processing
|
||||
// will already happen when the lock is finally unlocked)
|
||||
if (xTaskGetCurrentTaskHandle() != self->task_handle) {
|
||||
xTaskNotifyGive(self->task_handle);
|
||||
xSemaphoreGive(self->work_needed_sem);
|
||||
} else {
|
||||
#ifndef NDEBUG
|
||||
async_context_freertos_lock_check(self_base);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void timer_handler(__unused TimerHandle_t handle)
|
||||
{
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)pvTimerGetTimerID(handle);
|
||||
async_context_freertos_wake_up(&self->core);
|
||||
}
|
||||
|
||||
bool async_context_freertos_init(async_context_freertos_t *self, async_context_freertos_config_t *config) {
|
||||
memset(self, 0, sizeof(*self));
|
||||
self->core.type = &template;
|
||||
self->core.flags = ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
|
||||
self->core.core_num = get_core_num();
|
||||
self->lock_mutex = xSemaphoreCreateRecursiveMutex();
|
||||
self->work_needed_sem = xSemaphoreCreateBinary();
|
||||
self->timer_handle = xTimerCreate( "async_context_timer", // Just a text name, not used by the kernel.
|
||||
portMAX_DELAY,
|
||||
pdFALSE, // one-shot timer; the period is changed each time the timer is (re)started.
|
||||
self,
|
||||
timer_handler);
|
||||
|
||||
if (!self->lock_mutex ||
|
||||
!self->work_needed_sem ||
|
||||
!self->timer_handle ||
|
||||
pdPASS != xTaskCreate(async_context_task, "async_context_task", config->task_stack_size, self,
|
||||
config->task_priority, &self->task_handle)) {
|
||||
async_context_deinit(&self->core);
|
||||
return false;
|
||||
}
|
||||
#if configNUM_CORES > 1
|
||||
UBaseType_t core_id = config->task_core_id;
|
||||
if (core_id == (UBaseType_t)-1) {
|
||||
core_id = portGET_CORE_ID();
|
||||
}
|
||||
// we must run on a single core
|
||||
vTaskCoreAffinitySet(self->task_handle, 1u << core_id);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
static uint32_t end_task_func(void *param) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)param;
|
||||
// we will immediately exit
|
||||
self->task_should_exit = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void async_context_freertos_deinit(async_context_t *self_base) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
|
||||
if (self->task_handle) {
|
||||
async_context_execute_sync(self_base, end_task_func, self_base);
|
||||
}
|
||||
if (self->timer_handle) {
|
||||
xTimerDelete(self->timer_handle, 0);
|
||||
}
|
||||
if (self->lock_mutex) {
|
||||
vSemaphoreDelete(self->lock_mutex);
|
||||
}
|
||||
if (self->work_needed_sem) {
|
||||
vSemaphoreDelete(self->work_needed_sem);
|
||||
}
|
||||
memset(self, 0, sizeof(*self));
|
||||
}
|
||||
|
||||
void async_context_freertos_acquire_lock_blocking(async_context_t *self_base) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
|
||||
// Lock the other core and stop low_prio_irq running
|
||||
assert(!portCHECK_IF_IN_ISR());
|
||||
xSemaphoreTakeRecursive(self->lock_mutex, portMAX_DELAY);
|
||||
self->nesting++;
|
||||
}
|
||||
|
||||
void async_context_freertos_lock_check(async_context_t *self_base) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
|
||||
// check that the calling task currently holds the lock
|
||||
assert(xSemaphoreGetMutexHolder(self->lock_mutex) == xTaskGetCurrentTaskHandle());
|
||||
}
|
||||
|
||||
typedef struct sync_func_call{
|
||||
async_when_pending_worker_t worker;
|
||||
SemaphoreHandle_t sem;
|
||||
uint32_t (*func)(void *param);
|
||||
void *param;
|
||||
uint32_t rc;
|
||||
} sync_func_call_t;
|
||||
|
||||
static void handle_sync_func_call(async_context_t *context, async_when_pending_worker_t *worker) {
|
||||
sync_func_call_t *call = (sync_func_call_t *)worker;
|
||||
call->rc = call->func(call->param);
|
||||
xSemaphoreGive(call->sem);
|
||||
async_context_remove_when_pending_worker(context, worker);
|
||||
}
|
||||
|
||||
uint32_t async_context_freertos_execute_sync(async_context_t *self_base, uint32_t (*func)(void *param), void *param) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t*)self_base;
|
||||
hard_assert(xSemaphoreGetMutexHolder(self->lock_mutex) != xTaskGetCurrentTaskHandle());
|
||||
sync_func_call_t call;
|
||||
call.worker.do_work = handle_sync_func_call;
|
||||
call.func = func;
|
||||
call.param = param;
|
||||
call.sem = xSemaphoreCreateBinary();
|
||||
async_context_add_when_pending_worker(self_base, &call.worker);
|
||||
async_context_set_work_pending(self_base, &call.worker);
|
||||
xSemaphoreTake(call.sem, portMAX_DELAY);
|
||||
vSemaphoreDelete(call.sem);
|
||||
return call.rc;
|
||||
}
|
||||
|
||||
void async_context_freertos_release_lock(async_context_t *self_base) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
|
||||
bool do_wakeup = false;
|
||||
if (self->nesting == 1) {
|
||||
// note that we always do a processing on outermost lock exit, to facilitate cases
|
||||
// like lwIP where we have no notification when lwIP timers are added.
|
||||
//
|
||||
// this operation must be done from the right task
|
||||
if (self->task_handle != xTaskGetCurrentTaskHandle()) {
|
||||
// note we defer the wakeup until after we release the lock, otherwise it can be wasteful
|
||||
// (waking up the task, but then having it block immediately on us)
|
||||
do_wakeup = true;
|
||||
} else {
|
||||
process_under_lock(self);
|
||||
}
|
||||
}
|
||||
--self->nesting;
|
||||
xSemaphoreGiveRecursive(self->lock_mutex);
|
||||
if (do_wakeup) {
|
||||
async_context_freertos_wake_up(self_base);
|
||||
}
|
||||
}
|
||||
|
||||
static bool async_context_freertos_add_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
|
||||
async_context_freertos_acquire_lock_blocking(self_base);
|
||||
bool rc = async_context_base_add_at_time_worker(self_base, worker);
|
||||
async_context_freertos_release_lock(self_base);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static bool async_context_freertos_remove_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
|
||||
async_context_freertos_acquire_lock_blocking(self_base);
|
||||
bool rc = async_context_base_remove_at_time_worker(self_base, worker);
|
||||
async_context_freertos_release_lock(self_base);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static bool async_context_freertos_add_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
|
||||
async_context_freertos_acquire_lock_blocking(self_base);
|
||||
bool rc = async_context_base_add_when_pending_worker(self_base, worker);
|
||||
async_context_freertos_release_lock(self_base);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static bool async_context_freertos_remove_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
|
||||
async_context_freertos_acquire_lock_blocking(self_base);
|
||||
bool rc = async_context_base_remove_when_pending_worker(self_base, worker);
|
||||
async_context_freertos_release_lock(self_base);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void async_context_freertos_set_work_pending(async_context_t *self_base, async_when_pending_worker_t *worker) {
|
||||
worker->work_pending = true;
|
||||
async_context_freertos_wake_up(self_base);
|
||||
}
|
||||
|
||||
static void async_context_freertos_wait_until(async_context_t *self_base, absolute_time_t until) {
|
||||
assert(!portCHECK_IF_IN_ISR());
|
||||
TickType_t ticks = sensible_ticks_until(until);
|
||||
vTaskDelay(ticks);
|
||||
}
|
||||
|
||||
static void async_context_freertos_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
|
||||
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
|
||||
assert(!portCHECK_IF_IN_ISR());
|
||||
while (!time_reached(until)) {
|
||||
TickType_t ticks = sensible_ticks_until(until);
|
||||
if (!ticks || xSemaphoreTake(self->work_needed_sem, ticks)) return;
|
||||
}
|
||||
}
|
||||
|
||||
static const async_context_type_t template = {
|
||||
.type = ASYNC_CONTEXT_FREERTOS,
|
||||
.acquire_lock_blocking = async_context_freertos_acquire_lock_blocking,
|
||||
.release_lock = async_context_freertos_release_lock,
|
||||
.lock_check = async_context_freertos_lock_check,
|
||||
.execute_sync = async_context_freertos_execute_sync,
|
||||
.add_at_time_worker = async_context_freertos_add_at_time_worker,
|
||||
.remove_at_time_worker = async_context_freertos_remove_at_time_worker,
|
||||
.add_when_pending_worker = async_context_freertos_add_when_pending_worker,
|
||||
.remove_when_pending_worker = async_context_freertos_remove_when_pending_worker,
|
||||
.set_work_pending = async_context_freertos_set_work_pending,
|
||||
.poll = 0,
|
||||
.wait_until = async_context_freertos_wait_until,
|
||||
.wait_for_work_until = async_context_freertos_wait_for_work_until,
|
||||
.deinit = async_context_freertos_deinit,
|
||||
};
|
src/rp2_common/pico_async_context/async_context_poll.c (new file, +73)
@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include "pico/async_context_poll.h"
|
||||
#include "pico/async_context_base.h"
|
||||
#include "pico/sync.h"
|
||||
|
||||
static void noop(__unused async_context_t *context) { }
|
||||
|
||||
static const async_context_type_t template;
|
||||
|
||||
bool async_context_poll_init_with_defaults(async_context_poll_t *self) {
|
||||
memset(self, 0, sizeof(*self));
|
||||
self->core.core_num = get_core_num();
|
||||
self->core.type = &template;
|
||||
self->core.flags = ASYNC_CONTEXT_FLAG_POLLED | ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
|
||||
sem_init(&self->sem, 1, 1);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void async_context_poll_wake_up(async_context_t *self_base) {
|
||||
sem_release(&((async_context_poll_t *)self_base)->sem);
|
||||
}
|
||||
|
||||
static void async_context_poll_requires_update(async_context_t *self_base, async_when_pending_worker_t *worker) {
|
||||
worker->work_pending = true;
|
||||
async_context_poll_wake_up(self_base);
|
||||
}
|
||||
|
||||
static void async_context_poll_poll(async_context_t *self_base) {
|
||||
async_context_base_execute_once(self_base);
|
||||
}
|
||||
|
||||
static void async_context_poll_wait_until(__unused async_context_t *self_base, absolute_time_t until) {
|
||||
sleep_until(until);
|
||||
}
|
||||
|
||||
static void async_context_poll_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
|
||||
absolute_time_t next_time = self_base->next_time;
|
||||
async_context_poll_t *self = (async_context_poll_t *)self_base;
|
||||
sem_acquire_block_until(&self->sem, absolute_time_min(next_time, until));
|
||||
}
|
||||
|
||||
static void async_context_poll_lock_check(async_context_t *self_base) {
|
||||
if (__get_current_exception() || get_core_num() != self_base->core_num) {
|
||||
panic("async_context_poll context check failed (IRQ or wrong core)");
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t async_context_poll_execute_sync(__unused async_context_t *context, uint32_t (*func)(void *param), void *param) {
|
||||
return func(param);
|
||||
}
|
||||
|
||||
static const async_context_type_t template = {
|
||||
.type = ASYNC_CONTEXT_POLL,
|
||||
.acquire_lock_blocking = noop,
|
||||
.release_lock = noop,
|
||||
.lock_check = async_context_poll_lock_check,
|
||||
.execute_sync = async_context_poll_execute_sync,
|
||||
.add_at_time_worker = async_context_base_add_at_time_worker,
|
||||
.remove_at_time_worker = async_context_base_remove_at_time_worker,
|
||||
.add_when_pending_worker = async_context_base_add_when_pending_worker,
|
||||
.remove_when_pending_worker = async_context_base_remove_when_pending_worker,
|
||||
.set_work_pending = async_context_poll_requires_update,
|
||||
.poll = async_context_poll_poll,
|
||||
.wait_until = async_context_poll_wait_until,
|
||||
.wait_for_work_until = async_context_poll_wait_for_work_until,
|
||||
.deinit = noop,
|
||||
};
|
src/rp2_common/pico_async_context/async_context_threadsafe_background.c (new file, +369)
@ -0,0 +1,369 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include "pico/async_context_threadsafe_background.h"
|
||||
#include "pico/async_context_base.h"
|
||||
#include "pico/sync.h"
|
||||
#include "hardware/irq.h"
|
||||
|
||||
static const async_context_type_t template;
|
||||
static async_context_threadsafe_background_t *async_contexts_by_user_irq[NUM_USER_IRQS];
|
||||
|
||||
static void low_priority_irq_handler(void);
|
||||
static void process_under_lock(async_context_threadsafe_background_t *self);
|
||||
static int64_t alarm_handler(alarm_id_t id, void *user_data);
|
||||
|
||||
#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY
|
||||
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY PICO_LOWEST_IRQ_PRIORITY
|
||||
#endif
|
||||
|
||||
#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS
|
||||
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS 4
|
||||
#endif
|
||||
|
||||
async_context_threadsafe_background_config_t async_context_threadsafe_background_default_config(void) {
|
||||
async_context_threadsafe_background_config_t config = {
|
||||
.low_priority_irq_handler_priority = ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY,
|
||||
.custom_alarm_pool = NULL,
|
||||
};
|
||||
return config;
|
||||
}
|
||||
|
||||
static inline uint recursive_mutex_enter_count(recursive_mutex_t *mutex) {
|
||||
return mutex->enter_count;
|
||||
}
|
||||
|
||||
static inline lock_owner_id_t recursive_mutex_owner(recursive_mutex_t *mutex) {
|
||||
return mutex->owner;
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_wake_up(async_context_t *self_base) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
if (self_base->core_num == get_core_num()) {
|
||||
// on same core, can dispatch directly
|
||||
irq_set_pending(self->low_priority_irq_num);
|
||||
} else {
|
||||
// remove the existing alarm (it may have already fired) so we don't overflow the pool with repeats
|
||||
//
|
||||
// note that force_alarm_id is not protected here, however if we miss removing one, they will fire
|
||||
// almost immediately anyway (since they were set in the past)
|
||||
alarm_id_t force_alarm_id = self->force_alarm_id;
|
||||
if (force_alarm_id > 0) {
|
||||
alarm_pool_cancel_alarm(self->alarm_pool, force_alarm_id);
|
||||
}
|
||||
// we cause an early timeout (0 is always in the past) on the alarm_pool core
|
||||
// note that by the time this returns, the timer may already have fired, so we
|
||||
// may end up setting self->force_alarm_id to a stale timer id, but that is fine as we
|
||||
// will harmlessly cancel it again next time
|
||||
self->force_alarm_id = alarm_pool_add_alarm_at_force_in_context(self->alarm_pool, from_us_since_boot(0),
|
||||
alarm_handler, self);
|
||||
}
|
||||
#else
|
||||
// on same core, can dispatch directly
|
||||
irq_set_pending(self->low_priority_irq_num);
|
||||
#endif
|
||||
sem_release(&self->work_needed_sem);
|
||||
}
|
||||
|
||||
// Prevent background processing in the low-priority IRQ and access by the other core
// These methods are called in low-priority IRQ context and on either core
// They can be called recursively
|
||||
static inline void lock_acquire(async_context_threadsafe_background_t *self) {
|
||||
// Lock the other core and stop low_prio_irq running
|
||||
recursive_mutex_enter_blocking(&self->lock_mutex);
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_lock_check(async_context_t *self_base) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
// check that the caller currently holds the lock
|
||||
if (recursive_mutex_enter_count(&self->lock_mutex) < 1 || recursive_mutex_owner(&self->lock_mutex) != lock_get_caller_owner_id()) {
|
||||
panic_compact("async_context lock_check failed");
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct sync_func_call{
|
||||
async_when_pending_worker_t worker;
|
||||
semaphore_t sem;
|
||||
uint32_t (*func)(void *param);
|
||||
void *param;
|
||||
uint32_t rc;
|
||||
} sync_func_call_t;
|
||||
|
||||
static void handle_sync_func_call(async_context_t *context, async_when_pending_worker_t *worker) {
|
||||
sync_func_call_t *call = (sync_func_call_t *)worker;
|
||||
call->rc = call->func(call->param);
|
||||
sem_release(&call->sem);
|
||||
async_context_remove_when_pending_worker(context, worker);
|
||||
}
|
||||
|
||||
|
||||
static void lock_release(async_context_threadsafe_background_t *self) {
|
||||
bool outermost = 1 == recursive_mutex_enter_count(&self->lock_mutex);
|
||||
// note that it is *not* a requirement to have low_prio_irq_missed handled on the
|
||||
// same core and in the low-priority IRQ, as we are only *logically* a single thread. the user
|
||||
// is already free to call from either core, and this would only happen on a different
|
||||
// core, if the user *themselves* is acquiring the lock from other cores anyway
|
||||
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
bool wake_other_core = false;
|
||||
#endif
|
||||
if (outermost) {
|
||||
// note that we always do a processing on outermost lock exit, to facilitate cases
|
||||
// like lwIP where we have no notification when lwIP timers are added.
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
if (self->core.core_num == get_core_num()) {
|
||||
process_under_lock(self);
|
||||
} else if (async_context_base_needs_servicing(&self->core)) {
|
||||
// have to wake up other core
|
||||
wake_other_core = true;
|
||||
}
|
||||
#else
|
||||
process_under_lock(self);
|
||||
#endif
|
||||
}
|
||||
recursive_mutex_exit(&self->lock_mutex);
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
if (wake_other_core) {
|
||||
async_context_threadsafe_background_wake_up(&self->core);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
uint32_t async_context_threadsafe_background_execute_sync(async_context_t *self_base, uint32_t (*func)(void *param), void *param) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t*)self_base;
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
if (self_base->core_num != get_core_num()) {
|
||||
hard_assert(!recursive_mutex_enter_count(&self->lock_mutex));
|
||||
sync_func_call_t call;
|
||||
call.worker.do_work = handle_sync_func_call;
|
||||
call.func = func;
|
||||
call.param = param;
|
||||
sem_init(&call.sem, 0, 1);
|
||||
async_context_add_when_pending_worker(self_base, &call.worker);
|
||||
async_context_set_work_pending(self_base, &call.worker);
|
||||
sem_acquire_blocking(&call.sem);
|
||||
return call.rc;
|
||||
}
|
||||
#endif
|
||||
// short-circuit if we are on the right core
|
||||
lock_acquire(self);
|
||||
uint32_t rc = func(param);
|
||||
lock_release(self);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static bool low_prio_irq_init(async_context_threadsafe_background_t *self, uint8_t priority) {
|
||||
assert(get_core_num() == self->core.core_num);
|
||||
int irq = user_irq_claim_unused(false);
|
||||
if (irq < 0) return false;
|
||||
self->low_priority_irq_num = (uint8_t) irq;
|
||||
uint index = irq - FIRST_USER_IRQ;
|
||||
assert(index < count_of(async_contexts_by_user_irq));
|
||||
async_contexts_by_user_irq[index] = self;
|
||||
irq_set_exclusive_handler(self->low_priority_irq_num, low_priority_irq_handler);
|
||||
irq_set_enabled(self->low_priority_irq_num, true);
|
||||
irq_set_priority(self->low_priority_irq_num, priority);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void low_prio_irq_deinit(async_context_threadsafe_background_t *self) {
|
||||
if (self->low_priority_irq_num > 0) {
|
||||
assert(get_core_num() == self->core.core_num);
|
||||
irq_set_enabled(self->low_priority_irq_num, false);
|
||||
irq_remove_handler(self->low_priority_irq_num, low_priority_irq_handler);
|
||||
user_irq_unclaim(self->low_priority_irq_num);
|
||||
self->low_priority_irq_num = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t alarm_handler(__unused alarm_id_t id, void *user_data) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t*)user_data;
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
self->force_alarm_id = 0;
|
||||
#endif
|
||||
self->alarm_pending = false;
|
||||
async_context_threadsafe_background_wake_up(&self->core);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool async_context_threadsafe_background_init(async_context_threadsafe_background_t *self, async_context_threadsafe_background_config_t *config) {
|
||||
memset(self, 0, sizeof(*self));
|
||||
self->core.type = &template;
|
||||
self->core.flags = ASYNC_CONTEXT_FLAG_CALLBACK_FROM_IRQ | ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
|
||||
self->core.core_num = get_core_num();
|
||||
if (config->custom_alarm_pool) {
|
||||
self->alarm_pool = config->custom_alarm_pool;
|
||||
} else {
|
||||
#if PICO_TIME_DEFAULT_ALARM_POOL_DISABLED
|
||||
self->alarm_pool = alarm_pool_create_with_unused_hardware_alarm(ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS);
|
||||
self->alarm_pool_owned = true;
|
||||
#else
|
||||
self->alarm_pool = alarm_pool_get_default();
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
if (self->core.core_num != alarm_pool_core_num(self->alarm_pool)) {
|
||||
self->alarm_pool = alarm_pool_create_with_unused_hardware_alarm(ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS);
|
||||
self->alarm_pool_owned = true;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
assert(self->core.core_num == alarm_pool_core_num(self->alarm_pool));
|
||||
sem_init(&self->work_needed_sem, 1, 1);
|
||||
recursive_mutex_init(&self->lock_mutex);
|
||||
bool ok = low_prio_irq_init(self, config->low_priority_irq_handler_priority);
|
||||
return ok;
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_set_work_pending(async_context_t *self_base, async_when_pending_worker_t *worker) {
|
||||
worker->work_pending = true;
|
||||
async_context_threadsafe_background_wake_up(self_base);
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_deinit(async_context_t *self_base) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
// todo we do not currently handle this correctly; we could, but seems like a rare case
|
||||
assert(get_core_num() == self_base->core_num);
|
||||
low_prio_irq_deinit(self);
|
||||
if (self->alarm_id > 0) alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
if (self->alarm_pool_owned) {
|
||||
alarm_pool_destroy(self->alarm_pool);
|
||||
}
|
||||
#endif
|
||||
// acquire the lock to make sure the callback is not running (we have already disabled the IRQ)
|
||||
recursive_mutex_enter_blocking(&self->lock_mutex);
|
||||
recursive_mutex_exit(&self->lock_mutex);
|
||||
memset(self, 0, sizeof(*self));
|
||||
}
|
||||
|
||||
static void process_under_lock(async_context_threadsafe_background_t *self) {
|
||||
#ifndef NDEBUG
|
||||
async_context_threadsafe_background_lock_check(&self->core);
|
||||
assert(self->core.core_num == get_core_num());
|
||||
#endif
|
||||
do {
|
||||
absolute_time_t next_time = async_context_base_execute_once(&self->core);
|
||||
// if the next wakeup time is in the past then loop
|
||||
if (absolute_time_diff_us(get_absolute_time(), next_time) <= 0) continue;
|
||||
// if there is no next wakeup time, we're done
|
||||
if (is_at_the_end_of_time(next_time)) {
|
||||
// cancel the alarm early (we will have been called soon after an alarm wakeup), so that
|
||||
// we don't risk alarm_id collision.
|
||||
if (self->alarm_id > 0) {
|
||||
alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
|
||||
self->alarm_id = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
// the following is an optimization; we are often called much more frequently than timeouts actually change,
|
||||
// and removing and re-adding the timers has some non-trivial overhead (10s of microseconds), we choose
|
||||
// to allow the existing timeout to run to completion, and then re-asses from there, unless the new wakeup
|
||||
// time is before the last one set.
|
||||
//
|
||||
// note that alarm_pending is not protected, however, if it is wrong, it is wrong in the sense that it is
|
||||
// false when it should be true, so if it is wrong due to a race, we will cancel and re-add the alarm which is safe.
|
||||
if (self->alarm_pending && absolute_time_diff_us(self->last_set_alarm_time, next_time) > 0) break;
|
||||
// cancel the existing alarm (it may no longer exist)
|
||||
if (self->alarm_id > 0) alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
|
||||
self->last_set_alarm_time = next_time;
|
||||
self->alarm_pending = true;
|
||||
self->alarm_id = alarm_pool_add_alarm_at(self->alarm_pool, next_time, alarm_handler, self, false);
|
||||
if (self->alarm_id > 0) break;
|
||||
self->alarm_pending = false;
|
||||
} while (true);
|
||||
}
|
||||
|
||||
// Low priority interrupt handler to perform background processing
|
||||
static void low_priority_irq_handler(void) {
|
||||
uint index = __get_current_exception() - 16 - FIRST_USER_IRQ;
|
||||
assert(index < count_of(async_contexts_by_user_irq));
|
||||
async_context_threadsafe_background_t *self = async_contexts_by_user_irq[index];
|
||||
if (!self) return;
|
||||
assert(self->core.core_num == get_core_num());
|
||||
if (recursive_mutex_try_enter(&self->lock_mutex, NULL)) {
|
||||
// if the recurse count is not 1 then we have pre-empted something which held the lock on the same core,
|
||||
// so we cannot do processing here (however processing will be done when that lock is released)
|
||||
if (recursive_mutex_enter_count(&self->lock_mutex) == 1) {
|
||||
process_under_lock(self);
|
||||
}
|
||||
recursive_mutex_exit(&self->lock_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_wait_until(__unused async_context_t *self_base, absolute_time_t until) {
|
||||
// can be called in IRQs, in which case we just have to wait
|
||||
if (__get_current_exception()) {
|
||||
busy_wait_until(until);
|
||||
} else {
|
||||
sleep_until(until);
|
||||
}
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
sem_acquire_block_until(&self->work_needed_sem, until);
|
||||
}
|
||||
|
||||
static bool async_context_threadsafe_background_add_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
lock_acquire(self);
|
||||
bool rc = async_context_base_add_at_time_worker(self_base, worker);
|
||||
lock_release(self);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static bool async_context_threadsafe_background_remove_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
lock_acquire(self);
|
||||
bool rc = async_context_base_remove_at_time_worker(self_base, worker);
|
||||
lock_release(self);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static bool async_context_threadsafe_background_add_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
lock_acquire(self);
|
||||
bool rc = async_context_base_add_when_pending_worker(self_base, worker);
|
||||
lock_release(self);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static bool async_context_threadsafe_background_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
|
||||
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
|
||||
lock_acquire(self);
|
||||
bool rc = async_context_base_remove_when_pending_worker(self_base, worker);
|
||||
lock_release(self);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_acquire_lock_blocking(async_context_t *self_base) {
|
||||
lock_acquire((async_context_threadsafe_background_t *) self_base);
|
||||
}
|
||||
|
||||
static void async_context_threadsafe_background_release_lock(async_context_t *self_base) {
|
||||
lock_release((async_context_threadsafe_background_t *)self_base);
|
||||
}
|
||||
|
||||
static const async_context_type_t template = {
|
||||
.type = ASYNC_CONTEXT_THREADSAFE_BACKGROUND,
|
||||
.acquire_lock_blocking = async_context_threadsafe_background_acquire_lock_blocking,
|
||||
.release_lock = async_context_threadsafe_background_release_lock,
|
||||
.lock_check = async_context_threadsafe_background_lock_check,
|
||||
.execute_sync = async_context_threadsafe_background_execute_sync,
|
||||
.add_at_time_worker = async_context_threadsafe_background_add_at_time_worker,
|
||||
.remove_at_time_worker = async_context_threadsafe_background_remove_at_time_worker,
|
||||
.add_when_pending_worker = async_context_threadsafe_background_add_when_pending_worker,
|
||||
.remove_when_pending_worker = async_context_threadsafe_background_when_pending_worker,
|
||||
.set_work_pending = async_context_threadsafe_background_set_work_pending,
|
||||
.poll = 0,
|
||||
.wait_until = async_context_threadsafe_background_wait_until,
|
||||
.wait_for_work_until = async_context_threadsafe_background_wait_for_work_until,
|
||||
.deinit = async_context_threadsafe_background_deinit,
|
||||
};
src/rp2_common/pico_async_context/include/pico/async_context.h (new file, +462)
@ -0,0 +1,462 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
/** \file pico/async_context.h
|
||||
* \defgroup pico_async_context pico_async_context
|
||||
*
|
||||
* An \ref async_context provides a logically single-threaded context for performing work, and responding
|
||||
* to asynchronous events. Thus an async_context instance is suitable for servicing third-party libraries
|
||||
* that are not re-entrant.
|
||||
*
|
||||
* The "context" in async_context refers to the fact that when calling workers or timeouts within the
|
||||
* async_context various pre-conditions hold:
|
||||
*
|
||||
* <ol>
|
||||
* <li>That there is a single logical thread of execution; i.e. that the context does not call any worker
|
||||
* functions concurrently.
|
||||
* <li>That the context always calls workers from the same processor core, as most uses of async_context rely on interaction
|
||||
* with IRQs which are themselves core-specific.
|
||||
* </ol>
|
||||
*
|
||||
* The async_context provides two mechanisms for asynchronous work:
|
||||
*
|
||||
* * <em>when_pending</em> workers, which are processed whenever they have work pending. See \ref async_context_add_when_pending_worker,
|
||||
* \ref async_context_remove_when_pending_worker, and \ref async_context_set_work_pending, the latter of which can be used from an interrupt handler
|
||||
* to signal that servicing work is required to be performed by the worker from the regular async_context.
|
||||
* * <em>at_time</em> workers, that are executed at or after a specific time.
|
||||
*
|
||||
* Note: "when pending" workers with work pending are executed before "at time" workers.
|
||||
*
|
||||
* The async_context provides locking mechanisms, see \ref async_context_acquire_lock_blocking,
|
||||
* \ref async_context_release_lock and \ref async_context_check_lock which can be used by
|
||||
* external code to ensure execution of external code does not happen concurrently with worker code.
|
||||
* Locked code runs on the calling core, however \ref async_context_execute_sync is provided to
|
||||
* synchronously run a function from the core of the async_context.
|
||||
*
|
||||
* The SDK ships with the following default async_contexts:
|
||||
*
|
||||
* async_context_poll - this context is not thread-safe, and the user is responsible for calling
|
||||
* \ref async_context_poll() periodically, and can use async_context_wait_for_work_until() to sleep
|
||||
* between calls until work is needed if the user has nothing else to do.
|
||||
*
|
||||
* async_context_threadsafe_background - in order to work in the background, a low priority IRQ is used
|
||||
* to handle callbacks. Code is usually invoked from this IRQ context, but may be invoked after any other code
|
||||
* that uses the async context in another (non-IRQ) context on the same core. Calling \ref async_context_poll() is
|
||||
* not required, and is a no-op. This context implements async_context locking and is thus safe to call
|
||||
* from either core, according to the specific notes on each API.
|
||||
*
|
||||
* async_context_freertos - Work is performed from a separate "async_context" task, however once again, code may
|
||||
* also be invoked after a direct use of the async_context on the same core that the async_context belongs to. Calling
|
||||
* \ref async_context_poll() is not required, and is a no-op. This context implements async_context locking and is thus
|
||||
* safe to call from any task, and from either core, according to the specific notes on each API.
|
||||
*
|
||||
* Each async_context provides bespoke methods of instantiation which are provided in the corresponding headers (e.g.
|
||||
* async_context_poll.h, async_context_threadsafe_background.h, async_context_freertos.h).
|
||||
* async_contexts are de-initialized by the common async_context_deinit() method.
|
||||
*
|
||||
* Multiple async_context instances can be used by a single application, and they will operate independently.
|
||||
*/
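As a rough illustration of the worker model described above, the following sketch registers a repeating one-second "at time" worker; the names (heartbeat_work, heartbeat_worker) and the interval are assumptions for illustration, and "context" may be any initialized async_context flavour.

// Illustrative sketch (names and timing are assumptions, not SDK definitions).
static void heartbeat_work(async_context_t *context, async_at_time_worker_t *worker) {
    // ... do the periodic work here ...
    // the worker was removed just before this call, so re-add it to make it repeat
    async_context_add_at_time_worker_in_ms(context, worker, 1000);
}

static async_at_time_worker_t heartbeat_worker = { .do_work = heartbeat_work };

// call once with any initialized async_context (poll, threadsafe_background or freertos)
static void start_heartbeat(async_context_t *context) {
    async_context_add_at_time_worker_in_ms(context, &heartbeat_worker, 1000);
}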
|
||||
|
||||
#ifndef _PICO_ASYNC_CONTEXT_H
|
||||
#define _PICO_ASYNC_CONTEXT_H
|
||||
|
||||
#include "pico.h"
|
||||
#include "pico/time.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
enum {
|
||||
ASYNC_CONTEXT_POLL = 1,
|
||||
ASYNC_CONTEXT_THREADSAFE_BACKGROUND = 2,
|
||||
ASYNC_CONTEXT_FREERTOS = 3,
|
||||
};
|
||||
|
||||
typedef struct async_context async_context_t;
|
||||
|
||||
/*! \brief A "timeout" instance used by an async_context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* A "timeout" represents some future action that must be taken at a specific time.
|
||||
* Its methods are called from the async_context, under lock, at the given time.
|
||||
*
|
||||
* \see async_context_add_worker_at
|
||||
* \see async_context_add_worker_in_ms
|
||||
*/
|
||||
typedef struct async_work_on_timeout {
|
||||
/*!
|
||||
* private linked list pointer
|
||||
*/
|
||||
struct async_work_on_timeout *next;
|
||||
/*!
|
||||
* Method called when the timeout is reached; may not be NULL
|
||||
*
|
||||
* Note, that when this method is called, the timeout has been removed from the async_context, so
|
||||
* if you want the timeout to repeat, you should re-add it during this callback
|
||||
* @param context
|
||||
* @param timeout
|
||||
*/
|
||||
void (*do_work)(async_context_t *context, struct async_work_on_timeout *timeout);
|
||||
/*!
|
||||
* The next timeout time; this should only be modified during the above methods
|
||||
* or via async_context methods
|
||||
*/
|
||||
absolute_time_t next_time;
|
||||
/*!
|
||||
* User data associated with the timeout instance
|
||||
*/
|
||||
void *user_data;
|
||||
} async_at_time_worker_t;
|
||||
|
||||
/*! \brief A "worker" instance used by an async_context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* A "worker" represents some external entity that must do work in response
|
||||
* to some external stimulus (usually an IRQ).
|
||||
* Its methods are called from the async_context, under lock, at the given time.
|
||||
*
|
||||
* \see async_context_add_worker_at
|
||||
* \see async_context_add_worker_in_ms
|
||||
*/
|
||||
typedef struct async_when_pending_worker {
|
||||
/*!
|
||||
* private linked list pointer
|
||||
*/
|
||||
struct async_when_pending_worker *next;
|
||||
/*!
|
||||
* Called by the async_context when the worker has been marked as having "work pending"
|
||||
*
|
||||
* @param context the async_context
|
||||
* @param worker the worker that has work pending
|
||||
*/
|
||||
void (*do_work)(async_context_t *context, struct async_when_pending_worker *worker);
|
||||
/**
|
||||
* True if the worker needs do_work called
|
||||
*/
|
||||
bool work_pending;
|
||||
} async_when_pending_worker_t;
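As a hedged sketch of how a "when pending" worker is typically driven from an interrupt (the handler, worker and context names here are hypothetical):

// Hypothetical example: an IRQ handler defers its processing to a "when pending" worker.
static void sensor_do_work(async_context_t *context, struct async_when_pending_worker *worker) {
    // drain the captured data here, in the async_context's (non-IRQ) logical thread
}

static async_when_pending_worker_t sensor_worker = { .do_work = sensor_do_work };
static async_context_t *sensor_context; // assumed to be set once at initialization

static void sensor_irq_handler(void) {
    // safe from IRQ context: this only marks the worker pending; do_work runs later
    async_context_set_work_pending(sensor_context, &sensor_worker);
}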
|
||||
|
||||
#define ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ 0x1
|
||||
#define ASYNC_CONTEXT_FLAG_CALLBACK_FROM_IRQ 0x2
|
||||
#define ASYNC_CONTEXT_FLAG_POLLED 0x4
|
||||
|
||||
/*!
|
||||
* \brief Implementation of an async_context
|
||||
* \ingroup pico_async_context
|
||||
*/
|
||||
typedef struct async_context_type {
|
||||
uint16_t type;
|
||||
// see wrapper functions for documentation
|
||||
void (*acquire_lock_blocking)(async_context_t *self);
|
||||
void (*release_lock)(async_context_t *self);
|
||||
void (*lock_check)(async_context_t *self);
|
||||
uint32_t (*execute_sync)(async_context_t *context, uint32_t (*func)(void *param), void *param);
|
||||
bool (*add_at_time_worker)(async_context_t *self, async_at_time_worker_t *worker);
|
||||
bool (*remove_at_time_worker)(async_context_t *self, async_at_time_worker_t *worker);
|
||||
bool (*add_when_pending_worker)(async_context_t *self, async_when_pending_worker_t *worker);
|
||||
bool (*remove_when_pending_worker)(async_context_t *self, async_when_pending_worker_t *worker);
|
||||
void (*set_work_pending)(async_context_t *self, async_when_pending_worker_t *worker);
|
||||
void (*poll)(async_context_t *self); // may be NULL
|
||||
void (*wait_until)(async_context_t *self, absolute_time_t until);
|
||||
void (*wait_for_work_until)(async_context_t *self, absolute_time_t until);
|
||||
void (*deinit)(async_context_t *self);
|
||||
} async_context_type_t;
|
||||
|
||||
struct async_context {
|
||||
const async_context_type_t *type;
|
||||
async_when_pending_worker_t *when_pending_list;
|
||||
async_at_time_worker_t *at_time_list;
|
||||
absolute_time_t next_time;
|
||||
uint16_t flags;
|
||||
uint8_t core_num;
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief Acquire the async_context lock
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* The owner of the async_context lock is the logic owner of the async_context
|
||||
* and other work related to this async_context will not happen concurrently.
|
||||
*
|
||||
* This method may be called in a nested fashion by the lock owner.
|
||||
*
|
||||
* \note the async_context lock is nestable by the same caller, so an internal count is maintained
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
*
|
||||
* \see async_context_release_lock
|
||||
*/
|
||||
static inline void async_context_acquire_lock_blocking(async_context_t *context) {
|
||||
context->type->acquire_lock_blocking(context);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Release the async_context lock
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* \note the async_context lock may be acquired in a nested fashion by the same caller, so an internal count is maintained. When the
* outermost lock is released, a check is made for work which might have been skipped while the lock was held, and any such work
* may be performed during this call IF the call is made from the same core that the async_context belongs to.
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
*
|
||||
* \see async_context_acquire_lock_blocking
|
||||
*/
|
||||
static inline void async_context_release_lock(async_context_t *context) {
|
||||
context->type->release_lock(context);
|
||||
}
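For example, external code might bracket direct access to state that the workers also use; a minimal sketch ("context" is a pointer to an initialized async_context, and the "shared state" is whatever non-re-entrant data or library the application protects this way):

// Sketch: no worker callback runs concurrently with the code inside the bracket.
async_context_acquire_lock_blocking(context);
// ... read/modify shared state, or call into the non-re-entrant library ...
async_context_release_lock(context);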
|
||||
|
||||
/*!
|
||||
* \brief Assert if the caller does not own the lock for the async_context
|
||||
* \ingroup pico_async_context
|
||||
* \note this method is thread-safe
|
||||
*
|
||||
* \param context the async_context
|
||||
*/
|
||||
static inline void async_context_lock_check(async_context_t *context) {
|
||||
context->type->lock_check(context);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Execute work synchronously on the core the async_context belongs to.
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* This method is intended for code external to the async_context (e.g. another thread/task) to
|
||||
* execute a function with the same guarantees (single core, logical thread of execution) that
|
||||
* async_context workers are called with.
|
||||
*
|
||||
* \note you should NOT call this method while holding the async_context's lock
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param func the function to call
|
||||
* \param param the parameter to pass to the function
|
||||
* \return the return value from func
|
||||
*/
|
||||
static inline uint32_t async_context_execute_sync(async_context_t *context, uint32_t (*func)(void *param), void *param) {
|
||||
return context->type->execute_sync(context, func, param);
|
||||
}
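A small sketch of the intended pattern; the helper and the worker-owned counter are assumptions for illustration:

// State that, by convention, is only modified by workers running in the context.
static uint32_t worker_owned_counter;

// Helper executed on the async_context's core via execute_sync.
static uint32_t read_counter(void *param) {
    *(uint32_t *)param = worker_owned_counter;
    return 0;
}

// From another core or task (not holding the lock):
//   uint32_t snapshot;
//   async_context_execute_sync(context, read_counter, &snapshot);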
|
||||
|
||||
/*!
|
||||
* \brief Add an "at time" worker to a context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* An "at time" worker will run at or after a specific point in time, and is automatically when (just before) it runs.
|
||||
*
|
||||
* The time to fire is specified in the next_time field of the worker.
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param worker the "at time" worker to add
|
||||
* \return true if the worker was added, false if the worker was already present.
|
||||
*/
|
||||
static inline bool async_context_add_at_time_worker(async_context_t *context, async_at_time_worker_t *worker) {
|
||||
return context->type->add_at_time_worker(context, worker);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Add an "at time" worker to a context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* An "at time" worker will run at or after a specific point in time, and is automatically when (just before) it runs.
|
||||
*
|
||||
* The time to fire is specified by the at parameter.
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param worker the "at time" worker to add
|
||||
* \param at the time to fire at
|
||||
* \return true if the worker was added, false if the worker was already present.
|
||||
*/
|
||||
static inline bool async_context_add_at_time_worker_at(async_context_t *context, async_at_time_worker_t *worker, absolute_time_t at) {
|
||||
worker->next_time = at;
|
||||
return context->type->add_at_time_worker(context, worker);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Add an "at time" worker to a context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* An "at time" worker will run at or after a specific point in time, and is automatically when (just before) it runs.
|
||||
*
|
||||
* The time to fire is specified by a delay via the ms parameter
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param worker the "at time" worker to add
|
||||
* \param ms the number of milliseconds from now to fire after
|
||||
* \return true if the worker was added, false if the worker was already present.
|
||||
*/
|
||||
static inline bool async_context_add_at_time_worker_in_ms(async_context_t *context, async_at_time_worker_t *worker, uint32_t ms) {
|
||||
worker->next_time = make_timeout_time_ms(ms);
|
||||
return context->type->add_at_time_worker(context, worker);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Remove an "at time" worker from a context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param worker the "at time" worker to remove
|
||||
* \return true if the worker was removed, false if the instance not present.
|
||||
*/
|
||||
static inline bool async_context_remove_at_time_worker(async_context_t *context, async_at_time_worker_t *worker) {
|
||||
return context->type->remove_at_time_worker(context, worker);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Add a "when pending" worker to a context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* An "when pending" worker will run when it is pending (can be set via \ref async_context_set_work_pending), and
|
||||
* is NOT automatically removed when it runs.
|
||||
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param worker the "when pending" worker to add
|
||||
* \return true if the worker was added, false if the worker was already present.
|
||||
*/
|
||||
static inline bool async_context_add_when_pending_worker(async_context_t *context, async_when_pending_worker_t *worker) {
|
||||
return context->type->add_when_pending_worker(context, worker);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Remove a "when pending" worker from a context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param worker the "when pending" worker to remove
|
||||
* \return true if the worker was removed, false if the instance not present.
|
||||
*/
|
||||
static inline bool async_context_remove_when_pending_worker(async_context_t *context, async_when_pending_worker_t *worker) {
|
||||
return context->type->remove_when_pending_worker(context, worker);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Mark a "when pending" worker as having work pending
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* The worker will be run from the async_context at a later time.
|
||||
*
|
||||
* \note this method may be called from any context including IRQs
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param worker the "when pending" worker to mark as pending.
|
||||
*/
|
||||
static inline void async_context_set_work_pending(async_context_t *context, async_when_pending_worker_t *worker) {
|
||||
context->type->set_work_pending(context, worker);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Perform any pending work for polling style async_context
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* For a polled async_context (e.g. \ref async_context_poll) the user is responsible for calling this method
|
||||
* periodically to perform any required work.
|
||||
*
|
||||
* This method may immediately perform outstanding work on other context types, but is not required to.
|
||||
*
|
||||
* \param context the async_context
|
||||
*/
|
||||
static inline void async_context_poll(async_context_t *context) {
|
||||
if (context->type->poll) context->type->poll(context);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief sleep until the specified time in an async_context callback safe way
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
|
||||
* worker method called by the async_context or from any other non-IRQ context.
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param until the time to sleep until
|
||||
*/
|
||||
static inline void async_context_wait_until(async_context_t *context, absolute_time_t until) {
|
||||
context->type->wait_until(context, until);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Block until work needs to be done or the specified time has been reached
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* \note this method should not be called from a worker callback
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param until the time to return at if no work is required
|
||||
*/
|
||||
static inline void async_context_wait_for_work_until(async_context_t *context, absolute_time_t until) {
|
||||
context->type->wait_for_work_until(context, until);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Block until work needs to be done or the specified number of milliseconds have passed
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* \note this method should not be called from a worker callback
|
||||
*
|
||||
* \param context the async_context
|
||||
* \param ms the number of milliseconds to return after if no work is required
|
||||
*/
|
||||
static inline void async_context_wait_for_work_ms(async_context_t *context, uint32_t ms) {
|
||||
async_context_wait_for_work_until(context, make_timeout_time_ms(ms));
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Return the processor core this async_context belongs to
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* \param context the async_context
|
||||
* \return the physical core number
|
||||
*/
|
||||
static inline uint async_context_core_num(const async_context_t *context) {
|
||||
return context->core_num;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief End async_context processing, and free any resources
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* Note the user should clean up any resources associated with workers
|
||||
* in the async_context themselves.
|
||||
*
|
||||
* Asynchronous (non-polled) async_contexts guarantee that no
|
||||
* callback is being called once this method returns.
|
||||
*
|
||||
* \param context the async_context
|
||||
|
||||
*/
|
||||
static inline void async_context_deinit(async_context_t *context) {
|
||||
context->type->deinit(context);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _PICO_ASYNC_CONTEXT_BASE_H
|
||||
#define _PICO_ASYNC_CONTEXT_BASE_H
|
||||
|
||||
#include "pico/async_context.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// common functions for async_context implementations to use
|
||||
bool async_context_base_add_at_time_worker(async_context_t *self, async_at_time_worker_t *worker);
|
||||
bool async_context_base_remove_at_time_worker(async_context_t *self, async_at_time_worker_t *worker);
|
||||
|
||||
bool async_context_base_add_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker);
|
||||
bool async_context_base_remove_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker);
|
||||
|
||||
async_at_time_worker_t *async_context_base_remove_ready_at_time_worker(async_context_t *self);
|
||||
void async_context_base_refresh_next_timeout(async_context_t *self);
|
||||
|
||||
absolute_time_t async_context_base_execute_once(async_context_t *self);
|
||||
bool async_context_base_needs_servicing(async_context_t *self);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@@ -0,0 +1,120 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _PICO_ASYNC_CONTEXT_FREERTOS_H
|
||||
#define _PICO_ASYNC_CONTEXT_FREERTOS_H
|
||||
|
||||
/** \file pico/async_context.h
|
||||
* \defgroup async_context_freertos async_context_freertos
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* async_context_freertos provides an implementation of \ref async_context that handles asynchronous
|
||||
* work in a separate FreeRTOS task.
|
||||
*/
|
||||
#include "pico/async_context.h"
|
||||
|
||||
// FreeRTOS includes
|
||||
#include "FreeRTOS.h"
|
||||
#include "semphr.h"
|
||||
#include "timers.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_PRIORITY
|
||||
#define ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_PRIORITY ( tskIDLE_PRIORITY + 4)
|
||||
#endif
|
||||
|
||||
#ifndef ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_STACK_SIZE
|
||||
#define ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_STACK_SIZE configMINIMAL_STACK_SIZE
|
||||
#endif
|
||||
|
||||
typedef struct async_context_freertos async_context_freertos_t;
|
||||
|
||||
/**
|
||||
* \brief Configuration object for async_context_freertos instances.
|
||||
*/
|
||||
typedef struct async_context_freertos_config {
|
||||
/**
|
||||
* Task priority for the async_context task
|
||||
*/
|
||||
UBaseType_t task_priority;
|
||||
/**
|
||||
* Stack size for the async_context task
|
||||
*/
|
||||
configSTACK_DEPTH_TYPE task_stack_size;
|
||||
/**
|
||||
* the core ID (see \ref portGET_CORE_ID()) to pin the task to.
|
||||
* This is only relevant in SMP mode.
|
||||
*/
|
||||
#if configUSE_CORE_AFFINITY && configNUM_CORES > 1
|
||||
UBaseType_t task_core_id;
|
||||
#endif
|
||||
} async_context_freertos_config_t;
|
||||
|
||||
struct async_context_freertos {
|
||||
async_context_t core;
|
||||
SemaphoreHandle_t lock_mutex;
|
||||
SemaphoreHandle_t work_needed_sem;
|
||||
TimerHandle_t timer_handle;
|
||||
TaskHandle_t task_handle;
|
||||
uint8_t nesting;
|
||||
volatile bool task_should_exit;
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief Initialize an async_context_freertos instance using the specified configuration
|
||||
* \ingroup async_context_freertos
|
||||
*
|
||||
* If this method succeeds (returns true), then the async_context is available for use
|
||||
* and can be de-initialized by calling async_context_deinit().
|
||||
*
|
||||
* \param self a pointer to async_context_freertos structure to initialize
|
||||
* \param config the configuration object specifying characteristics for the async_context
|
||||
* \return true if initialization is successful, false otherwise
|
||||
*/
|
||||
bool async_context_freertos_init(async_context_freertos_t *self, async_context_freertos_config_t *config);
|
||||
|
||||
/*!
|
||||
* \brief Return a copy of the default configuration object used by \ref async_context_freertos_init_with_defaults()
|
||||
* \ingroup async_context_freertos
|
||||
*
|
||||
* The caller can then modify just the settings it cares about, and call \ref async_context_freertos_init()
|
||||
* \return the default configuration object
|
||||
*/
|
||||
static inline async_context_freertos_config_t async_context_freertos_default_config(void) {
|
||||
async_context_freertos_config_t config = {
|
||||
.task_priority = ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_PRIORITY,
|
||||
.task_stack_size = ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_STACK_SIZE,
|
||||
#if configUSE_CORE_AFFINITY
|
||||
.task_core_id = (UBaseType_t)-1, // none
|
||||
#endif
|
||||
};
|
||||
return config;
|
||||
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Initialize an async_context_freertos instance with default values
|
||||
* \ingroup async_context_freertos
|
||||
*
|
||||
* If this method succeeds (returns true), then the async_context is available for use
|
||||
* and can be de-initialized by calling async_context_deinit().
|
||||
*
|
||||
* \param self a pointer to async_context_freertos structure to initialize
|
||||
* \return true if initialization is successful, false otherwise
|
||||
*/
|
||||
static inline bool async_context_freertos_init_with_defaults(async_context_freertos_t *self) {
|
||||
async_context_freertos_config_t config = async_context_freertos_default_config();
|
||||
return async_context_freertos_init(self, &config);
|
||||
}
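To adjust a single setting, something like the following sketch can be used; the instance name and the chosen priority are assumptions, not SDK defaults:

// Sketch: FreeRTOS-backed context with a custom task priority.
static async_context_freertos_t freertos_context;

static bool start_async_context(void) {
    async_context_freertos_config_t cfg = async_context_freertos_default_config();
    cfg.task_priority = tskIDLE_PRIORITY + 3; // illustrative priority for this application
    return async_context_freertos_init(&freertos_context, &cfg);
}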
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _PICO_ASYNC_CONTEXT_POLL_H
|
||||
#define _PICO_ASYNC_CONTEXT_POLL_H
|
||||
|
||||
/** \file pico/async_context.h
|
||||
* \defgroup async_context_poll async_context_poll
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* async_context_poll provides an implementation of \ref async_context that is intended for use with a simple
|
||||
* polling loop on one core. It is not thread safe.
|
||||
*
|
||||
* The \ref async_context_poll() method must be called periodically to handle asynchronous work that may now be
|
||||
* pending. \ref async_context_wait_for_work_until() may be used to block a polling loop until there is work to do,
|
||||
* and prevent tight spinning.
|
||||
*/
|
||||
#include "pico/async_context.h"
|
||||
#include "pico/sem.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct async_context_poll {
|
||||
async_context_t core;
|
||||
semaphore_t sem;
|
||||
} async_context_poll_t;
|
||||
|
||||
/*!
|
||||
* \brief Initialize an async_context_poll instance with default values
|
||||
* \ingroup async_context_poll
|
||||
*
|
||||
* If this method succeeds (returns true), then the async_context is available for use
|
||||
* and can be de-initialized by calling async_context_deinit().
|
||||
*
|
||||
* \param self a pointer to async_context_poll structure to initialize
|
||||
* \return true if initialization is successful, false otherwise
|
||||
*/
|
||||
bool async_context_poll_init_with_defaults(async_context_poll_t *self);
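The intended polling pattern looks roughly like this sketch (assuming the application has nothing else to do between events; the instance and function names are illustrative):

// Sketch: block until the context has work rather than spinning.
static async_context_poll_t poll_context;

static void run_polling_loop(void) {
    if (!async_context_poll_init_with_defaults(&poll_context)) return;
    // ... add workers to &poll_context.core here ...
    for (;;) {
        async_context_poll(&poll_context.core);
        async_context_wait_for_work_until(&poll_context.core, at_the_end_of_time);
    }
}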
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _PICO_ASYNC_CONTEXT_THREADSAFE_BACKGROUND_H
|
||||
#define _PICO_ASYNC_CONTEXT_THREADSAFE_BACKGROUND_H
|
||||
|
||||
/** \file pico/async_context.h
|
||||
* \defgroup async_context_threadsafe_background async_context_threadsafe_background
|
||||
* \ingroup pico_async_context
|
||||
*
|
||||
* async_context_threadsafe_background provides an implementation of \ref async_context that handles asynchronous
|
||||
* work in a low priority IRQ, and there is no need for the user to poll for work.
|
||||
*
|
||||
* \note The workers used with this async_context MUST be safe to call from an IRQ.
|
||||
*/
|
||||
|
||||
#include "pico/async_context.h"
|
||||
#include "pico/sem.h"
|
||||
#include "pico/mutex.h"
|
||||
#include "hardware/irq.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE LIB_PICO_MULTICORE
|
||||
#endif
|
||||
|
||||
typedef struct async_context_threadsafe_background async_context_threadsafe_background_t;
|
||||
|
||||
/**
|
||||
* \brief Configuration object for async_context_threadsafe_background instances.
|
||||
*/
|
||||
typedef struct async_context_threadsafe_background_config {
|
||||
/**
|
||||
* the priority of the low priority IRQ
|
||||
*/
|
||||
uint8_t low_priority_irq_handler_priority;
|
||||
/**
|
||||
* a specific alarm pool to use (or NULL to use the default)
|
||||
*
|
||||
* \note this alarm pool MUST be on the same core as the async_context
|
||||
*
|
||||
* The default is the "default alarm pool" (see \ref alarm_pool_get_default()),
* if it is available and on the same core, otherwise a private alarm_pool
* instance created during initialization.
|
||||
*/
|
||||
alarm_pool_t *custom_alarm_pool;
|
||||
} async_context_threadsafe_background_config_t;
|
||||
|
||||
struct async_context_threadsafe_background {
|
||||
async_context_t core;
|
||||
alarm_pool_t *alarm_pool; // this must be on the same core as core_num
|
||||
absolute_time_t last_set_alarm_time;
|
||||
recursive_mutex_t lock_mutex;
|
||||
semaphore_t work_needed_sem;
|
||||
volatile alarm_id_t alarm_id;
|
||||
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
|
||||
volatile alarm_id_t force_alarm_id;
|
||||
bool alarm_pool_owned;
|
||||
#endif
|
||||
uint8_t low_priority_irq_num;
|
||||
volatile bool alarm_pending;
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief Initialize an async_context_threadsafe_background instance using the specified configuration
|
||||
* \ingroup async_context_threadsafe_background
|
||||
*
|
||||
* If this method succeeds (returns true), then the async_context is available for use
|
||||
* and can be de-initialized by calling async_context_deinit().
|
||||
*
|
||||
* \param self a pointer to async_context_threadsafe_background structure to initialize
|
||||
* \param config the configuration object specifying characteristics for the async_context
|
||||
* \return true if initialization is successful, false otherwise
|
||||
*/
|
||||
bool async_context_threadsafe_background_init(async_context_threadsafe_background_t *self, async_context_threadsafe_background_config_t *config);
|
||||
|
||||
/*!
|
||||
* \brief Return a copy of the default configuration object used by \ref async_context_threadsafe_background_init_with_defaults()
|
||||
* \ingroup async_context_threadsafe_background
|
||||
*
|
||||
* The caller can then modify just the settings it cares about, and call \ref async_context_threadsafe_background_init()
|
||||
* \return the default configuration object
|
||||
*/
|
||||
async_context_threadsafe_background_config_t async_context_threadsafe_background_default_config(void);
|
||||
|
||||
/*!
|
||||
* \brief Initialize an async_context_threadsafe_background instance with default values
|
||||
* \ingroup async_context_threadsafe_background
|
||||
*
|
||||
* If this method succeeds (returns true), then the async_context is available for use
|
||||
* and can be de-initialized by calling async_context_deinit().
|
||||
*
|
||||
* \param self a pointer to async_context_threadsafe_background structure to initialize
|
||||
* \return true if initialization is successful, false otherwise
|
||||
*/
|
||||
static inline bool async_context_threadsafe_background_init_with_defaults(async_context_threadsafe_background_t *self) {
|
||||
async_context_threadsafe_background_config_t config = async_context_threadsafe_background_default_config();
|
||||
return async_context_threadsafe_background_init(self, &config);
|
||||
}
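A brief sketch of bringing one up with the defaults (the instance name is illustrative):

// Sketch: background context serviced from the low priority IRQ; no polling required.
static async_context_threadsafe_background_t bg_context;

static bool start_background_context(void) {
    if (!async_context_threadsafe_background_init_with_defaults(&bg_context)) return false;
    // workers added to &bg_context.core are now serviced from the low priority IRQ
    return true;
}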
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@@ -130,7 +130,7 @@ static inline void *rom_hword_as_ptr(uint16_t rom_address) {
|
||||
#endif
|
||||
|
||||
/*!
|
||||
* \brief Lookup a bootrom function by code. This method is forceably inlined into the caller for FLASH/RAM sensitive code usage
|
||||
* \brief Lookup a bootrom function by code. This method is forcibly inlined into the caller for FLASH/RAM sensitive code usage
|
||||
* \ingroup pico_bootrom
|
||||
* \param code the code
|
||||
* \return a pointer to the function, or NULL if the code does not match any bootrom function
|
||||
|
@@ -1,7 +1,5 @@
|
||||
if (PICO_CYW43_SUPPORTED) # set by BOARD=pico-w
|
||||
if (TARGET cyw43_driver_picow)
|
||||
message("Enabling build support for Pico W wireless.")
|
||||
|
||||
pico_add_impl_library(pico_cyw43_arch)
|
||||
target_sources(pico_cyw43_arch INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/cyw43_arch.c
|
||||
@@ -15,45 +13,70 @@ if (PICO_CYW43_SUPPORTED) # set by BOARD=pico-w
|
||||
|
||||
target_link_libraries(pico_cyw43_arch INTERFACE
|
||||
pico_unique_id
|
||||
cyw43_driver_picow)
|
||||
cyw43_driver_picow # driver for pico w
|
||||
pico_cyw43_driver # integration with async_context
|
||||
)
|
||||
|
||||
if (NOT TARGET pico_lwip)
|
||||
message(WARNING "lwIP is not available; Full Pico W wireless support will be unavailable too")
|
||||
message(WARNING "lwIP is not available; Full Pico W wireless support will be unavailable")
|
||||
else()
|
||||
add_library(pico_cyw43_arch_lwip_poll INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_lwip_poll INTERFACE
|
||||
message("Pico W wireless build support available.")
|
||||
add_library(pico_cyw43_arch_poll INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_poll INTERFACE
|
||||
pico_cyw43_arch
|
||||
pico_lwip
|
||||
pico_lwip_nosys)
|
||||
target_compile_definitions(pico_cyw43_arch_lwip_poll INTERFACE
|
||||
CYW43_LWIP=1
|
||||
pico_async_context_poll)
|
||||
target_compile_definitions(pico_cyw43_arch_poll INTERFACE
|
||||
PICO_CYW43_ARCH_POLL=1
|
||||
)
|
||||
|
||||
add_library(pico_cyw43_arch_lwip_threadsafe_background INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_lwip_threadsafe_background INTERFACE
|
||||
add_library(pico_cyw43_arch_lwip_poll INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_lwip_poll INTERFACE
|
||||
pico_lwip_nosys
|
||||
pico_cyw43_arch_poll)
|
||||
target_compile_definitions(pico_cyw43_arch_lwip_poll INTERFACE
|
||||
CYW43_LWIP=1
|
||||
)
|
||||
|
||||
add_library(pico_cyw43_arch_threadsafe_background INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_threadsafe_background INTERFACE
|
||||
pico_cyw43_arch
|
||||
pico_lwip
|
||||
pico_lwip_nosys)
|
||||
pico_async_context_threadsafe_background)
|
||||
target_compile_definitions(pico_cyw43_arch_threadsafe_background INTERFACE
|
||||
PICO_CYW43_ARCH_THREADSAFE_BACKGROUND=1
|
||||
)
|
||||
add_library(pico_cyw43_arch_lwip_threadsafe_background INTERFACE)
|
||||
|
||||
target_link_libraries(pico_cyw43_arch_lwip_threadsafe_background INTERFACE
|
||||
pico_lwip_nosys
|
||||
pico_cyw43_arch_threadsafe_background)
|
||||
target_compile_definitions(pico_cyw43_arch_lwip_threadsafe_background INTERFACE
|
||||
CYW43_LWIP=1
|
||||
PICO_CYW43_ARCH_THREADSAFE_BACKGROUND=1
|
||||
)
|
||||
|
||||
add_library(pico_cyw43_arch_sys_freertos INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_sys_freertos INTERFACE
|
||||
pico_cyw43_arch
|
||||
pico_async_context_freertos)
|
||||
target_compile_definitions(pico_cyw43_arch_sys_freertos INTERFACE
|
||||
PICO_CYW43_ARCH_FREERTOS=1
|
||||
)
|
||||
|
||||
add_library(pico_cyw43_arch_lwip_sys_freertos INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_lwip_sys_freertos INTERFACE
|
||||
pico_cyw43_arch
|
||||
pico_lwip
|
||||
pico_lwip_contrib_freertos)
|
||||
pico_lwip_freertos
|
||||
pico_cyw43_arch_sys_freertos)
|
||||
target_compile_definitions(pico_cyw43_arch_lwip_sys_freertos INTERFACE
|
||||
CYW43_LWIP=1
|
||||
LWIP_PROVIDE_ERRNO=1
|
||||
PICO_CYW43_ARCH_FREERTOS=1
|
||||
# now the default
|
||||
#PICO_LWIP_CUSTOM_LOCK_TCPIP_CORE=1 # we want to override the lwip locking mechanism to use our mutex
|
||||
)
|
||||
endif()
|
||||
|
||||
add_library(pico_cyw43_arch_none INTERFACE)
|
||||
target_link_libraries(pico_cyw43_arch_none INTERFACE pico_cyw43_arch)
|
||||
target_link_libraries(pico_cyw43_arch_none INTERFACE
|
||||
pico_cyw43_arch
|
||||
pico_async_context_threadsafe_background)
|
||||
target_compile_definitions(pico_cyw43_arch_none INTERFACE
|
||||
CYW43_LWIP=0
|
||||
PICO_CYW43_ARCH_THREADSAFE_BACKGROUND=1 # none still uses threadsafe_background to make gpio use easy
|
||||
|
@@ -14,7 +14,7 @@
|
||||
#include "cyw43_ll.h"
|
||||
#include "cyw43_stats.h"
|
||||
|
||||
#if CYW43_ARCH_DEBUG_ENABLED
|
||||
#if PICO_CYW43_ARCH_DEBUG_ENABLED
|
||||
#define CYW43_ARCH_DEBUG(...) printf(__VA_ARGS__)
|
||||
#else
|
||||
#define CYW43_ARCH_DEBUG(...) ((void)0)
|
||||
@@ -22,6 +22,12 @@
|
||||
|
||||
static uint32_t country_code = PICO_CYW43_ARCH_DEFAULT_COUNTRY_CODE;
|
||||
|
||||
static async_context_t *async_context;
|
||||
|
||||
void cyw43_arch_set_async_context(async_context_t *context) {
|
||||
async_context = context;
|
||||
}
|
||||
|
||||
void cyw43_arch_enable_sta_mode() {
|
||||
assert(cyw43_is_initialized(&cyw43_state));
|
||||
cyw43_wifi_set_up(&cyw43_state, CYW43_ITF_STA, true, cyw43_arch_get_country_code());
|
||||
@@ -39,9 +45,9 @@ void cyw43_arch_enable_ap_mode(const char *ssid, const char *password, uint32_t
|
||||
cyw43_wifi_set_up(&cyw43_state, CYW43_ITF_AP, true, cyw43_arch_get_country_code());
|
||||
}
|
||||
|
||||
#if CYW43_ARCH_DEBUG_ENABLED
|
||||
#if PICO_CYW43_ARCH_DEBUG_ENABLED
|
||||
// Return a string for the wireless state
|
||||
static const char* status_name(int status)
|
||||
const char* cyw43_tcpip_link_status_name(int status)
|
||||
{
|
||||
switch (status) {
|
||||
case CYW43_LINK_DOWN:
|
||||
@@ -79,14 +85,14 @@ int cyw43_arch_wifi_connect_until(const char *ssid, const char *pw, uint32_t aut
|
||||
int new_status = cyw43_tcpip_link_status(&cyw43_state, CYW43_ITF_STA);
|
||||
if (new_status != status) {
|
||||
status = new_status;
|
||||
CYW43_ARCH_DEBUG("connect status: %s\n", status_name(status));
|
||||
CYW43_ARCH_DEBUG("connect status: %s\n", cyw43_tcpip_link_status_name(status));
|
||||
}
|
||||
// in case polling is required
|
||||
cyw43_arch_poll();
|
||||
best_effort_wfe_or_timeout(until);
|
||||
if (time_reached(until)) {
|
||||
return PICO_ERROR_TIMEOUT;
|
||||
}
|
||||
// Do polling
|
||||
cyw43_arch_poll();
|
||||
cyw43_arch_wait_for_work_until(until);
|
||||
}
|
||||
return status == CYW43_LINK_UP ? 0 : status;
|
||||
}
|
||||
@@ -143,3 +149,64 @@ bool cyw43_arch_gpio_get(uint wl_gpio) {
|
||||
cyw43_gpio_get(&cyw43_state, (int)wl_gpio, &value);
|
||||
return value;
|
||||
}
|
||||
|
||||
async_context_t *cyw43_arch_async_context(void) {
|
||||
return async_context;
|
||||
}
|
||||
|
||||
void cyw43_arch_poll(void)
|
||||
{
|
||||
async_context_poll(async_context);
|
||||
}
|
||||
|
||||
void cyw43_arch_wait_for_work_until(absolute_time_t until) {
|
||||
async_context_wait_for_work_until(async_context, until);
|
||||
}
|
||||
|
||||
// Prevent background processing in PendSV and access by the other core
|
||||
// These methods are called in PendSV context and on either core
|
||||
// They can be called recursively
|
||||
void cyw43_thread_enter(void) {
|
||||
async_context_acquire_lock_blocking(async_context);
|
||||
}
|
||||
|
||||
void cyw43_thread_exit(void) {
|
||||
async_context_release_lock(async_context);
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
void cyw43_thread_lock_check(void) {
|
||||
async_context_lock_check(async_context);
|
||||
}
|
||||
#endif
|
||||
|
||||
void cyw43_await_background_or_timeout_us(uint32_t timeout_us) {
|
||||
async_context_wait_for_work_until(async_context, make_timeout_time_us(timeout_us));
|
||||
}
|
||||
|
||||
void cyw43_delay_ms(uint32_t ms) {
|
||||
async_context_wait_until(async_context, make_timeout_time_ms(ms));
|
||||
}
|
||||
|
||||
void cyw43_delay_us(uint32_t us) {
|
||||
async_context_wait_until(async_context, make_timeout_time_us(us));
|
||||
}
|
||||
|
||||
#if !CYW43_LWIP
|
||||
static void no_lwip_fail() {
|
||||
panic("cyw43 has no ethernet interface");
|
||||
}
|
||||
void __attribute__((weak)) cyw43_cb_tcpip_init(cyw43_t *self, int itf) {
|
||||
}
|
||||
void __attribute__((weak)) cyw43_cb_tcpip_deinit(cyw43_t *self, int itf) {
|
||||
}
|
||||
void __attribute__((weak)) cyw43_cb_tcpip_set_link_up(cyw43_t *self, int itf) {
|
||||
no_lwip_fail();
|
||||
}
|
||||
void __attribute__((weak)) cyw43_cb_tcpip_set_link_down(cyw43_t *self, int itf) {
|
||||
no_lwip_fail();
|
||||
}
|
||||
void __attribute__((weak)) cyw43_cb_process_ethernet(void *cb_data, int itf, size_t len, const uint8_t *buf) {
|
||||
no_lwip_fail();
|
||||
}
|
||||
#endif
|
||||
|
@@ -4,250 +4,71 @@
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#if PICO_CYW43_ARCH_FREERTOS
|
||||
|
||||
#include "pico/cyw43_arch.h"
|
||||
|
||||
#include "hardware/gpio.h"
|
||||
#include "hardware/irq.h"
|
||||
#include "hardware/sync.h"
|
||||
|
||||
#include "cyw43_stats.h"
|
||||
#include "pico/cyw43_driver.h"
|
||||
#include "pico/async_context_freertos.h"
|
||||
|
||||
#if CYW43_LWIP
|
||||
#include "pico/lwip_freertos.h"
|
||||
#include <lwip/tcpip.h>
|
||||
#endif
|
||||
|
||||
#if PICO_CYW43_ARCH_FREERTOS
|
||||
|
||||
// FreeRTOS includes
|
||||
#include "FreeRTOS.h"
|
||||
#include "timers.h"
|
||||
#include "semphr.h"
|
||||
|
||||
#if NO_SYS
|
||||
#error example_cyw43_arch_frertos_sys requires NO_SYS=0
|
||||
#error example_cyw43_arch_freetos_sys requires NO_SYS=0
|
||||
#endif
|
||||
|
||||
#ifndef CYW43_TASK_PRIORITY
|
||||
#define CYW43_TASK_PRIORITY ( tskIDLE_PRIORITY + 4)
|
||||
static async_context_freertos_t cyw43_async_context_freertos;
|
||||
|
||||
async_context_t *cyw43_arch_init_default_async_context(void) {
|
||||
async_context_freertos_config_t config = async_context_freertos_default_config();
|
||||
#ifdef CYW43_TASK_PRIORITY
|
||||
config.task_priority = CYW43_TASK_PRIORITY;
|
||||
#endif
|
||||
|
||||
#ifndef CYW43_SLEEP_CHECK_MS
|
||||
#define CYW43_SLEEP_CHECK_MS 50 // How often to run lwip callback
|
||||
#ifdef CYW43_TASK_STACK_SIZE
|
||||
config.task_stack_size = CYW43_TASK_STACK_SIZE;
|
||||
#endif
|
||||
|
||||
#define CYW43_GPIO_IRQ_HANDLER_PRIORITY 0x40
|
||||
|
||||
static void signal_cyw43_task(void);
|
||||
|
||||
#if !LWIP_TCPIP_CORE_LOCKING_INPUT
|
||||
static SemaphoreHandle_t cyw43_mutex;
|
||||
#endif
|
||||
static TimerHandle_t timer_handle;
|
||||
static TaskHandle_t cyw43_task_handle;
|
||||
static volatile bool cyw43_task_should_exit;
|
||||
static SemaphoreHandle_t cyw43_worker_ran_sem;
|
||||
static uint8_t cyw43_core_num;
|
||||
|
||||
// Called in low priority pendsv interrupt only to do lwip processing and check cyw43 sleep
|
||||
static void periodic_worker(__unused TimerHandle_t handle)
|
||||
{
|
||||
#if CYW43_USE_STATS
|
||||
static uint32_t counter;
|
||||
if (counter++ % (30000 / LWIP_SYS_CHECK_MS) == 0) {
|
||||
cyw43_dump_stats();
|
||||
}
|
||||
#endif
|
||||
|
||||
CYW43_STAT_INC(LWIP_RUN_COUNT);
|
||||
if (cyw43_poll) {
|
||||
if (cyw43_sleep > 0) {
|
||||
if (--cyw43_sleep == 0) {
|
||||
signal_cyw43_task();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void cyw43_await_background_or_timeout_us(uint32_t timeout_us) {
|
||||
// if we are called from within an IRQ, then don't wait (we are only ever called in a polling loop)
|
||||
assert(!portCHECK_IF_IN_ISR());
|
||||
xSemaphoreTake(cyw43_worker_ran_sem, pdMS_TO_TICKS(timeout_us / 1000));
|
||||
}
|
||||
|
||||
// GPIO interrupt handler to tell us the cyw43 has work to do
|
||||
static void gpio_irq_handler(void)
|
||||
{
|
||||
uint32_t events = gpio_get_irq_event_mask(CYW43_PIN_WL_HOST_WAKE);
|
||||
if (events & GPIO_IRQ_LEVEL_HIGH) {
|
||||
// As we use a high level interrupt, it will go off forever until it's serviced
|
||||
// So disable the interrupt until this is done. It's re-enabled again by CYW43_POST_POLL_HOOK
|
||||
// which is called at the end of cyw43_poll_func
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, false);
|
||||
signal_cyw43_task();
|
||||
CYW43_STAT_INC(IRQ_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
// Low priority interrupt handler to perform background processing
|
||||
static void cyw43_task(__unused void *param) {
|
||||
do {
|
||||
ulTaskNotifyTake(pdFALSE, portMAX_DELAY);
|
||||
if (cyw43_task_should_exit) break;
|
||||
cyw43_thread_enter();
|
||||
if (cyw43_poll) cyw43_poll();
|
||||
cyw43_thread_exit();
|
||||
xSemaphoreGive(cyw43_worker_ran_sem);
|
||||
__sev(); // it is possible regular code is waiting on a WFE on the other core
|
||||
} while (true);
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
static void tcpip_init_done(void *param) {
|
||||
xSemaphoreGive((SemaphoreHandle_t)param);
|
||||
if (async_context_freertos_init(&cyw43_async_context_freertos, &config))
|
||||
return &cyw43_async_context_freertos.core;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int cyw43_arch_init(void) {
|
||||
cyw43_core_num = get_core_num();
|
||||
#if configUSE_CORE_AFFINITY && configNUM_CORES > 1
|
||||
TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
|
||||
UBaseType_t affinity = vTaskCoreAffinityGet(task_handle);
|
||||
// we must bind the main task to one core during init
|
||||
vTaskCoreAffinitySet(task_handle, 1 << portGET_CORE_ID());
|
||||
#endif
|
||||
#if !LWIP_TCPIP_CORE_LOCKING_INPUT
|
||||
cyw43_mutex = xSemaphoreCreateRecursiveMutex();
|
||||
#endif
|
||||
cyw43_init(&cyw43_state);
|
||||
cyw43_worker_ran_sem = xSemaphoreCreateBinary();
|
||||
|
||||
#if CYW43_LWIP
|
||||
SemaphoreHandle_t init_sem = xSemaphoreCreateBinary();
|
||||
tcpip_init(tcpip_init_done, init_sem);
|
||||
xSemaphoreTake(init_sem, portMAX_DELAY);
|
||||
#endif
|
||||
|
||||
timer_handle = xTimerCreate( "cyw43_sleep_timer", // Just a text name, not used by the kernel.
|
||||
pdMS_TO_TICKS(CYW43_SLEEP_CHECK_MS),
|
||||
pdTRUE, // The timers will auto-reload themselves when they expire.
|
||||
NULL,
|
||||
periodic_worker);
|
||||
|
||||
if (!timer_handle) {
|
||||
return PICO_ERROR_GENERIC;
|
||||
async_context_t *context = cyw43_arch_async_context();
|
||||
if (!context) {
|
||||
context = cyw43_arch_init_default_async_context();
|
||||
if (!context) return PICO_ERROR_GENERIC;
|
||||
cyw43_arch_set_async_context(context);
|
||||
}
|
||||
|
||||
gpio_add_raw_irq_handler_with_order_priority(CYW43_PIN_WL_HOST_WAKE, gpio_irq_handler, CYW43_GPIO_IRQ_HANDLER_PRIORITY);
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, true);
|
||||
irq_set_enabled(IO_IRQ_BANK0, true);
|
||||
|
||||
cyw43_task_should_exit = false;
|
||||
xTaskCreate(cyw43_task, "CYW43 Worker", configMINIMAL_STACK_SIZE, NULL, CYW43_TASK_PRIORITY, &cyw43_task_handle);
|
||||
#if configUSE_CORE_AFFINITY && configNUM_CORES > 1
|
||||
// the cyw43 task must be on the same core so it can restore IRQs
|
||||
vTaskCoreAffinitySet(cyw43_task_handle, 1 << portGET_CORE_ID());
|
||||
bool ok = cyw43_driver_init(context);
|
||||
#if CYW43_LWIP
|
||||
ok &= lwip_freertos_init(context);
|
||||
#endif
|
||||
|
||||
#if configUSE_CORE_AFFINITY && configNUM_CORES > 1
|
||||
vTaskCoreAffinitySet(task_handle, affinity);
|
||||
#endif
|
||||
|
||||
return PICO_OK;
|
||||
if (!ok) {
|
||||
cyw43_arch_deinit();
|
||||
return PICO_ERROR_GENERIC;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void cyw43_arch_deinit(void) {
|
||||
assert(cyw43_core_num == get_core_num());
|
||||
if (timer_handle) {
|
||||
xTimerDelete(timer_handle, 0);
|
||||
timer_handle = 0;
|
||||
}
|
||||
if (cyw43_task_handle) {
|
||||
cyw43_task_should_exit = true;
|
||||
signal_cyw43_task();
|
||||
}
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, false);
|
||||
gpio_remove_raw_irq_handler(CYW43_PIN_WL_HOST_WAKE, gpio_irq_handler);
|
||||
cyw43_deinit(&cyw43_state);
|
||||
}
|
||||
|
||||
void cyw43_post_poll_hook(void) {
|
||||
assert(cyw43_core_num == get_core_num());
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, true);
|
||||
}
|
||||
|
||||
// This is called in the gpio and low_prio_irq interrupts and on either core
|
||||
static void signal_cyw43_task(void) {
|
||||
if (cyw43_task_handle) {
|
||||
if (portCHECK_IF_IN_ISR()) {
|
||||
vTaskNotifyGiveFromISR(cyw43_task_handle, NULL);
|
||||
} else {
|
||||
xTaskNotifyGive(cyw43_task_handle);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(void (*func)(void)) {
|
||||
assert(func == cyw43_poll);
|
||||
signal_cyw43_task();
|
||||
}
|
||||
|
||||
static int nesting;
|
||||
// Prevent background processing in pensv and access by the other core
|
||||
// These methods are called in pensv context and on either core
|
||||
// They can be called recursively
|
||||
void cyw43_thread_enter(void) {
|
||||
// Lock the other core and stop low_prio_irq running
|
||||
assert(!portCHECK_IF_IN_ISR());
|
||||
#if LWIP_TCPIP_CORE_LOCKING_INPUT
|
||||
// we must share their mutex otherwise we can get deadlocks with two different recursive mutexes
|
||||
LOCK_TCPIP_CORE();
|
||||
#else
|
||||
xSemaphoreTakeRecursive(cyw43_mutex, portMAX_DELAY);
|
||||
async_context_t *context = cyw43_arch_async_context();
|
||||
// there is a bit of a circular dependency here between lwIP and cyw43_driver. We
|
||||
// shut down cyw43_driver first as it has IRQs calling back into lwIP. Also lwIP itself
|
||||
// does not actually get shut down.
|
||||
// todo add a "pause" method to async_context if we need to provide some atomicity (we
|
||||
// don't want to take the lock as these methods may invoke execute_sync())
|
||||
cyw43_driver_deinit(context);
|
||||
#if CYW43_LWIP
|
||||
lwip_freertos_deinit(context);
|
||||
#endif
|
||||
nesting++;
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
void cyw43_thread_lock_check(void) {
|
||||
// Lock the other core and stop low_prio_irq running
|
||||
#if LWIP_TCPIP_CORE_LOCKING_INPUT
|
||||
assert(xSemaphoreGetMutexHolder(lock_tcpip_core.mut) == xTaskGetCurrentTaskHandle());
|
||||
#else
|
||||
assert(xSemaphoreGetMutexHolder(cyw43_mutex) == xTaskGetCurrentTaskHandle());
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
// Re-enable background processing
|
||||
void cyw43_thread_exit(void) {
|
||||
// Run low_prio_irq if needed
|
||||
--nesting;
|
||||
#if LWIP_TCPIP_CORE_LOCKING_INPUT
|
||||
// we must share their mutex otherwise we can get deadlocks with two different recursive mutexes
|
||||
UNLOCK_TCPIP_CORE();
|
||||
#else
|
||||
xSemaphoreGiveRecursive(cyw43_mutex);
|
||||
#endif
|
||||
|
||||
if (!nesting && cyw43_task_handle != xTaskGetCurrentTaskHandle())
|
||||
signal_cyw43_task();
|
||||
}
|
||||
|
||||
void cyw43_delay_ms(uint32_t ms) {
|
||||
assert(!portCHECK_IF_IN_ISR());
|
||||
vTaskDelay(pdMS_TO_TICKS(ms));
|
||||
}
|
||||
|
||||
void cyw43_delay_us(uint32_t us) {
|
||||
if (us >= 1000) {
|
||||
cyw43_delay_ms(us / 1000);
|
||||
} else {
|
||||
vTaskDelay(1);
|
||||
// if it is our context, then we de-init it.
|
||||
if (context == &cyw43_async_context_freertos.core) {
|
||||
async_context_deinit(context);
|
||||
cyw43_arch_set_async_context(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void cyw43_arch_poll() {
|
||||
}
|
||||
|
||||
#endif
|
@@ -4,110 +4,62 @@
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include "hardware/gpio.h"
|
||||
#include "hardware/irq.h"
|
||||
#include "pico/sem.h"
|
||||
#include "pico/cyw43_arch.h"
|
||||
#include "cyw43_stats.h"
|
||||
#include "pico/cyw43_driver.h"
|
||||
|
||||
#if PICO_CYW43_ARCH_POLL
|
||||
#include <lwip/init.h>
|
||||
#include "lwip/timeouts.h"
|
||||
#include "pico/async_context_poll.h"
|
||||
#if CYW43_LWIP
|
||||
#include "pico/lwip_nosys.h"
|
||||
#endif
|
||||
|
||||
#if CYW43_LWIP && !NO_SYS
|
||||
#error PICO_CYW43_ARCH_POLL requires lwIP NO_SYS=1
|
||||
#endif
|
||||
|
||||
#define CYW43_GPIO_IRQ_HANDLER_PRIORITY 0x40
|
||||
static async_context_poll_t cyw43_async_context_poll;
|
||||
|
||||
#ifndef NDEBUG
|
||||
uint8_t cyw43_core_num;
|
||||
#endif
|
||||
|
||||
bool cyw43_poll_required;
|
||||
|
||||
// GPIO interrupt handler to tell us the cyw43 has work to do
|
||||
static void gpio_irq_handler(void)
|
||||
{
|
||||
uint32_t events = gpio_get_irq_event_mask(CYW43_PIN_WL_HOST_WAKE);
|
||||
if (events & GPIO_IRQ_LEVEL_HIGH) {
|
||||
// As we use a high level interrupt, it will go off forever until it's serviced
|
||||
// So disable the interrupt until this is done. It's re-enabled again by CYW43_POST_POLL_HOOK
|
||||
// which is called at the end of cyw43_poll_func
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, false);
|
||||
// also clear the force bit which we use to programmatically cause this handler to fire (on the right core)
|
||||
io_irq_ctrl_hw_t *irq_ctrl_base = get_core_num() ?
|
||||
&iobank0_hw->proc1_irq_ctrl : &iobank0_hw->proc0_irq_ctrl;
|
||||
hw_clear_bits(&irq_ctrl_base->intf[CYW43_PIN_WL_HOST_WAKE/8], GPIO_IRQ_LEVEL_HIGH << (4 * (CYW43_PIN_WL_HOST_WAKE & 7)));
|
||||
cyw43_schedule_internal_poll_dispatch(cyw43_poll);
|
||||
CYW43_STAT_INC(IRQ_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
void cyw43_post_poll_hook(void) {
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, true);
|
||||
async_context_t *cyw43_arch_init_default_async_context(void) {
|
||||
if (async_context_poll_init_with_defaults(&cyw43_async_context_poll))
|
||||
return &cyw43_async_context_poll.core;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int cyw43_arch_init(void) {
|
||||
#ifndef NDEBUG
|
||||
cyw43_core_num = (uint8_t)get_core_num();
|
||||
#endif
|
||||
cyw43_init(&cyw43_state);
|
||||
static bool done_lwip_init;
|
||||
if (!done_lwip_init) {
|
||||
lwip_init();
|
||||
done_lwip_init = true;
|
||||
async_context_t *context = cyw43_arch_async_context();
|
||||
if (!context) {
|
||||
context = cyw43_arch_init_default_async_context();
|
||||
if (!context) return PICO_ERROR_GENERIC;
|
||||
cyw43_arch_set_async_context(context);
|
||||
}
|
||||
bool ok = cyw43_driver_init(context);
|
||||
#if CYW43_LWIP
|
||||
ok &= lwip_nosys_init(context);
|
||||
#endif
|
||||
if (!ok) {
|
||||
cyw43_arch_deinit();
|
||||
return PICO_ERROR_GENERIC;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
gpio_add_raw_irq_handler_with_order_priority(CYW43_PIN_WL_HOST_WAKE, gpio_irq_handler, CYW43_GPIO_IRQ_HANDLER_PRIORITY);
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, true);
|
||||
irq_set_enabled(IO_IRQ_BANK0, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void cyw43_arch_deinit(void) {
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, false);
|
||||
gpio_remove_raw_irq_handler(CYW43_PIN_WL_HOST_WAKE, gpio_irq_handler);
|
||||
cyw43_deinit(&cyw43_state);
|
||||
}
|
||||
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(__unused void (*func)(void)) {
|
||||
cyw43_poll_required = true;
|
||||
}
|
||||
|
||||
void cyw43_arch_poll(void)
|
||||
{
|
||||
CYW43_STAT_INC(LWIP_RUN_COUNT);
|
||||
sys_check_timeouts();
|
||||
if (cyw43_poll) {
|
||||
if (cyw43_sleep > 0) {
|
||||
// todo check this; but we don't want to advance too quickly
|
||||
static absolute_time_t last_poll_time;
|
||||
absolute_time_t current = get_absolute_time();
|
||||
if (absolute_time_diff_us(last_poll_time, current) > 1000) {
|
||||
if (--cyw43_sleep == 0) {
|
||||
cyw43_poll_required = 1;
|
||||
}
|
||||
last_poll_time = current;
|
||||
}
|
||||
}
|
||||
// todo graham i removed this because otherwise polling can do nothing during connect.
|
||||
// in the polling only case, the caller is responsible for throttling how often they call anyway.
|
||||
// The alternative would be to have the call to this function from the init set the poll_required flag first
|
||||
// if (cyw43_poll_required) {
|
||||
cyw43_poll();
|
||||
// cyw43_poll_required = false;
|
||||
// }
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
void cyw43_thread_check() {
|
||||
if (__get_current_exception() || get_core_num() != cyw43_core_num) {
|
||||
panic("cyw43_thread_lock_check failed");
|
||||
}
|
||||
}
|
||||
async_context_t *context = cyw43_arch_async_context();
|
||||
// there is a bit of a circular dependency here between lwIP and cyw43_driver. We
|
||||
// shut down cyw43_driver first as it has IRQs calling back into lwIP. Also lwIP itself
|
||||
// does not actually get shut down.
|
||||
// todo add a "pause" method to async_context if we need to provide some atomicity (we
|
||||
// don't want to take the lock as these methods may invoke execute_sync())
|
||||
cyw43_driver_deinit(context);
|
||||
#if CYW43_LWIP
|
||||
lwip_nosys_deinit(context);
|
||||
#endif
|
||||
// if it is our context, then we de-init it.
|
||||
if (context == &cyw43_async_context_poll.core) {
|
||||
async_context_deinit(context);
|
||||
cyw43_arch_set_async_context(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -4,317 +4,68 @@
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "pico/cyw43_arch.h"
|
||||
#include "pico/mutex.h"
|
||||
#include "pico/sem.h"
|
||||
|
||||
#include "hardware/gpio.h"
|
||||
#include "hardware/irq.h"
|
||||
|
||||
#include "cyw43_stats.h"
|
||||
|
||||
#if CYW43_LWIP
|
||||
#include <lwip/init.h>
|
||||
#include "lwip/timeouts.h"
|
||||
#endif
|
||||
|
||||
// note same code
|
||||
#if PICO_CYW43_ARCH_THREADSAFE_BACKGROUND
|
||||
|
||||
#if PICO_CYW43_ARCH_THREADSAFE_BACKGROUND && CYW43_LWIP && !NO_SYS
|
||||
#include "pico/cyw43_arch.h"
|
||||
#include "pico/cyw43_driver.h"
|
||||
#include "pico/async_context_threadsafe_background.h"
|
||||
|
||||
#if CYW43_LWIP
|
||||
#include "pico/lwip_nosys.h"
|
||||
#endif
|
||||
|
||||
#if CYW43_LWIP && !NO_SYS
|
||||
#error PICO_CYW43_ARCH_THREADSAFE_BACKGROUND requires lwIP NO_SYS=1
|
||||
#endif
|
||||
#if PICO_CYW43_ARCH_THREADSAFE_BACKGROUND && CYW43_LWIP && MEM_LIBC_MALLOC
|
||||
#if CYW43_LWIP && MEM_LIBC_MALLOC
|
||||
// would attempt to use malloc from IRQ context
|
||||
#error MEM_LIBC_MALLOC is incompatible with PICO_CYW43_ARCH_THREADSAFE_BACKGROUND
|
||||
#endif
|
||||
// todo right now we are now always doing a cyw43_dispatch along with a lwip one when hopping cores in low_prio_irq_schedule_dispatch
|
||||
|
||||
#ifndef CYW43_SLEEP_CHECK_MS
|
||||
#define CYW43_SLEEP_CHECK_MS 50 // How often to run lwip callback
|
||||
#endif
|
||||
static alarm_id_t periodic_alarm = -1;
|
||||
static async_context_threadsafe_background_t cyw43_async_context_threadsafe_background;
|
||||
|
||||
static inline uint recursive_mutex_enter_count(recursive_mutex_t *mutex) {
|
||||
return mutex->enter_count;
|
||||
}
|
||||
|
||||
static inline lock_owner_id_t recursive_mutex_owner(recursive_mutex_t *mutex) {
|
||||
return mutex->owner;
|
||||
}
|
||||
|
||||
#define CYW43_GPIO_IRQ_HANDLER_PRIORITY 0x40
|
||||
|
||||
enum {
|
||||
CYW43_DISPATCH_SLOT_CYW43 = 0,
|
||||
CYW43_DISPATCH_SLOT_ADAPTER,
|
||||
CYW43_DISPATCH_SLOT_ENUM_COUNT
|
||||
};
|
||||
#ifndef CYW43_DISPATCH_SLOT_COUNT
|
||||
#define CYW43_DISPATCH_SLOT_COUNT CYW43_DISPATCH_SLOT_ENUM_COUNT
|
||||
#endif
|
||||
|
||||
typedef void (*low_prio_irq_dispatch_t)(void);
|
||||
static void low_prio_irq_schedule_dispatch(size_t slot, low_prio_irq_dispatch_t f);
|
||||
|
||||
static uint8_t cyw43_core_num;
|
||||
#ifndef NDEBUG
|
||||
static bool in_low_priority_irq;
|
||||
#endif
|
||||
static uint8_t low_priority_irq_num;
|
||||
static bool low_priority_irq_missed;
|
||||
static low_prio_irq_dispatch_t low_priority_irq_dispatch_slots[CYW43_DISPATCH_SLOT_COUNT];
|
||||
static recursive_mutex_t cyw43_mutex;
|
||||
semaphore_t cyw43_irq_sem;
|
||||
|
||||
// Called in low priority pendsv interrupt only to do lwip processing and check cyw43 sleep
|
||||
static void periodic_worker(void)
|
||||
{
|
||||
#if CYW43_USE_STATS
|
||||
static uint32_t counter;
|
||||
if (counter++ % (30000 / LWIP_SYS_CHECK_MS) == 0) {
|
||||
cyw43_dump_stats();
|
||||
}
|
||||
#endif
|
||||
|
||||
CYW43_STAT_INC(LWIP_RUN_COUNT);
|
||||
#if CYW43_LWIP
|
||||
sys_check_timeouts();
|
||||
#endif
|
||||
if (cyw43_poll) {
|
||||
if (cyw43_sleep > 0) {
|
||||
if (--cyw43_sleep == 0) {
|
||||
low_prio_irq_schedule_dispatch(CYW43_DISPATCH_SLOT_CYW43, cyw43_poll);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Regular callback to get lwip to check for timeouts
|
||||
static int64_t periodic_alarm_handler(__unused alarm_id_t id, __unused void *user_data)
|
||||
{
|
||||
// Do lwip processing in low priority pendsv interrupt
|
||||
low_prio_irq_schedule_dispatch(CYW43_DISPATCH_SLOT_ADAPTER, periodic_worker);
|
||||
return CYW43_SLEEP_CHECK_MS * 1000;
|
||||
}
|
||||
|
||||
void cyw43_await_background_or_timeout_us(uint32_t timeout_us) {
|
||||
// if we are called from within an IRQ, then don't wait (we are only ever called in a polling loop)
|
||||
if (!__get_current_exception()) {
|
||||
sem_acquire_timeout_us(&cyw43_irq_sem, timeout_us);
|
||||
}
|
||||
}
|
||||
|
||||
// GPIO interrupt handler to tell us the cyw43 has work to do
|
||||
static void gpio_irq_handler(void)
|
||||
{
|
||||
uint32_t events = gpio_get_irq_event_mask(CYW43_PIN_WL_HOST_WAKE);
|
||||
if (events & GPIO_IRQ_LEVEL_HIGH) {
|
||||
// As we use a high level interrupt, it will go off forever until it's serviced
|
||||
// So disable the interrupt until this is done. It's re-enabled again by CYW43_POST_POLL_HOOK
|
||||
// which is called at the end of cyw43_poll_func
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, false);
|
||||
// also clear the force bit which we use to programmatically cause this handler to fire (on the right core)
|
||||
io_irq_ctrl_hw_t *irq_ctrl_base = get_core_num() ?
|
||||
&iobank0_hw->proc1_irq_ctrl : &iobank0_hw->proc0_irq_ctrl;
|
||||
hw_clear_bits(&irq_ctrl_base->intf[CYW43_PIN_WL_HOST_WAKE/8], GPIO_IRQ_LEVEL_HIGH << (4 * (CYW43_PIN_WL_HOST_WAKE & 7)));
|
||||
low_prio_irq_schedule_dispatch(CYW43_DISPATCH_SLOT_CYW43, cyw43_poll);
|
||||
CYW43_STAT_INC(IRQ_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
// Low priority interrupt handler to perform background processing
|
||||
static void low_priority_irq_handler(void) {
|
||||
assert(cyw43_core_num == get_core_num());
|
||||
if (recursive_mutex_try_enter(&cyw43_mutex, NULL)) {
|
||||
if (recursive_mutex_enter_count(&cyw43_mutex) != 1) {
|
||||
low_priority_irq_missed = true;
|
||||
CYW43_STAT_INC(PENDSV_DISABLED_COUNT);
|
||||
} else {
|
||||
CYW43_STAT_INC(PENDSV_RUN_COUNT);
|
||||
#ifndef NDEBUG
|
||||
in_low_priority_irq = true;
|
||||
#endif
|
||||
for (size_t i = 0; i < count_of(low_priority_irq_dispatch_slots); i++) {
|
||||
if (low_priority_irq_dispatch_slots[i] != NULL) {
|
||||
low_prio_irq_dispatch_t f = low_priority_irq_dispatch_slots[i];
|
||||
low_priority_irq_dispatch_slots[i] = NULL;
|
||||
f();
|
||||
}
|
||||
}
|
||||
#ifndef NDEBUG
|
||||
in_low_priority_irq = false;
|
||||
#endif
|
||||
}
|
||||
recursive_mutex_exit(&cyw43_mutex);
|
||||
} else {
|
||||
CYW43_STAT_INC(PENDSV_DISABLED_COUNT);
|
||||
low_priority_irq_missed = true;
|
||||
}
|
||||
sem_release(&cyw43_irq_sem);
|
||||
}
|
||||
|
||||
static bool low_prio_irq_init(uint8_t priority) {
|
||||
assert(get_core_num() == cyw43_core_num);
|
||||
int irq = user_irq_claim_unused(false);
|
||||
if (irq < 0) return false;
|
||||
low_priority_irq_num = (uint8_t) irq;
|
||||
irq_set_exclusive_handler(low_priority_irq_num, low_priority_irq_handler);
|
||||
irq_set_enabled(low_priority_irq_num, true);
|
||||
irq_set_priority(low_priority_irq_num, priority);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void low_prio_irq_deinit(void) {
|
||||
if (low_priority_irq_num > 0) {
|
||||
irq_set_enabled(low_priority_irq_num, false);
|
||||
irq_remove_handler(low_priority_irq_num, low_priority_irq_handler);
|
||||
user_irq_unclaim(low_priority_irq_num);
|
||||
low_priority_irq_num = 0;
|
||||
}
|
||||
async_context_t *cyw43_arch_init_default_async_context(void) {
|
||||
async_context_threadsafe_background_config_t config = async_context_threadsafe_background_default_config();
|
||||
if (async_context_threadsafe_background_init(&cyw43_async_context_threadsafe_background, &config))
|
||||
return &cyw43_async_context_threadsafe_background.core;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int cyw43_arch_init(void) {
|
||||
cyw43_core_num = get_core_num();
|
||||
recursive_mutex_init(&cyw43_mutex);
|
||||
cyw43_init(&cyw43_state);
|
||||
sem_init(&cyw43_irq_sem, 0, 1);
|
||||
|
||||
// Start regular lwip callback to handle timeouts
|
||||
periodic_alarm = add_alarm_in_us(CYW43_SLEEP_CHECK_MS * 1000, periodic_alarm_handler, NULL, true);
|
||||
if (periodic_alarm < 0) {
|
||||
return PICO_ERROR_GENERIC;
|
||||
async_context_t *context = cyw43_arch_async_context();
|
||||
if (!context) {
|
||||
context = cyw43_arch_init_default_async_context();
|
||||
if (!context) return PICO_ERROR_GENERIC;
|
||||
cyw43_arch_set_async_context(context);
|
||||
}
|
||||
|
||||
gpio_add_raw_irq_handler_with_order_priority(CYW43_PIN_WL_HOST_WAKE, gpio_irq_handler, CYW43_GPIO_IRQ_HANDLER_PRIORITY);
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, true);
|
||||
irq_set_enabled(IO_IRQ_BANK0, true);
|
||||
|
||||
bool ok = cyw43_driver_init(context);
|
||||
#if CYW43_LWIP
|
||||
lwip_init();
|
||||
ok &= lwip_nosys_init(context);
|
||||
#endif
|
||||
// start low priority handler (no background work is done before this)
|
||||
bool ok = low_prio_irq_init(PICO_LOWEST_IRQ_PRIORITY);
|
||||
if (!ok) {
|
||||
cyw43_arch_deinit();
|
||||
return PICO_ERROR_GENERIC;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
return PICO_OK;
|
||||
}
|
||||
|
||||
void cyw43_arch_deinit(void) {
|
||||
if (periodic_alarm >= 0) {
|
||||
cancel_alarm(periodic_alarm);
|
||||
periodic_alarm = -1;
|
||||
}
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, false);
|
||||
gpio_remove_raw_irq_handler(CYW43_PIN_WL_HOST_WAKE, gpio_irq_handler);
|
||||
low_prio_irq_deinit();
|
||||
cyw43_deinit(&cyw43_state);
|
||||
}
|
||||
|
||||
void cyw43_post_poll_hook(void) {
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, true);
|
||||
}
|
||||
|
||||
// This is called in the gpio and low_prio_irq interrupts and on either core
|
||||
static void low_prio_irq_schedule_dispatch(size_t slot, low_prio_irq_dispatch_t f) {
|
||||
assert(slot < count_of(low_priority_irq_dispatch_slots));
|
||||
low_priority_irq_dispatch_slots[slot] = f;
|
||||
if (cyw43_core_num == get_core_num()) {
|
||||
//on same core, can dispatch directly
|
||||
irq_set_pending(low_priority_irq_num);
|
||||
} else {
|
||||
// on wrong core, so force via GPIO IRQ which itself calls this method for the CYW43 slot.
|
||||
// since the CYW43 slot always uses the same function, this is fine with the addition of an
|
||||
// extra (but harmless) CYW43 slot call when another SLOT is invoked.
|
||||
// We could do better, but would have to track why the IRQ was called.
|
||||
io_irq_ctrl_hw_t *irq_ctrl_base = cyw43_core_num ?
|
||||
&iobank0_hw->proc1_irq_ctrl : &iobank0_hw->proc0_irq_ctrl;
|
||||
hw_set_bits(&irq_ctrl_base->intf[CYW43_PIN_WL_HOST_WAKE/8], GPIO_IRQ_LEVEL_HIGH << (4 * (CYW43_PIN_WL_HOST_WAKE & 7)));
|
||||
}
|
||||
}
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(void (*func)(void)) {
|
||||
low_prio_irq_schedule_dispatch(CYW43_DISPATCH_SLOT_CYW43, func);
|
||||
}
|
||||
|
||||
// Prevent background processing in pendsv and access by the other core
// These methods are called in pendsv context and on either core
|
||||
// They can be called recursively
|
||||
void cyw43_thread_enter(void) {
|
||||
// Lock the other core and stop low_prio_irq running
|
||||
recursive_mutex_enter_blocking(&cyw43_mutex);
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
void cyw43_thread_lock_check(void) {
|
||||
// Lock the other core and stop low_prio_irq running
|
||||
if (recursive_mutex_enter_count(&cyw43_mutex) < 1 || recursive_mutex_owner(&cyw43_mutex) != lock_get_caller_owner_id()) {
|
||||
panic("cyw43_thread_lock_check failed");
|
||||
}
|
||||
}
|
||||
async_context_t *context = cyw43_arch_async_context();
|
||||
// there is a bit of a circular dependency here between lwIP and cyw43_driver. We
|
||||
// shut down cyw43_driver first as it has IRQs calling back into lwIP. Also lwIP itself
|
||||
// does not actually get shut down.
|
||||
// todo add a "pause" method to async_context if we need to provide some atomicity (we
|
||||
// don't want to take the lock as these methods may invoke execute_sync())
|
||||
cyw43_driver_deinit(context);
|
||||
#if CYW43_LWIP
|
||||
lwip_nosys_deinit(context);
|
||||
#endif
|
||||
|
||||
// Re-enable background processing
|
||||
void cyw43_thread_exit(void) {
|
||||
// Run low_prio_irq if needed
|
||||
if (1 == recursive_mutex_enter_count(&cyw43_mutex)) {
|
||||
// note the outer release of the mutex is not via cyw43_exit in the low_priority_irq case (it is a direct mutex exit)
|
||||
assert(!in_low_priority_irq);
|
||||
// if (low_priority_irq_missed) {
|
||||
// low_priority_irq_missed = false;
|
||||
if (low_priority_irq_dispatch_slots[CYW43_DISPATCH_SLOT_CYW43]) {
|
||||
low_prio_irq_schedule_dispatch(CYW43_DISPATCH_SLOT_CYW43, cyw43_poll);
|
||||
}
|
||||
// }
|
||||
}
|
||||
recursive_mutex_exit(&cyw43_mutex);
|
||||
}
|
||||
|
||||
|
||||
static void cyw43_delay_until(absolute_time_t until) {
|
||||
// sleep can be called in IRQs, so there's not much we can do there
|
||||
if (__get_current_exception()) {
|
||||
busy_wait_until(until);
|
||||
} else {
|
||||
sleep_until(until);
|
||||
// if it is our context, then we de-init it.
|
||||
if (context == &cyw43_async_context_threadsafe_background.core) {
|
||||
async_context_deinit(context);
|
||||
cyw43_arch_set_async_context(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void cyw43_delay_ms(uint32_t ms) {
|
||||
cyw43_delay_until(make_timeout_time_ms(ms));
|
||||
}
|
||||
|
||||
void cyw43_delay_us(uint32_t us) {
|
||||
cyw43_delay_until(make_timeout_time_us(us));
|
||||
}
|
||||
|
||||
void cyw43_arch_poll() {
|
||||
// should not be necessary
|
||||
// if (cyw43_poll) {
|
||||
// low_prio_irq_schedule_dispatch(CYW43_DISPATCH_SLOT_CYW43, cyw43_poll);
|
||||
// }
|
||||
}
|
||||
|
||||
#if !CYW43_LWIP
|
||||
static void no_lwip_fail() {
|
||||
panic("You cannot use IP with pico_cyw43_arch_none");
|
||||
}
|
||||
void cyw43_cb_tcpip_init(cyw43_t *self, int itf) {
|
||||
}
|
||||
void cyw43_cb_tcpip_deinit(cyw43_t *self, int itf) {
|
||||
}
|
||||
void cyw43_cb_tcpip_set_link_up(cyw43_t *self, int itf) {
|
||||
no_lwip_fail();
|
||||
}
|
||||
void cyw43_cb_tcpip_set_link_down(cyw43_t *self, int itf) {
|
||||
no_lwip_fail();
|
||||
}
|
||||
void cyw43_cb_process_ethernet(void *cb_data, int itf, size_t len, const uint8_t *buf) {
|
||||
no_lwip_fail();
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@ -104,12 +104,12 @@ extern "C" {
|
||||
#define PARAM_ASSERTIONS_ENABLED_CYW43_ARCH 0
|
||||
#endif
|
||||
|
||||
// PICO_CONFIG: CYW43_ARCH_DEBUG_ENABLED, Enable/disable some debugging output in the pico_cyw43_arch module, type=bool, default=1 in debug builds, group=pico_cyw43_arch
|
||||
#ifndef CYW43_ARCH_DEBUG_ENABLED
|
||||
// PICO_CONFIG: PICO_CYW43_ARCH_DEBUG_ENABLED, Enable/disable some debugging output in the pico_cyw43_arch module, type=bool, default=1 in debug builds, group=pico_cyw43_arch
|
||||
#ifndef PICO_CYW43_ARCH_DEBUG_ENABLED
|
||||
#ifndef NDEBUG
|
||||
#define CYW43_ARCH_DEBUG_ENABLED 1
|
||||
#define PICO_CYW43_ARCH_DEBUG_ENABLED 1
|
||||
#else
|
||||
#define CYW43_ARCH_DEBUG_ENABLED 0
|
||||
#define PICO_CYW43_ARCH_DEBUG_ENABLED 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@ -129,6 +129,11 @@ extern "C" {
|
||||
* \note this method initializes wireless with a country code of \c PICO_CYW43_ARCH_DEFAULT_COUNTRY_CODE
|
||||
* which defaults to \c CYW43_COUNTRY_WORLDWIDE. Worldwide settings may not give the best performance; consider
|
||||
* setting PICO_CYW43_ARCH_DEFAULT_COUNTRY_CODE to a different value or calling \ref cyw43_arch_init_with_country
|
||||
*
|
||||
* By default this method initializes the cyw43_arch code's own \ref async_context by calling
|
||||
* \ref cyw43_arch_init_default_async_context, however the user can specify use of their own async_context
|
||||
* by calling \ref cyw43_arch_set_async_context() before calling this method
|
||||
*
|
||||
* \return 0 if the initialization is successful, an error code otherwise \see pico_error_codes
|
||||
*/
|
||||
int cyw43_arch_init(void);
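/*
 * Example: a minimal bring-up/tear-down sketch (an illustration, assuming a Pico W build
 * that links one of the pico_cyw43_arch libraries; stdio_init_all() comes from pico_stdlib):
 *
 *     #include "pico/stdlib.h"
 *     #include "pico/cyw43_arch.h"
 *
 *     int main(void) {
 *         stdio_init_all();
 *         if (cyw43_arch_init() != 0) {   // creates and adopts the default async_context
 *             return -1;
 *         }
 *         cyw43_arch_enable_sta_mode();
 *         // ... connect and run the application ...
 *         cyw43_arch_deinit();            // also de-inits the default async_context
 *         return 0;
 *     }
 */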
|
||||
@ -141,31 +146,15 @@ int cyw43_arch_init(void);
|
||||
* was enabled at build time). This method must be called prior to using any other \c pico_cyw43_arch,
|
||||
* \c cyw43_driver or lwIP functions.
|
||||
*
|
||||
* By default this method initializes the cyw43_arch code's own \ref async_context by calling
|
||||
* \ref cyw43_arch_init_default_async_context, however the user can specify use of their own async_context
|
||||
* by calling \ref cyw43_arch_set_async_context() before calling this method
|
||||
*
|
||||
* \param country the country code to use (see \ref CYW43_COUNTRY_)
|
||||
* \return 0 if the initialization is successful, an error code otherwise \see pico_error_codes
|
||||
*/
|
||||
int cyw43_arch_init_with_country(uint32_t country);
|
||||
|
||||
/*!
|
||||
* \brief Enables Wi-Fi STA (Station) mode.
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* This enables the Wi-Fi in \em Station mode such that connections can be made to other Wi-Fi Access Points
|
||||
*/
|
||||
void cyw43_arch_enable_sta_mode(void);
|
||||
|
||||
/*!
|
||||
* \brief Enables Wi-Fi AP (Access point) mode.
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* This enables the Wi-Fi in \em Access \em Point mode such that connections can be made to the device by other Wi-Fi clients
|
||||
* \param ssid the name for the access point
|
||||
* \param password the password to use or NULL for no password.
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
*/
|
||||
void cyw43_arch_enable_ap_mode(const char *ssid, const char *password, uint32_t auth);
|
||||
|
||||
/*!
|
||||
* \brief De-initialize the CYW43 architecture
|
||||
* \ingroup pico_cyw43_arch
|
||||
@ -173,80 +162,41 @@ void cyw43_arch_enable_ap_mode(const char *ssid, const char *password, uint32_t
|
||||
* This method de-initializes the `cyw43_driver` code and de-initializes the lwIP stack (if it
|
||||
* was enabled at build time). Note this method should always be called from the same core (or RTOS
|
||||
* task, depending on the environment) as \ref cyw43_arch_init.
|
||||
*
|
||||
* Additionally if the cyw43_arch is using its own async_context instance, then that instance is de-initialized.
|
||||
*/
|
||||
void cyw43_arch_deinit(void);
|
||||
|
||||
/*!
|
||||
* \brief Attempt to connect to a wireless access point, blocking until the network is joined or a failure is detected.
|
||||
* \brief Return the async_context currently in use by the cyw43_arch code
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* \param ssid the network name to connect to
|
||||
* \param pw the network password or NULL if there is no password required
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
*
|
||||
* \return 0 if the initialization is successful, an error code otherwise \see pico_error_codes
|
||||
* \return the async_context.
|
||||
*/
|
||||
int cyw43_arch_wifi_connect_blocking(const char *ssid, const char *pw, uint32_t auth);
|
||||
async_context_t *cyw43_arch_async_context(void);
|
||||
|
||||
/*!
|
||||
* \brief Attempt to connect to a wireless access point, blocking until the network is joined, a failure is detected or a timeout occurs
|
||||
* \brief Set the async_context to be used by cyw43_arch_init
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* \param ssid the network name to connect to
|
||||
* \param pw the network password or NULL if there is no password required
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
* \note This method must be called before calling cyw43_arch_init or cyw43_arch_init_with_country
|
||||
* if you wish to use a custom async_context instance.
|
||||
*
|
||||
* \return 0 if the initialization is successful, an error code otherwise \see pico_error_codes
|
||||
* \param context the async_context to be used
|
||||
*/
|
||||
int cyw43_arch_wifi_connect_timeout_ms(const char *ssid, const char *pw, uint32_t auth, uint32_t timeout_ms);
|
||||
void cyw43_arch_set_async_context(async_context_t *context);
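/*
 * Example: supplying your own async_context before init (a sketch; it reuses the
 * threadsafe_background context type added in this commit, other context types work the same way):
 *
 *     #include "pico/cyw43_arch.h"
 *     #include "pico/async_context_threadsafe_background.h"
 *
 *     static async_context_threadsafe_background_t my_context;
 *
 *     static int wifi_setup(void) {
 *         async_context_threadsafe_background_config_t cfg = async_context_threadsafe_background_default_config();
 *         if (!async_context_threadsafe_background_init(&my_context, &cfg)) return -1;
 *         cyw43_arch_set_async_context(&my_context.core); // must be called before cyw43_arch_init
 *         return cyw43_arch_init();
 *     }
 */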
|
||||
|
||||
/*!
|
||||
* \brief Start attempting to connect to a wireless access point
|
||||
* \brief Initialize the default \ref async_context for the current cyw43_arch type
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* This method tells the CYW43 driver to start connecting to an access point. You should subsequently check the
|
||||
* status by calling \ref cyw43_wifi_link_status.
|
||||
* This method initializes and returns a pointer to the static async_context associated
|
||||
* with cyw43_arch. This method is called by \ref cyw43_arch_init automatically
|
||||
* if a different async_context has not been set by \ref cyw43_arch_set_async_context
|
||||
*
|
||||
* \param ssid the network name to connect to
|
||||
* \param pw the network password or NULL if there is no password required
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
*
|
||||
* \return 0 if the scan was started successfully, an error code otherwise \see pico_error_codes
|
||||
* \return the context or NULL if initialization failed.
|
||||
*/
|
||||
int cyw43_arch_wifi_connect_async(const char *ssid, const char *pw, uint32_t auth);
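/*
 * Example: an asynchronous connect with a polled status check (a sketch; it assumes the lower
 * level cyw43_driver API cyw43_wifi_link_status(&cyw43_state, CYW43_ITF_STA) and the
 * CYW43_LINK_UP status value, plus CYW43_AUTH_WPA2_AES_PSK authorization):
 *
 *     if (cyw43_arch_wifi_connect_async("myssid", "mypassword", CYW43_AUTH_WPA2_AES_PSK) == 0) {
 *         while (cyw43_wifi_link_status(&cyw43_state, CYW43_ITF_STA) != CYW43_LINK_UP) {
 *             // a real application should also check for failure and give up eventually
 *             cyw43_arch_poll();                                     // no-op for non-polling variants
 *             cyw43_arch_wait_for_work_until(make_timeout_time_ms(100));
 *         }
 *     }
 */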
|
||||
|
||||
/*!
|
||||
* \brief Return the country code used to initialize cyw43_arch
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* \return the country code (see \ref CYW43_COUNTRY_)
|
||||
*/
|
||||
uint32_t cyw43_arch_get_country_code(void);
|
||||
|
||||
/*!
|
||||
* \brief Set a GPIO pin on the wireless chip to a given value
|
||||
* \ingroup pico_cyw43_arch
|
||||
* \note this method does not check for errors setting the GPIO. You can use the lower level \ref cyw43_gpio_set instead if you wish
|
||||
* to check for errors.
|
||||
*
|
||||
* \param wl_gpio the GPIO number on the wireless chip
|
||||
* \param value true to set the GPIO, false to clear it.
|
||||
*/
|
||||
void cyw43_arch_gpio_put(uint wl_gpio, bool value);
|
||||
|
||||
/*!
|
||||
* \brief Read the value of a GPIO pin on the wireless chip
|
||||
* \ingroup pico_cyw43_arch
|
||||
* \note this method does not check for errors reading the GPIO. You can use the lower level \ref cyw43_gpio_get instead if you wish
|
||||
* to check for errors.
|
||||
*
|
||||
* \param wl_gpio the GPIO number on the wireless chip
|
||||
* \return true if the GPIO is high, false otherwise
|
||||
*/
|
||||
bool cyw43_arch_gpio_get(uint wl_gpio);
|
||||
async_context_t *cyw43_arch_init_default_async_context(void);
|
||||
|
||||
/*!
|
||||
* \brief Perform any processing required by the \c cyw43_driver or the TCP/IP stack
|
||||
@ -258,6 +208,18 @@ bool cyw43_arch_gpio_get(uint wl_gpio);
|
||||
*/
|
||||
void cyw43_arch_poll(void);
|
||||
|
||||
/*!
|
||||
* \brief Sleep until there is cyw43_driver work to be done
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* This method may be called by code that is waiting for an event to
|
||||
* come from the cyw43_driver, and has no work to do, but would like
|
||||
* to sleep without blocking any background work associated with the cyw43_driver.
|
||||
*
|
||||
* \param until the time to wait until if there is no work to do.
|
||||
*/
|
||||
void cyw43_arch_wait_for_work_until(absolute_time_t until);
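/*
 * Example: the canonical servicing loop for the polling variant (a sketch):
 *
 *     for (;;) {
 *         cyw43_arch_poll();                                         // service cyw43_driver and lwIP
 *         cyw43_arch_wait_for_work_until(make_timeout_time_ms(10));  // sleep until work or timeout
 *     }
 */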
|
||||
|
||||
/*!
|
||||
* \fn cyw43_arch_lwip_begin
|
||||
* \brief Acquire any locks required to call into lwIP
|
||||
@ -321,6 +283,98 @@ void cyw43_arch_poll(void);
|
||||
* \sa cyw43_arch_lwip_protect
|
||||
*/
|
||||
|
||||
/*!
|
||||
* \brief Return the country code used to initialize cyw43_arch
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* \return the country code (see \ref CYW43_COUNTRY_)
|
||||
*/
|
||||
uint32_t cyw43_arch_get_country_code(void);
|
||||
|
||||
/*!
|
||||
* \brief Enables Wi-Fi STA (Station) mode.
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* This enables the Wi-Fi in \em Station mode such that connections can be made to other Wi-Fi Access Points
|
||||
*/
|
||||
void cyw43_arch_enable_sta_mode(void);
|
||||
|
||||
/*!
|
||||
* \brief Enables Wi-Fi AP (Access point) mode.
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* This enables the Wi-Fi in \em Access \em Point mode such that connections can be made to the device by other Wi-Fi clients
|
||||
* \param ssid the name for the access point
|
||||
* \param password the password to use or NULL for no password.
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
*/
|
||||
void cyw43_arch_enable_ap_mode(const char *ssid, const char *password, uint32_t auth);
|
||||
|
||||
/*!
|
||||
* \brief Attempt to connect to a wireless access point, blocking until the network is joined or a failure is detected.
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* \param ssid the network name to connect to
|
||||
* \param pw the network password or NULL if there is no password required
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
*
|
||||
* \return 0 if the initialization is successful, an error code otherwise \see pico_error_codes
|
||||
*/
|
||||
int cyw43_arch_wifi_connect_blocking(const char *ssid, const char *pw, uint32_t auth);
|
||||
|
||||
/*!
|
||||
* \brief Attempt to connect to a wireless access point, blocking until the network is joined, a failure is detected or a timeout occurs
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* \param ssid the network name to connect to
|
||||
* \param pw the network password or NULL if there is no password required
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
*
|
||||
* \param timeout_ms how long to wait in milliseconds for a connection to succeed before giving up
* \return 0 if the initialization is successful, an error code otherwise \see pico_error_codes
*/
int cyw43_arch_wifi_connect_timeout_ms(const char *ssid, const char *pw, uint32_t auth, uint32_t timeout_ms);
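/*
 * Example: a blocking connect with a 30 second timeout (a sketch, assuming WPA2 authorization
 * and stdio already initialized):
 *
 *     if (cyw43_arch_wifi_connect_timeout_ms("myssid", "mypassword", CYW43_AUTH_WPA2_AES_PSK, 30000) != 0) {
 *         printf("failed to connect\n");
 *     }
 */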
|
||||
|
||||
/*!
|
||||
* \brief Start attempting to connect to a wireless access point
|
||||
* \ingroup pico_cyw43_arch
|
||||
*
|
||||
* This method tells the CYW43 driver to start connecting to an access point. You should subsequently check the
|
||||
* status by calling \ref cyw43_wifi_link_status.
|
||||
*
|
||||
* \param ssid the network name to connect to
|
||||
* \param pw the network password or NULL if there is no password required
|
||||
* \param auth the authorization type to use when the password is enabled. Values are \ref CYW43_AUTH_WPA_TKIP_PSK,
|
||||
* \ref CYW43_AUTH_WPA2_AES_PSK, or \ref CYW43_AUTH_WPA2_MIXED_PSK (see \ref CYW43_AUTH_)
|
||||
*
|
||||
* \return 0 if the scan was started successfully, an error code otherwise \see pico_error_codes
|
||||
*/
|
||||
int cyw43_arch_wifi_connect_async(const char *ssid, const char *pw, uint32_t auth);
|
||||
|
||||
/*!
|
||||
* \brief Set a GPIO pin on the wireless chip to a given value
|
||||
* \ingroup pico_cyw43_arch
|
||||
* \note this method does not check for errors setting the GPIO. You can use the lower level \ref cyw43_gpio_set instead if you wish
|
||||
* to check for errors.
|
||||
*
|
||||
* \param wl_gpio the GPIO number on the wireless chip
|
||||
* \param value true to set the GPIO, false to clear it.
|
||||
*/
|
||||
void cyw43_arch_gpio_put(uint wl_gpio, bool value);
|
||||
|
||||
/*!
|
||||
* \brief Read the value of a GPIO pin on the wireless chip
|
||||
* \ingroup pico_cyw43_arch
|
||||
* \note this method does not check for errors reading the GPIO. You can use the lower level \ref cyw43_gpio_get instead if you wish
|
||||
* to check for errors.
|
||||
*
|
||||
* \param wl_gpio the GPIO number on the wireless chip
|
||||
* \return true if the GPIO is high, false otherwise
|
||||
*/
|
||||
bool cyw43_arch_gpio_get(uint wl_gpio);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include "pico/time.h"
|
||||
#include "hardware/gpio.h"
|
||||
#include "pico/error.h"
|
||||
#include "pico/async_context.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@ -66,6 +67,62 @@ void cyw43_hal_get_mac(int idx, uint8_t buf[6]);
|
||||
|
||||
void cyw43_hal_generate_laa_mac(int idx, uint8_t buf[6]);
|
||||
|
||||
|
||||
void cyw43_thread_enter(void);
|
||||
void cyw43_thread_exit(void);
|
||||
|
||||
#define CYW43_THREAD_ENTER cyw43_thread_enter();
|
||||
#define CYW43_THREAD_EXIT cyw43_thread_exit();
|
||||
#ifndef NDEBUG
|
||||
void cyw43_thread_lock_check(void);
|
||||
#define cyw43_arch_lwip_check() cyw43_thread_lock_check()
|
||||
#define CYW43_THREAD_LOCK_CHECK cyw43_arch_lwip_check();
|
||||
#else
|
||||
#define cyw43_arch_lwip_check() ((void)0)
|
||||
#define CYW43_THREAD_LOCK_CHECK
|
||||
#endif
|
||||
|
||||
void cyw43_await_background_or_timeout_us(uint32_t timeout_us);
|
||||
// todo not 100% sure about the timeouts here; MP uses __WFI which will always wakeup periodically
|
||||
#define CYW43_SDPCM_SEND_COMMON_WAIT cyw43_await_background_or_timeout_us(1000);
|
||||
#define CYW43_DO_IOCTL_WAIT cyw43_await_background_or_timeout_us(1000);
|
||||
|
||||
void cyw43_delay_ms(uint32_t ms);
|
||||
void cyw43_delay_us(uint32_t us);
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(void (*func)(void));
|
||||
|
||||
void cyw43_post_poll_hook(void);
|
||||
#define CYW43_POST_POLL_HOOK cyw43_post_poll_hook();
|
||||
|
||||
static inline void cyw43_arch_lwip_begin(void) {
|
||||
cyw43_thread_enter();
|
||||
}
|
||||
|
||||
static inline void cyw43_arch_lwip_end(void) {
|
||||
cyw43_thread_exit();
|
||||
}
|
||||
|
||||
static inline int cyw43_arch_lwip_protect(int (*func)(void *param), void *param) {
|
||||
cyw43_arch_lwip_begin();
|
||||
int rc = func(param);
|
||||
cyw43_arch_lwip_end();
|
||||
return rc;
|
||||
}
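/*
 * Example: guarding a direct lwIP call from code that does not already hold the lock
 * (a sketch; udp_new() is standard lwIP):
 *
 *     cyw43_arch_lwip_begin();
 *     struct udp_pcb *pcb = udp_new();
 *     cyw43_arch_lwip_end();
 */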
|
||||
|
||||
#if CYW43_USE_BTSTACK
|
||||
static inline int cyw43_bt_init(void) { return 0; }
|
||||
static inline void cyw43_bt_deinit(void) {}
|
||||
|
||||
static inline void cyw43_arch_btstack_begin(void) {
|
||||
cyw43_thread_enter();
|
||||
}
|
||||
|
||||
static inline void cyw43_arch_btstack_end(void) {
|
||||
cyw43_thread_exit();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -13,49 +13,18 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
void cyw43_thread_enter(void);
|
||||
void cyw43_thread_exit(void);
|
||||
|
||||
#define CYW43_THREAD_ENTER cyw43_thread_enter();
|
||||
#define CYW43_THREAD_EXIT cyw43_thread_exit();
|
||||
#ifndef NDEBUG
|
||||
void cyw43_thread_lock_check(void);
|
||||
#define cyw43_arch_lwip_check() cyw43_thread_lock_check()
|
||||
#define CYW43_THREAD_LOCK_CHECK cyw43_arch_lwip_check();
|
||||
#else
|
||||
#define cyw43_arch_lwip_check() ((void)0)
|
||||
#define CYW43_THREAD_LOCK_CHECK
|
||||
// PICO_CONFIG: CYW43_TASK_STACK_SIZE, Stack size for the CYW43 FreeRTOS task in 4-byte words, type=int, default=1024, group=pico_cyw43_arch
|
||||
#ifndef CYW43_TASK_STACK_SIZE
|
||||
#define CYW43_TASK_STACK_SIZE 1024
|
||||
#endif
|
||||
|
||||
void cyw43_await_background_or_timeout_us(uint32_t timeout_us);
|
||||
// todo not 100% sure about the timeouts here; MP uses __WFI which will always wakeup periodically
|
||||
#define CYW43_SDPCM_SEND_COMMON_WAIT cyw43_await_background_or_timeout_us(1000);
|
||||
#define CYW43_DO_IOCTL_WAIT cyw43_await_background_or_timeout_us(1000);
|
||||
|
||||
void cyw43_delay_ms(uint32_t ms);
|
||||
void cyw43_delay_us(uint32_t us);
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(void (*func)(void));
|
||||
|
||||
void cyw43_post_poll_hook(void);
|
||||
#define CYW43_POST_POLL_HOOK cyw43_post_poll_hook();
|
||||
|
||||
static inline void cyw43_arch_lwip_begin(void) {
|
||||
cyw43_thread_enter();
|
||||
}
|
||||
static inline void cyw43_arch_lwip_end(void) {
|
||||
cyw43_thread_exit();
|
||||
}
|
||||
|
||||
static inline int cyw43_arch_lwip_protect(int (*func)(void *param), void *param) {
|
||||
cyw43_arch_lwip_begin();
|
||||
int rc = func(param);
|
||||
cyw43_arch_lwip_end();
|
||||
return rc;
|
||||
}
|
||||
// PICO_CONFIG: CYW43_TASK_PRIORITY, Priority for the CYW43 FreeRTOS task, type=int, default=4, group=pico_cyw43_arch
|
||||
#ifndef CYW43_TASK_PRIORITY
|
||||
#define CYW43_TASK_PRIORITY (tskIDLE_PRIORITY + 4)
|
||||
#endif
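/*
 * For example (a sketch; "my_app" is a placeholder target name), both values can be
 * overridden from the application's CMakeLists.txt:
 *
 *     target_compile_definitions(my_app PRIVATE CYW43_TASK_PRIORITY=8 CYW43_TASK_STACK_SIZE=2048)
 */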
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -15,42 +15,6 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define CYW43_THREAD_ENTER
|
||||
#define CYW43_THREAD_EXIT
|
||||
#ifndef NDEBUG
|
||||
|
||||
void cyw43_thread_check(void);
|
||||
|
||||
#define cyw43_arch_lwip_check() cyw43_thread_check()
|
||||
#define CYW43_THREAD_LOCK_CHECK cyw43_arch_lwip_check();
|
||||
#else
|
||||
#define cyw43_arch_lwip_check() ((void)0)
|
||||
#define CYW43_THREAD_LOCK_CHECK
|
||||
#endif
|
||||
|
||||
#define CYW43_SDPCM_SEND_COMMON_WAIT cyw43_poll_required = true;
|
||||
#define CYW43_DO_IOCTL_WAIT cyw43_poll_required = true;
|
||||
|
||||
#define cyw43_delay_ms sleep_ms
|
||||
#define cyw43_delay_us sleep_us
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(void (*func)(void));
|
||||
|
||||
void cyw43_post_poll_hook(void);
|
||||
|
||||
extern bool cyw43_poll_required;
|
||||
|
||||
#define CYW43_POST_POLL_HOOK cyw43_post_poll_hook();
|
||||
#endif
|
||||
|
||||
#ifndef DOXYGEN_GENERATION // multiple definitions in separate headers seem to confuse doxygen
|
||||
#define cyw43_arch_lwip_begin() ((void)0)
|
||||
#define cyw43_arch_lwip_end() ((void)0)
|
||||
|
||||
static inline int cyw43_arch_lwip_protect(int (*func)(void *param), void *param) {
|
||||
return func(param);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -13,53 +13,6 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
void cyw43_thread_enter(void);
|
||||
|
||||
void cyw43_thread_exit(void);
|
||||
|
||||
#define CYW43_THREAD_ENTER cyw43_thread_enter();
|
||||
#define CYW43_THREAD_EXIT cyw43_thread_exit();
|
||||
#ifndef NDEBUG
|
||||
|
||||
void cyw43_thread_lock_check(void);
|
||||
|
||||
#define cyw43_arch_lwip_check() cyw43_thread_lock_check()
|
||||
#define CYW43_THREAD_LOCK_CHECK cyw43_arch_lwip_check();
|
||||
#else
|
||||
#define cyw43_arch_lwip_check() ((void)0)
|
||||
#define CYW43_THREAD_LOCK_CHECK
|
||||
#endif
|
||||
|
||||
void cyw43_await_background_or_timeout_us(uint32_t timeout_us);
|
||||
// todo not 100% sure about the timeouts here; MP uses __WFI which will always wakeup periodically
|
||||
#define CYW43_SDPCM_SEND_COMMON_WAIT cyw43_await_background_or_timeout_us(1000);
|
||||
#define CYW43_DO_IOCTL_WAIT cyw43_await_background_or_timeout_us(1000);
|
||||
|
||||
void cyw43_delay_ms(uint32_t ms);
|
||||
|
||||
void cyw43_delay_us(uint32_t us);
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(void (*func)(void));
|
||||
|
||||
void cyw43_post_poll_hook(void);
|
||||
|
||||
#define CYW43_POST_POLL_HOOK cyw43_post_poll_hook();
|
||||
|
||||
static inline void cyw43_arch_lwip_begin(void) {
|
||||
cyw43_thread_enter();
|
||||
}
|
||||
|
||||
static inline void cyw43_arch_lwip_end(void) {
|
||||
cyw43_thread_exit();
|
||||
}
|
||||
|
||||
static inline int cyw43_arch_lwip_protect(int (*func)(void *param), void *param) {
|
||||
cyw43_arch_lwip_begin();
|
||||
int rc = func(param);
|
||||
cyw43_arch_lwip_end();
|
||||
return rc;
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -21,19 +21,24 @@ if (EXISTS ${PICO_CYW43_DRIVER_PATH}/${CYW43_DRIVER_TEST_FILE})
|
||||
pico_register_common_scope_var(PICO_CYW43_DRIVER_PATH)
|
||||
|
||||
# base driver without our bus
|
||||
add_library(cyw43_driver_base INTERFACE)
|
||||
target_sources(cyw43_driver_base INTERFACE
|
||||
add_library(cyw43_driver INTERFACE)
|
||||
target_sources(cyw43_driver INTERFACE
|
||||
${PICO_CYW43_DRIVER_PATH}/src/cyw43_ll.c
|
||||
${PICO_CYW43_DRIVER_PATH}/src/cyw43_stats.c
|
||||
${PICO_CYW43_DRIVER_PATH}/src/cyw43_lwip.c
|
||||
${PICO_CYW43_DRIVER_PATH}/src/cyw43_ctrl.c
|
||||
)
|
||||
target_include_directories(cyw43_driver_base INTERFACE
|
||||
target_include_directories(cyw43_driver INTERFACE
|
||||
${PICO_CYW43_DRIVER_PATH}/src
|
||||
${PICO_CYW43_DRIVER_PATH}/firmware
|
||||
)
|
||||
|
||||
# Build the driver for cyw43 for pico w
|
||||
# pico_cyw43_driver adds async_context integration to cyw43_driver
|
||||
add_library(pico_cyw43_driver INTERFACE)
|
||||
target_sources(pico_cyw43_driver INTERFACE
|
||||
cyw43_driver.c)
|
||||
target_include_directories(pico_cyw43_driver INTERFACE ${CMAKE_CURRENT_LIST_DIR}/include)
|
||||
target_link_libraries(pico_cyw43_driver INTERFACE cyw43_driver)
|
||||
|
||||
# Firmware stuff
|
||||
set(CYW43_FIRMWARE_BIN 43439A0-7.95.49.00.combined)
|
||||
@ -60,6 +65,7 @@ if (EXISTS ${PICO_CYW43_DRIVER_PATH}/${CYW43_DRIVER_TEST_FILE})
|
||||
${CYW43_FIRMWARE_BIN} ${CYW43_FIRMWARE_OBJ}
|
||||
VERBATIM)
|
||||
|
||||
# cyw43_driver_picow is cyw43_driver plus Pico W specific bus implementation, and Pico W firmware
|
||||
add_library(cyw43_driver_picow INTERFACE)
|
||||
target_sources(cyw43_driver_picow INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/cyw43_bus_pio_spi.c
|
||||
@ -70,8 +76,7 @@ if (EXISTS ${PICO_CYW43_DRIVER_PATH}/${CYW43_DRIVER_TEST_FILE})
|
||||
${CYW43_FIRMWARE_OBJ}
|
||||
)
|
||||
target_link_libraries(cyw43_driver_picow INTERFACE
|
||||
cyw43_driver_base
|
||||
pico_stdlib
|
||||
cyw43_driver
|
||||
hardware_pio
|
||||
hardware_dma
|
||||
hardware_exception
|
src/rp2_common/pico_cyw43_driver/cyw43_driver.c (new file)
@ -0,0 +1,122 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include "hardware/gpio.h"
|
||||
#include "hardware/irq.h"
|
||||
#include "cyw43.h"
|
||||
#include "pico/cyw43_driver.h"
|
||||
|
||||
#ifndef CYW43_GPIO_IRQ_HANDLER_PRIORITY
|
||||
#define CYW43_GPIO_IRQ_HANDLER_PRIORITY 0x40
|
||||
#endif
|
||||
|
||||
#ifndef CYW43_SLEEP_CHECK_MS
|
||||
#define CYW43_SLEEP_CHECK_MS 50
|
||||
#endif
|
||||
|
||||
static async_context_t *cyw43_async_context;
|
||||
|
||||
static void cyw43_sleep_timeout_reached(async_context_t *context, async_at_time_worker_t *worker);
|
||||
static void cyw43_do_poll(async_context_t *context, async_when_pending_worker_t *worker);
|
||||
|
||||
static async_at_time_worker_t sleep_timeout_worker = {
|
||||
.do_work = cyw43_sleep_timeout_reached
|
||||
};
|
||||
|
||||
static async_when_pending_worker_t cyw43_poll_worker = {
|
||||
.do_work = cyw43_do_poll
|
||||
};
|
||||
|
||||
static void cyw43_set_irq_enabled(bool enabled) {
|
||||
gpio_set_irq_enabled(CYW43_PIN_WL_HOST_WAKE, GPIO_IRQ_LEVEL_HIGH, enabled);
|
||||
}
|
||||
|
||||
// GPIO interrupt handler to tell us the cyw43 has work to do
static void cyw43_gpio_irq_handler(void)
{
    uint32_t events = gpio_get_irq_event_mask(CYW43_PIN_WL_HOST_WAKE);
    if (events & GPIO_IRQ_LEVEL_HIGH) {
        // As we use a high level interrupt, it will go off forever until it's serviced
        // So disable the interrupt until this is done. It's re-enabled again by CYW43_POST_POLL_HOOK
        // which is called at the end of cyw43_poll_func
        cyw43_set_irq_enabled(false);
        async_context_set_work_pending(cyw43_async_context, &cyw43_poll_worker);
    }
}
|
||||
|
||||
uint32_t cyw43_irq_init(__unused void *param) {
|
||||
#ifndef NDEBUG
|
||||
assert(get_core_num() == async_context_core_num(cyw43_async_context));
|
||||
#endif
|
||||
gpio_add_raw_irq_handler_with_order_priority(CYW43_PIN_WL_HOST_WAKE, cyw43_gpio_irq_handler, CYW43_GPIO_IRQ_HANDLER_PRIORITY);
|
||||
cyw43_set_irq_enabled(true);
|
||||
irq_set_enabled(IO_IRQ_BANK0, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t cyw43_irq_deinit(__unused void *param) {
|
||||
#ifndef NDEBUG
|
||||
assert(get_core_num() == async_context_core_num(cyw43_async_context));
|
||||
#endif
|
||||
gpio_remove_raw_irq_handler(CYW43_PIN_WL_HOST_WAKE, cyw43_gpio_irq_handler);
|
||||
cyw43_set_irq_enabled(false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void cyw43_post_poll_hook(void) {
|
||||
#ifndef NDEBUG
|
||||
assert(get_core_num() == async_context_core_num(cyw43_async_context));
|
||||
#endif
|
||||
cyw43_set_irq_enabled(true);
|
||||
}
|
||||
|
||||
void cyw43_schedule_internal_poll_dispatch(__unused void (*func)(void)) {
|
||||
assert(func == cyw43_poll);
|
||||
async_context_set_work_pending(cyw43_async_context, &cyw43_poll_worker);
|
||||
}
|
||||
|
||||
static void cyw43_do_poll(async_context_t *context, __unused async_when_pending_worker_t *worker) {
|
||||
#ifndef NDEBUG
|
||||
assert(get_core_num() == async_context_core_num(cyw43_async_context));
|
||||
#endif
|
||||
if (cyw43_poll) {
|
||||
if (cyw43_sleep > 0) {
|
||||
cyw43_sleep--;
|
||||
}
|
||||
cyw43_poll();
|
||||
if (cyw43_sleep) {
|
||||
async_context_add_at_time_worker_in_ms(context, &sleep_timeout_worker, CYW43_SLEEP_CHECK_MS);
|
||||
} else {
|
||||
async_context_remove_at_time_worker(context, &sleep_timeout_worker);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void cyw43_sleep_timeout_reached(async_context_t *context, async_at_time_worker_t *worker) {
|
||||
assert(context == cyw43_async_context);
|
||||
assert(worker == &sleep_timeout_worker);
|
||||
async_context_set_work_pending(cyw43_async_context, &cyw43_poll_worker);
|
||||
}
|
||||
|
||||
bool cyw43_driver_init(async_context_t *context) {
|
||||
cyw43_init(&cyw43_state);
|
||||
cyw43_async_context = context;
|
||||
// we need the IRQ to be on the same core as the context, because we need to be able to enable/disable the IRQ
|
||||
// from there later
|
||||
async_context_execute_sync(context, cyw43_irq_init, NULL);
|
||||
async_context_add_when_pending_worker(context, &cyw43_poll_worker);
|
||||
return true;
|
||||
}
|
||||
|
||||
void cyw43_driver_deinit(async_context_t *context) {
|
||||
assert(context == cyw43_async_context);
|
||||
async_context_remove_at_time_worker(context, &sleep_timeout_worker);
|
||||
async_context_remove_when_pending_worker(context, &cyw43_poll_worker);
|
||||
// the IRQ IS on the same core as the context, so must be de-initialized there
|
||||
async_context_execute_sync(context, cyw43_irq_deinit, NULL);
|
||||
cyw43_deinit(&cyw43_state);
|
||||
cyw43_async_context = NULL;
|
||||
}
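// The at-time / when-pending workers used above are the general async_context pattern; a
// standalone sketch (illustrative, assuming the async_context_poll variant, its
// "pico/async_context_poll.h" header and the generic async_context_poll() servicing call)
// looks like this:
//
//     #include <stdio.h>
//     #include "pico/async_context_poll.h"
//
//     static void say_hello(__unused async_context_t *context, __unused async_at_time_worker_t *worker) {
//         printf("hello\n");   // runs on the context's core when the deadline is reached
//     }
//     static async_at_time_worker_t hello_worker = { .do_work = say_hello };
//
//     static void run(void) {
//         static async_context_poll_t ctx;
//         async_context_poll_init_with_defaults(&ctx);
//         async_context_add_at_time_worker_in_ms(&ctx.core, &hello_worker, 1000);
//         for (;;) async_context_poll(&ctx.core);  // services due workers
//     }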
|
src/rp2_common/pico_cyw43_driver/include/pico/cyw43_driver.h (new file)
@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _PICO_CYW43_DRIVER_ASYNC_CONTEXT_H
|
||||
#define _PICO_CYW43_DRIVER_ASYNC_CONTEXT_H
|
||||
|
||||
/** \file pico/cyw43_driver.h
|
||||
* \defgroup pico_cyw43_driver pico_cyw43_driver
|
||||
*
|
||||
* A wrapper around the lower level cyw43_driver that integrates it with \ref pico_async_context
|
||||
* for handling background work.
|
||||
*/
|
||||
|
||||
#include "pico.h"
|
||||
#include "pico/async_context.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*! \brief Initializes the lower level cyw43_driver and integrates it with the provided async_context
|
||||
* \ingroup pico_cyw43_driver
|
||||
*
|
||||
* If the initialization succeeds, \ref cyw43_driver_deinit() can be called to shut down the driver support
|
||||
*
|
||||
* \param context the async_context instance that provides the abstraction for handling asynchronous work.
|
||||
* \return true if the initialization succeeded
|
||||
*/
|
||||
bool cyw43_driver_init(async_context_t *context);
|
||||
|
||||
/*! \brief De-initialize the lower level cyw43_driver and unhook it from the async_context
|
||||
* \ingroup pico_cyw43_driver
|
||||
*
|
||||
* \param context the async_context the cyw43_driver support was added to via \ref cyw43_driver_init
|
||||
*/
|
||||
void cyw43_driver_deinit(async_context_t *context);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif
|
@ -6,10 +6,10 @@ endif ()
|
||||
set(LWIP_TEST_PATH "src/Filelists.cmake")
|
||||
if (NOT PICO_LWIP_PATH)
|
||||
set(PICO_LWIP_PATH ${PROJECT_SOURCE_DIR}/lib/lwip)
|
||||
# if (NOT EXISTS ${PICO_LWIP_PATH}/${LWIP_TEST_PATH})
|
||||
# message(WARNING "LWIP submodule has not been initialized; Pico W wireless support will be unavailable
|
||||
if (PICO_CYW43_SUPPORTED AND NOT EXISTS ${PICO_LWIP_PATH}/${LWIP_TEST_PATH})
|
||||
message(WARNING "LWIP submodule has not been initialized; Pico W wireless support will be unavailable
|
||||
#hint: try 'git submodule update --init' from your SDK directory (${PICO_SDK_PATH}).")
|
||||
# endif()
|
||||
endif()
|
||||
elseif (NOT EXISTS ${PICO_LWIP_PATH}/${LWIP_TEST_PATH})
|
||||
message(WARNING "PICO_LWIP_PATH specified but content not present.")
|
||||
endif()
|
||||
@ -46,7 +46,7 @@ if (EXISTS ${PICO_LWIP_PATH}/${LWIP_TEST_PATH})
|
||||
${PICO_LWIP_PATH}/src/core/tcp_out.c
|
||||
${PICO_LWIP_PATH}/src/core/timeouts.c
|
||||
${PICO_LWIP_PATH}/src/core/udp.c
|
||||
${CMAKE_CURRENT_LIST_DIR}/random.c
|
||||
${CMAKE_CURRENT_LIST_DIR}/lwip_random.c
|
||||
)
|
||||
target_include_directories(pico_lwip_core INTERFACE
|
||||
${PICO_LWIP_PATH}/src/include)
|
||||
@ -269,11 +269,12 @@ if (EXISTS ${PICO_LWIP_PATH}/${LWIP_TEST_PATH})
|
||||
# our nosys impl
|
||||
add_library(pico_lwip_nosys INTERFACE)
|
||||
target_sources(pico_lwip_nosys INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/nosys.c
|
||||
${CMAKE_CURRENT_LIST_DIR}/lwip_nosys.c
|
||||
)
|
||||
target_link_libraries(pico_lwip_nosys INTERFACE
|
||||
pico_lwip_arch)
|
||||
|
||||
pico_async_context_base
|
||||
pico_lwip_arch
|
||||
pico_lwip)
|
||||
|
||||
if (NOT PICO_LWIP_CONTRIB_PATH)
|
||||
set(PICO_LWIP_CONTRIB_PATH ${PICO_LWIP_PATH}/contrib)
|
||||
@ -291,5 +292,14 @@ if (EXISTS ${PICO_LWIP_PATH}/${LWIP_TEST_PATH})
|
||||
target_link_libraries(pico_lwip_contrib_freertos INTERFACE
|
||||
pico_lwip_arch)
|
||||
|
||||
add_library(pico_lwip_freertos INTERFACE)
|
||||
target_sources(pico_lwip_freertos INTERFACE
|
||||
${CMAKE_CURRENT_LIST_DIR}/lwip_freertos.c
|
||||
)
|
||||
target_link_libraries(pico_lwip_freertos INTERFACE
|
||||
pico_async_context_base
|
||||
pico_lwip
|
||||
pico_lwip_contrib_freertos)
|
||||
|
||||
pico_promote_common_scope_vars()
|
||||
endif()
|
||||
|
@ -34,9 +34,18 @@
|
||||
|
||||
#include <sys/time.h>
|
||||
|
||||
#ifndef PICO_LWIP_CUSTOM_LOCK_TCPIP_CORE
|
||||
#define PICO_LWIP_CUSTOM_LOCK_TCPIP_CORE 1
|
||||
#endif
|
||||
|
||||
#if NO_SYS
|
||||
// todo really we should just not allow SYS_LIGHTWEIGHT_PROT for nosys mode (it doesn't do anything anyway)
|
||||
typedef int sys_prot_t;
|
||||
#elif PICO_LWIP_CUSTOM_LOCK_TCPIP_CORE
|
||||
void pico_lwip_custom_lock_tcpip_core(void);
|
||||
void pico_lwip_custom_unlock_tcpip_core(void);
|
||||
#define LOCK_TCPIP_CORE() pico_lwip_custom_lock_tcpip_core()
|
||||
#define UNLOCK_TCPIP_CORE() pico_lwip_custom_unlock_tcpip_core()
|
||||
#endif
|
||||
|
||||
/* define compiler specific symbols */
|
||||
@ -76,8 +85,14 @@ typedef int sys_prot_t;
|
||||
#define LWIP_PLATFORM_ASSERT(x) panic(x)
|
||||
#endif
|
||||
|
||||
unsigned int pico_lwip_rand(void);
|
||||
#ifndef LWIP_RAND
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
unsigned int pico_lwip_rand(void);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
// Use ROSC based random number generation, more for the fact that rand() may not be seeded, than anything else
|
||||
#define LWIP_RAND pico_lwip_rand
|
||||
#endif
|
||||
|
src/rp2_common/pico_lwip/include/pico/lwip_freertos.h (new file)
@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _PICO_LWIP_FREERTOS_H
|
||||
#define _PICO_LWIP_FREERTOS_H
|
||||
|
||||
#include "pico.h"
|
||||
#include "pico/async_context.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*! \brief Initializes lwIP (NO_SYS=0 mode) support for FreeRTOS using the provided async_context
|
||||
* \ingroup pico_lwip
|
||||
*
|
||||
* If the initialization succeeds, \ref lwip_freertos_deinit() can be called to shutdown lwIP support
|
||||
*
|
||||
* \param context the async_context instance that provides the abstraction for handling asynchronous work. Note in general
|
||||
* this would be an \ref async_context_freertos instance, though it doesn't have to be.
|
||||
*
|
||||
* \return true if the initialization succeeded
|
||||
*/
|
||||
bool lwip_freertos_init(async_context_t *context);
|
||||
|
||||
/*! \brief De-initialize lwIP (NO_SYS=0 mode) support for FreeRTOS
|
||||
* \ingroup pico_lwip
|
||||
*
|
||||
* Note that since lwIP may only be initialized once, and doesn't itself provide a shutdown mechanism, lwIP
|
||||
* itself may still consume resources.
|
||||
*
|
||||
* It is however safe to call \ref lwip_freertos_init again later.
|
||||
*
|
||||
* \param context the async_context the lwip_freertos support was added to via \ref lwip_freertos_init
|
||||
*/
|
||||
void lwip_freertos_deinit(async_context_t *context);
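/*
 * Example: pairing lwIP support with the cyw43 driver on a shared context (a sketch; the
 * async_context_t would typically come from the FreeRTOS async_context variant,
 * cyw43_driver_init/deinit are from pico/cyw43_driver.h, and the ordering mirrors what
 * cyw43_arch_init/cyw43_arch_deinit do):
 *
 *     bool net_up(async_context_t *context) {
 *         return cyw43_driver_init(context) && lwip_freertos_init(context);
 *     }
 *
 *     void net_down(async_context_t *context) {
 *         cyw43_driver_deinit(context);
 *         lwip_freertos_deinit(context);
 *     }
 */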
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif
|
src/rp2_common/pico_lwip/include/pico/lwip_nosys.h (new file)
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _PICO_LWIP_NOSYS_H
|
||||
#define _PICO_LWIP_NOSYS_H
|
||||
|
||||
#include "pico.h"
|
||||
#include "pico/async_context.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*! \brief Initializes lwIP (NO_SYS=1 mode) support using the provided async_context
|
||||
* \ingroup pico_lwip
|
||||
*
|
||||
* If the initialization succeeds, \ref lwip_nosys_deinit() can be called to shutdown lwIP support
|
||||
*
|
||||
* \param context the async_context instance that provides the abstraction for handling asynchronous work.
|
||||
* \return true if the initialization succeeded
|
||||
*/
|
||||
bool lwip_nosys_init(async_context_t *context);
|
||||
|
||||
/*! \brief De-initialize lwIP (NO_SYS=1 mode) support
|
||||
* \ingroup pico_lwip
|
||||
*
|
||||
* Note that since lwIP may only be initialized once, and doesn't itself provide a shutdown mechanism, lwIP
|
||||
* itself may still consume resources
|
||||
*
|
||||
* It is however safe to call \ref lwip_nosys_init again later.
|
||||
*
|
||||
* \param context the async_context the lwip_nosys support was added to via \ref lwip_nosys_init
|
||||
*/
|
||||
void lwip_nosys_deinit(async_context_t *context);
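/*
 * Example: running NO_SYS lwIP on a polling context (a sketch, assuming the async_context_poll
 * variant and the generic async_context_poll() servicing call; this mirrors what the polling
 * cyw43_arch variant does):
 *
 *     #include "pico/async_context_poll.h"
 *     #include "pico/lwip_nosys.h"
 *
 *     static async_context_poll_t ctx;
 *
 *     void run_lwip(void) {
 *         async_context_poll_init_with_defaults(&ctx);
 *         lwip_nosys_init(&ctx.core);        // hooks lwIP timeout handling into the context
 *         for (;;) {
 *             async_context_poll(&ctx.core); // runs lwIP timeouts and any other due workers
 *         }
 *     }
 */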
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif
|
src/rp2_common/pico_lwip/lwip_freertos.c (new file)
@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
// todo graham #ifdef for LWIP inclusion?
|
||||
|
||||
#include "pico/async_context.h"
|
||||
#include "pico/time.h"
|
||||
#include "lwip/tcpip.h"
|
||||
#include "lwip/timeouts.h"
|
||||
|
||||
#include "FreeRTOS.h"
|
||||
#include "semphr.h"
|
||||
|
||||
#if NO_SYS
|
||||
#error lwip_freertos_async_context_bindings requires NO_SYS=0
|
||||
#endif
|
||||
|
||||
static async_context_t * volatile lwip_context;
|
||||
// lwIP tcpip_task cannot be shutdown, so we block it when we are de-initialized.
|
||||
static SemaphoreHandle_t tcpip_task_blocker;
|
||||
|
||||
static void tcpip_init_done(void *param) {
|
||||
xSemaphoreGive((SemaphoreHandle_t)param);
|
||||
}
|
||||
|
||||
bool lwip_freertos_init(async_context_t *context) {
|
||||
assert(!lwip_context);
|
||||
lwip_context = context;
|
||||
static bool done_lwip_init;
|
||||
if (!done_lwip_init) {
|
||||
done_lwip_init = true;
|
||||
SemaphoreHandle_t init_sem = xSemaphoreCreateBinary();
|
||||
tcpip_task_blocker = xSemaphoreCreateBinary();
|
||||
tcpip_init(tcpip_init_done, init_sem);
|
||||
xSemaphoreTake(init_sem, portMAX_DELAY);
|
||||
vSemaphoreDelete(init_sem);
|
||||
} else {
|
||||
xSemaphoreGive(tcpip_task_blocker);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static uint32_t clear_lwip_context(__unused void *param) {
|
||||
lwip_context = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void lwip_freertos_deinit(__unused async_context_t *context) {
|
||||
// clear the lwip context under lock as lwIP may still be running in tcpip_task
|
||||
async_context_execute_sync(context, clear_lwip_context, NULL);
|
||||
}
|
||||
|
||||
void pico_lwip_custom_lock_tcpip_core(void) {
|
||||
while (!lwip_context) {
|
||||
xSemaphoreTake(tcpip_task_blocker, portMAX_DELAY);
|
||||
}
|
||||
async_context_acquire_lock_blocking(lwip_context);
|
||||
}
|
||||
|
||||
void pico_lwip_custom_unlock_tcpip_core(void) {
|
||||
async_context_release_lock(lwip_context);
|
||||
}
|
src/rp2_common/pico_lwip/lwip_nosys.c (new file)
@ -0,0 +1,74 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include "pico/async_context.h"
|
||||
|
||||
#include <lwip/init.h>
|
||||
#include "lwip/timeouts.h"
|
||||
|
||||
static void update_next_timeout(async_context_t *context, async_when_pending_worker_t *worker);
|
||||
static void lwip_timeout_reached(async_context_t *context, async_at_time_worker_t *worker);
|
||||
|
||||
static async_when_pending_worker_t always_pending_update_timeout_worker = {
|
||||
.do_work = update_next_timeout
|
||||
};
|
||||
|
||||
static async_at_time_worker_t lwip_timeout_worker = {
|
||||
.do_work = lwip_timeout_reached,
|
||||
};
|
||||
|
||||
static void lwip_timeout_reached(__unused async_context_t *context, __unused async_at_time_worker_t *worker) {
|
||||
assert(worker == &lwip_timeout_worker);
|
||||
sys_check_timeouts();
|
||||
}
|
||||
|
||||
static void update_next_timeout(async_context_t *context, async_when_pending_worker_t *worker) {
|
||||
assert(worker == &always_pending_update_timeout_worker);
|
||||
// we want to run on every execution of the helper to re-reflect any changes
|
||||
// to the underlying lwIP timers which may have happened in the interim
|
||||
// (note that worker will be called on every outermost exit of the async_context
|
||||
// lock, and lwIP timers should not be modified whilst not holding the lock.
|
||||
worker->work_pending = true;
|
||||
uint32_t sleep_ms = sys_timeouts_sleeptime();
|
||||
if (sleep_ms == SYS_TIMEOUTS_SLEEPTIME_INFINITE) {
|
||||
lwip_timeout_worker.next_time = at_the_end_of_time;
|
||||
} else {
|
||||
lwip_timeout_worker.next_time = make_timeout_time_ms(sleep_ms);
|
||||
}
|
||||
async_context_add_at_time_worker(context, &lwip_timeout_worker);
|
||||
}
|
||||
|
||||
bool lwip_nosys_init(async_context_t *context) {
|
||||
static bool done_lwip_init;
|
||||
if (!done_lwip_init) {
|
||||
lwip_init();
|
||||
done_lwip_init = true;
|
||||
}
|
||||
// we want the worker to be called on every async helper run (starting with the next)
|
||||
always_pending_update_timeout_worker.work_pending = true;
|
||||
async_context_add_when_pending_worker(context, &always_pending_update_timeout_worker);
|
||||
return true;
|
||||
}
|
||||
|
||||
void lwip_nosys_deinit(async_context_t *context) {
|
||||
async_context_remove_at_time_worker(context, &lwip_timeout_worker);
|
||||
async_context_remove_when_pending_worker(context, &always_pending_update_timeout_worker);
|
||||
}
|
||||
|
||||
#if NO_SYS
|
||||
/* lwip has provision for using a mutex, when applicable */
|
||||
sys_prot_t sys_arch_protect(void) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
void sys_arch_unprotect(__unused sys_prot_t pval) {
|
||||
}
|
||||
|
||||
/* lwip needs a millisecond time source, and the TinyUSB board support code has one available */
|
||||
uint32_t sys_now(void) {
|
||||
return to_ms_since_boot(get_absolute_time());
|
||||
}
|
||||
#endif
|
@ -1,26 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include "lwip/init.h"
|
||||
#include "pico/time.h"
|
||||
|
||||
#if NO_SYS
|
||||
/* lwip has provision for using a mutex, when applicable */
|
||||
sys_prot_t sys_arch_protect(void) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
void sys_arch_unprotect(sys_prot_t pval) {
|
||||
(void) pval;
|
||||
}
|
||||
|
||||
/* lwip needs a millisecond time source, and the TinyUSB board support code has one available */
|
||||
uint32_t sys_now(void) {
|
||||
return to_ms_since_boot(get_absolute_time());
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -314,6 +314,12 @@ void __attribute__((noreturn)) panic_unsupported(void);
|
||||
*/
|
||||
void __attribute__((noreturn)) panic(const char *fmt, ...);
|
||||
|
||||
#ifdef NDEBUG
|
||||
#define panic_compact(...) panic(__VA_ARGS__)
|
||||
#else
|
||||
#define panic_compact(...) panic("")
|
||||
#endif
|
||||
|
||||
// PICO_CONFIG: PICO_NO_FPGA_CHECK, Remove the FPGA platform check for small code size reduction, type=bool, default=0, advanced=true, group=pico_runtime
|
||||
#ifndef PICO_NO_FPGA_CHECK
|
||||
#define PICO_NO_FPGA_CHECK 0
|
||||