Initial Release

graham sanderson
2021-01-20 10:44:27 -06:00
commit 26653ea81e
404 changed files with 135614 additions and 0 deletions

@@ -0,0 +1,44 @@
if (NOT TARGET pico_sync_headers)
    add_library(pico_sync_headers INTERFACE)
    target_include_directories(pico_sync_headers INTERFACE ${CMAKE_CURRENT_LIST_DIR}/include)
    target_link_libraries(pico_sync_headers INTERFACE hardware_sync pico_time)
endif()

if (NOT TARGET pico_sync_core)
    add_library(pico_sync_core INTERFACE)
    target_sources(pico_sync_core INTERFACE
            ${CMAKE_CURRENT_LIST_DIR}/lock_core.c
    )
    target_link_libraries(pico_sync_core INTERFACE pico_sync_headers)
endif()

if (NOT TARGET pico_sync_sem)
    add_library(pico_sync_sem INTERFACE)
    target_sources(pico_sync_sem INTERFACE
            ${CMAKE_CURRENT_LIST_DIR}/sem.c
    )
    target_link_libraries(pico_sync_sem INTERFACE pico_sync_core pico_time)
endif()

if (NOT TARGET pico_sync_mutex)
    add_library(pico_sync_mutex INTERFACE)
    target_sources(pico_sync_mutex INTERFACE
            ${CMAKE_CURRENT_LIST_DIR}/mutex.c
    )
    target_link_libraries(pico_sync_mutex INTERFACE pico_sync_core pico_time)
endif()

if (NOT TARGET pico_sync_critical_section)
    add_library(pico_sync_critical_section INTERFACE)
    target_sources(pico_sync_critical_section INTERFACE
            ${CMAKE_CURRENT_LIST_DIR}/critical_section.c
    )
    target_link_libraries(pico_sync_critical_section INTERFACE pico_sync_core pico_time)
endif()

if (NOT TARGET pico_sync)
    add_library(pico_sync INTERFACE)
    target_link_libraries(pico_sync INTERFACE pico_sync_sem pico_sync_mutex pico_sync_critical_section pico_sync_core)
endif()
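
For context, a hedged sketch of how an application target might consume these libraries; `my_app` and `main.c` are illustrative names, not part of this commit:

```cmake
# Hypothetical application CMakeLists.txt fragment; assumes the Pico SDK has already been initialised.
add_executable(my_app main.c)

# Linking the aggregate pico_sync INTERFACE library pulls in pico_sync_sem, pico_sync_mutex,
# pico_sync_critical_section and pico_sync_core (and their headers) transitively, per the block above.
target_link_libraries(my_app pico_sync)
```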

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/critical_section.h"
#if !PICO_NO_HARDWARE
static_assert(sizeof(critical_section_t) == 8, "");
#endif
void critical_section_init(critical_section_t *critsec) {
critical_section_init_with_lock_num(critsec, spin_lock_claim_unused(true));
}
void critical_section_init_with_lock_num(critical_section_t *critsec, uint lock_num) {
lock_init(&critsec->core, lock_num);
__mem_fence_release();
}
void critical_section_deinit(critical_section_t *critsec) {
spin_lock_unclaim(spin_lock_get_num(critsec->core.spin_lock));
}

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PLATFORM_CRITICAL_SECTION_H
#define _PLATFORM_CRITICAL_SECTION_H
#include "pico/lock_core.h"
#ifdef __cplusplus
extern "C" {
#endif
/** \file critical_section.h
* \defgroup critical_section critical_section
* \ingroup pico_sync
* \brief Critical Section API for short-lived mutual exclusion safe for IRQ and multi-core
*
* A critical section is non-reentrant, and provides mutual exclusion against concurrent access
* from the other core and from (higher priority) interrupts on the same core. It does the former
* using a spin lock and the latter by disabling interrupts on the calling core.
*
* Because interrupts are disabled while a critical section is held, uses of the critical_section should be as short as possible.
*/
typedef struct __packed_aligned critical_section {
    lock_core_t core;
    uint32_t save;
} critical_section_t;
/*! \brief Initialise a critical_section structure allowing the system to assign a spin lock number
* \ingroup critical_section
*
* The critical section is initialized ready for use, and will use a (possibly shared) spin lock
* number assigned by the system. Note that in general it is unlikely that you would be nesting
* critical sections; however, if you do, you *must* use \ref critical_section_init_with_lock_num
* to ensure that the spin locks used are different.
*
* \param critsec Pointer to critical_section structure
*/
void critical_section_init(critical_section_t *critsec);
/*! \brief Initialise a critical_section structure assigning a specific spin lock number
* \ingroup critical_section
* \param critsec Pointer to critical_section structure
* \param lock_num the specific spin lock number to use
*/
void critical_section_init_with_lock_num(critical_section_t *critsec, uint lock_num);
/*! \brief Enter a critical_section
* \ingroup critical_section
*
* If the spin lock associated with this critical section is in use, then this
* method will block until it is released.
*
* \param critsec Pointer to critical_section structure
*/
static inline void critical_section_enter_blocking(critical_section_t *critsec) {
    critsec->save = spin_lock_blocking(critsec->core.spin_lock);
}
/*! \brief Release a critical_section
* \ingroup critical_section
*
* \param critsec Pointer to critical_section structure
*/
static inline void critical_section_exit(critical_section_t *critsec) {
    spin_unlock(critsec->core.spin_lock, critsec->save);
}
#ifdef __cplusplus
}
#endif
#endif
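
A minimal usage sketch of the API above, assuming a counter shared between both cores and an IRQ handler; `counter_setup`, `counter_increment` and `shared_counter` are illustrative names, not SDK symbols:

```c
#include "pico/critical_section.h"

static critical_section_t counter_cs;
static volatile uint32_t shared_counter;

void counter_setup(void) {
    critical_section_init(&counter_cs);            // the system picks a free spin lock
}

// Safe to call from either core or from an IRQ handler on the calling core.
void counter_increment(void) {
    critical_section_enter_blocking(&counter_cs);  // disables IRQs and takes the spin lock
    shared_counter++;                              // keep the protected region short
    critical_section_exit(&counter_cs);            // releases the lock and restores the IRQ state
}
```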

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_LOCK_CORE_H
#define _PICO_LOCK_CORE_H
#include "pico.h"
#include "hardware/sync.h"
/** \file lock_core.h
* \ingroup pico_sync
*
* Base implementation for locking primitives protected by a spin lock
*/
typedef struct lock_core {
    // spin lock protecting this lock's state
    spin_lock_t *spin_lock;

    // note any lock members in containing structures need not be volatile;
    // they are protected by memory/compiler barriers when gaining and releasing spin locks
} lock_core_t;
void lock_init(lock_core_t *core, uint lock_num);
#endif
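
To illustrate how `lock_core_t` is intended to be embedded, here is a hedged sketch of a trivial event-flag primitive in the same style as the mutex and semaphore that follow; `event_t` and its functions are invented for this example and are not SDK API:

```c
#include "pico/lock_core.h"

// Hypothetical one-shot event flag built on lock_core_t.
typedef struct {
    lock_core_t core;       // embedded spin lock, set up via lock_init()
    bool signalled;
} event_t;

static inline void event_init(event_t *evt) {
    lock_init(&evt->core, next_striped_spin_lock_num());
    evt->signalled = false;
}

static inline void event_signal(event_t *evt) {
    uint32_t save = spin_lock_blocking(evt->core.spin_lock);
    evt->signalled = true;
    __sev();                // wake any core sleeping in __wfe() while waiting for the flag
    spin_unlock(evt->core.spin_lock, save);
}
```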

@@ -0,0 +1,136 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PLATFORM_MUTEX_H
#define _PLATFORM_MUTEX_H
#include "pico/lock_core.h"
#ifdef __cplusplus
extern "C" {
#endif
/** \file mutex.h
* \defgroup mutex mutex
* \ingroup pico_sync
* \brief Mutex API for non IRQ mutual exclusion between cores
*
* Mutexes are application-level locks usually used to protect data structures that might be used by
* multiple cores. Unlike critical sections, the mutex-protected code is not necessarily
* required/expected to complete quickly, as no other system-wide locks are held on account of a locked mutex.
*
* Because they are not re-entrant on the same core, blocking on a mutex should never be done in an IRQ
* handler. It is valid to call \ref mutex_try_enter from within an IRQ handler, if the operation
* that would be conducted under lock can be skipped if the mutex is locked (at least by the same core).
*
* See \ref critical_section.h for protecting access between multiple cores AND IRQ handlers
*/
typedef struct __packed_aligned mutex {
    lock_core_t core;
    bool owned;
    int8_t owner;
} mutex_t;
/*! \brief Initialise a mutex structure
* \ingroup mutex
*
* \param mtx Pointer to mutex structure
*/
void mutex_init(mutex_t *mtx);
/*! \brief Take ownership of a mutex
* \ingroup mutex
*
* This function will block until the calling core can claim ownership of the mutex.
* On return the calling core owns the mutex.
*
* \param mtx Pointer to mutex structure
*/
void mutex_enter_blocking(mutex_t *mtx);
/*! \brief Attempt to take ownership of a mutex
* \ingroup mutex
*
* Returns true if the mutex was unowned and the calling core has now taken ownership; returns false otherwise.
*
* \param mtx Pointer to mutex structure
* \param owner_out If the mutex is owned and this pointer is non-NULL, it will be filled in with the core number of the current owner of the mutex
*/
bool mutex_try_enter(mutex_t *mtx, uint32_t *owner_out);
/*! \brief Wait for mutex with timeout
* \ingroup mutex
*
* Wait for up to the specified time to take ownership of the mutex. If the calling
* core can take ownership of the mutex before the timeout expires, then true will be returned
* and the calling core will own the mutex, otherwise false will be returned and the calling
* core will *NOT* own the mutex.
*
* \param mtx Pointer to mutex structure
* \param timeout_ms The timeout in milliseconds.
* \return true if mutex now owned, false if timeout occurred before mutex became available
*/
bool mutex_enter_timeout_ms(mutex_t *mtx, uint32_t timeout_ms);
/*! \brief Wait for mutex until a specific time
* \ingroup mutex
*
* Wait until the specified time to take ownership of the mutex. If the calling
* core can take ownership of the mutex before the timeout expires, then true will be returned
* and the calling core will own the mutex, otherwise false will be returned and the calling
* core will *NOT* own the mutex.
*
* \param mtx Pointer to mutex structure
* \param until The time after which to return if the core cannot take ownership of the mutex
* \return true if mutex now owned, false if timeout occurred before mutex became available
*/
bool mutex_enter_block_until(mutex_t *mtx, absolute_time_t until);
/*! \brief Release ownership of a mutex
* \ingroup mutex
*
* \param mtx Pointer to mutex structure
*/
void mutex_exit(mutex_t *mtx);
/*! \brief Test for mutex initialised state
* \ingroup mutex
*
* \param mtx Pointer to mutex structure
* \return true if the mutex is initialised, false otherwise
*/
static inline bool mutex_is_initialzed(mutex_t *mtx) {
    return mtx->core.spin_lock != 0;
}
/*! \brief Helper macro for static definition of mutexes
* \ingroup mutex
*
* A mutex defined as follows:
*
* ```c
* auto_init_mutex(my_mutex);
* ```
*
* Is equivalent to doing
*
* ```c
* static mutex_t my_mutex;
*
* void my_init_function() {
* mutex_init(&my_mutex);
* }
* ```
*
* But the initialization of the mutex is performed automatically during runtime initialization
*/
#define auto_init_mutex(name) static __attribute__((section(".mutex_array"))) mutex_t name
#ifdef __cplusplus
}
#endif
#endif
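
A usage sketch of the mutex API above, assuming a buffer shared between the two cores; the buffer and function names are illustrative, not SDK symbols. Note that the IRQ handler only ever uses the non-blocking mutex_try_enter, per the guidance in the header:

```c
#include <string.h>
#include "pico/mutex.h"

auto_init_mutex(buffer_mutex);            // statically defined, initialised automatically at startup
static char shared_buffer[64];

void buffer_write(const char *src, size_t len) {
    mutex_enter_blocking(&buffer_mutex);  // may block: never call this path from an IRQ handler
    if (len > sizeof(shared_buffer)) len = sizeof(shared_buffer);
    memcpy(shared_buffer, src, len);
    mutex_exit(&buffer_mutex);
}

// From an IRQ handler only the non-blocking form is safe.
void on_some_irq(void) {
    uint32_t owner;
    if (mutex_try_enter(&buffer_mutex, &owner)) {
        shared_buffer[0] = '!';           // quick update under the lock
        mutex_exit(&buffer_mutex);
    }                                     // otherwise skip the work this time around
}
```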

@@ -0,0 +1,101 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PLATFORM_SEM_H
#define _PLATFORM_SEM_H
#include "pico/lock_core.h"
/** \file sem.h
* \defgroup sem sem
* \ingroup pico_sync
* \brief Semaphore API for restricting access to a resource
*
* A semaphore holds a number of available permits. `sem_acquire` methods will acquire a permit if available
* (reducing the available count by 1) or block if the number of available permits is 0.
* \ref sem_release() increases the number of available permits by one, potentially unblocking a `sem_acquire` method.
*
* Note that \ref sem_release() may be called an arbitrary number of times, however the number of available
* permits is capped to the max_permit value specified during semaphore initialization.
*
* Although these semaphore-related functions can be used from IRQ handlers, it is preferable to only
* release semaphores from within an IRQ handler, i.e. to avoid blocking on acquire there.
*/
#ifdef __cplusplus
extern "C" {
#endif
typedef struct __packed_aligned semaphore {
    struct lock_core core;
    int16_t permits;
    int16_t max_permits;
} semaphore_t;
/*! \brief Initialise a semaphore structure
* \ingroup sem
*
* \param sem Pointer to semaphore structure
* \param initial_permits How many permits are initially available
* \param max_permits Total number of permits allowed for this semaphore
*/
void sem_init(semaphore_t *sem, int16_t initial_permits, int16_t max_permits);
/*! \brief Return number of available permits on the semaphore
* \ingroup sem
*
* \param sem Pointer to semaphore structure
* \return The number of permits available on the semaphore.
*/
int sem_available(semaphore_t *sem);
/*! \brief Release a permit on a semaphore
* \ingroup sem
*
* Increases the number of permits by one (unless the number of permits is already at the maximum).
* A blocked `sem_acquire` will be released if the number of permits is increased.
*
* \param sem Pointer to semaphore structure
* \return true if the number of permits available was increased.
*/
bool sem_release(semaphore_t *sem);
/*! \brief Reset semaphore to a specific number of available permits
* \ingroup sem
*
* Reset value should be from 0 to the max_permits specified in the init function
*
* \param sem Pointer to semaphore structure
* \param permits the new number of available permits
*/
void sem_reset(semaphore_t *sem, int16_t permits);
/*! \brief Acquire a permit from the semaphore
* \ingroup sem
*
* This function will block and wait if no permits are available.
*
* \param sem Pointer to semaphore structure
*/
void sem_acquire_blocking(semaphore_t *sem);
/*! \brief Acquire a permit from a semaphore, with timeout
* \ingroup sem
*
* This function will block and wait if no permits are available, until the
* defined timeout has been reached. If the timeout is reached the function will
* return false, otherwise it will return true.
*
* \param sem Pointer to semaphore structure
* \param timeout_ms Time to wait to acquire the semaphore, in ms.
* \return false if timeout reached, true if permit was acquired.
*/
bool sem_acquire_timeout_ms(semaphore_t *sem, uint32_t timeout_ms);
#ifdef __cplusplus
}
#endif
#endif
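
A usage sketch of the semaphore API, assuming events are signalled from an IRQ handler and consumed in a main loop; the names below are illustrative, not SDK symbols:

```c
#include "pico/sem.h"

static semaphore_t event_sem;

void events_init(void) {
    sem_init(&event_sem, 0, 8);           // start with no permits, cap pending events at 8
}

// Safe from an IRQ handler: releasing never blocks (it returns false if already at max_permits).
void on_event_irq(void) {
    sem_release(&event_sem);
}

void event_loop(void) {
    while (true) {
        sem_acquire_blocking(&event_sem); // sleeps (via WFE) until a permit is available
        // ... handle one event ...
    }
}
```

The same loop could instead bound the wait with sem_acquire_timeout_ms() and handle a false (timed-out) return.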

@@ -0,0 +1,19 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_SYNC_H
#define _PICO_SYNC_H
/** \file pico/sync.h
* \defgroup pico_sync pico_sync
* Synchronization primitives and mutual exclusion
*/
#include "pico/sem.h"
#include "pico/mutex.h"
#include "pico/critical_section.h"
#endif

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/lock_core.h"

void lock_init(lock_core_t *core, uint lock_num) {
    assert(lock_num >= 0 && lock_num < NUM_SPIN_LOCKS);
    core->spin_lock = spin_lock_instance(lock_num);
}

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/mutex.h"
#include "pico/time.h"
#if !PICO_NO_HARDWARE
static_assert(sizeof(mutex_t) == 8, "");
#endif
void mutex_init(mutex_t *mtx) {
lock_init(&mtx->core, next_striped_spin_lock_num());
__mem_fence_release();
}
void __time_critical_func(mutex_enter_blocking)(mutex_t *mtx) {
assert(mtx->core.spin_lock);
bool block = true;
do {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!mtx->owned) {
mtx->owned = true;
mtx->owner = get_core_num();
block = false;
}
spin_unlock(mtx->core.spin_lock, save);
if (block) {
__wfe();
}
} while (block);
}
bool __time_critical_func(mutex_try_enter)(mutex_t *mtx, uint32_t *owner_out) {
bool entered;
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!mtx->owned) {
mtx->owned = true;
mtx->owner = get_core_num();
entered = true;
} else {
if (owner_out) *owner_out = mtx->owner;
entered = false;
}
spin_unlock(mtx->core.spin_lock, save);
return entered;
}
bool __time_critical_func(mutex_enter_timeout_ms)(mutex_t *mtx, uint32_t timeout_ms) {
return mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
}
bool __time_critical_func(mutex_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
assert(mtx->core.spin_lock);
bool block = true;
do {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!mtx->owned) {
mtx->owned = true;
mtx->owner = get_core_num();
block = false;
}
spin_unlock(mtx->core.spin_lock, save);
if (block) {
if (best_effort_wfe_or_timeout(until)) {
return false;
}
}
} while (block);
return true;
}
void __time_critical_func(mutex_exit)(mutex_t *mtx) {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
assert(mtx->owned);
mtx->owned = 0;
#ifndef NDEBUG
mtx->owner = -1;
#endif
__sev();
spin_unlock(mtx->core.spin_lock, save);
}

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/sem.h"
#include "pico/time.h"
void sem_init(semaphore_t *sem, int16_t initial_permits, int16_t max_permits) {
lock_init(&sem->core, next_striped_spin_lock_num());
sem->permits = initial_permits;
sem->max_permits = max_permits;
__mem_fence_release();
}
int __time_critical_func(sem_available)(semaphore_t *sem) {
return *(volatile typeof(sem->permits) *) &sem->permits;
}
void __time_critical_func(sem_acquire_blocking)(semaphore_t *sem) {
bool block = true;
do {
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
if (sem->permits > 0) {
sem->permits--;
__sev();
block = false;
}
spin_unlock(sem->core.spin_lock, save);
if (block) {
__wfe();
}
} while (block);
}
bool __time_critical_func(sem_acquire_timeout_ms)(semaphore_t *sem, uint32_t timeout_ms) {
bool block = true;
absolute_time_t target = nil_time;
do {
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
if (sem->permits > 0) {
sem->permits--;
__sev();
block = false;
}
spin_unlock(sem->core.spin_lock, save);
if (block) {
if (is_nil_time(target)) {
target = make_timeout_time_ms(timeout_ms);
}
if (best_effort_wfe_or_timeout(target)) {
return false;
}
}
} while (block);
return true;
}
// todo this should really have a blocking variant for when permits are maxed out
bool __time_critical_func(sem_release)(semaphore_t *sem) {
bool rc;
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
int32_t count = sem->permits;
if (count < sem->max_permits) {
sem->permits = count + 1;
__sev();
rc = true;
} else {
rc = false;
}
spin_unlock(sem->core.spin_lock, save);
return rc;
}
void __time_critical_func(sem_reset)(semaphore_t *sem, int16_t permits) {
assert(permits >= 0 && permits <= sem->max_permits);
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
if (permits > sem->permits) __sev();
sem->permits = permits;
spin_unlock(sem->core.spin_lock, save);
}
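
The todo above notes the missing blocking variant of sem_release. As a hedged sketch only (sem_release_blocking is not an SDK function), such a variant could reuse the same spin-lock plus __wfe()/__sev() loop used by the acquire functions; note that the acquire paths above already call __sev() after taking a permit, which is what would wake this loop:

```c
#include "pico/sem.h"

// Hypothetical blocking release, mirroring sem_acquire_blocking():
// wait until a permit slot is free, then release.
void sem_release_blocking(semaphore_t *sem) {
    bool block = true;
    do {
        uint32_t save = spin_lock_blocking(sem->core.spin_lock);
        if (sem->permits < sem->max_permits) {
            sem->permits++;
            __sev();                      // wake any core blocked in a sem_acquire_* call
            block = false;
        }
        spin_unlock(sem->core.spin_lock, save);
        if (block) {
            __wfe();                      // wait for an acquire to free up a permit slot
        }
    } while (block);
}
```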