Fix a bunch of doxygen typos (#391)
This commit is contained in:
parent ecf66bf514
commit 9396b9c7e1
@@ -20,7 +20,7 @@
  * lock which is used only to protect the contents of the rest of the structure as part of implementing the synchronization
  * primitive. As such, the spin_lock member of lock core is never still held on return from any function for the primitive.
  *
- * \ref critical_section is an exceptional case in that it does not have a lock_core_t and simply wraps a pin lock, providing
+ * \ref critical_section is an exceptional case in that it does not have a lock_core_t and simply wraps a spin lock, providing
  * methods to lock and unlock said spin lock.
  *
  * lock_core based structures work by locking the spin lock, checking state, and then deciding whether they additionally need to block
@@ -28,11 +28,11 @@
  *
  * By default the SDK just uses the processors' events via SEV and WFE for notification and blocking as these are sufficient for
  * cross core, and notification from interrupt handlers. However macros are defined in this file that abstract the wait
- * and notify mechanisms to allow the SDK locking functions to effectively be used within an RTOS on other environment.
+ * and notify mechanisms to allow the SDK locking functions to effectively be used within an RTOS or other environment.
  *
  * When implementing an RTOS, it is desirable for the SDK synchronization primitives that wait, to block the calling task (and immediately yield),
  * and those that notify, to wake a blocked task which isn't on processor. At least the wait macro implementation needs to be atomic with the protecting
- * spin_lock unlock from the callers point of view; i.e. the task should unlock the spin lock when as it starts its wait. Such implementation is
+ * spin_lock unlock from the callers point of view; i.e. the task should unlock the spin lock when it starts its wait. Such implementation is
  * up to the RTOS integration, however the macros are defined such that such operations are always combined into a single call
  * (so they can be performed atomically) even though the default implementation does not need this, as a WFE which starts
  * following the corresponding SEV is not missed.
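
The wait/notify abstraction this comment block describes can be pictured with a short sketch. This is not the verbatim SDK source; the macro names and the exact spin_unlock pairing are assumptions based on the comment above, which says the defaults simply combine releasing the spin lock with WFE/SEV:

// Hedged sketch of the combined unlock+wait / unlock+notify macros the
// comment describes; names and bodies are assumed, not quoted from the SDK.
#ifndef lock_internal_spin_unlock_with_wait
#define lock_internal_spin_unlock_with_wait(lock, save) \
    (spin_unlock((lock)->spin_lock, save), __wfe())  // release, then wait for event
#endif
#ifndef lock_internal_spin_unlock_with_notify
#define lock_internal_spin_unlock_with_notify(lock, save) \
    (spin_unlock((lock)->spin_lock, save), __sev())  // release, then signal waiters
#endif
// An RTOS port can pre-define these so that unlocking and blocking (or waking
// a task) happen as one atomic step from the caller's point of view.
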
@@ -178,7 +178,7 @@ void lock_init(lock_core_t *core, uint lock_num);
  * This method is provided for cases where the caller has no useful work to do
  * until the specified time.
  *
- * By default this method does nothing, however if can be overridden (for example by an
+ * By default this method does nothing, however it can be overridden (for example by an
  * RTOS which is able to block the current task until the scheduler tick before
  * the given time)
  *
@@ -187,4 +187,4 @@ void lock_init(lock_core_t *core, uint lock_num);
 #define sync_internal_yield_until_before(until) ((void)0)
 #endif

-#endif
+#endif
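
Since sync_internal_yield_until_before() is a no-op by default, an RTOS port supplies its own definition to get the blocking behaviour the comment describes. A minimal sketch, where my_rtos_sleep_until() is a hypothetical stand-in for whatever the RTOS provides:

// Hedged sketch: define the macro before the SDK's no-op default is seen,
// so the calling task blocks until just before 'until'.
// my_rtos_sleep_until() is hypothetical, not an SDK or RTOS API.
#define sync_internal_yield_until_before(until) my_rtos_sleep_until(until)
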
@@ -100,7 +100,7 @@ bool mutex_enter_timeout_ms(mutex_t *mtx, uint32_t timeout_ms);
  * core will *NOT* own the mutex.
  *
  * \param mtx Pointer to mutex structure
- * \param until The time after which to return if the core cannot take owner ship of the mutex
+ * \param until The time after which to return if the core cannot take ownership of the mutex
  * \return true if mutex now owned, false if timeout occurred before mutex became available
  */
 bool mutex_enter_block_until(mutex_t *mtx, absolute_time_t until);
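
A short usage sketch for mutex_enter_block_until(), assuming the usual pico/sync.h and pico/time.h headers and a mutex that has already been initialised:

#include "pico/sync.h"
#include "pico/time.h"

static mutex_t my_mutex; // assume mutex_init(&my_mutex) was called at startup

bool try_shared_work(void) {
    // Give up if ownership cannot be taken within the next 100 ms.
    if (!mutex_enter_block_until(&my_mutex, make_timeout_time_ms(100)))
        return false; // timed out: this core does *NOT* own the mutex
    // ... touch the shared state ...
    mutex_exit(&my_mutex);
    return true;
}
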
@@ -353,7 +353,7 @@ static inline void dma_channel_set_read_addr(uint channel, const volatile void *
     }
 }

-/*! \brief Set the DMA initial read address
+/*! \brief Set the DMA initial write address
  * \ingroup hardware_dma
  *
  * \param channel DMA channel
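
For the function whose \brief this hunk corrects, dma_channel_set_write_addr(), a hedged usage sketch; the buffer name and channel setup are assumptions, not taken from this diff:

#include "hardware/dma.h"

static uint8_t rx_buf[256]; // hypothetical destination buffer

void point_channel_at_buffer(uint channel) {
    // Stage a new initial write address on an already-configured channel;
    // pass true instead of false to trigger the transfer immediately.
    dma_channel_set_write_addr(channel, rx_buf, false);
}
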
@@ -59,7 +59,7 @@ enum exception_number {
 /*! \brief Exception handler function type
  * \ingroup hardware_exception
  *
- * All exceptions handlers should be of this type, and follow normal ARM EABI register saving conventions
+ * All exception handlers should be of this type, and follow normal ARM EABI register saving conventions
  */
 typedef void (*exception_handler_t)(void);

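
A sketch of a handler matching this typedef, with the install call shown as a comment; HARDFAULT_EXCEPTION and exception_set_exclusive_handler() come from hardware/exception.h, and the handler body is purely illustrative:

#include "hardware/exception.h"

// Takes no arguments and returns nothing, per exception_handler_t.
static void my_hardfault_handler(void) {
    for (;;) { /* spin so a debugger can inspect the fault */ }
}

// Installed with, e.g.:
//   exception_set_exclusive_handler(HARDFAULT_EXCEPTION, my_hardfault_handler);
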
@@ -75,7 +75,7 @@ __vectors:
 .word isr_irq31

 // all default exception handlers do nothing, and we can check for them being set to our
-// default values by them pointing to between __defaults_isrs_start and __default_isrs_end
+// default values by seeing if they point to somewhere between __defaults_isrs_start and __default_isrs_end
 .global __default_isrs_start
 __default_isrs_start:

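
The range check the rewritten comment describes can be sketched in C; this is an illustration, not the SDK's exact code (a real check must also allow for the Thumb bit set in handler addresses):

#include <stdbool.h>
#include <stdint.h>

extern char __default_isrs_start, __default_isrs_end; // symbols from crt0.S

// A handler is still "default" if it points between the two linker symbols.
static bool is_default_isr(void (*handler)(void)) {
    uintptr_t addr = (uintptr_t)handler;
    return addr >= (uintptr_t)&__default_isrs_start
        && addr <  (uintptr_t)&__default_isrs_end;
}
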