file | hqatomic.h | Macros implementing atomic operations for multi-thread variable access.
file | hqspin.h | Spinlocks implemented using atomic operations.
#define | HQ_ATOMIC_SUPPORTED 1 |
#define | HqAtomicIncrement(ptr_, before_) |
#define | HqAtomicDecrement(ptr_, after_) |
#define | HqAtomicAdd(ptr_, value_, after_) |
#define | HqAtomicAdd(ptr_, value_, after_) |
#define | HqAtomicMaximum(ptr_, value_, after_) |
#define | HqAtomicMinimum(ptr_, value_, after_) |
#define | HqAtomicCAS(ptr_, compareto_, swapfor_, swapped_) |
#define | HqAtomicCASPointer(ptr_, compareto_, swapfor_, swapped_, type_) |
#define | yield_processor() (void)SwitchToThread() | Yield the processor to another thread, if there is one runnable.
#define | spinlock_pointer(addr_, locked_, count_) | Lock a pointer using atomic operations. More...
#define | spinlock_pointer_incomplete(addr_, locked_, count_) | Lock a pointer using atomic operations. More...
#define | spintrylock_pointer(addr_, locked_, didlock_) | Try to lock a pointer using atomic operations. More...
#define | spinunlock_pointer(addr_, unlocked_) | Unlock a pointer using atomic operations. More...
#define | spinunlock_pointer_incomplete(addr_, unlocked_) | Unlock a pointer using atomic operations. More...
#define | spin_pointer_without_lock(ptr_, uptr_) | Get the value of a potentially spinlocked pointer in its unlocked state. More...
#define | spinlock_counter(addr_, count_) | Lock a counter semaphore using atomic operations. More...
#define | spinunlock_counter(addr_) | Unlock a semaphore counter using atomic operations. More...
typedef long | hq_atomic_counter_t | An integer type suitable for the atomic increment and decrement.
typedef void * | __attribute__((__may_alias__)) spinlock_void_ptr | Aliased void pointer, to quieten GCC warnings and prevent harmful optimisations.
We provide a set of minimal atomic operations, which we can support effectively across multiple compilers and processor architectures, using either compiler intrinsics or in-line assembly. The minimal set of operations provided is:

- HqAtomicIncrement(ptr, before): atomically increment the contents of *ptr, and return the value before it was incremented in before.
- HqAtomicDecrement(ptr, after): atomically decrement the contents of *ptr, and return the value after it was decremented in after.
- HqAtomicCAS(ptr, compare, swap, swapped): atomically compare the value of *ptr with compare, and if they match, swap the contents of *ptr for swap. Store a boolean in swapped indicating if the swap was performed.
- HqAtomicCASPointer(ptr, compare, swap, swapped, type): the same operation as HqAtomicCAS(), but allowing pointers to be compared and swapped.

In principle, all other atomic operations can be built on top of CAS (compare and swap). However, it's just a bit too clunky having to implement some simple operations on top of CAS.

A couple of additional atomic integer operations are also supplied:

- HqAtomicAdd(ptr, value, after): atomically set *ptr to the sum of *ptr and value, returning the value after the addition in after.
- HqAtomicMaximum(ptr, value, after): atomically set *ptr to the maximum of *ptr and value, returning the maximum value in after.
- HqAtomicMinimum(ptr, value, after): atomically set *ptr to the minimum of *ptr and value, returning the minimum value in after.

The equivalent prototypes for the operations implemented are (since these are macros, the out parameters are variable names rather than true function arguments):

  void HqAtomicIncrement(hq_atomic_counter_t *ptr, hq_atomic_counter_t before) ;
  void HqAtomicDecrement(hq_atomic_counter_t *ptr, hq_atomic_counter_t after) ;
  void HqAtomicCAS(hq_atomic_counter_t *ptr, hq_atomic_counter_t compare, hq_atomic_counter_t swap, HqBool swapped) ;
  void HqAtomicCASPointer(type **ptr, type *compare, type *swap, HqBool swapped, type) ;
  void HqAtomicAdd(hq_atomic_counter_t *ptr, hq_atomic_counter_t value, hq_atomic_counter_t after) ;
  void HqAtomicMaximum(hq_atomic_counter_t *ptr, hq_atomic_counter_t value, hq_atomic_counter_t after) ;
  void HqAtomicMinimum(hq_atomic_counter_t *ptr, hq_atomic_counter_t value, hq_atomic_counter_t after) ;

Here hq_atomic_counter_t is an integer type (long) suitable for the atomic increment and decrement, and HqBool is the Harlequin standard boolean type.
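For example, a reference count shared between threads might be maintained like this (a minimal sketch; the reference-counted object and the threading setup are assumptions, not part of hqatomic.h):

  #include "hqatomic.h"

  static hq_atomic_counter_t refcount = 1 ; /* hypothetical shared count */

  void retain(void)
  {
    hq_atomic_counter_t before ;
    HqAtomicIncrement(&refcount, before) ; /* before receives the old value */
  }

  void release(void)
  {
    hq_atomic_counter_t after ;
    HqAtomicDecrement(&refcount, after) ;  /* after receives the new value */
    if ( after == 0 ) {
      /* Last reference dropped; this thread may free the object. */
    }
  }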
◆ HQ_ATOMIC_SUPPORTED
#define HQ_ATOMIC_SUPPORTED 1
Definitions for major compiler versions that support atomic operations appear first in the header. If HQ_ATOMIC_SUPPORTED has not been defined by one of those, the architecture-specific assembly versions may be used instead.
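Client code can therefore test the define before relying on the macros. A minimal sketch, assuming a hypothetical hit counter and a caller-supplied fallback lock:

  #include "hqatomic.h"

  static hq_atomic_counter_t hits = 0 ;

  void count_hit(void)
  {
  #ifdef HQ_ATOMIC_SUPPORTED
    hq_atomic_counter_t before ;
    HqAtomicIncrement(&hits, before) ;
  #else
    /* No atomic support: protect the increment with a mutex instead
       (locking code not shown). */
    ++hits ;
  #endif
  }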
◆ HqAtomicAdd [1/2]
#define HqAtomicAdd(ptr_, value_, after_)
Value:
MACRO_START \
  hq_atomic_counter_t _sum_, _value_ = (value_) ; \
  HqBool _done_ = FALSE ; \
  do { \
    hq_atomic_counter_t _prev_ = *(ptr_) ; \
    _sum_ = _prev_ + _value_ ; \
    HqAtomicCAS(ptr_, _prev_, _sum_, _done_) ; \
  } while ( !_done_ ) ; \
  after_ = _sum_ ; \
MACRO_END
- Parameters
-
[in,out] | ptr_ | A pointer to an atomic counter |
[in] | value_ | A value to add to the atomic counter stored in *ptr_. |
[out] | after_ | The name of an atomic counter variable that will be set to the sum of *ptr_ and value_. |
Atomically ensure that the value of *ptr_ is the sum of *ptr_ and value_, and return the sum in after_.
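For instance, threads accumulating a running total can act on the post-addition value (a sketch; the byte counter and the one-megabyte threshold are illustrative):

  #include "hqatomic.h"

  static hq_atomic_counter_t bytes_written = 0 ;

  void note_write(hq_atomic_counter_t nbytes)
  {
    hq_atomic_counter_t total ;
    HqAtomicAdd(&bytes_written, nbytes, total) ;
    if ( total > 1024 * 1024 ) {
      /* Over a megabyte written in aggregate; flush or log here. */
    }
  }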
◆ HqAtomicAdd [2/2]
#define HqAtomicAdd(ptr_, value_, after_)
Value:
MACRO_START \
  after_ = _InterlockedExchangeAdd(ptr_, value_) + (value_) ; \
MACRO_END
- Parameters
-
[in,out] | ptr_ | A pointer to an atomic counter |
[in] | value_ | A value to add to the atomic counter stored in *ptr_. |
[out] | after_ | The name of an atomic counter variable that will be set to the sum of *ptr_ and value_. |
Atomically ensure that the value of *ptr_ is the sum of *ptr_ and value_, and return the sum in after_.
◆ HqAtomicCAS
#define HqAtomicCAS(ptr_, compareto_, swapfor_, swapped_)
Value:
MACRO_START \
  hq_atomic_counter_t _compareto_ = (compareto_) ; \
  swapped_ = (_compareto_ == _InterlockedCompareExchange((ptr_), (swapfor_), _compareto_)) ; \
MACRO_END
- Parameters
-
[in,out] | ptr_ | A pointer to an atomic counter |
[in] | compareto_ | A value to compare the atomic counter stored in *ptr_ with. |
[in] | swapfor_ | A value to swap the atomic counter stored in *ptr_ with. |
[out] | swapped_ | The name of a boolean variable that will be set to TRUE if the swap was made, FALSE if the swap was not made. |
Atomically compare the value of *ptr_ with compareto_, and if they match, swap the contents of *ptr_ for swapfor_. Store a boolean in swapped_ indicating if the swap was performed.
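The usual pattern is a read-compute-CAS retry loop. A minimal sketch of a saturating increment built on HqAtomicCAS() (the counter and limit are illustrative):

  #include "hqatomic.h"

  static hq_atomic_counter_t level = 0 ;

  void bump_saturating(hq_atomic_counter_t limit)
  {
    HqBool done = FALSE ;
    do {
      hq_atomic_counter_t prev = level ;  /* snapshot the current value */
      if ( prev >= limit )
        return ;                          /* already saturated; nothing to do */
      HqAtomicCAS(&level, prev, prev + 1, done) ;
    } while ( !done ) ;                   /* another thread won the race; retry */
  }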
◆ HqAtomicCASPointer
#define HqAtomicCASPointer(ptr_, compareto_, swapfor_, swapped_, type_)
- Parameters
-
[in,out] | ptr_ | A pointer to a pointer |
[in] | compareto_ | A value to compare the pointer stored in *ptr_ with. |
[in] | swapfor_ | A value to swap the pointer stored in *ptr_ with. |
[out] | swapped_ | The name of a boolean variable that will be set to TRUE if the swap was made, FALSE if the swap was not made. |
[in] | type_ | The type of the pointer stored in *ptr_. |
Atomically compare the value of *ptr_ with compareto_, and if they match, swap the contents of *ptr_ for swapfor_. Store a boolean in swapped_ indicating if the swap was performed. (This is the same operation as HqAtomicCAS(), but allows pointers to be compared and swapped.)
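For example, a lock-free push onto a singly-linked stack (a sketch: struct node and the node_ptr typedef are hypothetical; a pointer typedef is passed as type_, following the documented spinlock_void_ptr usage):

  #include "hqatomic.h"

  struct node {
    struct node *next ;
    int payload ;
  } ;
  typedef struct node *node_ptr ;

  static struct node *top = NULL ;

  void push(struct node *n)
  {
    HqBool done = FALSE ;
    do {
      n->next = top ;                     /* snapshot the current head */
      HqAtomicCASPointer(&top, n->next, n, done, node_ptr) ;
    } while ( !done ) ;                   /* head moved under us; retry */
  }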
◆ HqAtomicDecrement
#define HqAtomicDecrement(ptr_, after_)
Value:
MACRO_START \
  after_ = _InterlockedDecrement(ptr_) ; \
MACRO_END
- Parameters
-
[in,out] | ptr_ | A pointer to an atomic counter to decrement |
[out] | after_ | The name of an atomic counter variable that will be set to the value of the counter after this decrement. |
Atomically decrement the contents of *ptr_, and return the value after it was decremented in after_.
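A common use is detecting completion of a batch of work items (a sketch; the submission side, which increments outstanding, is elided):

  #include "hqatomic.h"

  static hq_atomic_counter_t outstanding = 0 ;

  void work_done(void)
  {
    hq_atomic_counter_t after ;
    HqAtomicDecrement(&outstanding, after) ;
    if ( after == 0 ) {
      /* Exactly one thread sees zero: all work finished, signal the waiter. */
    }
  }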
◆ HqAtomicIncrement
#define HqAtomicIncrement(ptr_, before_)
Value:
MACRO_START \
  before_ = _InterlockedIncrement(ptr_) - 1 ; \
MACRO_END
- Parameters
-
[in,out] | ptr_ | A pointer to an atomic counter to increment |
[out] | before_ | The name of an atomic counter variable that will be set to the value of the counter before this increment. |
Atomically increment the contents of *ptr_, and return the value before it was incremented in before_.
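Because before_ is unique to each caller, the macro can hand out distinct sequence numbers (a sketch; next_id is illustrative):

  #include "hqatomic.h"

  static hq_atomic_counter_t next_id = 0 ;

  hq_atomic_counter_t new_id(void)
  {
    hq_atomic_counter_t before ;
    HqAtomicIncrement(&next_id, before) ;
    return before ;  /* each caller receives a distinct value, from 0 upwards */
  }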
◆ HqAtomicMaximum
#define HqAtomicMaximum(ptr_, value_, after_)
Value:
MACRO_START \
  hq_atomic_counter_t _prev_, _value_ = (value_) ; \
  HqBool _done_ = FALSE ; \
  while ( !_done_ && (_prev_ = *(ptr_)) < _value_ ) { \
    HqAtomicCAS(ptr_, _prev_, _value_, _done_) ; \
  } \
  after_ = _done_ ? _value_ : _prev_ ; \
MACRO_END
- Parameters
-
[in,out] | ptr_ | A pointer to an atomic counter |
[in] | value_ | A value to compare against the atomic counter stored in *ptr_. |
[out] | after_ | The name of an atomic counter variable that will be set to the maximum of *ptr_ and value_. |
Atomically ensure that the value of *ptr_ is the maximum of *ptr_ and value_, and return the maximum in after_.
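A typical use is tracking a high-water mark across threads (a sketch; the usage counter is illustrative):

  #include "hqatomic.h"

  static hq_atomic_counter_t peak_usage = 0 ;

  void note_usage(hq_atomic_counter_t current)
  {
    hq_atomic_counter_t peak ;
    HqAtomicMaximum(&peak_usage, current, peak) ;
    /* peak now holds the larger of the previous peak and current. */
  }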
◆ HqAtomicMinimum
#define HqAtomicMinimum(ptr_, value_, after_)
Value:
MACRO_START \
  hq_atomic_counter_t _prev_, _value_ = (value_) ; \
  HqBool _done_ = FALSE ; \
  while ( !_done_ && (_prev_ = *(ptr_)) > _value_ ) { \
    HqAtomicCAS(ptr_, _prev_, _value_, _done_) ; \
  } \
  after_ = _done_ ? _value_ : _prev_ ; \
MACRO_END
- Parameters
-
[in,out] | ptr_ | A pointer to an atomic counter |
[in] | value_ | A value to compare against the atomic counter stored in *ptr_. |
[out] | after_ | The name of an atomic counter variable that will be set to the minimum of *ptr_ and value_. |
Atomically ensure that the value of *ptr_ is the minimum of *ptr_ and value_, and return the minimum in after_.
◆ spin_pointer_without_lock
#define spin_pointer_without_lock(ptr_, uptr_)
Value:
MACRO_START \
  HQASSERT(sizeof(*(ptr_)) > 1 && sizeof(*(ptr_)) == sizeof(*(uptr_)), \
           "Spinlock pointer type alignment invalid") ; \
  uptr_ = (void *)((intptr_t)(ptr_) & ~(intptr_t)1) ; \
MACRO_END
Get the value of a potentially spinlocked pointer in its unlocked state.
- Parameters
-
[in] | ptr_ | The pointer that may be spinlocked. |
[out] | uptr_ | The pointer without the spinlock mark. |
Some data structure algorithms can use atomic compare and swap directly to update pointers. The pointers may also be subject to spinlocks for data structure mutation. spin_pointer_without_lock() gets the value of a pointer without the spinlock mark. This can be used to prepare pointer values so that atomic updates can be done when a spinlock is unlocked.
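A sketch of that preparation step (struct item is hypothetical; hqspin.h is assumed to be on the include path):

  #include "hqspin.h"

  struct item { struct item *next ; int value ; } ;

  struct item *unmarked(struct item *maybe_locked)
  {
    struct item *clean ;
    spin_pointer_without_lock(maybe_locked, clean) ;
    return clean ;  /* the same pointer with the low lock bit cleared */
  }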
◆ spinlock_counter
#define spinlock_counter(addr_, count_)
Lock a counter semaphore using atomic operations.
- Parameters
-
[in,out] | addr_ | The address of the counter to lock. |
[in] | count_ | Number of spin cycles before each processor yield. |
Semaphore counters are locked when non-zero, unlocked when zero. spinlock_counter() will wait until a counter is zero, and lock it using the value 1. The semaphore counter may be test-locked by using an atomic increment, testing if the value before increment was zero, and decrementing the semaphore to release it.
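The test-lock idiom described above might look like this (a sketch; the semaphore variable is illustrative):

  #include "hqatomic.h"
  #include "hqspin.h"

  static hq_atomic_counter_t sema = 0 ; /* zero means unlocked */

  HqBool try_lock_sema(void)
  {
    hq_atomic_counter_t before ;
    HqAtomicIncrement(&sema, before) ;
    if ( before != 0 ) {          /* already held: back our increment out */
      spinunlock_counter(&sema) ;
      return FALSE ;
    }
    return TRUE ;                 /* locked; release with spinunlock_counter() */
  }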
◆ spinlock_pointer
#define spinlock_pointer(addr_, locked_, count_)
Value:
MACRO_START \
  HQASSERT(sizeof(**(addr_)) > 1 && sizeof(**(addr_)) == sizeof(*(locked_)), \
           "Spinlock pointer type alignment invalid") ; \
  spinlock_pointer_incomplete(addr_, locked_, count_) ; \
MACRO_END
Lock a pointer using atomic operations.
- Parameters
-
[in,out] | addr_ | The address of the pointer to lock. |
[out] | locked_ | The pointer value to dereference in the locked section. |
[in] | count_ | Number of spin cycles before each processor yield. |
spinlock_pointer() uses the lowest bit as lock mark, so is only valid for halfword or greater aligned pointers. It modifies the stored value of the pointer in memory, and loads a dereferencable version of the pointer into a variable. Within the locked section, code must use the dereferencable version of the pointer, and not re-load the original pointer from memory.
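A minimal locked section might look like this (a sketch: struct item is hypothetical, list_head is assumed non-NULL and initialised elsewhere, and 10 is an arbitrary spin count):

  #include "hqspin.h"

  struct item { struct item *next ; int value ; } ;
  static struct item *list_head ;

  void set_first_value(int value)
  {
    struct item *locked ;
    spinlock_pointer(&list_head, locked, 10) ;
    locked->value = value ;   /* use the dereferencable copy, never list_head */
    spinunlock_pointer(&list_head, locked) ;
  }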
◆ spinlock_pointer_incomplete
#define spinlock_pointer_incomplete(addr_, locked_, count_)
Lock a pointer using atomic operations.
- Note
- This macro should only be used for incomplete pointer types; if the type is known, use spinlock_pointer() instead to get extra checks. You must not use spinlocking on pointers to char or uint8.
- Parameters
-
[in,out] | addr_ | The address of the pointer to lock. |
[out] | locked_ | The pointer value to dereference in the locked section. |
[in] | count_ | Number of spin cycles before each processor yield. |
spinlock_pointer() uses the lowest bit as lock mark, so is only valid for halfword or greater aligned pointers. It modifies the stored value of the pointer in memory, and loads a dereferencable version of the pointer into a variable. Within the locked section, code must use the dereferencable version of the pointer, and not re-load the original pointer from memory.
◆ spintrylock_pointer
#define spintrylock_pointer(addr_, locked_, didlock_)
Try to lock a pointer using atomic operations.
- Parameters
-
[in,out] | addr_ | The address of the pointer to lock. |
[out] | locked_ | The pointer value to dereference in the locked section, only set if didlock_ is TRUE. |
[out] | didlock_ | A Boolean to indicate whether the lock was taken. |
This is like spinlock_pointer(), except that it doesn't spin if it can't get the lock, it just returns with didlock_ set to FALSE.
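A sketch of an opportunistic update (struct item is hypothetical, and shared is assumed non-NULL and initialised elsewhere):

  #include "hqspin.h"

  struct item { int value ; } ;
  static struct item *shared ;

  HqBool try_update(int value)
  {
    struct item *locked ;
    HqBool didlock ;
    spintrylock_pointer(&shared, locked, didlock) ;
    if ( !didlock )
      return FALSE ;          /* contended; the caller can retry later */
    locked->value = value ;
    spinunlock_pointer(&shared, locked) ;
    return TRUE ;
  }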
◆ spinunlock_counter
#define spinunlock_counter(addr_)
Value:
MACRO_START \
  hq_atomic_counter_t _after_ ; \
  HqAtomicDecrement((addr_), _after_) ; \
  HQASSERT(_after_ >= 0, "Counter semaphore was not locked") ; \
MACRO_END
Unlock a semaphore counter using atomic operations.
- Parameters
-
[out] | addr_ | The address of the counter to unlock. |
◆ spinunlock_pointer
#define spinunlock_pointer(addr_, unlocked_)
Value:
MACRO_START \
  HQASSERT(sizeof(**(addr_)) > 1, "Spinlock pointer type alignment invalid") ; \
  spinunlock_pointer_incomplete(addr_, unlocked_) ; \
MACRO_END
Unlock a pointer using atomic operations.
- Parameters
-
[out] | addr_ | The address of the pointer to unlock. |
[in] | unlocked_ | The new value of the unlocked pointer. |
spinunlock_pointer() unlocks a pointer that was locked using spinlock_pointer(). The lowest bit of the pointer is used as lock mark, so this is only valid for halfword or greater aligned pointers. The new value of the unlocked pointer will usually be the dereferencable pointer value saved by spinlock_pointer(), but it may also be a different pointer of the correct type. This can be used to safely replace objects, by locking the only pointer reference to the object, but unlocking with NULL or a different object pointer.
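The replacement idiom might look like this (a sketch: struct item is hypothetical, current_item is assumed non-NULL, and 10 is an arbitrary spin count):

  #include "hqspin.h"

  struct item { int value ; } ;
  static struct item *current_item ;

  void replace_item(struct item *replacement)
  {
    struct item *old ;
    spinlock_pointer(&current_item, old, 10) ;
    spinunlock_pointer(&current_item, replacement) ; /* publish the new object */
    /* old is no longer reachable via current_item; free it once no other
       thread still holds a reference to it. */
  }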
◆ spinunlock_pointer_incomplete
#define spinunlock_pointer_incomplete(addr_, unlocked_)
Value:
MACRO_START \
  HqBool _didcas_ ; \
  HQASSERT(((intptr_t)*(addr_) & 1) != 0, "Pointer is not currently locked") ; \
  HqAtomicCASPointer(addr_, *(addr_), unlocked_, _didcas_, spinlock_void_ptr) ; \
  HQASSERT(_didcas_, "Failed to unlock spinlock") ; \
  UNUSED_VARIABLE(HqBool, _didcas_) ; \
MACRO_END
Unlock a pointer using atomic operations.
- Note
- This macro should only be used for incomplete pointer types; if the type is known, use spinunlock_pointer() instead to get extra checks. You must not use spinlocking on pointers to char or uint8.
- Parameters
-
[out] | addr_ | The address of the pointer to unlock. |
[in] | unlocked_ | The new value of the unlocked pointer. |
spinunlock_pointer() unlocks a pointer that was locked using spinlock_pointer(). The lowest bit of the pointer is used as lock mark, so this is only valid for halfword or greater aligned pointers. The new value of the unlocked pointer will usually be the dereferencable pointer value saved by spinlock_pointer(), but it may also be a different pointer of the correct type. This can be used to safely replace objects, by locking the only pointer reference to the object, but unlocking with NULL or a different object pointer.
◆ hq_atomic_counter_t
An integer type suitable for the atomic increment and decrement.
◆ anonymous enum
Constants for yields per spinlock cycle.
Enumerator
HQSPIN_YIELD_NEVER | Practically never yield processor.
HQSPIN_YIELD_ALWAYS | Yield processor every spinlock cycle.