remove macros for atomic operations
parent 0640032505
commit b3b6622259
@@ -132,19 +132,13 @@ static inline u64 pCPUTimerRead();
 
 // ::Platform::Atomics::Header::
 
-static inline void AtomicSignalFenceSeqCst();
-
-#define DefSigAtomicFetchIncr(T) static inline T p##T##AtomicFetchIncr(T volatile *ptr)
-#define DefSigAtomicFetchSub(T) static inline T p##T##AtomicFetchSub(T volatile *ptr, T count)
-#define DefSigAtomicIncr(T) static inline void p##T##AtomicIncr(T volatile *ptr)
-#define DefSigAtomicStore(T) static inline void p##T##AtomicStore(T volatile *ptr, T value)
-#define DefSigAtomicLoad(T) static inline T p##T##AtomicLoad(T volatile *ptr)
-#define DefSigAtomicCompareExchange(T) static inline b32 p##T##AtomicCompareExchange(T volatile *ptr, T *expected, T desired)
-
-DefScalarSig(AtomicFetchIncr);
-DefScalarSig(AtomicFetchSub);
-DefScalarSig(AtomicIncr);
-DefScalarSig(AtomicStore);
-DefScalarSig(AtomicLoad);
-DefScalarSig(AtomicCompareExchange);
+static inline void pAtomicSignalFenceSeqCst();
+static inline u8 pAtomicFetchSubU8(u8 volatile *ptr, u8 count);
+static inline u32 pAtomicFetchSubU32(u32 volatile *ptr, u32 count);
+static inline u32 pAtomicFetchIncrU32(u32 volatile *ptr);
+static inline void pAtomicIncrU8(u8 volatile *ptr);
+static inline void pAtomicIncrU32(u32 volatile *ptr);
+static inline u32 AtomicLoadU32(u32 volatile *ptr);
+static inline void pAtomicStoreB32(b32 volatile *ptr, b32 value);
+static inline b32 pAtomicCompareExchangeB32(b32 volatile *ptr, b32 *expect, b32 desired);
 
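Note on the removed token-pasting scheme: each DefSig macro declared one wrapper per scalar type T by pasting p##T##AtomicX, and DefScalarSig then applied it across the scalar set. A hand-expanded sketch (assuming DefScalarSig instantiated at least u8, u32, and b32, the types used at the call sites below):

    /* hypothetical expansion of DefScalarSig(AtomicFetchIncr); not part of the commit */
    static inline u8  pu8AtomicFetchIncr(u8 volatile *ptr);   /* T = u8  */
    static inline u32 pu32AtomicFetchIncr(u32 volatile *ptr); /* T = u32 */

The commit swaps those generated names for hand-written ones with the type suffix at the end (pu32AtomicFetchIncr becomes pAtomicFetchIncrU32), so each symbol now appears literally in the source.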
@@ -87,40 +87,3 @@ pKeyboardInput pInputEventConvert(u32 x_key);
 
 b32 pSyscallErrCheck(void *ptr);
 
-// ::Platform::Linux::Atomics::Header::
-
-#define DefAtomicFetchIncr(T) \
-    static inline T p##T##AtomicFetchIncr(T volatile *ptr) \
-    { \
-        return __atomic_fetch_add(ptr, (T)1, __ATOMIC_ACQUIRE); \
-    }
-
-#define DefAtomicIncr(T) \
-    static inline void p##T##AtomicIncr(T volatile *ptr) \
-    { \
-        __atomic_fetch_add(ptr, (T)1, __ATOMIC_RELEASE); \
-    }
-
-#define DefAtomicStore(T) \
-    static inline void p##T##AtomicStore(T volatile *ptr, T value) \
-    { \
-        __atomic_store_n(ptr, value, __ATOMIC_RELEASE); \
-    }
-
-#define DefAtomicLoad(T) \
-    static inline T p##T##AtomicLoad(T volatile *ptr) \
-    { \
-        return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); \
-    }
-
-#define DefAtomicFetchSub(T) \
-    static inline T p##T##AtomicFetchSub(T volatile *ptr, T count) \
-    { \
-        return __atomic_fetch_sub(ptr, count, __ATOMIC_ACQUIRE); \
-    }
-
-#define DefAtomicCompareExchange(T) \
-    static inline b32 p##T##AtomicCompareExchange(T volatile *ptr, T *expected, T desired) \
-    { \
-        return __atomic_compare_exchange_n(ptr, expected, desired, true, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); \
-    }
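For reference, each deleted definition macro stamped out a small inline wrapper over a GCC/Clang __atomic builtin; expanding DefAtomicFetchIncr(u32) by hand gives:

    /* hand expansion of the deleted DefAtomicFetchIncr(u32), for reference */
    static inline u32 pu32AtomicFetchIncr(u32 volatile *ptr)
    {
        return __atomic_fetch_add(ptr, (u32)1, __ATOMIC_ACQUIRE); /* add 1, return old value */
    }

The explicit functions added in the next hunk keep these exact bodies, so the generated code is unchanged; only the way the per-type variants are produced differs.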
@@ -334,16 +334,49 @@ static inline u64 pCPUTimerRead()
 
 // ::Platform::Atomics::Functions::Start::
 
-static inline void AtomicSignalFenceSeqCst()
+static inline void pAtomicSignalFenceSeqCst()
 {
     __atomic_signal_fence(__ATOMIC_SEQ_CST);
 }
 
-DefScalarImpl(AtomicFetchIncr);
-DefScalarImpl(AtomicIncr);
-DefScalarImpl(AtomicStore);
-DefScalarImpl(AtomicLoad);
-DefScalarImpl(AtomicFetchSub);
-DefScalarImpl(AtomicCompareExchange);
+static inline u8 pAtomicFetchSubU8(u8 volatile *ptr, u8 count)
+{
+    return __atomic_fetch_sub(ptr, count, __ATOMIC_ACQUIRE);
+}
+
+static inline u32 pAtomicFetchSubU32(u32 volatile *ptr, u32 count)
+{
+    return __atomic_fetch_sub(ptr, count, __ATOMIC_ACQUIRE);
+}
+
+static inline u32 pAtomicFetchIncrU32(u32 volatile *ptr)
+{
+    return __atomic_fetch_add(ptr, (u32)1, __ATOMIC_ACQUIRE);
+}
+
+static inline void pAtomicIncrU8(u8 volatile *ptr)
+{
+    __atomic_fetch_add(ptr, (u8)1, __ATOMIC_RELEASE);
+}
+
+static inline void pAtomicIncrU32(u32 volatile *ptr)
+{
+    __atomic_fetch_add(ptr, (u32)1, __ATOMIC_RELEASE);
+}
+
+static inline u32 AtomicLoadU32(u32 volatile *ptr)
+{
+    return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+static inline void pAtomicStoreB32(b32 volatile *ptr, b32 value)
+{
+    __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+static inline b32 pAtomicCompareExchangeB32(b32 volatile *ptr, b32 *expected, b32 desired)
+{
+    return __atomic_compare_exchange_n(ptr, expected, desired, true, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+}
 
 // ::Platform::Atomics::Functions::End::
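The memory-order choices are consistent across the set: operations that return a value (loads, fetch_add/fetch_sub) use __ATOMIC_ACQUIRE, write-only operations (stores, void increments) use __ATOMIC_RELEASE, and the compare-exchange uses acquire on success with a relaxed failure order. A minimal usage sketch of the release/acquire pairing (hypothetical example; g_ready, g_value, Publish, and TryConsume are not repo names):

    static b32 g_ready;  /* assumed 32-bit boolean typedef */
    static u32 g_value;

    static void Publish(void)
    {
        g_value = 42;                  /* plain write...                       */
        pAtomicStoreB32(&g_ready, 1);  /* ...made visible by the release store */
    }

    static u32 TryConsume(void)
    {
        b32 expect = 1;
        /* acquire CAS: claim the flag, then safely read the published value */
        if (pAtomicCompareExchangeB32(&g_ready, &expect, 0))
            return g_value;
        return 0;
    }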
@@ -433,7 +433,7 @@ static void vTextureCleanUp()
         }
     }
 
-    AtomicSignalFenceSeqCst();
+    pAtomicSignalFenceSeqCst();
 }
 
 static void vImagePush(TextureAsset asset_id, vImageView *view)
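Worth noting: pAtomicSignalFenceSeqCst wraps __atomic_signal_fence, a compiler-only barrier; it forbids compile-time reordering across the call but emits no CPU fence instruction. If cross-thread hardware ordering were needed at a point like this, the thread-fence builtin would be the stronger tool:

    __atomic_signal_fence(__ATOMIC_SEQ_CST); /* compiler barrier only; no instruction emitted */
    __atomic_thread_fence(__ATOMIC_SEQ_CST); /* also orders the CPU (e.g. MFENCE on x86)      */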
@@ -1887,12 +1887,12 @@ void *vLoaderStart(void *i)
         }
         else if (job_count == 0)
         {
-            pu8AtomicIncr(&v_Renderer.async.sleeping);
+            pAtomicIncrU8(&v_Renderer.async.sleeping);
             TicketMutUnlock(&v_Renderer.upload.mut);
             pthread_mutex_lock(&mut);
             pthread_cond_wait(&v_Renderer.async.cond, &mut);
             pthread_mutex_unlock(&mut);
-            pu8AtomicFetchSub(&v_Renderer.async.sleeping, 1);
+            pAtomicFetchSubU8(&v_Renderer.async.sleeping, 1);
         }
         else
        {
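The sleep path publishes the worker's state before blocking: the u8 sleeping counter is incremented before pthread_cond_wait and decremented after waking. The wake side is not part of this diff; a hypothetical sketch of what a submitting thread might do (NotifyLoaders and the check on sleeping are assumptions, not repo code):

    /* hypothetical wake-side sketch; not shown in this commit */
    static void NotifyLoaders(void)
    {
        if (v_Renderer.async.sleeping > 0)                  /* any loader parked?   */
            pthread_cond_broadcast(&v_Renderer.async.cond); /* wake them all        */
    }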
src/util.c (24 changed lines)
@@ -305,59 +305,59 @@ static inline void EndProfileBlock(ProfileBlock *block)
 static inline b32 MutTryLock(Mut *mut)
 {
     b32 lock = false;
-    return pb32AtomicCompareExchange(&mut->lock, &lock, 1);
+    return pAtomicCompareExchangeB32(&mut->lock, &lock, 1);
 }
 
 static inline void MutUnlock(Mut *mut)
 {
-    pb32AtomicStore(&mut->lock, 0);
+    pAtomicStoreB32(&mut->lock, 0);
 }
 
 static inline void TicketMutLock(TicketMut *mut)
 {
-    u32 ticket = pu32AtomicFetchIncr(&mut->ticket);
+    u32 ticket = pAtomicFetchIncrU32(&mut->ticket);
     while (ticket != mut->next_ticket);
 }
 
 static inline void TicketMutUnlock(TicketMut *mut)
 {
-    pu32AtomicIncr(&mut->next_ticket);
+    pAtomicIncrU32(&mut->next_ticket);
 }
 
 static inline u32 JobQueueAdd(JobQueue *queue, u32 count)
 {
-    u32 job_idx = pu32AtomicFetchIncr(&queue->queued);
-    pu32AtomicFetchIncr(&queue->remaining);
+    u32 job_idx = pAtomicFetchIncrU32(&queue->queued);
+    pAtomicFetchIncrU32(&queue->remaining);
 
     return job_idx;
 }
 
 static inline u32 JobQueueGetCount(JobQueue *queue)
 {
-    return pu32AtomicLoad(&queue->queued);
+    return AtomicLoadU32(&queue->queued);
 }
 
 static inline void JobQueueMarkUnqueued(JobQueue *queue, u32 count)
 {
     Assert(queue->queued != 0, "queue queued is 0 before trying to mark dequeued");
-    pu32AtomicFetchSub(&queue->queued, count);
+    pAtomicFetchSubU32(&queue->queued, count);
 }
 
 static inline void JobQueueMarkCompleted(JobQueue *queue, u32 count)
 {
     Assert(queue->remaining != 0, "queue remaining is 0 before trying to mark completed");
-    pu32AtomicFetchSub(&queue->remaining, count);
+    pAtomicFetchSubU32(&queue->remaining, count);
 }
 
 static inline void JobQueueReset(JobQueue *queue)
 {
-    pu32AtomicFetchSub(&queue->queued, queue->queued);
-    pu32AtomicFetchSub(&queue->remaining, queue->remaining);
+    pAtomicFetchSubU32(&queue->queued, queue->queued);
+    pAtomicFetchSubU32(&queue->remaining, queue->remaining);
 }
 
 static inline b32 JobQueueCompleted(JobQueue *queue)
 {
-    u32 remaining = pu32AtomicLoad(&queue->remaining);
+    u32 remaining = AtomicLoadU32(&queue->remaining);
     return remaining == 0;
 }
 
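The util.c hunk is a pure rename pass over the lock and job-queue helpers. The ticket mutex itself is a textbook FIFO spinlock: lock takes a ticket with an acquire fetch-increment and spins until next_ticket reaches it; unlock release-increments next_ticket to admit the next waiter. A self-contained sketch of the same pattern using the raw builtins (the struct layout is assumed from the call sites, and the sketch spins on an explicit acquire load where TicketMutLock above re-reads next_ticket directly):

    /* standalone ticket-lock sketch mirroring TicketMutLock/TicketMutUnlock */
    typedef struct { unsigned volatile ticket, next_ticket; } TicketLockSketch;

    static inline void SketchLock(TicketLockSketch *m)
    {
        unsigned t = __atomic_fetch_add(&m->ticket, 1u, __ATOMIC_ACQUIRE); /* take a ticket */
        while (__atomic_load_n(&m->next_ticket, __ATOMIC_ACQUIRE) != t)
            ;                                                              /* wait for our turn */
    }

    static inline void SketchUnlock(TicketLockSketch *m)
    {
        __atomic_fetch_add(&m->next_ticket, 1u, __ATOMIC_RELEASE); /* admit the next waiter */
    }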