Use asm to implement some atomic functions

Asm implementations are provided for i386/x86_64/arm/aarch64/riscv:

 __atomic_load_[1,2,4,8]
 __atomic_store_[1,2,4,8]
 __atomic_compare_exchange_[1,2,4,8]
 __atomic_test_and_set_[1,2,4,8]
 atomic_thread_fence
 atomic_signal_fence
 atomic_flag_test_and_set
 atomic_flag_test_and_set_explicit
 atomic_flag_clear
 atomic_flag_clear_explicit

`atomic.S` is extracted from `gcc` v15.1.0 `libatomic.a` and generated by https://github.com/kbkpbot/gen_atomic.git.
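For context, a minimal sketch of how these entry points are reached from ordinary C. The mapping comments are assumptions about how tcc lowers the generic `<stdatomic.h>` operations to the out-of-line `__atomic_*` calls; gcc/clang will usually inline such operations instead.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    _Atomic unsigned int counter = 0;
    unsigned int expected = 0;

    /* expected to lower to __atomic_compare_exchange_4 */
    bool ok = atomic_compare_exchange_strong(&counter, &expected, 42u);

    /* expected to lower to __atomic_load_4 / __atomic_store_4 */
    atomic_store(&counter, atomic_load(&counter) + 1);

    printf("cas ok=%d counter=%u\n", ok, atomic_load(&counter));
    return 0;
}
```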
Author: kbkpbot
Date: 2025-06-24 22:40:07 +08:00
Parent: 3f06cb0f2e
Commit: 7f13f24e12
3 changed files with 2375 additions and 858 deletions


@@ -80,16 +80,6 @@ typedef struct {
#define ATOMIC_FLAG_INIT {0}
#define ATOMIC_VAR_INIT(value) (value)
#define atomic_flag_test_and_set_explicit(object, order) \
__atomic_test_and_set((void *)(&((object)->value)), order)
#define atomic_flag_test_and_set(object) \
atomic_flag_test_and_set_explicit(object, __ATOMIC_SEQ_CST)
#define atomic_flag_clear_explicit(object, order) \
__atomic_clear((bool *)(&((object)->value)), order)
#define atomic_flag_clear(object) \
atomic_flag_clear_explicit(object, __ATOMIC_SEQ_CST)
/* Generic routines */
#define atomic_init(object, desired) \
atomic_store_explicit(object, desired, __ATOMIC_RELAXED)
@@ -167,15 +157,15 @@ typedef struct {
#define atomic_fetch_and_explicit __atomic_fetch_and
extern void atomic_thread_fence (memory_order);
extern void __atomic_thread_fence (memory_order);
#define atomic_thread_fence(order) __atomic_thread_fence (order)
#define __atomic_thread_fence(order) atomic_thread_fence (order)
extern void atomic_signal_fence (memory_order);
extern void __atomic_signal_fence (memory_order);
#define __atomic_signal_fence(order) atomic_signal_fence(order)
#define atomic_signal_fence(order) __atomic_signal_fence (order)
extern bool __atomic_is_lock_free(size_t size, void *ptr);
#define atomic_is_lock_free(OBJ) __atomic_is_lock_free (sizeof (*(OBJ)), (OBJ))
extern bool __atomic_test_and_set (void *, memory_order);
extern void __atomic_clear (bool *, memory_order);
extern bool atomic_flag_test_and_set(void *object);
extern bool atomic_flag_test_and_set_explicit(void *object, memory_order order);
extern void atomic_flag_clear(void *object);
extern void atomic_flag_clear_explicit(void *object, memory_order order);
#endif /* _STDATOMIC_H */
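With this change the `atomic_flag` operations are no longer macros over the flag's `value` member but real functions provided by `atomic.S`. A minimal usage sketch, assuming nothing beyond the declarations above (the spin_lock/spin_unlock helpers are illustrative):

```c
#include <stdatomic.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

static void spin_lock(void)
{
    /* atomic_flag_test_and_set returns the previous state:
       keep spinning while another thread already holds the flag */
    while (atomic_flag_test_and_set(&lock))
        ;
}

static void spin_unlock(void)
{
    atomic_flag_clear(&lock);
}
```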

File diff suppressed because it is too large.
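The suppressed file is presumably the newly generated `atomic.S`, which is assembly and too large to quote. As a behavioural sketch only (assuming the usual libatomic semantics, and using the gcc/clang `__atomic_exchange_n` builtin), the 1-byte test-and-set entry it provides acts roughly like:

```c
#include <stdbool.h>

/* Behavioural sketch of __atomic_test_and_set_1 (name and builtin-based
 * body are illustrative, not the generated code): atomically set the
 * byte at ptr and report whether it was already set. */
static bool sketch_test_and_set_1(volatile void *ptr, int memorder)
{
    (void)memorder; /* sketch: always sequentially consistent */
    unsigned char prev =
        __atomic_exchange_n((volatile unsigned char *)ptr, 1, __ATOMIC_SEQ_CST);
    return prev != 0;
}
```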


@@ -15,47 +15,6 @@
#define __ATOMIC_SEQ_CST 5
typedef __SIZE_TYPE__ size_t;
void __atomic_thread_fence(int memorder);
#define MemoryBarrier(memorder) __atomic_thread_fence(memorder)
#if defined __i386__ || defined __x86_64__
#define ATOMIC_COMPARE_EXCHANGE(TYPE, MODE, SUFFIX) \
bool __atomic_compare_exchange_##MODE \
(volatile void *atom, void *ref, TYPE xchg, \
bool weak, int success_memorder, int failure_memorder) \
{ \
TYPE rv; \
TYPE cmp = *(TYPE *)ref; \
__asm__ volatile( \
"lock cmpxchg" SUFFIX " %2,%1\n" \
: "=a" (rv), "+m" (*(TYPE *)atom) \
: "q" (xchg), "0" (cmp) \
: "memory" \
); \
*(TYPE *)ref = rv; \
return (rv == cmp); \
}
#else
#define ATOMIC_COMPARE_EXCHANGE(TYPE, MODE, SUFFIX) \
extern bool __atomic_compare_exchange_##MODE \
(volatile void *atom, void *ref, TYPE xchg, \
bool weak, int success_memorder, int failure_memorder);
#endif
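For reference, instantiating the x86 macro above for the 4-byte case, ATOMIC_COMPARE_EXCHANGE(uint32_t, 4, "l"), expands roughly to the following (formatting mine; this is the C fallback that the commit replaces with `atomic.S`):

```c
bool __atomic_compare_exchange_4(volatile void *atom, void *ref, uint32_t xchg,
                                 bool weak, int success_memorder, int failure_memorder)
{
    uint32_t rv;
    uint32_t cmp = *(uint32_t *)ref;
    /* cmp is preloaded into EAX ("0"); lock cmpxchgl compares EAX with *atom,
       stores xchg on match, and leaves the old value of *atom in EAX (rv) */
    __asm__ volatile(
        "lock cmpxchgl %2,%1\n"
        : "=a" (rv), "+m" (*(uint32_t *)atom)
        : "q" (xchg), "0" (cmp)
        : "memory"
    );
    *(uint32_t *)ref = rv;
    return rv == cmp;
}
```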
#define ATOMIC_LOAD(TYPE, MODE) \
TYPE __atomic_load_##MODE(const volatile void *atom, int memorder) \
{ \
MemoryBarrier(__ATOMIC_ACQUIRE); \
return *(volatile TYPE *)atom; \
}
#define ATOMIC_STORE(TYPE, MODE) \
void __atomic_store_##MODE(volatile void *atom, TYPE value, int memorder) \
{ \
*(volatile TYPE *)atom = value; \
MemoryBarrier(__ATOMIC_ACQ_REL); \
}
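Expanded for the 4-byte case, the load/store fallbacks above pair a plain volatile access with a thread fence (a mechanical expansion of the macros, with MemoryBarrier spelled out):

```c
uint32_t __atomic_load_4(const volatile void *atom, int memorder)
{
    __atomic_thread_fence(__ATOMIC_ACQUIRE);   /* barrier, then plain load */
    return *(volatile uint32_t *)atom;
}

void __atomic_store_4(volatile void *atom, uint32_t value, int memorder)
{
    *(volatile uint32_t *)atom = value;        /* plain store, then barrier */
    __atomic_thread_fence(__ATOMIC_ACQ_REL);
}
```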
#define ATOMIC_GEN_OP(TYPE, MODE, NAME, OP, RET) \
TYPE __atomic_##NAME##_##MODE(volatile void *atom, TYPE value, int memorder) \
{ \
@@ -95,10 +54,7 @@ void __atomic_thread_fence(int memorder);
#define ATOMIC_FETCH_NAND(TYPE, MODE) \
ATOMIC_GEN_OP(TYPE, MODE, fetch_nand, ~(cmp & value), cmp)
#define ATOMIC_GEN(TYPE, SIZE, SUFFIX) \
ATOMIC_STORE(TYPE, SIZE) \
ATOMIC_LOAD(TYPE, SIZE) \
ATOMIC_COMPARE_EXCHANGE(TYPE, SIZE, SUFFIX) \
#define ATOMIC_GEN(TYPE, SIZE) \
ATOMIC_EXCHANGE(TYPE, SIZE) \
ATOMIC_ADD_FETCH(TYPE, SIZE) \
ATOMIC_SUB_FETCH(TYPE, SIZE) \
@@ -113,12 +69,10 @@ void __atomic_thread_fence(int memorder);
ATOMIC_FETCH_XOR(TYPE, SIZE) \
ATOMIC_FETCH_NAND(TYPE, SIZE)
ATOMIC_GEN(uint8_t, 1, "b")
ATOMIC_GEN(uint16_t, 2, "w")
ATOMIC_GEN(uint32_t, 4, "l")
#if defined __x86_64__ || defined __aarch64__ || defined __riscv
ATOMIC_GEN(uint64_t, 8, "q")
#endif
ATOMIC_GEN(uint8_t, 1)
ATOMIC_GEN(uint16_t, 2)
ATOMIC_GEN(uint32_t, 4)
ATOMIC_GEN(uint64_t, 8)
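The generated fetch-and-modify entry points can be exercised through `<stdatomic.h>`; a small sketch (again assuming the operations are lowered to the out-of-line calls rather than inlined):

```c
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    _Atomic unsigned int n = 0;

    unsigned int old  = atomic_fetch_add(&n, 5);   /* old == 0, n == 5  */
    unsigned int prev = atomic_exchange(&n, 1);    /* prev == 5, n == 1 */
    atomic_fetch_or(&n, 0x10);                     /* n == 0x11         */

    printf("old=%u prev=%u n=%u\n", old, prev, atomic_load(&n));
    return 0;
}
```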
/* uses alias to allow building with gcc/clang */
#ifdef __TINYC__
@@ -127,25 +81,6 @@ ATOMIC_GEN(uint64_t, 8, "q")
#define ATOMIC(x) __tcc_atomic_##x
#endif
void ATOMIC(signal_fence) (int memorder)
{
}
void ATOMIC(thread_fence) (int memorder)
{
#if defined __i386__
__asm__ volatile("lock orl $0, (%esp)");
#elif defined __x86_64__
__asm__ volatile("lock orq $0, (%rsp)");
#elif defined __arm__
__asm__ volatile(".int 0xee070fba"); // mcr p15, 0, r0, c7, c10, 5
#elif defined __aarch64__
__asm__ volatile(".int 0xd5033bbf"); // dmb ish
#elif defined __riscv
__asm__ volatile(".int 0x0ff0000f"); // fence iorw,iorw
#endif
}
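These fences back the usual C11 fence-based publish pattern; a sketch of the typical two-thread usage (the producer/consumer names and setup are illustrative, not from the commit):

```c
#include <stdatomic.h>

static int payload;
static _Atomic int ready;

void producer(void)
{
    payload = 42;
    atomic_thread_fence(memory_order_release);    /* order payload before flag */
    atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

int consumer(void)
{
    while (!atomic_load_explicit(&ready, memory_order_relaxed))
        ;                                         /* spin until published */
    atomic_thread_fence(memory_order_acquire);    /* order flag before payload */
    return payload;
}
```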
bool ATOMIC(is_lock_free) (unsigned long size, const volatile void *ptr)
{
bool ret;
@@ -165,7 +100,5 @@ bool ATOMIC(is_lock_free) (unsigned long size, const volatile void *ptr)
}
#ifndef __TINYC__
void __atomic_signal_fence(int memorder) __attribute__((alias("__tcc_atomic_signal_fence")));
void __atomic_thread_fence(int memorder) __attribute__((alias("__tcc_atomic_thread_fence")));
bool __atomic_is_lock_free(unsigned long size, const volatile void *ptr) __attribute__((alias("__tcc_atomic_is_lock_free")));
#endif
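Finally, the lock-freedom query is reached through the header's `atomic_is_lock_free` macro, which forwards `sizeof *obj` and the object pointer to `__atomic_is_lock_free`; a small sketch:

```c
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    _Atomic int small_obj = 0;
    _Atomic long long big_obj = 0;

    printf("int lock-free: %d, long long lock-free: %d\n",
           atomic_is_lock_free(&small_obj),
           atomic_is_lock_free(&big_obj));
    return 0;
}
```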