diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..6c649f0 --- /dev/null +++ b/Makefile @@ -0,0 +1,41 @@ +CC ?= cc +AR ?= ar +CFLAGS ?= -O2 + +PREFIX ?= /usr/local +LIBDIR ?= $(PREFIX)/lib + +SOBASE = libatomic.so +SONAME = $(SOBASE).1 +SHAREDLIB = $(SONAME).69.0 +STATICLIB = libatomic.a + +EXTRA_CFLAGS = -std=c99 -Wall -Wextra -fPIC + +OBJS = atomic.o + +all: $(SHAREDLIB) $(STATICLIB) + +.c.o: + $(CC) $(EXTRA_CFLAGS) $(CFLAGS) -c -o $@ $< + +$(SHAREDLIB): $(OBJS) + $(CC) $(OBJS) $(EXTRA_CFLAGS) $(CFLAGS) $(LDFLAGS) \ + -nolibc -shared -Wl,-soname,$(SONAME) -o $(SHAREDLIB) + +$(STATICLIB): $(OBJS) + $(AR) -rcs $(STATICLIB) $(OBJS) + +# no tests +check: + : + +clean: + rm -f $(OBJS) $(SHAREDLIB) $(STATICLIB) + +install: $(SHAREDLIB) $(STATICLIB) + install -d $(DESTDIR)$(LIBDIR) + install -m 755 $(SHAREDLIB) $(DESTDIR)$(LIBDIR) + install -m 755 $(STATICLIB) $(DESTDIR)$(LIBDIR) + ln -sf $(SHAREDLIB) $(DESTDIR)$(LIBDIR)/$(SOBASE) + ln -sf $(SHAREDLIB) $(DESTDIR)$(LIBDIR)/$(SONAME) diff --git a/README.md b/README.md new file mode 100644 index 0000000..37dfbfa --- /dev/null +++ b/README.md @@ -0,0 +1,16 @@ +# libatomic-chimera + +This is a replacement for GCC libatomic. It's ABI-compatible with libatomic +and provides the same compiler helpers and other APIs. + +It does not come with a header file. Its purposes are two: + +* to provide a compatibility library for existing binaries +* to provide emulation symbols for compiler intrinsics + +To build and install it, just use the supplied Makefile, or compile it by hand. +The Clang compiler is needed to build this; it may not compile with GCC. + +The code is based on `atomic.c` from LLVM's `compiler-rt`, and therefore is +available under the same license (Apache-2.0). Extra implementations have been +added for completeness/compatibility with GCC libatomic. diff --git a/atomic.c b/atomic.c new file mode 100644 index 0000000..343e48b --- /dev/null +++ b/atomic.c @@ -0,0 +1,428 @@ +// this is an implementation of libatomic created for Chimera Linux +// +// based on atomic.c from llvm's compiler-rt builtins +// +// missing implementations were filled in, some parts were rewritten +// +// changes by q66 +// +// provided under the same license as llvm (apache-2.0) +// +// original header follows: +// +//===-- atomic.c - Implement support functions for atomic operations.------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// atomic.c defines a set of functions for performing atomic accesses on +// arbitrary-sized memory locations. This design uses locks that should +// be fast in the uncontended case, for two reasons: +// +// 1) This code must work with C programs that do not link to anything +// (including pthreads) and so it should not depend on any pthread +// functions. 
+// 2) Atomic operations, rather than explicit mutexes, are most commonly used +// on code where contended operations are rare. +// +// To avoid needing a per-object lock, this code allocates an array of +// locks and hashes the object pointers to find the one that it should use. +// For operations that must be atomic on two locations, the lower lock is +// always acquired first, to avoid deadlock. +// +//===----------------------------------------------------------------------===// + +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> +#include <string.h> +#include <fenv.h> + +#define SPINLOCK_COUNT (1 << 10) + +static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1; + +/// lock implementation; clear and tas are guaranteed to be lock free + +typedef bool lock_t; + +static inline __attribute__((always_inline)) void unlock(lock_t *l) { + __atomic_clear(l, __ATOMIC_RELEASE); +} + +static inline __attribute__((always_inline)) void lock(lock_t *l) { + while (__atomic_test_and_set(l, __ATOMIC_ACQUIRE)); +} + +static lock_t locks[SPINLOCK_COUNT]; + +static inline __attribute__((always_inline)) lock_t *lock_for_pointer(void *ptr) { + intptr_t hash = (intptr_t)ptr; + // Disregard the lowest 4 bits. We want all values that may be part of the + // same memory operation to hash to the same value and therefore use the same + // lock. + hash >>= 4; + // Use the next bits as the basis for the hash + intptr_t low = hash & SPINLOCK_MASK; + // Now use the high(er) set of bits to perturb the hash, so that we don't + // get collisions from atomic fields in a single object + hash >>= 16; + hash ^= low; + // Return a pointer to the word to use + return locks + (hash & SPINLOCK_MASK); +} + +/// Macros for determining whether a size is lock free. +#define ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(size, p) \ + (__atomic_always_lock_free(size, p) || \ + (__atomic_always_lock_free(size, 0) && ((uintptr_t)p % size) == 0)) + +#define IS_LOCK_FREE_1(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(1, p) +#define IS_LOCK_FREE_2(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(2, p) +#define IS_LOCK_FREE_4(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(4, p) +#define IS_LOCK_FREE_8(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(8, p) +#define IS_LOCK_FREE_16(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(16, p) + +/// Macro that calls the compiler-generated lock-free versions of functions +/// when they exist. +#define TRY_LOCK_FREE_CASE(n, type, ptr) \ + case n: \ + if (IS_LOCK_FREE_##n(ptr)) { \ + LOCK_FREE_ACTION(type); \ + } \ + break; +#ifdef __SIZEOF_INT128__ +#define TRY_LOCK_FREE_CASE_16(p) TRY_LOCK_FREE_CASE(16, __uint128_t, p) +#else +#define TRY_LOCK_FREE_CASE_16(p) /* __uint128_t not available */ +#endif + +#define LOCK_FREE_CASES(ptr) \ + do { \ + switch (size) { \ + TRY_LOCK_FREE_CASE(1, uint8_t, ptr) \ + TRY_LOCK_FREE_CASE(2, uint16_t, ptr) \ + TRY_LOCK_FREE_CASE(4, uint32_t, ptr) \ + TRY_LOCK_FREE_CASE(8, uint64_t, ptr) \ + TRY_LOCK_FREE_CASE_16(ptr) /* __uint128_t may not be supported */ \ + default: \ + break; \ + } \ + } while (0) + +/// Whether atomic operations for the given size (and alignment) are lock-free. +#pragma redefine_extname __atomic_is_lock_free_c __atomic_is_lock_free +bool __atomic_is_lock_free_c(size_t size, void *ptr) { +#define LOCK_FREE_ACTION(type) return true; + LOCK_FREE_CASES(ptr); +#undef LOCK_FREE_ACTION + return false; +} + +/// An atomic load operation. This is atomic with respect to the source +/// pointer only.
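+/// (The redefine_extname pragma below exports this definition under the public +/// libatomic symbol name __atomic_load; the same renaming is used for the other +/// generic entry points in this file.)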
+#pragma redefine_extname __atomic_load_c __atomic_load +void __atomic_load_c(int size, void *src, void *dest, int model) { +#define LOCK_FREE_ACTION(type) \ + *((type *)dest) = __atomic_load_n((type *)src, model); \ + return; + LOCK_FREE_CASES(src); +#undef LOCK_FREE_ACTION + lock_t *l = lock_for_pointer(src); + lock(l); + memcpy(dest, src, size); + unlock(l); +} + +/// An atomic store operation. This is atomic with respect to the destination +/// pointer only. +#pragma redefine_extname __atomic_store_c __atomic_store +void __atomic_store_c(int size, void *dest, void *src, int model) { +#define LOCK_FREE_ACTION(type) \ + __atomic_store_n((type *)dest, *(type *)src, model); \ + return; + LOCK_FREE_CASES(dest); +#undef LOCK_FREE_ACTION + lock_t *l = lock_for_pointer(dest); + lock(l); + memcpy(dest, src, size); + unlock(l); +} + +/// Atomic compare and exchange operation. If the value at *ptr is identical +/// to the value at *expected, then this copies value at *desired to *ptr. If +/// they are not, then this stores the current value from *ptr in *expected. +/// +/// This function returns 1 if the exchange takes place or 0 if it fails. +#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange +int __atomic_compare_exchange_c(int size, void *ptr, void *expected, + void *desired, int success, int failure) { +#define LOCK_FREE_ACTION(type) \ + return __atomic_compare_exchange_n( \ + (type *)ptr, (type *)expected, *(type *)desired, 0, success, \ + failure) + LOCK_FREE_CASES(ptr); +#undef LOCK_FREE_ACTION + lock_t *l = lock_for_pointer(ptr); + lock(l); + if (memcmp(ptr, expected, size) == 0) { + memcpy(ptr, desired, size); + unlock(l); + return 1; + } + memcpy(expected, ptr, size); + unlock(l); + return 0; +} + +/// Performs an atomic exchange operation between two pointers. This is atomic +/// with respect to the target address. +#pragma redefine_extname __atomic_exchange_c __atomic_exchange +void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) { +#define LOCK_FREE_ACTION(type) \ + *(type *)old = \ + __atomic_exchange_n((type *)ptr, *(type *)val, model); \ + return; + LOCK_FREE_CASES(ptr); +#undef LOCK_FREE_ACTION + lock_t *l = lock_for_pointer(ptr); + lock(l); + memcpy(old, ptr, size); + memcpy(ptr, val, size); + unlock(l); +} + +//////////////////////////////////////////////////////////////////////////////// +// Where the size is known at compile time, the compiler may emit calls to +// specialised versions of the above functions. 
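+// For example, when an aligned 4-byte object cannot be handled inline, the +// compiler may emit a call to __atomic_load_4(ptr, model) instead of the +// generic __atomic_load; the OPTIMISED_CASE expansions below provide those +// fixed-size entry points.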
+//////////////////////////////////////////////////////////////////////////////// +#ifdef __SIZEOF_INT128__ +#define OPTIMISED_CASES \ + OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \ + OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \ + OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \ + OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t) \ + OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t) +#else +#define OPTIMISED_CASES \ + OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \ + OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \ + OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \ + OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t) +#endif + +#define OPTIMISED_CASE(n, lockfree, type) \ + type __atomic_load_##n(type *src, int model) { \ + if (lockfree(src)) \ + return __atomic_load_n(src, model); \ + lock_t *l = lock_for_pointer(src); \ + lock(l); \ + type val = *src; \ + unlock(l); \ + return val; \ + } +OPTIMISED_CASES +#undef OPTIMISED_CASE + +#define OPTIMISED_CASE(n, lockfree, type) \ + void __atomic_store_##n(type *dest, type val, int model) { \ + if (lockfree(dest)) { \ + __atomic_store_n(dest, val, model); \ + return; \ + } \ + lock_t *l = lock_for_pointer(dest); \ + lock(l); \ + *dest = val; \ + unlock(l); \ + return; \ + } +OPTIMISED_CASES +#undef OPTIMISED_CASE + +#define OPTIMISED_CASE(n, lockfree, type) \ + type __atomic_exchange_##n(type *dest, type val, int model) { \ + if (lockfree(dest)) \ + return __atomic_exchange_n(dest, val, model); \ + lock_t *l = lock_for_pointer(dest); \ + lock(l); \ + type tmp = *dest; \ + *dest = val; \ + unlock(l); \ + return tmp; \ + } +OPTIMISED_CASES +#undef OPTIMISED_CASE + +#define OPTIMISED_CASE(n, lockfree, type) \ + bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \ + int success, int failure) { \ + if (lockfree(ptr)) \ + return __atomic_compare_exchange_n( \ + ptr, expected, desired, 0, success, failure); \ + lock_t *l = lock_for_pointer(ptr); \ + lock(l); \ + if (*ptr == *expected) { \ + *ptr = desired; \ + unlock(l); \ + return true; \ + } \ + *expected = *ptr; \ + unlock(l); \ + return false; \ + } +OPTIMISED_CASES +#undef OPTIMISED_CASE + +//////////////////////////////////////////////////////////////////////////////// +// Atomic read-modify-write operations for integers of various sizes. 
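+// For each size N this provides both __atomic_fetch_<op>_N and +// __atomic_<op>_fetch_N (returning the old and the new value, respectively), +// matching the builtins the compiler may lower to library calls.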
+//////////////////////////////////////////////////////////////////////////////// +#define ATOMIC_RMW(n, lockfree, type, opname, op) \ + type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \ + if (lockfree(ptr)) \ + return __atomic_fetch_##opname(ptr, val, model); \ + lock_t *l = lock_for_pointer(ptr); \ + lock(l); \ + type tmp = *ptr; \ + *ptr = tmp op val; \ + unlock(l); \ + return tmp; \ + } \ + \ + type __atomic_##opname##_fetch_##n(type *ptr, type val, int model) { \ + if (lockfree(ptr)) \ + return __atomic_##opname##_fetch(ptr, val, model); \ + lock_t *l = lock_for_pointer(ptr); \ + lock(l); \ + type tmp = *ptr; \ + tmp = tmp op val; \ + *ptr = tmp; \ + unlock(l); \ + return tmp; \ + } + +#define ATOMIC_RMW_NAND(n, lockfree, type) \ + type __atomic_fetch_nand_##n(type *ptr, type val, int model) { \ + if (lockfree(ptr)) \ + return __atomic_fetch_nand(ptr, val, model); \ + lock_t *l = lock_for_pointer(ptr); \ + lock(l); \ + type tmp = *ptr; \ + *ptr = ~(tmp & val); \ + unlock(l); \ + return tmp; \ + } \ + \ + type __atomic_nand_fetch_##n(type *ptr, type val, int model) { \ + if (lockfree(ptr)) \ + return __atomic_nand_fetch(ptr, val, model); \ + lock_t *l = lock_for_pointer(ptr); \ + lock(l); \ + type tmp = *ptr; \ + tmp = ~(tmp & val); \ + *ptr = tmp; \ + unlock(l); \ + return tmp; \ + } + +#define ATOMIC_TAS(n, lockfree, type) \ + bool __atomic_test_and_set_##n(type *ptr, int model) { \ + if (lockfree(ptr)) \ + return __atomic_test_and_set(ptr, model); \ + lock_t *l = lock_for_pointer(ptr); \ + lock(l); \ + type tmp = *ptr; \ + *ptr = 1; \ + unlock(l); \ + return !!tmp; \ + } + +#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +) +OPTIMISED_CASES +#undef OPTIMISED_CASE +#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -) +OPTIMISED_CASES +#undef OPTIMISED_CASE +#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &) +OPTIMISED_CASES +#undef OPTIMISED_CASE +#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |) +OPTIMISED_CASES +#undef OPTIMISED_CASE +#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^) +OPTIMISED_CASES +#undef OPTIMISED_CASE +#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW_NAND(n, lockfree, type) +OPTIMISED_CASES +#undef OPTIMISED_CASE +#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_TAS(n, lockfree, type) +OPTIMISED_CASES +#undef OPTIMISED_CASE + +/* atomic_flag */ + +typedef struct atomic_flag { bool v; } atomic_flag; + +void atomic_flag_clear(volatile atomic_flag *o) { + __atomic_clear(&o->v, __ATOMIC_SEQ_CST); +} + +void atomic_flag_clear_explicit(volatile atomic_flag *o, int model) { + __atomic_clear(&o->v, model); +} + +bool atomic_flag_test_and_set(volatile atomic_flag *o) { + return __atomic_test_and_set(&o->v, __ATOMIC_SEQ_CST); +} + +bool atomic_flag_test_and_set_explicit(volatile atomic_flag *o, int model) { + return __atomic_test_and_set(&o->v, model); +} + +/* fence */ + +void atomic_thread_fence(int model) { + __atomic_thread_fence(model); +} + +void atomic_signal_fence(int model) { + __atomic_signal_fence(model); +} + +/* provided by gcc libatomic */ + +void __atomic_feraiseexcept(int e) { + volatile float r __attribute__((unused)); +#ifdef FE_INVALID + if (e & FE_INVALID) { + volatile float z = 0.0f; + r = z / z; + } +#endif +#ifdef FE_DIVBYZERO + if (e & FE_DIVBYZERO) { + volatile float z = 0.0f; + r = 1.0f / z; + } +#endif +#ifdef FE_OVERFLOW + if (e & FE_OVERFLOW) { + volatile float m = 
__FLT_MAX__; + r = m * m; + } +#endif +#ifdef FE_UNDERFLOW + if (e & FE_UNDERFLOW) { + volatile float m = __FLT_MIN__; + r = m * m; + } +#endif +#ifdef FE_INEXACT + if (e & FE_INEXACT) { + volatile float t = 3.0f; + r = 1.0f / t; + } +#endif +}
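For reference, a minimal caller sketch (not part of the patch; the struct layout and file name are illustrative only). Because a 32-byte object has no lock-free atomic support on common targets, the compiler lowers the generic `__atomic_*` builtins on it to library calls, so building the snippet and linking it against this library (e.g. `cc test.c -L. -latomic`) exercises the locked fallback path:

```c
#include <stdio.h>

/* Oversized type: operations on it go through the generic __atomic_load /
 * __atomic_compare_exchange library entry points defined in atomic.c. */
struct big { unsigned long v[4]; };

int main(void) {
    struct big obj = {{1, 2, 3, 4}};
    struct big desired = {{5, 6, 7, 8}};
    struct big expected, out;

    __atomic_load(&obj, &out, __ATOMIC_SEQ_CST);   /* library call */
    expected = out;

    /* Succeeds, since expected matches the current value of obj. */
    int ok = __atomic_compare_exchange(&obj, &expected, &desired, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    printf("cas ok=%d obj.v[0]=%lu\n", ok, obj.v[0]);
    return 0;
}
```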