@@ -0,0 +1,98 @@
+// Support for atomic operations -*- C++ -*-
+
+// Copyright (C) 2004-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file ext/atomicity.h
+ * This file is a GNU extension to the Standard C++ Library.
+ */
+
+#ifndef _GLIBCXX_ATOMICITY_H
+#define _GLIBCXX_ATOMICITY_H 1
+
+#pragma GCC system_header
+
+#include <bits/c++config.h>
+#include <bits/gthr.h>
+#include <bits/atomic_word.h>
+
+namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
+{
+ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ // Functions for portable atomic access.
+ // To abstract locking primitives across all thread policies, use:
+ // __exchange_and_add_dispatch
+ // __atomic_add_dispatch
+#ifdef _GLIBCXX_ATOMIC_BUILTINS
+  // Exchange-and-add returns the value *__mem held before the
+  // addition, so use __atomic_fetch_add rather than
+  // __atomic_add_fetch (which would return the new value).
+  static inline _Atomic_word
+  __attribute__((__always_inline__))
+  __exchange_and_add(volatile _Atomic_word* __mem, int __val)
+  { return __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
+
+  static inline void
+  __attribute__((__always_inline__))
+  __atomic_add(volatile _Atomic_word* __mem, int __val)
+  { __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
+#else
+ _Atomic_word __exchange_and_add(volatile _Atomic_word *, int) _GLIBCXX_NOTHROW;
+
+ void __atomic_add(volatile _Atomic_word *, int) _GLIBCXX_NOTHROW;
+#endif
+
+  // Single-threaded fallbacks: plain, non-atomic read-modify-write,
+  // safe only when no other thread can touch *__mem.
+  static inline _Atomic_word
+  __attribute__((__always_inline__))
+  __exchange_and_add_single(_Atomic_word* __mem, int __val)
+  {
+    _Atomic_word __result = *__mem;
+    *__mem += __val;
+    return __result;
+  }
+
+  static inline void
+  __attribute__((__always_inline__))
+  __atomic_add_single(_Atomic_word* __mem, int __val)
+  { *__mem += __val; }
+
+  // The dispatch functions take the cheap single-threaded path unless
+  // the program is actually running more than one thread.
+  static inline _Atomic_word
+  __attribute__((__always_inline__))
+  __exchange_and_add_dispatch(_Atomic_word* __mem, int __val)
+  {
+#ifdef __GTHREADS
+    if (__gthread_active_p())
+      return __exchange_and_add(__mem, __val);
+#endif
+    return __exchange_and_add_single(__mem, __val);
+  }
+
+  static inline void
+  __attribute__((__always_inline__))
+  __atomic_add_dispatch(_Atomic_word* __mem, int __val)
+  {
+#ifdef __GTHREADS
+    if (__gthread_active_p())
+      { __atomic_add(__mem, __val); return; }
+#endif
+    __atomic_add_single(__mem, __val);
+  }
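+
+  // Usage sketch (illustrative only, not part of this header): a
+  // hypothetical reference-counted class with an _Atomic_word member
+  // _M_refcount could use the dispatch functions like this:
+  //
+  //   void _M_add_reference()
+  //   { __gnu_cxx::__atomic_add_dispatch(&_M_refcount, 1); }
+  //
+  //   void _M_remove_reference()
+  //   {
+  //     // The return value is the count *before* the decrement, so a
+  //     // result of 1 means this was the last reference.
+  //     if (__gnu_cxx::__exchange_and_add_dispatch(&_M_refcount, -1) == 1)
+  //       _M_dispose();
+  //   }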
+
+ _GLIBCXX_END_NAMESPACE_VERSION
+} // namespace __gnu_cxx
+
+// Even if the CPU doesn't need a memory barrier, we need to ensure
+// that the compiler doesn't reorder memory accesses across the
+// barriers.
+#ifndef _GLIBCXX_READ_MEM_BARRIER
+#define _GLIBCXX_READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE)
+#endif
+#ifndef _GLIBCXX_WRITE_MEM_BARRIER
+#define _GLIBCXX_WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE)
+#endif
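+
+// Usage sketch (illustrative only, not part of this header): the
+// classic publish/consume pairing.  Real code must also make the
+// __ready flag accesses themselves atomic; this only shows where the
+// fences go.
+//
+//   // Writer thread: fill in the payload, then publish it.
+//   __payload = __value;
+//   _GLIBCXX_WRITE_MEM_BARRIER;
+//   __ready = 1;
+//
+//   // Reader thread: once the flag is seen, the payload is visible.
+//   if (__ready)
+//     {
+//       _GLIBCXX_READ_MEM_BARRIER;
+//       __use(__payload);
+//     }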
+
+#endif