From 8a70664e5248cd6b9d63951729e93bf73eff004c Mon Sep 17 00:00:00 2001
From: Kim Barrett <kbarrett@openjdk.org>
Date: Tue, 2 May 2023 21:27:01 +0000
Subject: [PATCH] 8293117: Add atomic bitset functions

Reviewed-by: shade, coleenp, dholmes
---
 src/hotspot/share/runtime/atomic.hpp       | 207 +++++++++++++++++++++
 test/hotspot/gtest/runtime/test_atomic.cpp |  98 +++++++++-
 2 files changed, 304 insertions(+), 1 deletion(-)

diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
index f9cafaf7be4..45ebaeadbcb 100644
--- a/src/hotspot/share/runtime/atomic.hpp
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -160,6 +160,82 @@ public:
   inline static bool replace_if_null(D* volatile* dest, T* value,
                                      atomic_memory_order order = memory_order_conservative);
 
+  // Bitwise logical operations (and, or, xor)
+  //
+  // All operations apply the corresponding operation to the value in dest and
+  // bits, storing the result in dest. They return either the old value
+  // (fetch_then_BITOP) or the newly updated value (BITOP_then_fetch).
+  //
+  // Requirements:
+  // - T is an integral type
+  // - sizeof(T) == sizeof(int) || sizeof(T) == sizeof(void*)
+
+  // Performs atomic bitwise-and of *dest and bits, storing the result in
+  // *dest. Returns the prior value of *dest. That is, atomically performs
+  // this sequence of operations:
+  // { tmp = *dest; *dest &= bits; return tmp; }
+  template<typename T>
+  static T fetch_then_and(volatile T* dest, T bits,
+                          atomic_memory_order order = memory_order_conservative) {
+    static_assert(std::is_integral<T>::value, "bitop with non-integral type");
+    return PlatformBitops<sizeof(T)>().fetch_then_and(dest, bits, order);
+  }
+
+  // Performs atomic bitwise-or of *dest and bits, storing the result in
+  // *dest. Returns the prior value of *dest. That is, atomically performs
+  // this sequence of operations:
+  // { tmp = *dest; *dest |= bits; return tmp; }
+  template<typename T>
+  static T fetch_then_or(volatile T* dest, T bits,
+                         atomic_memory_order order = memory_order_conservative) {
+    static_assert(std::is_integral<T>::value, "bitop with non-integral type");
+    return PlatformBitops<sizeof(T)>().fetch_then_or(dest, bits, order);
+  }
+
+  // Performs atomic bitwise-xor of *dest and bits, storing the result in
+  // *dest. Returns the prior value of *dest. That is, atomically performs
+  // this sequence of operations:
+  // { tmp = *dest; *dest ^= bits; return tmp; }
+  template<typename T>
+  static T fetch_then_xor(volatile T* dest, T bits,
+                          atomic_memory_order order = memory_order_conservative) {
+    static_assert(std::is_integral<T>::value, "bitop with non-integral type");
+    return PlatformBitops<sizeof(T)>().fetch_then_xor(dest, bits, order);
+  }
+
+  // Performs atomic bitwise-and of *dest and bits, storing the result in
+  // *dest. Returns the new value of *dest. That is, atomically performs
+  // this operation:
+  // { return *dest &= bits; }
+  template<typename T>
+  static T and_then_fetch(volatile T* dest, T bits,
+                          atomic_memory_order order = memory_order_conservative) {
+    static_assert(std::is_integral<T>::value, "bitop with non-integral type");
+    return PlatformBitops<sizeof(T)>().and_then_fetch(dest, bits, order);
+  }
+
+  // Performs atomic bitwise-or of *dest and bits, storing the result in
+  // *dest. Returns the new value of *dest. That is, atomically performs
+  // this operation:
+  // { return *dest |= bits; }
+  template<typename T>
+  static T or_then_fetch(volatile T* dest, T bits,
+                         atomic_memory_order order = memory_order_conservative) {
+    static_assert(std::is_integral<T>::value, "bitop with non-integral type");
+    return PlatformBitops<sizeof(T)>().or_then_fetch(dest, bits, order);
+  }
+
+  // Performs atomic bitwise-xor of *dest and bits, storing the result in
+  // *dest. Returns the new value of *dest. That is, atomically performs
+  // this operation:
+  // { return *dest ^= bits; }
+  template<typename T>
+  static T xor_then_fetch(volatile T* dest, T bits,
+                          atomic_memory_order order = memory_order_conservative) {
+    static_assert(std::is_integral<T>::value, "bitop with non-integral type");
+    return PlatformBitops<sizeof(T)>().xor_then_fetch(dest, bits, order);
+  }
+
 private:
   // Test whether From is implicitly convertible to To.
   // From and To must be pointer types.
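Illustration (not part of the patch): a minimal sketch of how callers might use the new operations, here to set and clear bits in a shared flag word using the default conservative memory order. The flag word, bit masks, and helper functions are hypothetical names invented for this example.

  #include "runtime/atomic.hpp"

  // Hypothetical flag word shared between threads (example only).
  static volatile uint32_t _flags = 0;
  static const uint32_t CLAIMED_BIT = 1u << 0;
  static const uint32_t PENDING_BIT = 1u << 1;

  // Atomically set CLAIMED_BIT. The prior value tells us whether this call
  // performed the 0 -> 1 transition, i.e. whether we won the race to claim.
  static bool try_claim() {
    uint32_t old_flags = Atomic::fetch_then_or(&_flags, CLAIMED_BIT);
    return (old_flags & CLAIMED_BIT) == 0;
  }

  // Atomically clear PENDING_BIT and return the resulting flag word.
  static uint32_t clear_pending() {
    return Atomic::and_then_fetch(&_flags, ~PENDING_BIT);
  }
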
@@ -367,6 +443,44 @@ private:
   static T xchg_using_helper(Fn fn, T volatile* dest,
                              T exchange_value);
+
+  // Platform-specific implementation of the bitops (and, or, xor). Support
+  // for sizes of 4 bytes and (if different) pointer size bytes is required.
+  // The class is a function object that must be default constructible, with
+  // these requirements:
+  //
+  // - T is an integral type.
+  // - dest is of type T*.
+  // - bits is of type T.
+  // - order is of type atomic_memory_order.
+  // - platform_bitops is an object of type PlatformBitops<sizeof(T)>.
+  //
+  // Then
+  //   platform_bitops.fetch_then_and(dest, bits, order)
+  //   platform_bitops.fetch_then_or(dest, bits, order)
+  //   platform_bitops.fetch_then_xor(dest, bits, order)
+  //   platform_bitops.and_then_fetch(dest, bits, order)
+  //   platform_bitops.or_then_fetch(dest, bits, order)
+  //   platform_bitops.xor_then_fetch(dest, bits, order)
+  // must all be valid expressions, returning a result convertible to T.
+  //
+  // A default definition is provided, which implements all of the operations
+  // using cmpxchg.
+  //
+  // For each required size, a platform must either use the default or
+  // entirely specialize the class for that size by providing all of the
+  // required operations.
+  //
+  // The second (bool) template parameter allows platforms to provide a
+  // partial specialization with a parameterized size, and is otherwise
+  // unused. The default value for that bool parameter means specializations
+  // don't need to mention it.
+  template<size_t byte_size, bool use_default = true> class PlatformBitops;
+
+  // Helper base classes that may be used to implement PlatformBitops.
+  class PrefetchBitopsUsingCmpxchg;
+  class PostfetchBitopsUsingCmpxchg;
+  class PostfetchBitopsUsingPrefetch;
 };
 
 template<typename From, typename To>
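Illustration (not part of the patch): a rough sketch of the shape a platform-specific specialization permitted by this contract might take. The use of the GCC/Clang __atomic builtins, the fixed seq-cst ordering (ignoring the order argument, which is at least as strong as any requested order), and the exact specialization form are assumptions made for the example, not taken from any actual port. The *_then_fetch forms are inherited from the PostfetchBitopsUsingPrefetch helper defined in the next hunk.

  // Hypothetical platform header: one partial specialization covering all
  // supported sizes. Only the fetch_then_* forms are written by hand; the
  // *_then_fetch forms come from PostfetchBitopsUsingPrefetch.
  template<size_t byte_size>
  class Atomic::PlatformBitops<byte_size, true>
    : public PostfetchBitopsUsingPrefetch {
  public:
    template<typename T>
    T fetch_then_and(T volatile* dest, T bits, atomic_memory_order order) const {
      // Always sequentially consistent, regardless of the requested order.
      return __atomic_fetch_and(dest, bits, __ATOMIC_SEQ_CST);
    }

    template<typename T>
    T fetch_then_or(T volatile* dest, T bits, atomic_memory_order order) const {
      return __atomic_fetch_or(dest, bits, __ATOMIC_SEQ_CST);
    }

    template<typename T>
    T fetch_then_xor(T volatile* dest, T bits, atomic_memory_order order) const {
      return __atomic_fetch_xor(dest, bits, __ATOMIC_SEQ_CST);
    }
  };
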
@@ -576,6 +690,99 @@ struct Atomic::PlatformXchg {
                atomic_memory_order order) const;
 };
 
+// Implement fetch_then_bitop operations using a CAS loop.
+class Atomic::PrefetchBitopsUsingCmpxchg {
+  template<typename T, typename Op>
+  T bitop(T volatile* dest, atomic_memory_order order, Op operation) const {
+    T old_value;
+    T new_value;
+    T fetched_value = Atomic::load(dest);
+    do {
+      old_value = fetched_value;
+      new_value = operation(old_value);
+      fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order);
+    } while (old_value != fetched_value);
+    return fetched_value;
+  }
+
+public:
+  template<typename T>
+  T fetch_then_and(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bitop(dest, order, [&](T value) -> T { return value & bits; });
+  }
+
+  template<typename T>
+  T fetch_then_or(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bitop(dest, order, [&](T value) -> T { return value | bits; });
+  }
+
+  template<typename T>
+  T fetch_then_xor(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bitop(dest, order, [&](T value) -> T { return value ^ bits; });
+  }
+};
+
+// Implement bitop_then_fetch operations using a CAS loop.
+class Atomic::PostfetchBitopsUsingCmpxchg {
+  template<typename T, typename Op>
+  T bitop(T volatile* dest, atomic_memory_order order, Op operation) const {
+    T old_value;
+    T new_value;
+    T fetched_value = Atomic::load(dest);
+    do {
+      old_value = fetched_value;
+      new_value = operation(old_value);
+      fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order);
+    } while (old_value != fetched_value);
+    return new_value;
+  }
+
+public:
+  template<typename T>
+  T and_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bitop(dest, order, [&](T value) -> T { return value & bits; });
+  }
+
+  template<typename T>
+  T or_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bitop(dest, order, [&](T value) -> T { return value | bits; });
+  }
+
+  template<typename T>
+  T xor_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bitop(dest, order, [&](T value) -> T { return value ^ bits; });
+  }
+};
+
+// Implement bitop_then_fetch operations by calling fetch_then_bitop and
+// applying the operation to the result and the bits argument.
+class Atomic::PostfetchBitopsUsingPrefetch {
+public:
+  template<typename T>
+  T and_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bits & Atomic::fetch_then_and(dest, bits, order);
+  }
+
+  template<typename T>
+  T or_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bits | Atomic::fetch_then_or(dest, bits, order);
+  }
+
+  template<typename T>
+  T xor_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
+    return bits ^ Atomic::fetch_then_xor(dest, bits, order);
+  }
+};
+
+// The default definition uses cmpxchg. Platforms can override by defining a
+// partial specialization providing the size, either as a template parameter
+// or as a specific value.
+template<size_t byte_size, bool use_default>
+class Atomic::PlatformBitops
+  : public PrefetchBitopsUsingCmpxchg,
+    public PostfetchBitopsUsingCmpxchg
+{};
+
 template <ScopedFenceType T>
 class ScopedFenceGeneral: public StackObj {
  public:
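Illustration (not part of the patch): the retry structure shared by the two CAS-loop helpers above, recast as a self-contained function over std::atomic so it can be read in isolation. The function and parameter names are invented for the example; the point it shows is that the value returned by a failed compare-exchange becomes the next iteration's expected value, so each retry costs a single atomic read-modify-write.

  #include <atomic>
  #include <cstdint>

  // Standalone sketch of the cmpxchg retry loop: keep proposing
  // old_value -> op(old_value) until no other thread has modified *dest
  // in between. Returns the prior value (the fetch_then_* form).
  template<typename T, typename Op>
  T fetch_then_bitop(std::atomic<T>* dest, Op op) {
    T old_value = dest->load(std::memory_order_relaxed);
    T new_value;
    do {
      new_value = op(old_value);
      // On failure, compare_exchange_weak stores the current value of *dest
      // back into old_value, playing the role of Atomic::cmpxchg's return value.
    } while (!dest->compare_exchange_weak(old_value, new_value,
                                          std::memory_order_seq_cst,
                                          std::memory_order_relaxed));
    return old_value;
  }

  // Example: atomically set bit 3 of `word` and observe the prior value.
  // uint32_t prior = fetch_then_bitop(&word, [](uint32_t v) { return v | (1u << 3); });
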
diff --git a/test/hotspot/gtest/runtime/test_atomic.cpp b/test/hotspot/gtest/runtime/test_atomic.cpp
index 9e8da172e56..b6b4db06e1d 100644
--- a/test/hotspot/gtest/runtime/test_atomic.cpp
+++ b/test/hotspot/gtest/runtime/test_atomic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -199,3 +199,99 @@ TEST(AtomicEnumTest, scoped_enum) {
   Support().test_cmpxchg(B, C);
   Support().test_xchg(B, C);
 }
+
+template<typename T>
+struct AtomicBitopsTestSupport {
+  volatile T _test_value;
+
+  // At least one byte differs between _old_value and _old_value op _change_value.
+  static const T _old_value = static_cast<T>(UCONST64(0x7f5300007f530000));
+  static const T _change_value = static_cast<T>(UCONST64(0x3800530038005300));
+
+  AtomicBitopsTestSupport() : _test_value(0) {}
+
+  void fetch_then_and() {
+    Atomic::store(&_test_value, _old_value);
+    T expected = _old_value & _change_value;
+    T result = Atomic::fetch_then_and(&_test_value, _change_value);
+    EXPECT_EQ(_old_value, result);
+    EXPECT_EQ(expected, Atomic::load(&_test_value));
+  }
+
+  void fetch_then_or() {
+    Atomic::store(&_test_value, _old_value);
+    T expected = _old_value | _change_value;
+    T result = Atomic::fetch_then_or(&_test_value, _change_value);
+    EXPECT_EQ(_old_value, result);
+    EXPECT_EQ(expected, Atomic::load(&_test_value));
+  }
+
+  void fetch_then_xor() {
+    Atomic::store(&_test_value, _old_value);
+    T expected = _old_value ^ _change_value;
+    T result = Atomic::fetch_then_xor(&_test_value, _change_value);
+    EXPECT_EQ(_old_value, result);
+    EXPECT_EQ(expected, Atomic::load(&_test_value));
+  }
+
+  void and_then_fetch() {
+    Atomic::store(&_test_value, _old_value);
+    T expected = _old_value & _change_value;
+    T result = Atomic::and_then_fetch(&_test_value, _change_value);
+    EXPECT_EQ(expected, result);
+    EXPECT_EQ(expected, Atomic::load(&_test_value));
+  }
+
+  void or_then_fetch() {
+    Atomic::store(&_test_value, _old_value);
+    T expected = _old_value | _change_value;
+    T result = Atomic::or_then_fetch(&_test_value, _change_value);
+    EXPECT_EQ(expected, result);
+    EXPECT_EQ(expected, Atomic::load(&_test_value));
+  }
+
+  void xor_then_fetch() {
+    Atomic::store(&_test_value, _old_value);
+    T expected = _old_value ^ _change_value;
+    T result = Atomic::xor_then_fetch(&_test_value, _change_value);
+    EXPECT_EQ(expected, result);
+    EXPECT_EQ(expected, Atomic::load(&_test_value));
+  }
+
+#define TEST_BITOP(name) { SCOPED_TRACE(XSTR(name)); name(); }
+
+  void operator()() {
+    TEST_BITOP(fetch_then_and)
+    TEST_BITOP(fetch_then_or)
+    TEST_BITOP(fetch_then_xor)
+    TEST_BITOP(and_then_fetch)
+    TEST_BITOP(or_then_fetch)
+    TEST_BITOP(xor_then_fetch)
+  }
+
+#undef TEST_BITOP
+};
+
+template<typename T>
+const T AtomicBitopsTestSupport<T>::_old_value;
+
+template<typename T>
+const T AtomicBitopsTestSupport<T>::_change_value;
+
+TEST(AtomicBitopsTest, int32) {
+  AtomicBitopsTestSupport<int32_t>()();
+}
+
+TEST(AtomicBitopsTest, uint32) {
+  AtomicBitopsTestSupport<uint32_t>()();
+}
+
+#ifdef _LP64
+TEST(AtomicBitopsTest, int64) {
+  AtomicBitopsTestSupport<int64_t>()();
+}
+
+TEST(AtomicBitopsTest, uint64) {
+  AtomicBitopsTestSupport<uint64_t>()();
+}
+#endif // _LP64
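
Illustration (not part of the patch): the gtests above exercise each operation single-threaded. A sketch of the kind of concurrent smoke test one might add on top, in which each of 32 threads sets a distinct bit and no update may be lost. std::thread is used only to keep the sketch self-contained; a real test in this suite would more likely use HotSpot's own test thread helpers.

  #include <thread>
  #include <vector>
  #include "runtime/atomic.hpp"
  #include "unittest.hpp"

  // Hypothetical concurrent check: if fetch_then_or is a true atomic
  // read-modify-write, all 32 single-bit updates survive and the final
  // word has every bit set.
  TEST(AtomicBitopsTest, concurrent_or_smoke) {
    static volatile uint32_t word = 0;
    std::vector<std::thread> threads;
    for (uint32_t i = 0; i < 32; ++i) {
      threads.emplace_back([i]() {
        Atomic::fetch_then_or(&word, uint32_t(1) << i);
      });
    }
    for (std::thread& t : threads) {
      t.join();
    }
    EXPECT_EQ(0xffffffffu, Atomic::load(&word));
  }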