Mirror of https://github.com/torvalds/linux.git (synced 2025-08-15 14:11:42 +02:00)
bitmap-for-6.17
Bits-related patches for 6.17:
 - find_random_bit() series (Yury);
 - GENMASK() consolidation (Vincent);
 - random cleanups (Shaopeng, Ben, Yury)

-----BEGIN PGP SIGNATURE-----

iQGzBAABCgAdFiEEi8GdvG6xMhdgpu/4sUSA/TofvsgFAmiLkz0ACgkQsUSA/Tof
vsj7MgwAvRSyevYSm9cm1Y99098M/7gWeJUeLAIy0GJdFaBQIcMkXkRGXJ9A0ZHb
RoCFG4eiukHIDHRzJjncXUNTk0zVCbEifUF43BdnJrhjTePlou5SNVh6xhJfQ1Ai
ENB4Q+nAZyIm43cUnoDhR24ne3pgJcY+oe6e7sQTRFF/6+nB4RDHmjIAMVsYgH30
w8iPBxNXXULAZNDgOPA3J5bACEnZPfOAhtoiNBC9s4MsE4o+Q8E9FVhReI2tiIhk
t98kVZu7TFyrGcCdLz8EgbcG4KPFBmwOwOv8S1Mzgy46MwS//dd7MZA7y3MqTvJ/
VEMoTMAK14/VrgDxu/vdBsUJt/T1wPc+ZbUt/rNb530oSDkvjIo+4ihg1nfswqhn
u+fj65wAHRW7CSkgpHn3bM/wvxmtIaE6AoY6jWwyuZ1zGIEV+5iPBo56kkmpJlYj
GlnbiTHkNR/jGa1GwB3PDG2kzoqXVLz6EeFdZncUX53MGa90g0+5/k0ld+oBJTDh
7QbkZlW1
=uj9U
-----END PGP SIGNATURE-----

Merge tag 'bitmap-for-6.17' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

 - find_random_bit() series (Yury)
 - GENMASK() consolidation (Vincent)
 - random cleanups (Shaopeng, Ben, Yury)

* tag 'bitmap-for-6.17' of https://github.com/norov/linux:
  bitfield: Ensure the return values of helper functions are checked
  test_bits: add tests for __GENMASK() and __GENMASK_ULL()
  bits: unify the non-asm GENMASK*()
  bits: split the definition of the asm and non-asm GENMASK*()
  cpumask: Remove unnecessary cpumask_nth_andnot()
  watchdog: fix opencoded cpumask_next_wrap() in watchdog_next_cpu()
  clocksource: Improve randomness in clocksource_verify_choose_cpus()
  cpumask: introduce cpumask_random()
  bitmap: generalize node_random()
Commit: f2d282e1df
9 changed files with 71 additions and 92 deletions
include/linux/bitfield.h

@@ -189,14 +189,14 @@ static __always_inline u64 field_mask(u64 field)
 }
 #define field_max(field) ((typeof(field))field_mask(field))
 #define ____MAKE_OP(type,base,to,from) \
-static __always_inline __##type type##_encode_bits(base v, base field) \
+static __always_inline __##type __must_check type##_encode_bits(base v, base field) \
 { \
         if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
                 __field_overflow(); \
         return to((v & field_mask(field)) * field_multiplier(field)); \
 } \
-static __always_inline __##type type##_replace_bits(__##type old, \
+static __always_inline __##type __must_check type##_replace_bits(__##type old, \
                                         base val, base field) \
 { \
         return (old & ~to(field)) | type##_encode_bits(val, field); \
 } \
@@ -205,7 +205,7 @@ static __always_inline void type##p_replace_bits(__##type *p, \
 { \
         *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
 } \
-static __always_inline base type##_get_bits(__##type v, base field) \
+static __always_inline base __must_check type##_get_bits(__##type v, base field) \
 { \
         return (from(v) & field)/field_multiplier(field); \
 }
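Side note on the bitfield.h hunks above: __must_check expands to the compiler's warn_unused_result attribute, so silently discarding the result of the generated *_encode_bits()/*_replace_bits()/*_get_bits() helpers now produces a diagnostic. A minimal user-space sketch of that effect, assuming GCC or Clang; demo_encode_bits() is an illustrative stand-in, not the kernel macro output:

/* Build with: cc -Wall must_check_demo.c */
#include <stdint.h>

#define __must_check __attribute__((warn_unused_result))

/* Illustrative stand-in for a generated type##_encode_bits() helper. */
static inline uint32_t __must_check demo_encode_bits(uint32_t v, uint32_t field)
{
        /* Multiply by the field's lowest set bit to shift @v into position. */
        return (v * (field & -field)) & field;
}

int main(void)
{
        uint32_t reg = 0;

        reg |= demo_encode_bits(0x3, 0x00f0);   /* fine: result is consumed */
        demo_encode_bits(0x3, 0x00f0);          /* warns: result is ignored */

        return (int)reg;
}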
include/linux/bits.h

@@ -2,10 +2,8 @@
 #ifndef __LINUX_BITS_H
 #define __LINUX_BITS_H
 
-#include <linux/const.h>
 #include <vdso/bits.h>
 #include <uapi/linux/bits.h>
-#include <asm/bitsperlong.h>
 
 #define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
@@ -50,10 +48,14 @@
              (type_max(t) << (l) & \
               type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
 
+#define GENMASK(h, l)           GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l)       GENMASK_TYPE(unsigned long long, h, l)
+
 #define GENMASK_U8(h, l)        GENMASK_TYPE(u8, h, l)
 #define GENMASK_U16(h, l)       GENMASK_TYPE(u16, h, l)
 #define GENMASK_U32(h, l)       GENMASK_TYPE(u32, h, l)
 #define GENMASK_U64(h, l)       GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l)      GENMASK_TYPE(u128, h, l)
 
 /*
  * Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
@@ -79,28 +81,9 @@
  * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
  * disable the input check if that is the case.
  */
-#define GENMASK_INPUT_CHECK(h, l) 0
+#define GENMASK(h, l)           __GENMASK(h, l)
+#define GENMASK_ULL(h, l)       __GENMASK_ULL(h, l)
 
 #endif /* !defined(__ASSEMBLY__) */
 
-#define GENMASK(h, l) \
-        (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
-        (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
-
-#if !defined(__ASSEMBLY__)
-/*
- * Missing asm support
- *
- * __GENMASK_U128() depends on _BIT128() which would not work
- * in the asm code, as it shifts an 'unsigned __int128' data
- * type instead of direct representation of 128 bit constants
- * such as long and unsigned long. The fundamental problem is
- * that a 128 bit constant will get silently truncated by the
- * gcc compiler.
- */
-#define GENMASK_U128(h, l) \
-        (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
-#endif
-
 #endif /* __LINUX_BITS_H */
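For readers following the GENMASK consolidation above: GENMASK_TYPE(t, h, l) builds the mask from two shifts of the type's all-ones value, type_max(t) << l keeps bits l and above, type_max(t) >> (BITS_PER_TYPE(t) - 1 - h) keeps bits h and below, and their AND is bits h..l. A small user-space model of that arithmetic, checked against the same values the new __GENMASK*() KUnit cases use; genmask_u32()/genmask_u64() are illustrative stand-ins and omit the compile-time input check:

/* Build with: cc -Wall -o genmask_demo genmask_demo.c && ./genmask_demo */
#include <assert.h>
#include <stdint.h>

/* Bits h..l set, following the GENMASK_TYPE() shift arithmetic. */
static uint32_t genmask_u32(unsigned int h, unsigned int l)
{
        return (UINT32_MAX << l) & (UINT32_MAX >> (32 - 1 - h));
}

static uint64_t genmask_u64(unsigned int h, unsigned int l)
{
        return (UINT64_MAX << l) & (UINT64_MAX >> (64 - 1 - h));
}

int main(void)
{
        /* Same expectations as the __GENMASK*() KUnit cases added in this series. */
        assert(genmask_u32(0, 0) == 0x1u);
        assert(genmask_u32(1, 0) == 0x3u);
        assert(genmask_u32(2, 1) == 0x6u);
        assert(genmask_u32(31, 0) == UINT32_MAX);
        assert(genmask_u64(39, 21) == 0x000000ffffe00000ull);
        assert(genmask_u64(63, 0) == UINT64_MAX);
        return 0;
}

The kernel macro additionally adds GENMASK_INPUT_CHECK(h, l), which turns a constant argument pair with l > h into a build error.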
include/linux/cpumask.h

@@ -354,6 +354,18 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
         return find_next_bit_wrap(cpumask_bits(src), small_cpumask_bits, n + 1);
 }
 
+/**
+ * cpumask_random - get random cpu in *src.
+ * @src: cpumask pointer
+ *
+ * Return: random set bit, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_random(const struct cpumask *src)
+{
+        return find_random_bit(cpumask_bits(src), nr_cpu_ids);
+}
+
 /**
  * for_each_cpu - iterate over every cpu in a mask
  * @cpu: the (optionally unsigned) integer iterator
@@ -546,22 +558,6 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
                                 small_cpumask_bits, cpumask_check(cpu));
 }
 
-/**
- * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
- * @srcp1: the cpumask pointer
- * @srcp2: the cpumask pointer
- * @cpu: the Nth cpu to find, starting from 0
- *
- * Return: >= nr_cpu_ids if such cpu doesn't exist.
- */
-static __always_inline
-unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
-                                const struct cpumask *srcp2)
-{
-        return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
-                                small_cpumask_bits, cpumask_check(cpu));
-}
-
 /**
  * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
  * @srcp1: the cpumask pointer
include/linux/find.h

@@ -44,6 +44,8 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
                 long size, unsigned long offset);
 #endif
 
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size);
+
 #ifndef find_next_bit
 /**
  * find_next_bit - find the next set bit in a memory region
@@ -267,33 +269,6 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *
         return __find_nth_and_bit(addr1, addr2, size, n);
 }
 
-/**
- * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
- *                       flipping bits in 2nd region
- * @addr1: The 1st address to start the search at
- * @addr2: The 2nd address to start the search at
- * @size: The maximum number of bits to search
- * @n: The number of set bit, which position is needed, counting from 0
- *
- * Returns the bit number of the N'th set bit.
- * If no such, returns @size.
- */
-static __always_inline
-unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
-                                unsigned long size, unsigned long n)
-{
-        if (n >= size)
-                return size;
-
-        if (small_const_nbits(size)) {
-                unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
-
-                return val ? fns(val, n) : size;
-        }
-
-        return __find_nth_andnot_bit(addr1, addr2, size, n);
-}
-
 /**
  * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
  *                           excluding those set in 3rd region
include/linux/nodemask.h

@@ -492,21 +492,9 @@ static __always_inline int num_node_state(enum node_states state)
 static __always_inline int node_random(const nodemask_t *maskp)
 {
 #if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
-        int w, bit;
+        int node = find_random_bit(maskp->bits, MAX_NUMNODES);
 
-        w = nodes_weight(*maskp);
-        switch (w) {
-        case 0:
-                bit = NUMA_NO_NODE;
-                break;
-        case 1:
-                bit = first_node(*maskp);
-                break;
-        default:
-                bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
-                break;
-        }
-        return bit;
+        return node < MAX_NUMNODES ? node : NUMA_NO_NODE;
 #else
         return 0;
 #endif
kernel/time/clocksource.c

@@ -340,10 +340,7 @@ static void clocksource_verify_choose_cpus(void)
          * CPUs that are currently online.
          */
         for (i = 1; i < n; i++) {
-                cpu = get_random_u32_below(nr_cpu_ids);
-                cpu = cpumask_next(cpu - 1, cpu_online_mask);
-                if (cpu >= nr_cpu_ids)
-                        cpu = cpumask_first(cpu_online_mask);
+                cpu = cpumask_random(cpu_online_mask);
                 if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
                         cpumask_set_cpu(cpu, &cpus_chosen);
         }
kernel/watchdog_buddy.c

@@ -12,10 +12,7 @@ static unsigned int watchdog_next_cpu(unsigned int cpu)
 {
         unsigned int next_cpu;
 
-        next_cpu = cpumask_next(cpu, &watchdog_cpus);
-        if (next_cpu >= nr_cpu_ids)
-                next_cpu = cpumask_first(&watchdog_cpus);
-
+        next_cpu = cpumask_next_wrap(cpu, &watchdog_cpus);
         if (next_cpu == cpu)
                 return nr_cpu_ids;
 
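The watchdog hunk is a straight substitution: cpumask_next_wrap(cpu, mask) already means "next set CPU strictly after cpu, wrapping to the start of the mask", which is exactly what the removed cpumask_next()/cpumask_first() pair open-coded, and a single-CPU mask wraps back to cpu itself, so the existing next_cpu == cpu check still detects "no other CPU". A user-space model of that wrap-around search over a plain 16-bit mask; next_bit()/next_bit_wrap() are illustrative stand-ins, not the kernel helpers:

/* Build with: cc -Wall -o wrap_demo wrap_demo.c && ./wrap_demo */
#include <assert.h>
#include <stdint.h>

#define NBITS 16

/* Index of the first bit set at or above position 'from', or NBITS if none. */
static unsigned int next_bit(uint16_t mask, unsigned int from)
{
        for (unsigned int i = from; i < NBITS; i++)
                if (mask & (1u << i))
                        return i;
        return NBITS;
}

/* Next set bit strictly after 'n', wrapping around to the start of the mask. */
static unsigned int next_bit_wrap(uint16_t mask, unsigned int n)
{
        unsigned int bit = next_bit(mask, n + 1);

        return bit < NBITS ? bit : next_bit(mask, 0);
}

int main(void)
{
        uint16_t cpus = 0x0a50;                 /* bits 4, 6, 9, 11 set */

        assert(next_bit_wrap(cpus, 4) == 6);
        assert(next_bit_wrap(cpus, 6) == 9);
        assert(next_bit_wrap(cpus, 11) == 4);   /* wraps past the end */
        /* A single-bit mask wraps back to itself, which is what lets
         * watchdog_next_cpu() detect "no other CPU" via next_cpu == cpu. */
        assert(next_bit_wrap(0x0010, 4) == 4);
        return 0;
}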
lib/find_bit.c

@@ -18,6 +18,7 @@
 #include <linux/math.h>
 #include <linux/minmax.h>
 #include <linux/swab.h>
+#include <linux/random.h>
 
 /*
  * Common helper for find_bit() function family
@@ -291,3 +292,26 @@ EXPORT_SYMBOL(_find_next_bit_le);
 #endif
 
 #endif /* __BIG_ENDIAN */
+
+/**
+ * find_random_bit - find a set bit at random position
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns: a position of a random set bit; >= @size otherwise
+ */
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size)
+{
+        int w = bitmap_weight(addr, size);
+
+        switch (w) {
+        case 0:
+                return size;
+        case 1:
+                /* Performance trick for single-bit bitmaps */
+                return find_first_bit(addr, size);
+        default:
+                return find_nth_bit(addr, size, get_random_u32_below(w));
+        }
+}
+EXPORT_SYMBOL(find_random_bit);
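On the selection logic in find_random_bit() above: with w bits set, get_random_u32_below(w) picks an index n uniformly in [0, w) and find_nth_bit() maps it to the n-th set bit, so each set bit is returned with probability 1/w, and an empty bitmap falls through to the ">= @size" convention. A user-space model of the same idea, with rand() standing in for get_random_u32_below() and naive loops in place of bitmap_weight()/find_nth_bit():

/* Build with: cc -Wall -o random_bit_demo random_bit_demo.c && ./random_bit_demo */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBITS 64

/* Position of the n-th (0-based) set bit, or NBITS if there is no such bit. */
static unsigned int nth_bit(uint64_t mask, unsigned int n)
{
        for (unsigned int i = 0; i < NBITS; i++)
                if ((mask & (1ull << i)) && n-- == 0)
                        return i;
        return NBITS;
}

/* Model of find_random_bit(): weight first, then the n-th set bit for random n. */
static unsigned int random_bit(uint64_t mask)
{
        unsigned int w = (unsigned int)__builtin_popcountll(mask);

        if (w == 0)
                return NBITS;                   /* empty bitmap: "not found" */
        return nth_bit(mask, (unsigned int)(rand() % w));
}

int main(void)
{
        uint64_t mask = (1ull << 3) | (1ull << 17) | (1ull << 42);
        unsigned int hits[NBITS] = { 0 };

        srand(1);
        for (int i = 0; i < 3000; i++) {
                unsigned int bit = random_bit(mask);

                assert(mask & (1ull << bit));   /* always returns a set bit */
                hits[bit]++;
        }
        printf("bit 3: %u, bit 17: %u, bit 42: %u\n", hits[3], hits[17], hits[42]);
        assert(random_bit(0) == NBITS);         /* empty mask behaves like >= size */
        return 0;
}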
lib/test_bits.c

@@ -26,6 +26,23 @@ static_assert(assert_type(u16, GENMASK_U16(15, 0)) == U16_MAX);
 static_assert(assert_type(u32, GENMASK_U32(31, 0)) == U32_MAX);
 static_assert(assert_type(u64, GENMASK_U64(63, 0)) == U64_MAX);
 
+/* FIXME: add a test case written in asm for GENMASK() and GENMASK_ULL() */
+
+static void __genmask_test(struct kunit *test)
+{
+        KUNIT_EXPECT_EQ(test, 1ul, __GENMASK(0, 0));
+        KUNIT_EXPECT_EQ(test, 3ul, __GENMASK(1, 0));
+        KUNIT_EXPECT_EQ(test, 6ul, __GENMASK(2, 1));
+        KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, __GENMASK(31, 0));
+}
+
+static void __genmask_ull_test(struct kunit *test)
+{
+        KUNIT_EXPECT_EQ(test, 1ull, __GENMASK_ULL(0, 0));
+        KUNIT_EXPECT_EQ(test, 3ull, __GENMASK_ULL(1, 0));
+        KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, __GENMASK_ULL(39, 21));
+        KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, __GENMASK_ULL(63, 0));
+}
+
 static void genmask_test(struct kunit *test)
 {
@@ -123,6 +140,8 @@ static void genmask_input_check_test(struct kunit *test)
 }
 
 static struct kunit_case bits_test_cases[] = {
+        KUNIT_CASE(__genmask_test),
+        KUNIT_CASE(__genmask_ull_test),
         KUNIT_CASE(genmask_test),
         KUNIT_CASE(genmask_ull_test),
         KUNIT_CASE(genmask_u128_test),