author    Linus Torvalds <torvalds@linux-foundation.org>    2023-07-08 10:02:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2023-07-08 10:02:24 -0700
commit    ad8258e87729e4337569c4b7d30cfdd4b299179d (patch)
tree      3b69119f025fa86d6271c94f813d373f31cc13c1
parent    8689f4f2ea561dd080118eeb05c0255ac9542905 (diff)
parent    2a3110e3f97ddc0f53bb766797b926a35edd07e6 (diff)
download  linux-pm-ad8258e87729e4337569c4b7d30cfdd4b299179d.tar.gz
Merge tag 'bitmap-6.5-rc1' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:
 "Fixes for different bitmap pieces:

   - lib/test_bitmap: increment failure counter properly

     The tests that don't use the expect_eq() macro to determine that a
     test has failed must increment failed_tests explicitly.

   - lib/bitmap: drop optimization of bitmap_{from,to}_arr64

     The bitmap_{from,to}_arr64() optimization is overly optimistic on
     32-bit LE architectures when it's wired to bitmap_copy_clear_tail().

   - nodemask: Drop duplicate check in for_each_node_mask()

     Since the return type of first_node() became unsigned, the
     'node >= 0' check became unnecessary.

   - cpumask: fix function description kernel-doc notation

   - MAINTAINERS: Add bits.h and bitfield.h to the BITMAP API record

     Add linux/bits.h and linux/bitfield.h for visibility"

* tag 'bitmap-6.5-rc1' of https://github.com/norov/linux:
  MAINTAINERS: Add bitfield.h to the BITMAP API record
  MAINTAINERS: Add bits.h to the BITMAP API record
  cpumask: fix function description kernel-doc notation
  nodemask: Drop duplicate check in for_each_node_mask()
  lib/bitmap: drop optimization of bitmap_{from,to}_arr64
  lib/test_bitmap: increment failure counter properly
-rw-r--r--  MAINTAINERS                6
-rw-r--r--  include/linux/bitmap.h     8
-rw-r--r--  include/linux/nodemask.h   2
-rw-r--r--  lib/bitmap.c               2
-rw-r--r--  lib/cpumask.c              2
-rw-r--r--  lib/test_bitmap.c         18
6 files changed, 27 insertions, 11 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 5761b02183a719..64f10559d3b2f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3499,18 +3499,24 @@ M: Yury Norov <yury.norov@gmail.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
S: Maintained
+F: include/linux/bitfield.h
F: include/linux/bitmap.h
+F: include/linux/bits.h
F: include/linux/cpumask.h
F: include/linux/find.h
F: include/linux/nodemask.h
+F: include/vdso/bits.h
F: lib/bitmap.c
F: lib/cpumask.c
F: lib/cpumask_kunit.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/test_bitmap.c
+F: tools/include/linux/bitfield.h
F: tools/include/linux/bitmap.h
+F: tools/include/linux/bits.h
F: tools/include/linux/find.h
+F: tools/include/vdso/bits.h
F: tools/lib/bitmap.c
F: tools/lib/find_bit.c
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 7d6d73b781472f..03644237e1efb5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -302,12 +302,10 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
#endif
/*
- * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
- * machines the order of hi and lo parts of numbers match the bitmap structure.
- * In both cases conversion is not needed when copying data from/to arrays of
- * u64.
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
+ * the conversion is not needed when copying data from/to arrays of u64.
*/
-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+#if BITS_PER_LONG == 32
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
#else
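Why the 32-bit LE fast path goes away: as I read this hunk (and the tail-clearing checks added to lib/test_bitmap.c further down), a copy done at unsigned-long (32-bit) granularity can leave the upper half of the last u64 element of the destination untouched when nbits is not a multiple of 64. A minimal userspace sketch of that effect on a little-endian build, illustration only and not the kernel implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t bitmap[1] = { 0xabcd };	/* 16 valid bits, tail of the 32-bit word already clear */
	uint64_t arr[1];

	memset(arr, 0xa5, sizeof(arr));		/* stand-in for whatever was in the buffer before */
	memcpy(arr, bitmap, sizeof(bitmap));	/* copy at 32-bit granularity only */

	/* On a little-endian build this prints 0xa5a5a5a50000abcd:
	 * bits 32..63 of the last u64 were never cleared.
	 */
	printf("arr[0] = 0x%016llx\n", (unsigned long long)arr[0]);
	return 0;
}

With the #if reduced to BITS_PER_LONG == 32, every 32-bit build now takes the out-of-line bitmap_{from,to}_arr64() helpers, which handle that tail explicitly.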
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index bb0ee80526b2dc..8d07116caaf1b0 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -385,7 +385,7 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask) \
for ((node) = first_node(mask); \
- (node >= 0) && (node) < MAX_NUMNODES; \
+ (node) < MAX_NUMNODES; \
(node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask) \
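The dropped '(node) >= 0' test is a tautology once first_node()/next_node() return unsigned values, so removing it changes no behavior; GCC with -Wextra/-Wtype-limits typically even warns about such comparisons. A tiny illustration (hypothetical code, not from the kernel):

#include <stdio.h>

#define SKETCH_MAX_NODES 4U	/* made-up stand-in for MAX_NUMNODES */

int main(void)
{
	unsigned int node;

	/* With 'node' unsigned, adding 'node >= 0' to the condition below
	 * changes nothing: the expression is always true, which is what
	 * gcc -Wextra reports.
	 */
	for (node = 0; node < SKETCH_MAX_NODES; node++)
		printf("visiting node %u\n", node);

	return 0;
}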
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1c81413c51f86d..ddb31015e38ae2 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -1495,7 +1495,7 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
EXPORT_SYMBOL(bitmap_to_arr32);
#endif
-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+#if BITS_PER_LONG == 32
/**
* bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
* @bitmap: array of unsigned longs, the destination bitmap
diff --git a/lib/cpumask.c b/lib/cpumask.c
index e7258836b60b7f..de356f16773a04 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(cpumask_local_spread);
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
/**
- * Returns an arbitrary cpu within srcp1 & srcp2.
+ * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
*
* Iterated calls using the same srcp1 and srcp2 will be distributed within
* their intersection.
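On the kernel-doc fix: scripts/kernel-doc expects the first line of a /** comment to read "function_name - summary"; a bare description line like the removed one is not attributed to the function and produces warnings. A generic sketch of the expected shape, where my_example_fn and @arg are made up for illustration:

/**
 * my_example_fn - one-line summary that names the function first
 * @arg: what the parameter means
 *
 * Free-form description after a blank comment line.  Without the
 * "name - summary" first line, scripts/kernel-doc cannot tie the comment
 * to my_example_fn() and emits a warning, which is the class of problem
 * this hunk fixes for cpumask_any_and_distribute().
 */
static int my_example_fn(int arg)
{
	return arg;
}

int main(void)
{
	return my_example_fn(0);
}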
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index a8005ad3bd5893..187f5b2db4cf16 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -470,6 +470,7 @@ static void __init test_bitmap_parselist(void)
if (err != ptest.errno) {
pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
i, ptest.in, err, ptest.errno);
+ failed_tests++;
continue;
}
@@ -478,6 +479,7 @@ static void __init test_bitmap_parselist(void)
pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
i, ptest.in, bmap[0],
*ptest.expected);
+ failed_tests++;
continue;
}
@@ -511,11 +513,13 @@ static void __init test_bitmap_printlist(void)
if (ret != slen + 1) {
pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
+ failed_tests++;
goto out;
}
if (strncmp(buf, expected, slen)) {
pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
+ failed_tests++;
goto out;
}
@@ -583,6 +587,7 @@ static void __init test_bitmap_parse(void)
if (err != test.errno) {
pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
i, test.in, err, test.errno);
+ failed_tests++;
continue;
}
@@ -591,6 +596,7 @@ static void __init test_bitmap_parse(void)
pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
i, test.in, bmap[0],
*test.expected);
+ failed_tests++;
continue;
}
@@ -615,10 +621,12 @@ static void __init test_bitmap_arr32(void)
next_bit = find_next_bit(bmap2,
round_up(nbits, BITS_PER_LONG), nbits);
- if (next_bit < round_up(nbits, BITS_PER_LONG))
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
pr_err("bitmap_copy_arr32(nbits == %d:"
" tail is not safely cleared: %d\n",
nbits, next_bit);
+ failed_tests++;
+ }
if (nbits < EXP1_IN_BITS - 32)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
@@ -641,15 +649,19 @@ static void __init test_bitmap_arr64(void)
expect_eq_bitmap(bmap2, exp1, nbits);
next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits);
- if (next_bit < round_up(nbits, BITS_PER_LONG))
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
pr_err("bitmap_copy_arr64(nbits == %d:"
" tail is not safely cleared: %d\n", nbits, next_bit);
+ failed_tests++;
+ }
if ((nbits % 64) &&
- (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0)))
+ (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0))) {
pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
nbits, arr[(nbits - 1) / 64],
GENMASK_ULL((nbits - 1) % 64, 0));
+ failed_tests++;
+ }
if (nbits < EXP1_IN_BITS - 64)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
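The test_bitmap hunks all follow one pattern: checks that report a failure with a bare pr_err() do not go through the expect_eq_*() helpers, so they must bump failed_tests themselves or the final summary under-counts. A userspace sketch of that pattern, where expect_eq_sketch() is a made-up stand-in and not the kernel macro:

#include <stdio.h>

static unsigned int total_tests, failed_tests;

/* Hypothetical helper: it counts failures itself, so callers that use it
 * never touch failed_tests directly.
 */
#define expect_eq_sketch(exp, got)				\
	do {							\
		total_tests++;					\
		if ((exp) != (got))				\
			failed_tests++;				\
	} while (0)

int main(void)
{
	expect_eq_sketch(1, 1);		/* counted, passes */
	expect_eq_sketch(1, 2);		/* counted, fails  */

	/* A check reported with a plain error message bypasses the helper,
	 * so it has to increment failed_tests by hand - the pattern the
	 * hunks above add to test_bitmap_parse() and friends.
	 */
	if (3 != 4) {
		fprintf(stderr, "manual check failed\n");
		failed_tests++;
	}

	printf("%u/%u checks failed\n", failed_tests, total_tests);
	return 0;
}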