From 16ab4646c9057e0528b985ad772e3cb88c613db2 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Tue, 27 Feb 2024 21:50:15 +0100 Subject: Revert "riscv: mm: support Svnapot in huge vmap" This reverts commit ce173474cf19fe7fbe8f0fc74e3c81ec9c3d9807. We cannot correctly deal with NAPOT mappings in vmalloc/vmap because if some part of a NAPOT mapping is unmapped, the remaining mapping is not updated accordingly. For example: ptr = vmalloc_huge(64 * 1024, GFP_KERNEL); vunmap_range((unsigned long)(ptr + PAGE_SIZE), (unsigned long)(ptr + 64 * 1024)); leads to the following kernel page table dump: 0xffff8f8000ef0000-0xffff8f8000ef1000 0x00000001033c0000 4K PTE N .. .. D A G . . W R V Meaning the first entry which was not unmapped still has the N bit set, which, if accessed first and cached in the TLB, could allow access to the unmapped range. That's because the logic to break the NAPOT mapping does not exist and likely won't. Indeed, to break a NAPOT mapping, we first have to clear the whole mapping, flush the TLB and then set the new mapping ("break-before-make" equivalent). That works fine in userspace since we can handle any pagefault occurring on the remaining mapping but we can't handle a kernel pagefault on such mapping. So fix this by reverting the commit that introduced the vmap/vmalloc support. Fixes: ce173474cf19 ("riscv: mm: support Svnapot in huge vmap") Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20240227205016.121901-2-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/vmalloc.h | 61 +--------------------------------------- 1 file changed, 1 insertion(+), 60 deletions(-) diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h index 924d01b56c9a1..51f6dfe19745a 100644 --- a/arch/riscv/include/asm/vmalloc.h +++ b/arch/riscv/include/asm/vmalloc.h @@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot) return true; } -#ifdef CONFIG_RISCV_ISA_SVNAPOT -#include +#endif -#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size -static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end, - u64 pfn, unsigned int max_page_shift) -{ - unsigned long map_size = PAGE_SIZE; - unsigned long size, order; - - if (!has_svnapot()) - return map_size; - - for_each_napot_order_rev(order) { - if (napot_cont_shift(order) > max_page_shift) - continue; - - size = napot_cont_size(order); - if (end - addr < size) - continue; - - if (!IS_ALIGNED(addr, size)) - continue; - - if (!IS_ALIGNED(PFN_PHYS(pfn), size)) - continue; - - map_size = size; - break; - } - - return map_size; -} - -#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift -static inline int arch_vmap_pte_supported_shift(unsigned long size) -{ - int shift = PAGE_SHIFT; - unsigned long order; - - if (!has_svnapot()) - return shift; - - WARN_ON_ONCE(size >= PMD_SIZE); - - for_each_napot_order_rev(order) { - if (napot_cont_size(order) > size) - continue; - - if (!IS_ALIGNED(size, napot_cont_size(order))) - continue; - - shift = napot_cont_shift(order); - break; - } - - return shift; -} - -#endif /* CONFIG_RISCV_ISA_SVNAPOT */ -#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #endif /* _ASM_RISCV_VMALLOC_H */ -- cgit 1.2.3-korg From e0fe5ab4192c171c111976dbe90bbd37d3976be0 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Tue, 27 Feb 2024 21:50:16 +0100 Subject: riscv: Fix pte_leaf_size() for NAPOT pte_leaf_size() must be reimplemented to add support for NAPOT mappings.
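(For illustration, a minimal user-space model of the size computation involved: under Svnapot, a NAPOT PTE of order N covers 2^N contiguous base pages, so pte_leaf_size() must report that contiguous size rather than PAGE_SIZE. The constants below are illustrative stand-ins, not the kernel's definitions.)

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                  /* 4K base pages, as on RISC-V */
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* A NAPOT mapping of order N covers 2^N contiguous base pages. */
static uint64_t napot_cont_size(unsigned int order)
{
        return PAGE_SIZE << order;
}

int main(void)
{
        /* Order 4 models the 64K Svnapot granule: 16 x 4K pages. */
        printf("order 4 leaf size: %lu KiB\n",
               (unsigned long)(napot_cont_size(4) / 1024));
        return 0;
}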
Fixes: 82a1a1f3bfb6 ("riscv: mm: support Svnapot in hugetlb page") Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20240227205016.121901-3-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 0c94260b5d0c1..89f5f1bd6e463 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -439,6 +439,10 @@ static inline pte_t pte_mkhuge(pte_t pte) return pte; } +#define pte_leaf_size(pte) (pte_napot(pte) ? \ + napot_cont_size(napot_cont_order(pte)) :\ + PAGE_SIZE) + #ifdef CONFIG_NUMA_BALANCING /* * See the comment in include/asm-generic/pgtable.h -- cgit 1.2.3-korg From b5b4287accd702f562a49a60b10dbfaf7d40270f Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Tue, 30 Jan 2024 17:07:00 -0800 Subject: riscv: mm: Use hint address in mmap if available On riscv, it is guaranteed that the address returned by mmap is less than the hint address. Allow mmap to return an address all the way up to addr, if provided, rather than just up to the lower address space. This provides a performance benefit as well, allowing mmap to exit after checking that the address is in range rather than searching for a valid address. It would be possible to return an address that uses at most the same number of bits as the hint; however, computing that bound is significantly more expensive than simply setting the maximum to be the hint address. Zbb provides the clz/clzw instructions, which count leading zeros and could be used to find the highest set bit efficiently, but even that would still be slower than the current implementation. In the worst case, half of the address space below the hint cannot be allocated when a hint address is provided.
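(A rough user-space sketch of the resulting behavior; the 2^40 hint below is arbitrary, and the exact limits depend on the paging mode.)

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* Ask for a mapping at or below an arbitrary 2^40 hint. */
        void *hint = (void *)(1UL << 40);
        void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* With this change, p should use no more bits than the hint. */
        printf("hint %p -> mapped %p\n", hint, p);
        return munmap(p, 4096);
}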
Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240130-use_mmap_hint_address-v3-1-8a655cfa8bcb@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/processor.h | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index f19f861cda549..8ece7a8f0e18b 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -14,22 +14,16 @@ #include -#ifdef CONFIG_64BIT -#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1)) -#define STACK_TOP_MAX TASK_SIZE_64 - #define arch_get_mmap_end(addr, len, flags) \ ({ \ unsigned long mmap_end; \ typeof(addr) _addr = (addr); \ - if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \ + if ((_addr) == 0 || \ + (IS_ENABLED(CONFIG_COMPAT) && is_compat_task()) || \ + ((_addr + len) > BIT(VA_BITS - 1))) \ mmap_end = STACK_TOP_MAX; \ - else if ((_addr) >= VA_USER_SV57) \ - mmap_end = STACK_TOP_MAX; \ - else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \ - mmap_end = VA_USER_SV48; \ else \ - mmap_end = VA_USER_SV39; \ + mmap_end = (_addr + len); \ mmap_end; \ }) @@ -39,17 +33,18 @@ typeof(addr) _addr = (addr); \ typeof(base) _base = (base); \ unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \ - if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \ + if ((_addr) == 0 || \ + (IS_ENABLED(CONFIG_COMPAT) && is_compat_task()) || \ + ((_addr + len) > BIT(VA_BITS - 1))) \ mmap_base = (_base); \ - else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \ - mmap_base = VA_USER_SV57 - rnd_gap; \ - else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \ - mmap_base = VA_USER_SV48 - rnd_gap; \ else \ - mmap_base = VA_USER_SV39 - rnd_gap; \ + mmap_base = (_addr + len) - rnd_gap; \ mmap_base; \ }) +#ifdef CONFIG_64BIT +#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1)) +#define STACK_TOP_MAX TASK_SIZE_64 #else #define DEFAULT_MAP_WINDOW TASK_SIZE #define STACK_TOP_MAX TASK_SIZE -- cgit 1.2.3-korg From 73d05262a2ca28c72c5aec57d79b47c251fe77c5 Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Tue, 30 Jan 2024 17:07:01 -0800 Subject: selftests: riscv: Generalize mm selftests The behavior of mmap on riscv is defined to not provide an address that uses more bits than the hint address, if provided. Make the tests reflect that. 
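(The generalized tests map at many random hints and check each result against the largest value that uses no more bits than the hint. A standalone sketch of that bound computation, mirroring the helper added to the test header below, is:)

#include <stdio.h>

/* Largest address using no more bits than the hint: set every bit at
 * or below the hint's highest set bit. */
static unsigned long get_max_value(unsigned long hint)
{
        unsigned long max_bit = 1UL << (sizeof(unsigned long) * 8 - 1 -
                                        __builtin_clzl(hint));

        return max_bit + (max_bit - 1);
}

int main(void)
{
        unsigned long hint = 0x3580bdd3562f4acdUL;

        printf("%#lx -> %#lx\n", hint, get_max_value(hint)); /* 0x3fff... */
        return 0;
}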
Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240130-use_mmap_hint_address-v3-2-8a655cfa8bcb@rivosinc.com Signed-off-by: Palmer Dabbelt --- tools/testing/selftests/riscv/mm/mmap_bottomup.c | 23 +---- tools/testing/selftests/riscv/mm/mmap_default.c | 23 +---- tools/testing/selftests/riscv/mm/mmap_test.h | 107 ++++++++++++++--------- 3 files changed, 67 insertions(+), 86 deletions(-) diff --git a/tools/testing/selftests/riscv/mm/mmap_bottomup.c b/tools/testing/selftests/riscv/mm/mmap_bottomup.c index 1757d19ca89b1..7f7d3eb8b9c92 100644 --- a/tools/testing/selftests/riscv/mm/mmap_bottomup.c +++ b/tools/testing/selftests/riscv/mm/mmap_bottomup.c @@ -6,30 +6,9 @@ TEST(infinite_rlimit) { -// Only works on 64 bit -#if __riscv_xlen == 64 - struct addresses mmap_addresses; - EXPECT_EQ(BOTTOM_UP, memory_layout()); - do_mmaps(&mmap_addresses); - - EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr); - - EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint); - EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr); - EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr); - EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr); - EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr); - EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr); - EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr); -#endif + TEST_MMAPS; } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/riscv/mm/mmap_default.c b/tools/testing/selftests/riscv/mm/mmap_default.c index c63c60b9397e7..2ba3ec9900064 100644 --- a/tools/testing/selftests/riscv/mm/mmap_default.c +++ b/tools/testing/selftests/riscv/mm/mmap_default.c @@ -6,30 +6,9 @@ TEST(default_rlimit) { -// Only works on 64 bit -#if __riscv_xlen == 64 - struct addresses mmap_addresses; - EXPECT_EQ(TOP_DOWN, memory_layout()); - do_mmaps(&mmap_addresses); - - EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr); - EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr); - - EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint); - EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr); - EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr); - EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr); - EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr); - EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr); - EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr); -#endif + TEST_MMAPS; } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h index 9b8434f62f570..36e78d991d5e2 100644 --- a/tools/testing/selftests/riscv/mm/mmap_test.h +++ b/tools/testing/selftests/riscv/mm/mmap_test.h @@ -4,60 +4,83 @@ #include #include #include +#include +#include "../../kselftest_harness.h" #define TOP_DOWN 0 #define BOTTOM_UP 1 -struct addresses { - int *no_hint; - int *on_37_addr; - int *on_38_addr; - int *on_46_addr; - int *on_47_addr; - int 
*on_55_addr; - int *on_56_addr; +#if __riscv_xlen == 64 +uint64_t random_addresses[] = { + 0x19764f0d73b3a9f0, 0x016049584cecef59, 0x3580bdd3562f4acd, + 0x1164219f20b17da0, 0x07d97fcb40ff2373, 0x76ec528921272ee7, + 0x4dd48c38a3de3f70, 0x2e11415055f6997d, 0x14b43334ac476c02, + 0x375a60795aff19f6, 0x47f3051725b8ee1a, 0x4e697cf240494a9f, + 0x456b59b5c2f9e9d1, 0x101724379d63cb96, 0x7fe9ad31619528c1, + 0x2f417247c495c2ea, 0x329a5a5b82943a5e, 0x06d7a9d6adcd3827, + 0x327b0b9ee37f62d5, 0x17c7b1851dfd9b76, 0x006ebb6456ec2cd9, + 0x00836cd14146a134, 0x00e5c4dcde7126db, 0x004c29feadf75753, + 0x00d8b20149ed930c, 0x00d71574c269387a, 0x0006ebe4a82acb7a, + 0x0016135df51f471b, 0x00758bdb55455160, 0x00d0bdd949b13b32, + 0x00ecea01e7c5f54b, 0x00e37b071b9948b1, 0x0011fdd00ff57ab3, + 0x00e407294b52f5ea, 0x00567748c200ed20, 0x000d073084651046, + 0x00ac896f4365463c, 0x00eb0d49a0b26216, 0x0066a2564a982a31, + 0x002e0d20237784ae, 0x0000554ff8a77a76, 0x00006ce07a54c012, + 0x000009570516d799, 0x00000954ca15b84d, 0x0000684f0d453379, + 0x00002ae5816302b5, 0x0000042403fb54bf, 0x00004bad7392bf30, + 0x00003e73bfa4b5e3, 0x00005442c29978e0, 0x00002803f11286b6, + 0x000073875d745fc6, 0x00007cede9cb8240, 0x000027df84cc6a4f, + 0x00006d7e0e74242a, 0x00004afd0b836e02, 0x000047d0e837cd82, + 0x00003b42405efeda, 0x00001531bafa4c95, 0x00007172cae34ac4, }; +#else +uint32_t random_addresses[] = { + 0x8dc302e0, 0x929ab1e0, 0xb47683ba, 0xea519c73, 0xa19f1c90, 0xc49ba213, + 0x8f57c625, 0xadfe5137, 0x874d4d95, 0xaa20f09d, 0xcf21ebfc, 0xda7737f1, + 0xcedf392a, 0x83026c14, 0xccedca52, 0xc6ccf826, 0xe0cd9415, 0x997472ca, + 0xa21a44c1, 0xe82196f5, 0xa23fd66b, 0xc28d5590, 0xd009cdce, 0xcf0be646, + 0x8fc8c7ff, 0xe2a85984, 0xa3d3236b, 0x89a0619d, 0xc03db924, 0xb5d4cc1b, + 0xb96ee04c, 0xd191da48, 0xb432a000, 0xaa2bebbc, 0xa2fcb289, 0xb0cca89b, + 0xb0c18d6a, 0x88f58deb, 0xa4d42d1c, 0xe4d74e86, 0x99902b09, 0x8f786d31, + 0xbec5e381, 0x9a727e65, 0xa9a65040, 0xa880d789, 0x8f1b335e, 0xfc821c1e, + 0x97e34be4, 0xbbef84ed, 0xf447d197, 0xfd7ceee2, 0xe632348d, 0xee4590f4, + 0x958992a5, 0xd57e05d6, 0xfd240970, 0xc5b0dcff, 0xd96da2c2, 0xa7ae041d, +}; +#endif -static inline void do_mmaps(struct addresses *mmap_addresses) -{ - /* - * Place all of the hint addresses on the boundaries of mmap - * sv39, sv48, sv57 - * User addresses end at 1<<38, 1<<47, 1<<56 respectively - */ - void *on_37_bits = (void *)(1UL << 37); - void *on_38_bits = (void *)(1UL << 38); - void *on_46_bits = (void *)(1UL << 46); - void *on_47_bits = (void *)(1UL << 47); - void *on_55_bits = (void *)(1UL << 55); - void *on_56_bits = (void *)(1UL << 56); +#define PROT (PROT_READ | PROT_WRITE) +#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) - int prot = PROT_READ | PROT_WRITE; - int flags = MAP_PRIVATE | MAP_ANONYMOUS; +/* mmap must return a value that doesn't use more bits than the hint address. 
*/ +static inline unsigned long get_max_value(unsigned long input) +{ + unsigned long max_bit = (1UL << (((sizeof(unsigned long) * 8) - 1 - + __builtin_clzl(input)))); - mmap_addresses->no_hint = - mmap(NULL, 5 * sizeof(int), prot, flags, 0, 0); - mmap_addresses->on_37_addr = - mmap(on_37_bits, 5 * sizeof(int), prot, flags, 0, 0); - mmap_addresses->on_38_addr = - mmap(on_38_bits, 5 * sizeof(int), prot, flags, 0, 0); - mmap_addresses->on_46_addr = - mmap(on_46_bits, 5 * sizeof(int), prot, flags, 0, 0); - mmap_addresses->on_47_addr = - mmap(on_47_bits, 5 * sizeof(int), prot, flags, 0, 0); - mmap_addresses->on_55_addr = - mmap(on_55_bits, 5 * sizeof(int), prot, flags, 0, 0); - mmap_addresses->on_56_addr = - mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0); + return max_bit + (max_bit - 1); } +#define TEST_MMAPS \ + ({ \ + void *mmap_addr; \ + for (int i = 0; i < ARRAY_SIZE(random_addresses); i++) { \ + mmap_addr = mmap((void *)random_addresses[i], \ + 5 * sizeof(int), PROT, FLAGS, 0, 0); \ + EXPECT_NE(MAP_FAILED, mmap_addr); \ + EXPECT_GE((void *)get_max_value(random_addresses[i]), \ + mmap_addr); \ + mmap_addr = mmap((void *)random_addresses[i], \ + 5 * sizeof(int), PROT, FLAGS, 0, 0); \ + EXPECT_NE(MAP_FAILED, mmap_addr); \ + EXPECT_GE((void *)get_max_value(random_addresses[i]), \ + mmap_addr); \ + } \ + }) + static inline int memory_layout(void) { - int prot = PROT_READ | PROT_WRITE; - int flags = MAP_PRIVATE | MAP_ANONYMOUS; - - void *value1 = mmap(NULL, sizeof(int), prot, flags, 0, 0); - void *value2 = mmap(NULL, sizeof(int), prot, flags, 0, 0); + void *value1 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0); + void *value2 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0); return value2 > value1; } -- cgit 1.2.3-korg From 371a3c2055dbb8a88754902fe19aa4fcc4318c29 Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Tue, 30 Jan 2024 17:07:02 -0800 Subject: docs: riscv: Define behavior of mmap Define mmap on riscv to not provide an address that uses more bits than the hint address, if provided. Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240130-use_mmap_hint_address-v3-3-8a655cfa8bcb@rivosinc.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/vm-layout.rst | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/Documentation/arch/riscv/vm-layout.rst b/Documentation/arch/riscv/vm-layout.rst index 69ff6da1dbf8f..e476b4386bd9d 100644 --- a/Documentation/arch/riscv/vm-layout.rst +++ b/Documentation/arch/riscv/vm-layout.rst @@ -144,14 +144,8 @@ passing 0 into the hint address parameter of mmap. On CPUs with an address space smaller than sv48, the CPU maximum supported address space will be the default. Software can "opt-in" to receiving VAs from another VA space by providing -a hint address to mmap. A hint address passed to mmap will cause the largest -address space that fits entirely into the hint to be used, unless there is no -space left in the address space. If there is no space available in the requested -address space, an address in the next smallest available address space will be -returned. - -For example, in order to obtain 48-bit VA space, a hint address greater than -:code:`1 << 47` must be provided. Note that this is 47 due to sv48 userspace -ending at :code:`1 << 47` and the addresses beyond this are reserved for the -kernel. Similarly, to obtain 57-bit VA space addresses, a hint address greater -than or equal to :code:`1 << 56` must be provided. +a hint address to mmap. 
When a hint address is passed to mmap, the returned +address will never use more bits than the hint address. For example, if a hint +address of `1 << 40` is passed to mmap, a valid returned address will never use +bits 41 through 63. If no mappable addresses are available in that range, mmap +will return `MAP_FAILED`. -- cgit 1.2.3-korg From 2bb7e0c49302feec1c2f777bbfe8726169986ed8 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 4 Mar 2024 09:02:47 +0100 Subject: riscv: Fix compilation error with FAST_GUP and rv32 By surrounding the definition of pte_leaf_size() with an ifdef on CONFIG_RISCV_ISA_SVNAPOT, as it should have been. Fixes: e0fe5ab4192c ("riscv: Fix pte_leaf_size() for NAPOT") Signed-off-by: Alexandre Ghiti Reviewed-by: Randy Dunlap Tested-by: Randy Dunlap # build-tested Link: https://lore.kernel.org/r/20240304080247.387710-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 89f5f1bd6e463..2fdf7c85066fb 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -439,9 +439,11 @@ static inline pte_t pte_mkhuge(pte_t pte) return pte; } +#ifdef CONFIG_RISCV_ISA_SVNAPOT #define pte_leaf_size(pte) (pte_napot(pte) ? \ napot_cont_size(napot_cont_order(pte)) :\ PAGE_SIZE) +#endif #ifdef CONFIG_NUMA_BALANCING /* -- cgit 1.2.3-korg From 700c2d9b1b179e723a1b6201d3307c37ede90f99 Mon Sep 17 00:00:00 2001 From: Song Shuai Date: Wed, 21 Feb 2024 18:02:52 +0800 Subject: riscv: vector: Fix a typo of preempt_v The term "preempt_v" represents the RISCV_PREEMPT_V field of riscv_v_flags and is used in lots of comments. This corrects the misspelling "prempt_v". And s/acheived/achieved/. Reviewed-by: Andy Chiu Signed-off-by: Song Shuai Link: https://lore.kernel.org/r/20240221100252.3990445-1-songshuaishuai@tinylab.org Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/simd.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h index 54efbf523d49c..adb50f3ec2057 100644 --- a/arch/riscv/include/asm/simd.h +++ b/arch/riscv/include/asm/simd.h @@ -34,9 +34,9 @@ static __must_check inline bool may_use_simd(void) return false; /* - * Nesting is acheived in preempt_v by spreading the control for + * Nesting is achieved in preempt_v by spreading the control for * preemptible and non-preemptible kernel-mode Vector into two fields. - * Always try to match with prempt_v if kernel V-context exists. Then, + * Always try to match with preempt_v if kernel V-context exists. Then, * fallback to check non preempt_v if nesting happens, or if the config * is not set. */ -- cgit 1.2.3-korg From 6be7ee4bebd14b8e7e040a5e7fd6aec3d9167c72 Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Wed, 3 Jan 2024 13:00:19 -0300 Subject: riscv: Improve arch_get_mmap_end() macro This macro caused me some confusion, which took some reviewers' time to make it clear, so I propose adding a short comment in code to avoid confusion in the future. Also add some improvements to the macro, such as removing the assumption of VA_USER_SV57 being the largest address space.
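(A rough user-space model of the selection this macro performs, using the documented user address-space tops — sv39 ends at 1 << 38, sv48 at 1 << 47, sv57 at 1 << 56; STACK_TOP_MAX is modeled by the sv57 top here.)

#include <stdio.h>

#define VA_USER_SV39 (1UL << 38)
#define VA_USER_SV48 (1UL << 47)
#define VA_USER_SV57 (1UL << 56)

/* Pick the top of the largest address space whose end the hint
 * reaches, capped by what the hardware supports (va_bits). */
static unsigned long mmap_end_for(unsigned long addr, int va_bits)
{
        if (addr == 0)
                return VA_USER_SV57;   /* stand-in for STACK_TOP_MAX */
        if (addr >= VA_USER_SV57 && va_bits >= 57)
                return VA_USER_SV57;
        if (addr >= VA_USER_SV48 && va_bits >= 48)
                return VA_USER_SV48;
        return VA_USER_SV39;
}

int main(void)
{
        /* A hint in the sv48 range on sv57-capable hardware. */
        printf("%#lx\n", mmap_end_for(1UL << 50, 57)); /* sv48 top */
        return 0;
}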
Signed-off-by: Leonardo Bras Reviewed-by: Guo Ren Link: https://lore.kernel.org/r/20240103160024.70305-3-leobras@redhat.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/processor.h | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index f19f861cda549..2278e2a8362af 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -18,15 +18,21 @@ #define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1)) #define STACK_TOP_MAX TASK_SIZE_64 +/* + * addr is a hint to the maximum userspace address that mmap should provide, so + * this macro needs to return the largest address space available so that + * mmap_end < addr, being mmap_end the top of that address space. + * See Documentation/arch/riscv/vm-layout.rst for more details. + */ #define arch_get_mmap_end(addr, len, flags) \ ({ \ unsigned long mmap_end; \ typeof(addr) _addr = (addr); \ if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \ mmap_end = STACK_TOP_MAX; \ - else if ((_addr) >= VA_USER_SV57) \ - mmap_end = STACK_TOP_MAX; \ - else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \ + else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \ + mmap_end = VA_USER_SV57; \ + else if (((_addr) >= VA_USER_SV48) && (VA_BITS >= VA_BITS_SV48)) \ mmap_end = VA_USER_SV48; \ else \ mmap_end = VA_USER_SV39; \ -- cgit 1.2.3-korg From 9dc30419248f78dfebea7a554ec212dd1d82f8d7 Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Wed, 3 Jan 2024 13:00:20 -0300 Subject: riscv: Replace direct thread flag check with is_compat_task() There is some code that detects compat mode in a task by checking the flag directly, and other code that checks it using the helper is_compat_task(). Since the helper already exists, use it instead of checking the flags directly. Signed-off-by: Leonardo Bras Link: https://lore.kernel.org/r/20240103160024.70305-4-leobras@redhat.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/elf.h | 2 +- arch/riscv/include/asm/pgtable.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index 06c236bfab53b..59a08367fddd7 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -54,7 +54,7 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr); #ifdef CONFIG_64BIT #ifdef CONFIG_COMPAT -#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ +#define STACK_RND_MASK (is_compat_task() ? \ 0x7ff >> (PAGE_SHIFT - 12) : \ 0x3ffff >> (PAGE_SHIFT - 12)) #else diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 294044429e8e1..9a7a92edcb46a 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -882,7 +882,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) #ifdef CONFIG_COMPAT #define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE) -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ +#define TASK_SIZE (is_compat_task() ? \ TASK_SIZE_32 : TASK_SIZE_64) #else #define TASK_SIZE TASK_SIZE_64 -- cgit 1.2.3-korg From 4c0b5a451675e9a95be98a16ddb889bb0486d2ad Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Wed, 3 Jan 2024 13:00:21 -0300 Subject: riscv: add compile-time test into is_compat_task() Currently several places will test for CONFIG_COMPAT before testing is_compat_task(), probably in order to avoid a run-time test on the task structure.
Since is_compat_task() is an inlined function, it would be helpful to add a compile-time test of CONFIG_COMPAT, making sure it always returns zero when the option is not enabled during the kernel build. With this, the compiler is able to understand in build-time that is_compat_task() will always return 0, and optimize-out some of the extra code introduced by the option. This will also allow removing a lot #ifdefs that were introduced, and make the code more clean. Signed-off-by: Leonardo Bras Reviewed-by: Guo Ren Reviewed-by: Andy Chiu Link: https://lore.kernel.org/r/20240103160024.70305-5-leobras@redhat.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/compat.h | 3 +++ arch/riscv/include/asm/elf.h | 4 ---- arch/riscv/include/asm/pgtable.h | 6 ------ arch/riscv/include/asm/processor.h | 4 ++-- 4 files changed, 5 insertions(+), 12 deletions(-) diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index 2ac955b51148f..91517b51b8e27 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -14,6 +14,9 @@ static inline int is_compat_task(void) { + if (!IS_ENABLED(CONFIG_COMPAT)) + return 0; + return test_thread_flag(TIF_32BIT); } diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index 59a08367fddd7..2e88257cafaea 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -53,13 +53,9 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr); #define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2) #ifdef CONFIG_64BIT -#ifdef CONFIG_COMPAT #define STACK_RND_MASK (is_compat_task() ? \ 0x7ff >> (PAGE_SHIFT - 12) : \ 0x3ffff >> (PAGE_SHIFT - 12)) -#else -#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) -#endif #endif /* diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 9a7a92edcb46a..f5b504265d76f 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -127,16 +127,10 @@ #define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1)) #define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1)) -#ifdef CONFIG_COMPAT #define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS) #define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39) #define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64) #define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64) -#else -#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? 
VA_BITS_SV48 : VA_BITS) -#define MMAP_MIN_VA_BITS (VA_BITS_SV39) -#endif /* CONFIG_COMPAT */ - #else #include #endif /* CONFIG_64BIT */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 2278e2a8362af..d2d7ce30baf3e 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -28,7 +28,7 @@ ({ \ unsigned long mmap_end; \ typeof(addr) _addr = (addr); \ - if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \ + if ((_addr) == 0 || is_compat_task()) \ mmap_end = STACK_TOP_MAX; \ else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \ mmap_end = VA_USER_SV57; \ @@ -45,7 +45,7 @@ typeof(addr) _addr = (addr); \ typeof(base) _base = (base); \ unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \ - if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \ + if ((_addr) == 0 || is_compat_task()) \ mmap_base = (_base); \ else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \ mmap_base = VA_USER_SV57 - rnd_gap; \ -- cgit 1.2.3-korg From 5917ea17ad07f35bb5be4fd5fdcd408f090e347b Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Wed, 3 Jan 2024 13:00:22 -0300 Subject: riscv: Introduce is_compat_thread() into compat.h task_user_regset_view() makes use of a function very similar to is_compat_task(), but pointing to any thread. In arm64 asm/compat.h there is a function very similar to that: is_compat_thread(struct thread_info *thread) Copy this function to riscv asm/compat.h and make use of it in task_user_regset_view(). Also, introduce a compile-time test for CONFIG_COMPAT and simplify the function code by removing the #ifdef. Signed-off-by: Leonardo Bras Reviewed-by: Guo Ren Reviewed-by: Andy Chiu Link: https://lore.kernel.org/r/20240103160024.70305-6-leobras@redhat.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/compat.h | 8 ++++++++ arch/riscv/kernel/ptrace.c | 6 +++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index 91517b51b8e27..da4b28cd01a95 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -20,6 +20,14 @@ static inline int is_compat_task(void) return test_thread_flag(TIF_32BIT); } +static inline int is_compat_thread(struct thread_info *thread) +{ + if (!IS_ENABLED(CONFIG_COMPAT)) + return 0; + + return test_ti_thread_flag(thread, TIF_32BIT); +} + struct compat_user_regs_struct { compat_ulong_t pc; compat_ulong_t ra; diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 2afe460de16a6..f362832123616 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -374,14 +374,14 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, return ret; } +#else +static const struct user_regset_view compat_riscv_user_native_view = {}; #endif /* CONFIG_COMPAT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) { -#ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(task, TIF_32BIT)) + if (is_compat_thread(&task->thread_info)) return &compat_riscv_user_native_view; else -#endif return &riscv_user_native_view; } -- cgit 1.2.3-korg From 2a8986fc5e1cb686dd3aae3022459aea23b9823a Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Wed, 3 Jan 2024 13:00:23 -0300 Subject: riscv: Introduce set_compat_task() in asm/compat.h In order to have all task compat bit access directly in compat.h, introduce set_compat_task() to set/reset the flag when needed.
Also, since it's only used in an if/else scenario, simplify the macro using it. Signed-off-by: Leonardo Bras Reviewed-by: Guo Ren Link: https://lore.kernel.org/r/20240103160024.70305-7-leobras@redhat.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/compat.h | 8 ++++++++ arch/riscv/include/asm/elf.h | 5 +---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index da4b28cd01a95..aa103530a5c83 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -28,6 +28,14 @@ static inline int is_compat_thread(struct thread_info *thread) return test_ti_thread_flag(thread, TIF_32BIT); } +static inline void set_compat_task(bool is_compat) +{ + if (is_compat) + set_thread_flag(TIF_32BIT); + else + clear_thread_flag(TIF_32BIT); +} + struct compat_user_regs_struct { compat_ulong_t pc; compat_ulong_t ra; diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index 2e88257cafaea..c7aea7886d22a 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -135,10 +135,7 @@ do { \ #ifdef CONFIG_COMPAT #define SET_PERSONALITY(ex) \ -do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ - set_thread_flag(TIF_32BIT); \ - else \ - clear_thread_flag(TIF_32BIT); \ +do { set_compat_task((ex).e_ident[EI_CLASS] == ELFCLASS32); \ if (personality(current->personality) != PER_LINUX32) \ set_personality(PER_LINUX | \ (current->personality & (~PER_MASK))); \ -- cgit 1.2.3-korg From 6649182a383c9872e9543e2e7d4981d971bf0998 Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Thu, 18 Jan 2024 11:59:28 +0530 Subject: cpuidle: RISC-V: Move few functions to arch/riscv To support ACPI Low Power Idle (LPI), a few functions are required which are currently static functions in the DT-based cpuidle driver. Hence, move them under arch/riscv so that the ACPI driver can also use them. Since they are no longer static functions, add a "riscv_" prefix to the function names. Signed-off-by: Sunil V L Reviewed-by: Andrew Jones Acked-by: Rafael J.
Wysocki Link: https://lore.kernel.org/r/20240118062930.245937-2-sunilvl@ventanamicro.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/suspend.h | 3 +++ arch/riscv/kernel/suspend.c | 49 +++++++++++++++++++++++++++++++++++++ drivers/cpuidle/cpuidle-riscv-sbi.c | 49 ++++--------------------------------- 3 files changed, 57 insertions(+), 44 deletions(-) diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h index 02f87867389a9..076f8a9437cf5 100644 --- a/arch/riscv/include/asm/suspend.h +++ b/arch/riscv/include/asm/suspend.h @@ -55,4 +55,7 @@ int hibernate_resume_nonboot_cpu_disable(void); asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp, unsigned long cpu_resume); asmlinkage int hibernate_core_restore_code(void); +bool riscv_sbi_hsm_is_supported(void); +bool riscv_sbi_suspend_state_is_valid(u32 state); +int riscv_sbi_hart_suspend(u32 state); #endif diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c index 239509367e423..b20f2cb5879f1 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c @@ -128,4 +128,53 @@ static int __init sbi_system_suspend_init(void) } arch_initcall(sbi_system_suspend_init); + +static int sbi_suspend_finisher(unsigned long suspend_type, + unsigned long resume_addr, + unsigned long opaque) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, + suspend_type, resume_addr, opaque, 0, 0, 0); + + return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0; +} + +int riscv_sbi_hart_suspend(u32 state) +{ + if (state & SBI_HSM_SUSP_NON_RET_BIT) + return cpu_suspend(state, sbi_suspend_finisher); + else + return sbi_suspend_finisher(state, 0, 0); +} + +bool riscv_sbi_suspend_state_is_valid(u32 state) +{ + if (state > SBI_HSM_SUSPEND_RET_DEFAULT && + state < SBI_HSM_SUSPEND_RET_PLATFORM) + return false; + + if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && + state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) + return false; + + return true; +} + +bool riscv_sbi_hsm_is_supported(void) +{ + /* + * The SBI HSM suspend function is only available when: + * 1) SBI version is 0.3 or higher + * 2) SBI HSM extension is available + */ + if (sbi_spec_version < sbi_mk_version(0, 3) || + !sbi_probe_extension(SBI_EXT_HSM)) { + pr_info("HSM suspend not available\n"); + return false; + } + + return true; +} #endif /* CONFIG_RISCV_SBI */ diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index e8094fc92491e..a6e123dfe394d 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -73,26 +73,6 @@ static inline bool sbi_is_domain_state_available(void) return data->available; } -static int sbi_suspend_finisher(unsigned long suspend_type, - unsigned long resume_addr, - unsigned long opaque) -{ - struct sbiret ret; - - ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, - suspend_type, resume_addr, opaque, 0, 0, 0); - - return (ret.error) ? 
sbi_err_map_linux_errno(ret.error) : 0; -} - -static int sbi_suspend(u32 state) -{ - if (state & SBI_HSM_SUSP_NON_RET_BIT) - return cpu_suspend(state, sbi_suspend_finisher); - else - return sbi_suspend_finisher(state, 0, 0); -} - static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { @@ -100,9 +80,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, u32 state = states[idx]; if (state & SBI_HSM_SUSP_NON_RET_BIT) - return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state); + return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state); else - return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend, + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, idx, state); } @@ -133,7 +113,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, else state = states[idx]; - ret = sbi_suspend(state) ? -1 : idx; + ret = riscv_sbi_hart_suspend(state) ? -1 : idx; ct_cpuidle_exit(); @@ -206,17 +186,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = { { }, }; -static bool sbi_suspend_state_is_valid(u32 state) -{ - if (state > SBI_HSM_SUSPEND_RET_DEFAULT && - state < SBI_HSM_SUSPEND_RET_PLATFORM) - return false; - if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && - state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) - return false; - return true; -} - static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) { int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state); @@ -226,7 +195,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) return err; } - if (!sbi_suspend_state_is_valid(*state)) { + if (!riscv_sbi_suspend_state_is_valid(*state)) { pr_warn("Invalid SBI suspend state %#x\n", *state); return -EINVAL; } @@ -607,16 +576,8 @@ static int __init sbi_cpuidle_init(void) int ret; struct platform_device *pdev; - /* - * The SBI HSM suspend function is only available when: - * 1) SBI version is 0.3 or higher - * 2) SBI HSM extension is available - */ - if ((sbi_spec_version < sbi_mk_version(0, 3)) || - !sbi_probe_extension(SBI_EXT_HSM)) { - pr_info("HSM suspend not available\n"); + if (!riscv_sbi_hsm_is_supported()) return 0; - } ret = platform_driver_register(&sbi_cpuidle_driver); if (ret) -- cgit 1.2.3-korg From 4877fc92142f635be418d8c915eb48ef87681108 Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Thu, 18 Jan 2024 11:59:29 +0530 Subject: ACPI: RISC-V: Add LPI driver Enable Low Power Idle (LPI) based cpuidle driver for RISC-V platforms. It depends on SBI HSM calls for idle state transitions. Signed-off-by: Sunil V L Reviewed-by: Andrew Jones Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20240118062930.245937-3-sunilvl@ventanamicro.com Signed-off-by: Palmer Dabbelt --- drivers/acpi/riscv/Makefile | 3 +- drivers/acpi/riscv/cpuidle.c | 81 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 drivers/acpi/riscv/cpuidle.c diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile index 8b3b126e0b940..7309d92dd4772 100644 --- a/drivers/acpi/riscv/Makefile +++ b/drivers/acpi/riscv/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y += rhct.o +obj-y += rhct.o +obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c new file mode 100644 index 0000000000000..624f9bbdb58c4 --- /dev/null +++ b/drivers/acpi/riscv/cpuidle.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024, Ventana Micro Systems Inc + * Author: Sunil V L + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60) +#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32) + +#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60) + +static int acpi_cpu_init_idle(unsigned int cpu) +{ + int i; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + if (!riscv_sbi_hsm_is_supported()) + return -ENODEV; + + if (pr->power.count <= 1) + return -ENODEV; + + for (i = 1; i < pr->power.count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i]; + + /* + * Validate Entry Method as per FFH spec. + * bits[63:60] should be 0x1 + * bits[59:32] should be 0x0 + * bits[31:0] represent a SBI power_state + */ + if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) || + (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) { + pr_warn("Invalid LPI entry method %#llx\n", lpi->address); + return -EINVAL; + } + + state = lpi->address; + if (!riscv_sbi_suspend_state_is_valid(state)) { + pr_warn("Invalid SBI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + +int acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return acpi_cpu_init_idle(cpu); +} + +int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + u32 state = lpi->address; + + if (state & SBI_HSM_SUSP_NON_RET_BIT) + return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, + lpi->index, + state); + else + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, + lpi->index, + state); +} -- cgit 1.2.3-korg From 359df7c5be4ba5c57f641010be7237ad9f09ea53 Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Thu, 18 Jan 2024 11:59:30 +0530 Subject: ACPI: Enable ACPI_PROCESSOR for RISC-V The ACPI processor driver is not currently enabled for RISC-V. This is required to enable CPU related functionalities like LPI and CPPC. Hence, enable ACPI_PROCESSOR for RISC-V. Signed-off-by: Sunil V L Reviewed-by: Andrew Jones Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20240118062930.245937-4-sunilvl@ventanamicro.com Signed-off-by: Palmer Dabbelt --- drivers/acpi/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 3c3f8037ebedd..1606eb622a9ff 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -286,7 +286,7 @@ config ACPI_CPPC_LIB config ACPI_PROCESSOR tristate "Processor" - depends on X86 || ARM64 || LOONGARCH + depends on X86 || ARM64 || LOONGARCH || RISCV select ACPI_PROCESSOR_IDLE select ACPI_CPU_FREQ_PSS if X86 || LOONGARCH select THERMAL -- cgit 1.2.3-korg From 30f3ffbee86b576705aabdd9093165a49cd66011 Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Thu, 8 Feb 2024 09:14:12 +0530 Subject: ACPI: RISC-V: Add CPPC driver Add cpufreq driver based on ACPI CPPC for RISC-V. The driver uses either SBI CPPC interfaces or the CSRs to access the CPPC registers as defined by the RISC-V FFH spec. Signed-off-by: Sunil V L Reviewed-by: Pierre Gondois Acked-by: Rafael J. Wysocki Acked-by: Sudeep Holla Link: https://lore.kernel.org/r/20240208034414.22579-2-sunilvl@ventanamicro.com Signed-off-by: Palmer Dabbelt --- drivers/acpi/riscv/Makefile | 1 + drivers/acpi/riscv/cppc.c | 157 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 drivers/acpi/riscv/cppc.c diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile index 7309d92dd4772..86b0925f612d9 100644 --- a/drivers/acpi/riscv/Makefile +++ b/drivers/acpi/riscv/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += rhct.o obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o +obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c new file mode 100644 index 0000000000000..4cdff387deff6 --- /dev/null +++ b/drivers/acpi/riscv/cppc.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implement CPPC FFH helper routines for RISC-V. + * + * Copyright (C) 2024 Ventana Micro Systems Inc. 
+ */ + +#include +#include +#include + +#define SBI_EXT_CPPC 0x43505043 + +/* CPPC interfaces defined in SBI spec */ +#define SBI_CPPC_PROBE 0x0 +#define SBI_CPPC_READ 0x1 +#define SBI_CPPC_READ_HI 0x2 +#define SBI_CPPC_WRITE 0x3 + +/* RISC-V FFH definitions from RISC-V FFH spec */ +#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60) +#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0)) +#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0)) + +#define FFH_CPPC_SBI 0x1 +#define FFH_CPPC_CSR 0x2 + +struct sbi_cppc_data { + u64 val; + u32 reg; + struct sbiret ret; +}; + +static bool cppc_ext_present; + +static int __init sbi_cppc_init(void) +{ + if (sbi_spec_version >= sbi_mk_version(2, 0) && + sbi_probe_extension(SBI_EXT_CPPC) > 0) { + pr_info("SBI CPPC extension detected\n"); + cppc_ext_present = true; + } else { + pr_info("SBI CPPC extension NOT detected!!\n"); + cppc_ext_present = false; + } + + return 0; +} +device_initcall(sbi_cppc_init); + +static void sbi_cppc_read(void *read_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; + + data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ, + data->reg, 0, 0, 0, 0, 0); +} + +static void sbi_cppc_write(void *write_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; + + data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE, + data->reg, data->val, 0, 0, 0, 0); +} + +static void cppc_ffh_csr_read(void *read_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; + + switch (data->reg) { + /* Support only TIME CSR for now */ + case CSR_TIME: + data->ret.value = csr_read(CSR_TIME); + data->ret.error = 0; + break; + default: + data->ret.error = -EINVAL; + break; + } +} + +static void cppc_ffh_csr_write(void *write_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; + + data->ret.error = -EINVAL; +} + +/* + * Refer to drivers/acpi/cppc_acpi.c for the description of the functions + * below. + */ +bool cpc_ffh_supported(void) +{ + return true; +} + +int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) +{ + struct sbi_cppc_data data; + + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + + if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { + if (!cppc_ext_present) + return -EINVAL; + + data.reg = FFH_CPPC_SBI_REG(reg->address); + + smp_call_function_single(cpu, sbi_cppc_read, &data, 1); + + *val = data.ret.value; + + return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; + } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { + data.reg = FFH_CPPC_CSR_NUM(reg->address); + + smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1); + + *val = data.ret.value; + + return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; + } + + return -EINVAL; +} + +int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val) +{ + struct sbi_cppc_data data; + + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + + if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { + if (!cppc_ext_present) + return -EINVAL; + + data.reg = FFH_CPPC_SBI_REG(reg->address); + data.val = val; + + smp_call_function_single(cpu, sbi_cppc_write, &data, 1); + + return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; + } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { + data.reg = FFH_CPPC_CSR_NUM(reg->address); + data.val = val; + + smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1); + + return (data.ret.error) ? 
sbi_err_map_linux_errno(data.ret.error) : 0; + } + + return -EINVAL; +} -- cgit 1.2.3-korg From 7ee1378736f09fceef95d2c9122d2cff14a375da Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Thu, 8 Feb 2024 09:14:13 +0530 Subject: cpufreq: Move CPPC configs to common Kconfig and add RISC-V CPPC related config options are currently defined only in ARM specific file. However, they are required for RISC-V as well. Instead of creating a new Kconfig.riscv file and duplicating them, move them to the common Kconfig file and enable RISC-V too. Signed-off-by: Sunil V L Acked-by: Viresh Kumar Reviewed-by: Pierre Gondois Acked-by: Rafael J. Wysocki Acked-by: Sudeep Holla Link: https://lore.kernel.org/r/20240208034414.22579-3-sunilvl@ventanamicro.com Signed-off-by: Palmer Dabbelt --- drivers/cpufreq/Kconfig | 29 +++++++++++++++++++++++++++++ drivers/cpufreq/Kconfig.arm | 26 -------------------------- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 35efb53d5492a..94e55c40970a6 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -302,4 +302,33 @@ config QORIQ_CPUFREQ which are capable of changing the CPU's frequency dynamically. endif + +config ACPI_CPPC_CPUFREQ + tristate "CPUFreq driver based on the ACPI CPPC spec" + depends on ACPI_PROCESSOR + depends on ARM || ARM64 || RISCV + select ACPI_CPPC_LIB + help + This adds a CPUFreq driver which uses CPPC methods + as described in the ACPIv5.1 spec. CPPC stands for + Collaborative Processor Performance Controls. It + is based on an abstract continuous scale of CPU + performance values which allows the remote power + processor to flexibly optimize for power and + performance. CPPC relies on power management firmware + support for its operation. + + If in doubt, say N. + +config ACPI_CPPC_CPUFREQ_FIE + bool "Frequency Invariance support for CPPC cpufreq driver" + depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY + depends on ARM || ARM64 || RISCV + default y + help + This extends frequency invariance support in the CPPC cpufreq driver, + by using CPPC delivered and reference performance counters. + + If in doubt, say N. + endmenu diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index f911606897b8d..987b3d900a89b 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -3,32 +3,6 @@ # ARM CPU Frequency scaling drivers # -config ACPI_CPPC_CPUFREQ - tristate "CPUFreq driver based on the ACPI CPPC spec" - depends on ACPI_PROCESSOR - select ACPI_CPPC_LIB - help - This adds a CPUFreq driver which uses CPPC methods - as described in the ACPIv5.1 spec. CPPC stands for - Collaborative Processor Performance Controls. It - is based on an abstract continuous scale of CPU - performance values which allows the remote power - processor to flexibly optimize for power and - performance. CPPC relies on power management firmware - support for its operation. - - If in doubt, say N. - -config ACPI_CPPC_CPUFREQ_FIE - bool "Frequency Invariance support for CPPC cpufreq driver" - depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY - default y - help - This extends frequency invariance support in the CPPC cpufreq driver, - by using CPPC delivered and reference performance counters. - - If in doubt, say N. 
- config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM tristate "Allwinner nvmem based SUN50I CPUFreq driver" depends on ARCH_SUNXI -- cgit 1.2.3-korg From 282b9df4e9603bbb5c9cbf3ea60bc393287e8a2f Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Thu, 8 Feb 2024 09:14:14 +0530 Subject: RISC-V: defconfig: Enable CONFIG_ACPI_CPPC_CPUFREQ CONFIG_ACPI_CPPC_CPUFREQ is required to enable CPPC for RISC-V. Signed-off-by: Sunil V L Reviewed-by: Pierre Gondois Acked-by: Rafael J. Wysocki Acked-by: Sudeep Holla Link: https://lore.kernel.org/r/20240208034414.22579-4-sunilvl@ventanamicro.com Signed-off-by: Palmer Dabbelt --- arch/riscv/configs/defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index eaf34e871e308..2988ecd3eb4d6 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -44,6 +44,7 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_CPUFREQ_DT=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_ACPI=y -- cgit 1.2.3-korg From 89f4fd7b1ab7733d9d817e7123c58996ca38ae98 Mon Sep 17 00:00:00 2001 From: Eric Chan Date: Sat, 17 Feb 2024 13:12:49 +0000 Subject: riscv/barrier: Define __{mb,rmb,wmb} Introduce __{mb,rmb,wmb}, and rely on the generic definitions for {mb,rmb,wmb}. Although KCSAN is not supported yet, the definitions can be made more consistent with generic instrumentation. Also add a space to make the changes pass the checkpatch.pl check. Without the space, the error message is as below: ERROR: space required after that ',' (ctx:VxV) 26: FILE: arch/riscv/include/asm/barrier.h:23: +#define __mb() RISCV_FENCE(iorw,iorw) ^ Signed-off-by: Eric Chan Reviewed-by: Andrea Parri Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20240217131249.3668103-1-ericchancf@google.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/barrier.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 110752594228e..173b44a989f8b 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -20,9 +20,9 @@ __asm__ __volatile__ ("fence " #p "," #s : : : "memory") /* These barriers need to enforce ordering on both devices or memory. */ -#define mb() RISCV_FENCE(iorw,iorw) -#define rmb() RISCV_FENCE(ir,ir) -#define wmb() RISCV_FENCE(ow,ow) +#define __mb() RISCV_FENCE(iorw, iorw) +#define __rmb() RISCV_FENCE(ir, ir) +#define __wmb() RISCV_FENCE(ow, ow) /* These barriers do not need to enforce ordering on devices, just memory. */ #define __smp_mb() RISCV_FENCE(rw,rw) -- cgit 1.2.3-korg From b3c8064ccc447be45a3bdc2c4a9ea0491f011920 Mon Sep 17 00:00:00 2001 From: Eric Chan Date: Sat, 17 Feb 2024 13:13:02 +0000 Subject: riscv/barrier: Define RISCV_FULL_BARRIER Introduce RISCV_FULL_BARRIER and use it in the arch_atomic* functions. Like RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER, the fence instruction can be eliminated when SMP is not enabled.
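(A toy preprocessor sketch of the elision pattern described above; CONFIG_SMP here is a stand-in macro, not the kernel's Kconfig symbol.)

#include <stdio.h>

#define CONFIG_SMP 1   /* flip to 0 to model a UP build */

#if CONFIG_SMP
#define RISCV_FULL_BARRIER "\tfence rw, rw\n"
#else
#define RISCV_FULL_BARRIER ""  /* expands to nothing: no fence emitted */
#endif

int main(void)
{
        /* The kernel builds its inline-asm templates by string pasting,
         * e.g. "... sc.w.rl %[rc], %[rc], %[c]\n" RISCV_FULL_BARRIER "1:\n";
         * on !SMP builds the barrier contributes no instruction at all. */
        fputs("template tail:\n" RISCV_FULL_BARRIER "1:\n", stdout);
        return 0;
}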
Signed-off-by: Eric Chan Reviewed-by: Andrea Parri Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20240217131302.3668481-1-ericchancf@google.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/atomic.h | 16 ++++++++-------- arch/riscv/include/asm/cmpxchg.h | 4 ++-- arch/riscv/include/asm/fence.h | 2 ++ 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index f5dfef6c2153f..31e6e2e7cc181 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -207,7 +207,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int " add %[rc], %[p], %[a]\n" " sc.w.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : [a]"r" (a), [u]"r" (u) @@ -228,7 +228,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, " add %[rc], %[p], %[a]\n" " sc.d.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : [a]"r" (a), [u]"r" (u) @@ -248,7 +248,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) " addi %[rc], %[p], 1\n" " sc.w.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : @@ -268,7 +268,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v) " addi %[rc], %[p], -1\n" " sc.w.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : @@ -288,7 +288,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v) " bltz %[rc], 1f\n" " sc.w.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : @@ -310,7 +310,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v) " addi %[rc], %[p], 1\n" " sc.d.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : @@ -331,7 +331,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v) " addi %[rc], %[p], -1\n" " sc.d.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : @@ -352,7 +352,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) " bltz %[rc], 1f\n" " sc.d.rl %[rc], %[rc], %[c]\n" " bnez %[rc], 0b\n" - " fence rw, rw\n" + RISCV_FULL_BARRIER "1:\n" : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) : diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 2f4726d3cfcc2..a608e4d1a0a41 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -313,7 +313,7 @@ " bne %0, %z3, 1f\n" \ " sc.w.rl %1, %z4, %2\n" \ " bnez %1, 0b\n" \ - " fence rw, rw\n" \ + RISCV_FULL_BARRIER \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ : "rJ" ((long)__old), "rJ" (__new) \ @@ -325,7 +325,7 @@ " bne %0, %z3, 1f\n" \ " sc.d.rl %1, %z4, %2\n" \ " bnez %1, 0b\n" \ - " fence rw, rw\n" \ + RISCV_FULL_BARRIER \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ : "rJ" (__old), "rJ" (__new) \ diff --git 
a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h index 2b443a3a487f3..6c26c44dfcd62 100644 --- a/arch/riscv/include/asm/fence.h +++ b/arch/riscv/include/asm/fence.h @@ -4,9 +4,11 @@ #ifdef CONFIG_SMP #define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n" #define RISCV_RELEASE_BARRIER "\tfence rw, w\n" +#define RISCV_FULL_BARRIER "\tfence rw, rw\n" #else #define RISCV_ACQUIRE_BARRIER #define RISCV_RELEASE_BARRIER +#define RISCV_FULL_BARRIER #endif #endif /* _ASM_RISCV_FENCE_H */ -- cgit 1.2.3-korg From c85688e2b0f0afbce7ea3cd8c47f2be67c09b9f4 Mon Sep 17 00:00:00 2001 From: Eric Chan Date: Sat, 17 Feb 2024 13:13:16 +0000 Subject: riscv/barrier: Consolidate fence definitions Disparate fence implementations are consolidated into fence.h. Also introduce RISCV_FENCE_ASM to make fence macro more reusable. Signed-off-by: Eric Chan Reviewed-by: Andrea Parri Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20240217131316.3668927-1-ericchancf@google.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/atomic.h | 1 - arch/riscv/include/asm/barrier.h | 3 +-- arch/riscv/include/asm/cmpxchg.h | 1 - arch/riscv/include/asm/fence.h | 10 +++++++--- arch/riscv/include/asm/io.h | 8 ++++---- arch/riscv/include/asm/mmio.h | 5 +++-- arch/riscv/include/asm/mmiowb.h | 2 +- 7 files changed, 16 insertions(+), 14 deletions(-) diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index 31e6e2e7cc181..0e0522e588ca6 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -17,7 +17,6 @@ #endif #include -#include #define __atomic_acquire_fence() \ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 173b44a989f8b..15857dbc2279f 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -11,13 +11,12 @@ #define _ASM_RISCV_BARRIER_H #ifndef __ASSEMBLY__ +#include #define nop() __asm__ __volatile__ ("nop") #define __nops(n) ".rept " #n "\nnop\n.endr\n" #define nops(n) __asm__ __volatile__ (__nops(n)) -#define RISCV_FENCE(p, s) \ - __asm__ __volatile__ ("fence " #p "," #s : : : "memory") /* These barriers need to enforce ordering on both devices or memory. 
*/ #define __mb() RISCV_FENCE(iorw, iorw) diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index a608e4d1a0a41..2fee65cc84432 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -8,7 +8,6 @@ #include <linux/bug.h> -#include <asm/barrier.h> #include <asm/fence.h> #define __xchg_relaxed(ptr, new, size) \ diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h index 6c26c44dfcd62..6bcd80325dfc1 100644 --- a/arch/riscv/include/asm/fence.h +++ b/arch/riscv/include/asm/fence.h @@ -1,10 +1,14 @@ #ifndef _ASM_RISCV_FENCE_H #define _ASM_RISCV_FENCE_H +#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n" +#define RISCV_FENCE(p, s) \ + ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); }) + #ifdef CONFIG_SMP -#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n" -#define RISCV_RELEASE_BARRIER "\tfence rw, w\n" -#define RISCV_FULL_BARRIER "\tfence rw, rw\n" +#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw) +#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w) +#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw) #else #define RISCV_ACQUIRE_BARRIER #define RISCV_RELEASE_BARRIER diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 42497d487a174..1c5c641075d2f 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -47,10 +47,10 @@ * sufficient to ensure this works sanely on controllers that support I/O * writes. */ -#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory"); -#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory"); -#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); -#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); +#define __io_pbr() RISCV_FENCE(io, i) +#define __io_par(v) RISCV_FENCE(i, ior) +#define __io_pbw() RISCV_FENCE(iow, o) +#define __io_paw() RISCV_FENCE(o, io) /* * Accesses from a single hart to a single I/O address must be ordered. This diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h index 4c58ee7f95ecf..06cadfd7a237a 100644 --- a/arch/riscv/include/asm/mmio.h +++ b/arch/riscv/include/asm/mmio.h @@ -12,6 +12,7 @@ #define _ASM_RISCV_MMIO_H #include <linux/types.h> +#include <asm/fence.h> #include <asm/mmiowb.h> /* Generic IO read/write. These perform native-endian accesses. */ @@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * doesn't define any ordering between the memory space and the I/O space. */ #define __io_br() do {} while (0) -#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) -#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) +#define __io_ar(v) RISCV_FENCE(i, ir) +#define __io_bw() RISCV_FENCE(w, o) #define __io_aw() mmiowb_set_pending() #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h index 0b2333e71fdc5..52ce4a399d9b2 100644 --- a/arch/riscv/include/asm/mmiowb.h +++ b/arch/riscv/include/asm/mmiowb.h @@ -7,7 +7,7 @@ * "o,w" is sufficient to ensure that all writes to the device have completed * before the write to the spinlock is allowed to commit.
*/ -#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory"); +#define mmiowb() RISCV_FENCE(o, w) #include <linux/smp.h> #include <asm-generic/mmiowb.h> -- cgit 1.2.3-korg From 9133e6e6908d95812e89619f8a86abd0deb17bf3 Mon Sep 17 00:00:00 2001 From: Eric Chan Date: Sat, 17 Feb 2024 13:13:28 +0000 Subject: riscv/barrier: Add missing space after ',' The old form of RISCV_FENCE caused checkpatch.pl to issue error messages such as: ERROR: space required after that ',' (ctx:VxV) 26: FILE: arch/riscv/include/asm/barrier.h:27: +#define __smp_mb() RISCV_FENCE(rw,rw) ^ Fix the remaining uses of RISCV_FENCE accordingly. Signed-off-by: Eric Chan Reviewed-by: Andrea Parri Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20240217131328.3669364-1-ericchancf@google.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/barrier.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 15857dbc2279f..880b56d8480d1 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -24,14 +24,14 @@ #define __wmb() RISCV_FENCE(ow, ow) /* These barriers do not need to enforce ordering on devices, just memory. */ -#define __smp_mb() RISCV_FENCE(rw,rw) -#define __smp_rmb() RISCV_FENCE(r,r) -#define __smp_wmb() RISCV_FENCE(w,w) +#define __smp_mb() RISCV_FENCE(rw, rw) +#define __smp_rmb() RISCV_FENCE(r, r) +#define __smp_wmb() RISCV_FENCE(w, w) #define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(rw,w); \ + RISCV_FENCE(rw, w); \ WRITE_ONCE(*p, v); \ } while (0) @@ -39,7 +39,7 @@ do { \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(r,rw); \ + RISCV_FENCE(r, rw); \ ___p1; \ }) @@ -68,7 +68,7 @@ do { \ * instances the scheduler pairs this with an mb(), so nothing is necessary on * the new hart. */ -#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) +#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw) #include <asm-generic/barrier.h> -- cgit 1.2.3-korg From 28e4748e5e3d313056ed38abeff4b455abd02c1b Mon Sep 17 00:00:00 2001 From: Erick Archer Date: Sat, 20 Jan 2024 14:54:00 +0100 Subject: riscv: Use kcalloc() instead of kzalloc() As noted in the "Deprecated Interfaces, Language Features, Attributes, and Conventions" documentation [1], size calculations (especially multiplication) should not be performed in memory allocator (or similar) function arguments due to the risk of them overflowing. This could lead to values wrapping around and a smaller allocation being made than the caller was expecting. Using those allocations could lead to linear overflows of heap memory and other misbehaviors. So, use the purpose-specific kcalloc() function instead of the argument count * size in the kzalloc() function. Also, it is preferred to use sizeof(*pointer) instead of sizeof(type), because the type of the variable can change and the former then needs no update (unlike the latter). Link: https://www.kernel.org/doc/html/next/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments [1] Link: https://github.com/KSPP/linux/issues/162 Signed-off-by: Erick Archer Reviewed-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Gustavo A. R.
Silva Link: https://lore.kernel.org/r/20240120135400.4710-1-erick.archer@gmx.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/unaligned_access_speed.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index 52264ea4f0bd7..a9a6bcb02acf1 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -218,8 +218,7 @@ static int check_unaligned_access_speed_all_cpus(void) { unsigned int cpu; unsigned int cpu_count = num_possible_cpus(); - struct page **bufs = kzalloc(cpu_count * sizeof(struct page *), - GFP_KERNEL); + struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL); if (!bufs) { pr_warn("Allocation failure, not measuring misaligned performance\n"); -- cgit 1.2.3-korg From 01261e24cfab69c65043e1e61168348ae23a64c2 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Fri, 2 Feb 2024 13:47:11 +0100 Subject: riscv: Only flush the mm icache when setting an exec pte We used to emit a flush_icache_all() whenever a dirty executable mapping is set in the page table but we can instead call flush_icache_mm() which will only send IPIs to cores that currently run this mm and add a deferred icache flush to the others. The number of calls to sbi_remote_fence_i() (tested without IPI support): With a simple buildroot rootfs: * Before: ~5k * After : 4 (!) Tested on HW, the boot to login is ~4.5% faster. With an ubuntu rootfs: * Before: ~24k * After : ~13k Signed-off-by: Alexandre Ghiti Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240202124711.256146-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable.h | 14 +++++++------- arch/riscv/mm/cacheflush.c | 4 ++-- arch/riscv/mm/pgtable.c | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 9c45810806ff4..b2e6965748f21 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -513,12 +513,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) WRITE_ONCE(*ptep, pteval); } -void flush_icache_pte(pte_t pte); +void flush_icache_pte(struct mm_struct *mm, pte_t pte); -static inline void __set_pte_at(pte_t *ptep, pte_t pteval) +static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval) { if (pte_present(pteval) && pte_exec(pteval)) - flush_icache_pte(pteval); + flush_icache_pte(mm, pteval); set_pte(ptep, pteval); } @@ -529,7 +529,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, page_table_check_ptes_set(mm, ptep, pteval, nr); for (;;) { - __set_pte_at(ptep, pteval); + __set_pte_at(mm, ptep, pteval); if (--nr == 0) break; ptep++; @@ -541,7 +541,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - __set_pte_at(ptep, __pte(0)); + __set_pte_at(mm, ptep, __pte(0)); } #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */ @@ -713,14 +713,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { page_table_check_pmd_set(mm, pmdp, pmd); - return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd)); + return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd)); } static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp, pud_t pud) { page_table_check_pud_set(mm, pudp, pud); - return __set_pte_at((pte_t 
*)pudp, pud_pte(pud)); + return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud)); } #ifdef CONFIG_PAGE_TABLE_CHECK diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 55a34f2020a85..bc61ee5975e41 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -82,12 +82,12 @@ void flush_icache_mm(struct mm_struct *mm, bool local) #endif /* CONFIG_SMP */ #ifdef CONFIG_MMU -void flush_icache_pte(pte_t pte) +void flush_icache_pte(struct mm_struct *mm, pte_t pte) { struct folio *folio = page_folio(pte_page(pte)); if (!test_bit(PG_dcache_clean, &folio->flags)) { - flush_icache_all(); + flush_icache_mm(mm, false); set_bit(PG_dcache_clean, &folio->flags); } } diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c index ef887efcb6790..533ec9055fa0d 100644 --- a/arch/riscv/mm/pgtable.c +++ b/arch/riscv/mm/pgtable.c @@ -10,7 +10,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, pte_t entry, int dirty) { if (!pte_same(ptep_get(ptep), entry)) - __set_pte_at(ptep, entry); + __set_pte_at(vma->vm_mm, ptep, entry); /* * update_mmu_cache will unconditionally execute, handling both * the case that the PTE changed and the spurious fault case. -- cgit 1.2.3-korg From da215b089b5d4ff30745c59922b54b309d55a5d8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 7 Feb 2024 22:08:51 -0800 Subject: crypto: riscv - parallelize AES-CBC decryption Since CBC decryption is parallelizable, make the RISC-V implementation of AES-CBC decryption process multiple blocks at a time, instead of processing the blocks one by one. This should improve performance. Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20240208060851.154129-1-ebiggers@kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/crypto/aes-riscv64-zvkned.S | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/arch/riscv/crypto/aes-riscv64-zvkned.S b/arch/riscv/crypto/aes-riscv64-zvkned.S index 78d4e1186c074..43541aad6386c 100644 --- a/arch/riscv/crypto/aes-riscv64-zvkned.S +++ b/arch/riscv/crypto/aes-riscv64-zvkned.S @@ -139,19 +139,25 @@ SYM_FUNC_END(aes_ecb_decrypt_zvkned) .endm .macro aes_cbc_decrypt keylen + srli LEN, LEN, 2 // Convert LEN from bytes to words vle32.v v16, (IVP) // Load IV 1: - vle32.v v17, (INP) // Load ciphertext block - vmv.v.v v18, v17 // Save ciphertext block - aes_decrypt v17, \keylen // Decrypt - vxor.vv v17, v17, v16 // XOR with IV or prev ciphertext block - vse32.v v17, (OUTP) // Store plaintext block - vmv.v.v v16, v18 // Next "IV" is prev ciphertext block - addi INP, INP, 16 - addi OUTP, OUTP, 16 - addi LEN, LEN, -16 + vsetvli t0, LEN, e32, m4, ta, ma + vle32.v v20, (INP) // Load ciphertext blocks + vslideup.vi v16, v20, 4 // Setup prev ciphertext blocks + addi t1, t0, -4 + vslidedown.vx v24, v20, t1 // Save last ciphertext block + aes_decrypt v20, \keylen // Decrypt the blocks + vxor.vv v20, v20, v16 // XOR with prev ciphertext blocks + vse32.v v20, (OUTP) // Store plaintext blocks + vmv.v.v v16, v24 // Next "IV" is last ciphertext block + slli t1, t0, 2 // Words to bytes + add INP, INP, t1 + add OUTP, OUTP, t1 + sub LEN, LEN, t0 bnez LEN, 1b + vsetivli zero, 4, e32, m1, ta, ma vse32.v v16, (IVP) // Store next IV ret .endm -- cgit 1.2.3-korg From c70dfa4a2723ff5046fdc6d8a054713483f64f1b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 12 Feb 2024 21:54:42 -0800 Subject: crypto: riscv - add vector crypto accelerated AES-CBC-CTS Add an implementation of cts(cbc(aes)) accelerated using the Zvkned RISC-V vector crypto extension. 
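For context: CBC-CTS ("ciphertext stealing") extends CBC to messages whose length is not a multiple of the AES block size, and the CS3 variant unconditionally swaps the last two ciphertext blocks. The C sketch below is an editorial illustration of that data movement only, not code from this series; cbc_cts_encrypt(), block_encrypt(), and xor_bytes() are invented names, and block_encrypt() is a toy permutation standing in for AES.

#include <stdint.h>
#include <string.h>

#define BLK 16

/* Toy stand-in for AES block encryption -- NOT a real cipher. */
static void block_encrypt(uint8_t out[BLK], const uint8_t in[BLK])
{
    for (int i = 0; i < BLK; i++)
        out[i] = (uint8_t)(in[(i + 1) % BLK] ^ 0x5a);
}

static void xor_bytes(uint8_t *dst, const uint8_t *src, size_t n)
{
    while (n--)
        dst[n] ^= src[n];
}

/*
 * CBC-CTS (CS3) encryption of len bytes, len >= BLK. The tail block holds
 * 1..BLK bytes. The last two output blocks are always
 *   C[n-1] = E(E(P[n-1] ^ C[n-2]) ^ P[n])   (P[n] zero-padded)
 *   C[n]   = E(P[n-1] ^ C[n-2])[0..tail]
 */
static void cbc_cts_encrypt(uint8_t *out, const uint8_t *in, size_t len,
                            const uint8_t iv[BLK])
{
    size_t tail = len % BLK ? len % BLK : BLK;
    size_t nfull = (len - tail) / BLK;  /* full blocks before the tail */
    uint8_t prev[BLK], tmp[BLK], e[BLK];
    size_t i;

    memcpy(prev, iv, BLK);
    if (nfull == 0) {                   /* single-block message: plain CBC */
        memcpy(tmp, in, BLK);
        xor_bytes(tmp, prev, BLK);
        block_encrypt(out, tmp);
        return;
    }
    for (i = 0; i + 1 < nfull; i++) {   /* ordinary CBC up to block n-2 */
        memcpy(tmp, in + i * BLK, BLK);
        xor_bytes(tmp, prev, BLK);
        block_encrypt(prev, tmp);
        memcpy(out + i * BLK, prev, BLK);
    }
    memcpy(tmp, in + (nfull - 1) * BLK, BLK);
    xor_bytes(tmp, prev, BLK);
    block_encrypt(e, tmp);              /* e = E(P[n-1] ^ C[n-2]) */
    memcpy(out + nfull * BLK, e, tail); /* C[n], truncated to the tail */
    xor_bytes(e, in + nfull * BLK, tail);
    block_encrypt(out + (nfull - 1) * BLK, e); /* C[n-1], a full block */
}

Decryption inverts those two equations, which is why the assembly added by this patch can reconstruct P[n-1] and P[n] from the raw AES decryptions of C[n] and C[n-1] without decrypting any block twice.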
This is mainly useful for fscrypt, where cts(cbc(aes)) is the "default" filenames encryption algorithm. In that use case, typically most messages are short and are block-aligned. The CBC-CTS variant implemented is CS3; this is the variant Linux uses. To perform well on short messages, the new implementation processes the full message in one call to the assembly function if the data is contiguous. Otherwise it falls back to CBC operations followed by CTS at the end. For decryption, to further improve performance on short messages, especially block-aligned messages, the CBC-CTS assembly function parallelizes the AES decryption of all full blocks. This improves on the arm64 implementation of cts(cbc(aes)), which always splits the CBC part(s) from the CTS part, doing the AES decryptions for the last two blocks serially and usually loading the round keys twice. Tested in QEMU with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y. Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20240213055442.35954-1-ebiggers@kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/crypto/Kconfig | 4 +- arch/riscv/crypto/aes-riscv64-glue.c | 93 +++++++++++++++++++- arch/riscv/crypto/aes-riscv64-zvkned.S | 153 +++++++++++++++++++++++++++++++++ 3 files changed, 245 insertions(+), 5 deletions(-) diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig index 2ad44e1d464af..ad58dad9a5807 100644 --- a/arch/riscv/crypto/Kconfig +++ b/arch/riscv/crypto/Kconfig @@ -3,14 +3,14 @@ menu "Accelerated Cryptographic Algorithms for CPU (riscv)" config CRYPTO_AES_RISCV64 - tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS" + tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS" depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO select CRYPTO_ALGAPI select CRYPTO_LIB_AES select CRYPTO_SKCIPHER help Block cipher: AES cipher algorithms - Length-preserving ciphers: AES with ECB, CBC, CTR, XTS + Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTS Architecture: riscv64 using: - Zvkned vector crypto extension diff --git a/arch/riscv/crypto/aes-riscv64-glue.c b/arch/riscv/crypto/aes-riscv64-glue.c index 37bc6ef0be40e..f814ee048555b 100644 --- a/arch/riscv/crypto/aes-riscv64-glue.c +++ b/arch/riscv/crypto/aes-riscv64-glue.c @@ -1,13 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-only /* * AES using the RISC-V vector crypto extensions. Includes the bare block - * cipher and the ECB, CBC, CTR, and XTS modes. + * cipher and the ECB, CBC, CBC-CTS, CTR, and XTS modes. * * Copyright (C) 2023 VRULL GmbH * Author: Heiko Stuebner * * Copyright (C) 2023 SiFive, Inc. 
* Author: Jerry Shih + * + * Copyright 2024 Google LLC */ #include @@ -40,6 +42,10 @@ asmlinkage void aes_cbc_decrypt_zvkned(const struct crypto_aes_ctx *key, const u8 *in, u8 *out, size_t len, u8 iv[AES_BLOCK_SIZE]); +asmlinkage void aes_cbc_cts_crypt_zvkned(const struct crypto_aes_ctx *key, + const u8 *in, u8 *out, size_t len, + const u8 iv[AES_BLOCK_SIZE], bool enc); + asmlinkage void aes_ctr32_crypt_zvkned_zvkb(const struct crypto_aes_ctx *key, const u8 *in, u8 *out, size_t len, u8 iv[AES_BLOCK_SIZE]); @@ -164,7 +170,7 @@ static int riscv64_aes_ecb_decrypt(struct skcipher_request *req) /* AES-CBC */ -static inline int riscv64_aes_cbc_crypt(struct skcipher_request *req, bool enc) +static int riscv64_aes_cbc_crypt(struct skcipher_request *req, bool enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -202,6 +208,70 @@ static int riscv64_aes_cbc_decrypt(struct skcipher_request *req) return riscv64_aes_cbc_crypt(req, false); } +/* AES-CBC-CTS */ + +static int riscv64_aes_cbc_cts_crypt(struct skcipher_request *req, bool enc) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct scatterlist sg_src[2], sg_dst[2]; + struct skcipher_request subreq; + struct scatterlist *src, *dst; + struct skcipher_walk walk; + unsigned int cbc_len; + int err; + + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + if (err) + return err; + /* + * If the full message is available in one step, decrypt it in one call + * to the CBC-CTS assembly function. This reduces overhead, especially + * on short messages. Otherwise, fall back to doing CBC up to the last + * two blocks, then invoke CTS just for the ciphertext stealing. 
+ */ + if (unlikely(walk.nbytes != req->cryptlen)) { + cbc_len = round_down(req->cryptlen - AES_BLOCK_SIZE - 1, + AES_BLOCK_SIZE); + skcipher_walk_abort(&walk); + skcipher_request_set_tfm(&subreq, tfm); + skcipher_request_set_callback(&subreq, + skcipher_request_flags(req), + NULL, NULL); + skcipher_request_set_crypt(&subreq, req->src, req->dst, + cbc_len, req->iv); + err = riscv64_aes_cbc_crypt(&subreq, enc); + if (err) + return err; + dst = src = scatterwalk_ffwd(sg_src, req->src, cbc_len); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, cbc_len); + skcipher_request_set_crypt(&subreq, src, dst, + req->cryptlen - cbc_len, req->iv); + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + } + kernel_vector_begin(); + aes_cbc_cts_crypt_zvkned(ctx, walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, req->iv, enc); + kernel_vector_end(); + return skcipher_walk_done(&walk, 0); +} + +static int riscv64_aes_cbc_cts_encrypt(struct skcipher_request *req) +{ + return riscv64_aes_cbc_cts_crypt(req, true); +} + +static int riscv64_aes_cbc_cts_decrypt(struct skcipher_request *req) +{ + return riscv64_aes_cbc_cts_crypt(req, false); +} + /* AES-CTR */ static int riscv64_aes_ctr_crypt(struct skcipher_request *req) @@ -434,6 +504,22 @@ static struct skcipher_alg riscv64_zvkned_aes_skcipher_algs[] = { .cra_driver_name = "cbc-aes-riscv64-zvkned", .cra_module = THIS_MODULE, }, + }, { + .setkey = riscv64_aes_setkey_skcipher, + .encrypt = riscv64_aes_cbc_cts_encrypt, + .decrypt = riscv64_aes_cbc_cts_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */ + .base = { + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx), + .cra_priority = 300, + .cra_name = "cts(cbc(aes))", + .cra_driver_name = "cts-cbc-aes-riscv64-zvkned", + .cra_module = THIS_MODULE, + }, } }; @@ -540,11 +626,12 @@ static void __exit riscv64_aes_mod_exit(void) module_init(riscv64_aes_mod_init); module_exit(riscv64_aes_mod_exit); -MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS (RISC-V accelerated)"); +MODULE_DESCRIPTION("AES-ECB/CBC/CTS/CTR/XTS (RISC-V accelerated)"); MODULE_AUTHOR("Jerry Shih "); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("aes"); MODULE_ALIAS_CRYPTO("ecb(aes)"); MODULE_ALIAS_CRYPTO("cbc(aes)"); +MODULE_ALIAS_CRYPTO("cts(cbc(aes))"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); diff --git a/arch/riscv/crypto/aes-riscv64-zvkned.S b/arch/riscv/crypto/aes-riscv64-zvkned.S index 43541aad6386c..23d063f94ce61 100644 --- a/arch/riscv/crypto/aes-riscv64-zvkned.S +++ b/arch/riscv/crypto/aes-riscv64-zvkned.S @@ -184,3 +184,156 @@ SYM_FUNC_START(aes_cbc_decrypt_zvkned) 192: aes_cbc_decrypt 192 SYM_FUNC_END(aes_cbc_decrypt_zvkned) + +.macro aes_cbc_cts_encrypt keylen + + // CBC-encrypt all blocks except the last. But don't store the + // second-to-last block to the output buffer yet, since it will be + // handled specially in the ciphertext stealing step. Exception: if the + // message is single-block, still encrypt the last (and only) block. + li t0, 16 + j 2f +1: + vse32.v v16, (OUTP) // Store ciphertext block + addi OUTP, OUTP, 16 +2: + vle32.v v17, (INP) // Load plaintext block + vxor.vv v16, v16, v17 // XOR with IV or prev ciphertext block + aes_encrypt v16, \keylen // Encrypt + addi INP, INP, 16 + addi LEN, LEN, -16 + bgt LEN, t0, 1b // Repeat if more than one block remains + + // Special case: if the message is a single block, just do CBC. 
+ beqz LEN, .Lcts_encrypt_done\@ + + // Encrypt the last two blocks using ciphertext stealing as follows: + // C[n-1] = Encrypt(Encrypt(P[n-1] ^ C[n-2]) ^ P[n]) + // C[n] = Encrypt(P[n-1] ^ C[n-2])[0..LEN] + // + // C[i] denotes the i'th ciphertext block, and likewise P[i] the i'th + // plaintext block. Block n, the last block, may be partial; its length + // is 1 <= LEN <= 16. If there are only 2 blocks, C[n-2] means the IV. + // + // v16 already contains Encrypt(P[n-1] ^ C[n-2]). + // INP points to P[n]. OUTP points to where C[n-1] should go. + // To support in-place encryption, load P[n] before storing C[n]. + addi t0, OUTP, 16 // Get pointer to where C[n] should go + vsetvli zero, LEN, e8, m1, tu, ma + vle8.v v17, (INP) // Load P[n] + vse8.v v16, (t0) // Store C[n] + vxor.vv v16, v16, v17 // v16 = Encrypt(P[n-1] ^ C[n-2]) ^ P[n] + vsetivli zero, 4, e32, m1, ta, ma + aes_encrypt v16, \keylen +.Lcts_encrypt_done\@: + vse32.v v16, (OUTP) // Store C[n-1] (or C[n] in single-block case) + ret +.endm + +#define LEN32 t4 // Length of remaining full blocks in 32-bit words +#define LEN_MOD16 t5 // Length of message in bytes mod 16 + +.macro aes_cbc_cts_decrypt keylen + andi LEN32, LEN, ~15 + srli LEN32, LEN32, 2 + andi LEN_MOD16, LEN, 15 + + // Save C[n-2] in v28 so that it's available later during the ciphertext + // stealing step. If there are fewer than three blocks, C[n-2] means + // the IV, otherwise it means the third-to-last ciphertext block. + vmv.v.v v28, v16 // IV + add t0, LEN, -33 + bltz t0, .Lcts_decrypt_loop\@ + andi t0, t0, ~15 + add t0, t0, INP + vle32.v v28, (t0) + + // CBC-decrypt all full blocks. For the last full block, or the last 2 + // full blocks if the message is block-aligned, this doesn't write the + // correct output blocks (unless the message is only a single block), + // because it XORs the wrong values with the raw AES plaintexts. But we + // fix this after this loop without redoing the AES decryptions. This + // approach allows more of the AES decryptions to be parallelized. +.Lcts_decrypt_loop\@: + vsetvli t0, LEN32, e32, m4, ta, ma + addi t1, t0, -4 + vle32.v v20, (INP) // Load next set of ciphertext blocks + vmv.v.v v24, v16 // Get IV or last ciphertext block of prev set + vslideup.vi v24, v20, 4 // Setup prev ciphertext blocks + vslidedown.vx v16, v20, t1 // Save last ciphertext block of this set + aes_decrypt v20, \keylen // Decrypt this set of blocks + vxor.vv v24, v24, v20 // XOR prev ciphertext blocks with decrypted blocks + vse32.v v24, (OUTP) // Store this set of plaintext blocks + sub LEN32, LEN32, t0 + slli t0, t0, 2 // Words to bytes + add INP, INP, t0 + add OUTP, OUTP, t0 + bnez LEN32, .Lcts_decrypt_loop\@ + + vsetivli zero, 4, e32, m4, ta, ma + vslidedown.vx v20, v20, t1 // Extract raw plaintext of last full block + addi t0, OUTP, -16 // Get pointer to last full plaintext block + bnez LEN_MOD16, .Lcts_decrypt_non_block_aligned\@ + + // Special case: if the message is a single block, just do CBC. + li t1, 16 + beq LEN, t1, .Lcts_decrypt_done\@ + + // Block-aligned message. Just fix up the last 2 blocks. We need: + // + // P[n-1] = Decrypt(C[n]) ^ C[n-2] + // P[n] = Decrypt(C[n-1]) ^ C[n] + // + // We have C[n] in v16, Decrypt(C[n]) in v20, and C[n-2] in v28. + // Together with Decrypt(C[n-1]) ^ C[n-2] from the output buffer, this + // is everything needed to fix the output without re-decrypting blocks. 
+ addi t1, OUTP, -32 // Get pointer to where P[n-1] should go + vxor.vv v20, v20, v28 // Decrypt(C[n]) ^ C[n-2] == P[n-1] + vle32.v v24, (t1) // Decrypt(C[n-1]) ^ C[n-2] + vse32.v v20, (t1) // Store P[n-1] + vxor.vv v20, v24, v16 // Decrypt(C[n-1]) ^ C[n-2] ^ C[n] == P[n] ^ C[n-2] + j .Lcts_decrypt_finish\@ + +.Lcts_decrypt_non_block_aligned\@: + // Decrypt the last two blocks using ciphertext stealing as follows: + // + // P[n-1] = Decrypt(C[n] || Decrypt(C[n-1])[LEN_MOD16..16]) ^ C[n-2] + // P[n] = (Decrypt(C[n-1]) ^ C[n])[0..LEN_MOD16] + // + // We already have Decrypt(C[n-1]) in v20 and C[n-2] in v28. + vmv.v.v v16, v20 // v16 = Decrypt(C[n-1]) + vsetvli zero, LEN_MOD16, e8, m1, tu, ma + vle8.v v20, (INP) // v20 = C[n] || Decrypt(C[n-1])[LEN_MOD16..16] + vxor.vv v16, v16, v20 // v16 = Decrypt(C[n-1]) ^ C[n] + vse8.v v16, (OUTP) // Store P[n] + vsetivli zero, 4, e32, m1, ta, ma + aes_decrypt v20, \keylen // v20 = Decrypt(C[n] || Decrypt(C[n-1])[LEN_MOD16..16]) +.Lcts_decrypt_finish\@: + vxor.vv v20, v20, v28 // XOR with C[n-2] + vse32.v v20, (t0) // Store last full plaintext block +.Lcts_decrypt_done\@: + ret +.endm + +.macro aes_cbc_cts_crypt keylen + vle32.v v16, (IVP) // Load IV + beqz a5, .Lcts_decrypt\@ + aes_cbc_cts_encrypt \keylen +.Lcts_decrypt\@: + aes_cbc_cts_decrypt \keylen +.endm + +// void aes_cbc_cts_crypt_zvkned(const struct crypto_aes_ctx *key, +// const u8 *in, u8 *out, size_t len, +// const u8 iv[16], bool enc); +// +// Encrypts or decrypts a message with the CS3 variant of AES-CBC-CTS. +// This is the variant that unconditionally swaps the last two blocks. +SYM_FUNC_START(aes_cbc_cts_crypt_zvkned) + aes_begin KEYP, 128f, 192f + aes_cbc_cts_crypt 256 +128: + aes_cbc_cts_crypt 128 +192: + aes_cbc_cts_crypt 192 +SYM_FUNC_END(aes_cbc_cts_crypt_zvkned) -- cgit 1.2.3-korg
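As a usage note to close out this series: once the driver above is registered, kernel code reaches it through the generic skcipher API purely by algorithm name. The sketch below is a hypothetical minimal caller, not code from the series; the function name, demo key, and buffer contents are invented and error handling is abbreviated, but only standard crypto API entry points are used.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static int cts_cbc_aes_demo(void)
{
    static const u8 key[AES_KEYSIZE_128] = { 0x01, 0x02, 0x03 }; /* demo key */
    u8 iv[AES_BLOCK_SIZE] = {};
    const unsigned int len = 31;        /* deliberately not block-aligned */
    struct crypto_skcipher *tfm;
    struct skcipher_request *req = NULL;
    struct scatterlist sg;
    DECLARE_CRYPTO_WAIT(wait);
    u8 *buf = NULL;
    int err;

    tfm = crypto_alloc_skcipher("cts(cbc(aes))", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    buf = kmalloc(len, GFP_KERNEL);     /* scatterlists must not map the stack */
    if (!buf) {
        err = -ENOMEM;
        goto out;
    }
    memset(buf, 0x41, len);

    err = crypto_skcipher_setkey(tfm, key, sizeof(key));
    if (err)
        goto out;

    req = skcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        err = -ENOMEM;
        goto out;
    }

    sg_init_one(&sg, buf, len);
    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                  CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &wait);
    skcipher_request_set_crypt(req, &sg, &sg, len, iv); /* in place */
    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
    skcipher_request_free(req);
    kfree(buf);
    crypto_free_skcipher(tfm);
    return err;
}

On hardware without Zvkned the module above does not register its algorithms, and the same name-based allocation transparently falls back to another cts(cbc(aes)) provider (for example the generic cts template over cbc(aes)); when the module is present, its cra_priority of 300 makes it the preferred implementation, so callers need no architecture-specific logic.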