author     Linus Torvalds <torvalds@linux-foundation.org>  2022-04-01 13:31:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-04-01 13:31:57 -0700
commit     a3dfc532b8731843c12bdc45e804eacc47e51e50
tree       ef8d299d42306426758041c4ac55a5407bf44441
parent     9ae24d5aa001622035270de8f46c0634e6c6d55a
parent     1464d00b27b2e29a5556f6a4099cf083886e883f
download   linux-a3dfc532b8731843c12bdc45e804eacc47e51e50.tar.gz
Merge tag 'riscv-for-linus-5.18-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull more RISC-V updates from Palmer Dabbelt:
 "This has a handful of new features:

   - Support for CURRENT_STACK_POINTER, which enables some extra stack
     debugging for HARDENED_USERCOPY.

   - Support for the new SBI CPU idle extension, via cpuidle and
     suspend drivers.

   - Profiling has been enabled in the defconfigs.

  but is mostly fixes and cleanups"

* tag 'riscv-for-linus-5.18-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (21 commits)
  RISC-V: K210 defconfigs: Drop redundant MEMBARRIER=n
  RISC-V: defconfig: Drop redundant SBI HVC and earlycon
  Documentation: riscv: remove non-existent directory from table of contents
  riscv: cpu.c: don't use kernel-doc markers for comments
  RISC-V: Enable profiling by default
  RISC-V: module: fix apply_r_riscv_rcv_branch_rela typo
  RISC-V: Declare per cpu boot data as static
  RISC-V: Fix a comment typo in riscv_of_parent_hartid()
  riscv: Increase stack size under KASAN
  riscv: Fix fill_callchain return value
  riscv: dts: canaan: Fix SPI3 bus width
  riscv: Rename "sp_in_global" to "current_stack_pointer"
  riscv module: remove (NOLOAD)
  RISC-V: Enable RISC-V SBI CPU Idle driver for QEMU virt machine
  dt-bindings: Add common bindings for ARM and RISC-V idle states
  cpuidle: Add RISC-V SBI CPU idle driver
  cpuidle: Factor-out power domain related code from PSCI domain driver
  RISC-V: Add SBI HSM suspend related defines
  RISC-V: Add arch functions for non-retentive suspend entry/exit
  RISC-V: Rename relocate() and make it global
  ...
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/psci.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/cpu/idle-states.yaml (renamed from Documentation/devicetree/bindings/arm/idle-states.yaml) | 228
-rw-r--r--  Documentation/devicetree/bindings/riscv/cpus.yaml | 6
-rw-r--r--  Documentation/riscv/index.rst | 1
-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  arch/riscv/Kconfig | 8
-rw-r--r--  arch/riscv/Kconfig.socs | 3
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts | 2
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts | 2
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_go.dts | 2
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maixduino.dts | 2
-rw-r--r--  arch/riscv/configs/defconfig | 5
-rw-r--r--  arch/riscv/configs/nommu_k210_defconfig | 1
-rw-r--r--  arch/riscv/configs/nommu_k210_sdcard_defconfig | 1
-rw-r--r--  arch/riscv/configs/nommu_virt_defconfig | 1
-rw-r--r--  arch/riscv/configs/rv32_defconfig | 5
-rw-r--r--  arch/riscv/include/asm/asm.h | 26
-rw-r--r--  arch/riscv/include/asm/cpuidle.h | 24
-rw-r--r--  arch/riscv/include/asm/current.h | 2
-rw-r--r--  arch/riscv/include/asm/module.lds.h | 6
-rw-r--r--  arch/riscv/include/asm/suspend.h | 36
-rw-r--r--  arch/riscv/include/asm/thread_info.h | 10
-rw-r--r--  arch/riscv/kernel/Makefile | 2
-rw-r--r--  arch/riscv/kernel/asm-offsets.c | 3
-rw-r--r--  arch/riscv/kernel/cpu.c | 6
-rw-r--r--  arch/riscv/kernel/cpu_ops_sbi.c | 2
-rw-r--r--  arch/riscv/kernel/head.S | 27
-rw-r--r--  arch/riscv/kernel/module.c | 4
-rw-r--r--  arch/riscv/kernel/perf_callchain.c | 2
-rw-r--r--  arch/riscv/kernel/process.c | 3
-rw-r--r--  arch/riscv/kernel/stacktrace.c | 6
-rw-r--r--  arch/riscv/kernel/suspend.c | 87
-rw-r--r--  arch/riscv/kernel/suspend_entry.S | 124
-rw-r--r--  drivers/cpuidle/Kconfig | 9
-rw-r--r--  drivers/cpuidle/Kconfig.arm | 1
-rw-r--r--  drivers/cpuidle/Kconfig.riscv | 15
-rw-r--r--  drivers/cpuidle/Makefile | 5
-rw-r--r--  drivers/cpuidle/cpuidle-psci-domain.c | 138
-rw-r--r--  drivers/cpuidle/cpuidle-psci.h | 15
-rw-r--r--  drivers/cpuidle/cpuidle-riscv-sbi.c | 627
-rw-r--r--  drivers/cpuidle/dt_idle_genpd.c | 178
-rw-r--r--  drivers/cpuidle/dt_idle_genpd.h | 50
43 files changed, 1491 insertions, 202 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
index 6ce0b212ec6d64..606b4b1b709da8 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
@@ -81,4 +81,4 @@ Example:
};
};
-[1]. Documentation/devicetree/bindings/arm/idle-states.yaml
+[1]. Documentation/devicetree/bindings/cpu/idle-states.yaml
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 8b77cf83a095a3..dd83ef278af03b 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -101,7 +101,7 @@ properties:
bindings in [1]) must specify this property.
[1] Kernel documentation - ARM idle states bindings
- Documentation/devicetree/bindings/arm/idle-states.yaml
+ Documentation/devicetree/bindings/cpu/idle-states.yaml
patternProperties:
"^power-domain-":
diff --git a/Documentation/devicetree/bindings/arm/idle-states.yaml b/Documentation/devicetree/bindings/cpu/idle-states.yaml
index 4d381fa1ee5768..5daa219ceb7bdb 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.yaml
+++ b/Documentation/devicetree/bindings/cpu/idle-states.yaml
@@ -1,25 +1,30 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/idle-states.yaml#
+$id: http://devicetree.org/schemas/cpu/idle-states.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: ARM idle states binding description
+title: Idle states binding description
maintainers:
- Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ - Anup Patel <anup@brainfault.org>
description: |+
==========================================
1 - Introduction
==========================================
- ARM systems contain HW capable of managing power consumption dynamically,
- where cores can be put in different low-power states (ranging from simple wfi
- to power gating) according to OS PM policies. The CPU states representing the
- range of dynamic idle states that a processor can enter at run-time, can be
- specified through device tree bindings representing the parameters required to
- enter/exit specific idle states on a given processor.
+ ARM and RISC-V systems contain HW capable of managing power consumption
+ dynamically, where cores can be put in different low-power states (ranging
+ from simple wfi to power gating) according to OS PM policies. The CPU states
+ representing the range of dynamic idle states that a processor can enter at
+ run-time, can be specified through device tree bindings representing the
+ parameters required to enter/exit specific idle states on a given processor.
+
+ ==========================================
+ 2 - ARM idle states
+ ==========================================
According to the Server Base System Architecture document (SBSA, [3]), the
power states an ARM CPU can be put into are identified by the following list:
@@ -43,8 +48,23 @@ description: |+
The device tree binding definition for ARM idle states is the subject of this
document.
+ ==========================================
+ 3 - RISC-V idle states
+ ==========================================
+
+ On RISC-V systems, the HARTs (or CPUs) [6] can be put in platform specific
+ suspend (or idle) states (ranging from simple WFI, power gating, etc). The
+ RISC-V SBI v0.3 (or higher) [7] hart state management extension provides a
+ standard mechanism for OS to request HART state transitions.
+
+ The platform specific suspend (or idle) states of a hart can be either
+ retentive or non-retentive in nature. A retentive suspend state will
+ preserve HART registers and CSR values for all privilege modes whereas
+ a non-retentive suspend state will not preserve HART registers and CSR
+ values.
+
===========================================
- 2 - idle-states definitions
+ 4 - idle-states definitions
===========================================
Idle states are characterized for a specific system through a set of
@@ -211,10 +231,10 @@ description: |+
properties specification that is the subject of the following sections.
===========================================
- 3 - idle-states node
+ 5 - idle-states node
===========================================
- ARM processor idle states are defined within the idle-states node, which is
+ The processor idle states are defined within the idle-states node, which is
a direct child of the cpus node [1] and provides a container where the
processor idle states, defined as device tree nodes, are listed.
@@ -223,7 +243,7 @@ description: |+
just supports idle_standby, an idle-states node is not required.
===========================================
- 4 - References
+ 6 - References
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
@@ -238,9 +258,15 @@ description: |+
[4] ARM Architecture Reference Manuals
http://infocenter.arm.com/help/index.jsp
- [6] ARM Linux Kernel documentation - Booting AArch64 Linux
+ [5] ARM Linux Kernel documentation - Booting AArch64 Linux
Documentation/arm64/booting.rst
+ [6] RISC-V Linux Kernel documentation - CPUs bindings
+ Documentation/devicetree/bindings/riscv/cpus.yaml
+
+ [7] RISC-V Supervisor Binary Interface (SBI)
+ http://github.com/riscv/riscv-sbi-doc/riscv-sbi.adoc
+
properties:
$nodename:
const: idle-states
@@ -253,7 +279,7 @@ properties:
On ARM 32-bit systems this property is optional
This assumes that the "enable-method" property is set to "psci" in the cpu
- node[6] that is responsible for setting up CPU idle management in the OS
+ node[5] that is responsible for setting up CPU idle management in the OS
implementation.
const: psci
@@ -265,8 +291,8 @@ patternProperties:
as follows.
The idle state entered by executing the wfi instruction (idle_standby
- SBSA,[3][4]) is considered standard on all ARM platforms and therefore
- must not be listed.
+ SBSA,[3][4]) is considered standard on all ARM and RISC-V platforms and
+ therefore must not be listed.
In addition to the properties listed above, a state node may require
additional properties specific to the entry-method defined in the
@@ -275,7 +301,27 @@ patternProperties:
properties:
compatible:
- const: arm,idle-state
+ enum:
+ - arm,idle-state
+ - riscv,idle-state
+
+ arm,psci-suspend-param:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ power_state parameter to pass to the ARM PSCI suspend call.
+
+ Device tree nodes that require usage of PSCI CPU_SUSPEND function
+ (i.e. idle states node with entry-method property is set to "psci")
+ must specify this property.
+
+ riscv,sbi-suspend-param:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ suspend_type parameter to pass to the RISC-V SBI HSM suspend call.
+
+ This property is required in idle state nodes of device trees meant
+ for RISC-V systems. For more details on the suspend_type parameter
+ refer to the SBI specification v0.3 (or higher) [7].
local-timer-stop:
description:
@@ -317,6 +363,8 @@ patternProperties:
description:
A string used as a descriptive name for the idle state.
+ additionalProperties: false
+
required:
- compatible
- entry-latency-us
@@ -658,4 +706,150 @@ examples:
};
};
+ - |
+ // Example 3 (RISC-V 64-bit, 4-cpu systems, two clusters):
+
+ cpus {
+ #size-cells = <0>;
+ #address-cells = <1>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x0>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_0_0 &CPU_NONRET_0_0
+ &CLUSTER_RET_0 &CLUSTER_NONRET_0>;
+
+ cpu_intc0: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x1>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_0_0 &CPU_NONRET_0_0
+ &CLUSTER_RET_0 &CLUSTER_NONRET_0>;
+
+ cpu_intc1: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@10 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x10>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_1_0 &CPU_NONRET_1_0
+ &CLUSTER_RET_1 &CLUSTER_NONRET_1>;
+
+ cpu_intc10: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@11 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x11>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_1_0 &CPU_NONRET_1_0
+ &CLUSTER_RET_1 &CLUSTER_NONRET_1>;
+
+ cpu_intc11: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ idle-states {
+ CPU_RET_0_0: cpu-retentive-0-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x10000000>;
+ entry-latency-us = <20>;
+ exit-latency-us = <40>;
+ min-residency-us = <80>;
+ };
+
+ CPU_NONRET_0_0: cpu-nonretentive-0-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x90000000>;
+ entry-latency-us = <250>;
+ exit-latency-us = <500>;
+ min-residency-us = <950>;
+ };
+
+ CLUSTER_RET_0: cluster-retentive-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x11000000>;
+ local-timer-stop;
+ entry-latency-us = <50>;
+ exit-latency-us = <100>;
+ min-residency-us = <250>;
+ wakeup-latency-us = <130>;
+ };
+
+ CLUSTER_NONRET_0: cluster-nonretentive-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x91000000>;
+ local-timer-stop;
+ entry-latency-us = <600>;
+ exit-latency-us = <1100>;
+ min-residency-us = <2700>;
+ wakeup-latency-us = <1500>;
+ };
+
+ CPU_RET_1_0: cpu-retentive-1-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x10000010>;
+ entry-latency-us = <20>;
+ exit-latency-us = <40>;
+ min-residency-us = <80>;
+ };
+
+ CPU_NONRET_1_0: cpu-nonretentive-1-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x90000010>;
+ entry-latency-us = <250>;
+ exit-latency-us = <500>;
+ min-residency-us = <950>;
+ };
+
+ CLUSTER_RET_1: cluster-retentive-1 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x11000010>;
+ local-timer-stop;
+ entry-latency-us = <50>;
+ exit-latency-us = <100>;
+ min-residency-us = <250>;
+ wakeup-latency-us = <130>;
+ };
+
+ CLUSTER_NONRET_1: cluster-nonretentive-1 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x91000010>;
+ local-timer-stop;
+ entry-latency-us = <600>;
+ exit-latency-us = <1100>;
+ min-residency-us = <2700>;
+ wakeup-latency-us = <1500>;
+ };
+ };
+ };
+
...
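A note on the example riscv,sbi-suspend-param values above: in the SBI HSM extension, bit 31 of the suspend_type marks a non-retentive state, which is why the retentive examples use 0x10000000/0x11000000-style values while the non-retentive ones use 0x90000000/0x91000000. A minimal sketch of that classification (the helper name is hypothetical; the in-tree driver added later in this diff does the equivalent check with SBI_HSM_SUSP_NON_RET_BIT):

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch: bit 31 of an SBI HSM suspend_type set => non-retentive state. */
static inline bool sbi_suspend_type_is_nonretentive(u32 suspend_type)
{
	return suspend_type & BIT(31);
}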
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index aa5fb64d57eb4f..f62f646bc6954a 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -99,6 +99,12 @@ properties:
- compatible
- interrupt-controller
+ cpu-idle-states:
+ $ref: '/schemas/types.yaml#/definitions/phandle-array'
+ description: |
+ List of phandles to idle state nodes supported
+ by this hart (see ./idle-states.yaml).
+
required:
- riscv,isa
- interrupt-controller
diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst
index ea915c1960488a..e23b876ad6ebb6 100644
--- a/Documentation/riscv/index.rst
+++ b/Documentation/riscv/index.rst
@@ -7,7 +7,6 @@ RISC-V architecture
boot-image-header
vm-layout
- pmu
patch-acceptance
features
diff --git a/MAINTAINERS b/MAINTAINERS
index 4b73a79ffb7f2c..9c2b2dd4fbac01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5157,6 +5157,20 @@ S: Supported
F: drivers/cpuidle/cpuidle-psci.h
F: drivers/cpuidle/cpuidle-psci-domain.c
+CPUIDLE DRIVER - DT IDLE PM DOMAIN
+M: Ulf Hansson <ulf.hansson@linaro.org>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: drivers/cpuidle/dt_idle_genpd.c
+F: drivers/cpuidle/dt_idle_genpd.h
+
+CPUIDLE DRIVER - RISC-V SBI
+M: Anup Patel <anup@brainfault.org>
+L: linux-pm@vger.kernel.org
+L: linux-riscv@lists.infradead.org
+S: Maintained
+F: drivers/cpuidle/cpuidle-riscv-sbi.c
+
CRAMFS FILESYSTEM
M: Nicolas Pitre <nico@fluxnic.net>
S: Maintained
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index ea8ec8a960bda8..00fd9c548f2631 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -16,6 +16,7 @@ config RISCV
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_WX
@@ -47,6 +48,7 @@ config RISCV
select CLONE_BACKWARDS
select CLINT_TIMER if !MMU
select COMMON_CLK
+ select CPU_PM if CPU_IDLE
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_ATOMIC64 if !64BIT
@@ -533,4 +535,10 @@ source "kernel/power/Kconfig"
endmenu
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
source "arch/riscv/kvm/Kconfig"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index c112ab2a90520a..34592d00dde8c6 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -36,6 +36,9 @@ config SOC_VIRT
select GOLDFISH
select RTC_DRV_GOLDFISH if RTC_CLASS
select SIFIVE_PLIC
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
+ select RISCV_SBI_CPUIDLE if CPU_IDLE
help
This enables support for QEMU Virt Machine.
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
index 984872f3d3a9b9..b9e30df127fefe 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -203,6 +203,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
index 7ba99b4da30421..8d23401b0bbb6b 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -205,6 +205,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
index be9b12c9b374ac..24fd83b43d9d54 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -213,6 +213,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
index 031c0c28f81957..25341f38292aab 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -178,6 +178,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 7cd10ded7bf8f6..30e3017f22bc77 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
@@ -64,8 +67,6 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 3f42ed87dde807..2438fa39f8ae00 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -21,7 +21,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index af64b95e88cc63..9a133e63ae5bf3 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index e1c9864b62376d..5269fbb6b4fcf6 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -19,7 +19,6 @@ CONFIG_EXPERT=y
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index e0e5c7c09ab8fc..7e5efdc3829d11 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
@@ -62,8 +65,6 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 618d7c5af1a2df..8c2549b16ac06f 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -67,4 +67,30 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif
+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+.endm
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h
new file mode 100644
index 00000000000000..71fdc607d4bc52
--- /dev/null
+++ b/arch/riscv/include/asm/cpuidle.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+ /*
+ * Add mb() here to ensure that all
+ * IO/MEM accesses are completed prior
+ * to entering WFI.
+ */
+ mb();
+ wait_for_interrupt();
+}
+
+#endif
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
index 1de233d8e8de21..21774d868c65bd 100644
--- a/arch/riscv/include/asm/current.h
+++ b/arch/riscv/include/asm/current.h
@@ -33,6 +33,8 @@ static __always_inline struct task_struct *get_current(void)
#define current get_current()
+register unsigned long current_stack_pointer __asm__("sp");
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_CURRENT_H */
diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h
index 4254ff2ff04943..1075beae1ac645 100644
--- a/arch/riscv/include/asm/module.lds.h
+++ b/arch/riscv/include/asm/module.lds.h
@@ -2,8 +2,8 @@
/* Copyright (C) 2017 Andes Technology Corporation */
#ifdef CONFIG_MODULE_SECTIONS
SECTIONS {
- .plt (NOLOAD) : { BYTE(0) }
- .got (NOLOAD) : { BYTE(0) }
- .got.plt (NOLOAD) : { BYTE(0) }
+ .plt : { BYTE(0) }
+ .got : { BYTE(0) }
+ .got.plt : { BYTE(0) }
}
#endif
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
new file mode 100644
index 00000000000000..8be391c2aecb8c
--- /dev/null
+++ b/arch/riscv/include/asm/suspend.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+ /* Saved and restored by low-level functions */
+ struct pt_regs regs;
+ /* Saved and restored by high-level functions */
+ unsigned long scratch;
+ unsigned long tvec;
+ unsigned long ie;
+#ifdef CONFIG_MMU
+ unsigned long satp;
+#endif
+};
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+ int (*finish)(unsigned long arg,
+ unsigned long entry,
+ unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+#endif
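The split above follows the usual suspend-to-firmware pattern: cpu_suspend() saves CSRs and registers, then calls the finisher, which is expected not to return on success; when the hart wakes up, firmware re-enters the kernel at the physical address of __cpu_resume_enter() and cpu_suspend() returns 0. A minimal usage sketch under that assumption (the firmware call is hypothetical; the real caller is sbi_suspend() in the cpuidle-riscv-sbi.c driver added later in this diff):

#include <asm/suspend.h>

/*
 * Hypothetical finisher: ask firmware to suspend this hart. 'entry' is
 * the physical address of __cpu_resume_enter() and 'context' points at
 * the struct suspend_context that cpu_suspend() saved for us.
 */
static int example_finisher(unsigned long arg, unsigned long entry,
			    unsigned long context)
{
	return firmware_hart_suspend(arg, entry, context); /* hypothetical */
}

static int example_enter_deep_idle(unsigned long suspend_type)
{
	/* 0 after resuming via __cpu_resume_enter(), negative error otherwise. */
	return cpu_suspend(suspend_type, example_finisher);
}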
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 60da0dcacf145c..74d888c8d631a2 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -11,11 +11,17 @@
#include <asm/page.h>
#include <linux/const.h>
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
/* thread information allocation */
#ifdef CONFIG_64BIT
-#define THREAD_SIZE_ORDER (2)
+#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
#else
-#define THREAD_SIZE_ORDER (1)
+#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER)
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
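For scale: on 64-bit with KASAN this bumps THREAD_SIZE_ORDER from 2 to 3, i.e. THREAD_SIZE grows from PAGE_SIZE << 2 = 16 KiB to PAGE_SIZE << 3 = 32 KiB assuming 4 KiB pages (and from 8 KiB to 16 KiB on 32-bit), giving KASAN's instrumented stack frames the extra headroom.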
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index e0133d113216f2..87adbe47bc1580 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -48,6 +48,8 @@ obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
+obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
+
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index df0519a64eaf85..df9444397908d3 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -13,6 +13,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
+#include <asm/suspend.h>
void asm_offsets(void);
@@ -113,6 +114,8 @@ void asm_offsets(void)
OFFSET(PT_BADADDR, pt_regs, badaddr);
OFFSET(PT_CAUSE, pt_regs, cause);
+ OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
+
OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index d2a936195295b9..ccb617791e561a 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -69,11 +69,11 @@ int riscv_of_parent_hartid(struct device_node *node)
.uprop = #UPROP, \
.isa_ext_id = EXTID, \
}
-/**
+/*
* Here are the ordering rules of extension naming defined by RISC-V
* specification :
* 1. All extensions should be separated from other multi-letter extensions
- * from other multi-letter extensions by an underscore.
+ * by an underscore.
* 2. The first letter following the 'Z' conventionally indicates the most
* closely related alphabetical extension category, IMAFDQLCBKJTPVH.
* If multiple 'Z' extensions are named, they should be ordered first
@@ -110,7 +110,7 @@ static void print_isa_ext(struct seq_file *f)
}
}
-/**
+/*
* These are the only valid base (single letter) ISA extensions as per the spec.
* It also specifies the canonical order in which it appears in the spec.
* Some of the extension may just be a place holder for now (B, K, P, J).
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 2e16f6732cdf8f..4f5a6f84e2a4f3 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -21,7 +21,7 @@ const struct cpu_operations cpu_ops_sbi;
* be invoked from multiple threads in parallel. Define a per cpu data
* to handle that.
*/
-DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
unsigned long priv)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index ec07f991866a55..893b8bb693912e 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -16,26 +16,6 @@
#include <asm/image.h>
#include "efi-header.S"
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
- REG_L t0, _xip_fixup
- add \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
- la t0, __data_loc
- REG_L t1, _xip_phys_offset
- sub \reg, \reg, t1
- add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
__HEAD
ENTRY(_start)
/*
@@ -89,7 +69,8 @@ pe_head_start:
.align 2
#ifdef CONFIG_MMU
-relocate:
+ .global relocate_enable_mmu
+relocate_enable_mmu:
/* Relocate return address */
la a1, kernel_map
XIP_FIXUP_OFFSET a1
@@ -184,7 +165,7 @@ secondary_start_sbi:
/* Enable virtual memory and relocate to virtual address */
la a0, swapper_pg_dir
XIP_FIXUP_OFFSET a0
- call relocate
+ call relocate_enable_mmu
#endif
call setup_trap_vector
tail smp_callin
@@ -328,7 +309,7 @@ clear_bss_done:
#ifdef CONFIG_MMU
la a0, early_pg_dir
XIP_FIXUP_OFFSET a0
- call relocate
+ call relocate_enable_mmu
#endif /* CONFIG_MMU */
call setup_trap_vector
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 4a48287513c375..c29cef90d1ddf6 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -69,7 +69,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
return 0;
}
-static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
ptrdiff_t offset = (void *)v - (void *)location;
@@ -301,7 +301,7 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
[R_RISCV_64] = apply_r_riscv_64_rela,
[R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
[R_RISCV_JAL] = apply_r_riscv_jal_rela,
- [R_RISCV_RVC_BRANCH] = apply_r_riscv_rcv_branch_rela,
+ [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela,
[R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela,
[R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
[R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 55faa4991b876f..3348a61de7d998 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -68,7 +68,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
static bool fill_callchain(void *entry, unsigned long pc)
{
- return perf_callchain_store(entry, pc);
+ return perf_callchain_store(entry, pc) == 0;
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 03ac3aa611f59c..504b496787aa4d 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -23,6 +23,7 @@
#include <asm/string.h>
#include <asm/switch_to.h>
#include <asm/thread_info.h>
+#include <asm/cpuidle.h>
register unsigned long gp_in_global __asm__("gp");
@@ -37,7 +38,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
void arch_cpu_idle(void)
{
- wait_for_interrupt();
+ cpu_do_idle();
raw_local_irq_enable();
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 14d2b53ec322d8..08d11a53f39e7a 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -14,8 +14,6 @@
#include <asm/stacktrace.h>
-register unsigned long sp_in_global __asm__("sp");
-
#ifdef CONFIG_FRAME_POINTER
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
@@ -30,7 +28,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
fp = (unsigned long)__builtin_frame_address(0);
- sp = sp_in_global;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
@@ -78,7 +76,7 @@ void notrace walk_stackframe(struct task_struct *task,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- sp = sp_in_global;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
new file mode 100644
index 00000000000000..9ba24fb8cc9327
--- /dev/null
+++ b/arch/riscv/kernel/suspend.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/ftrace.h>
+#include <asm/csr.h>
+#include <asm/suspend.h>
+
+static void suspend_save_csrs(struct suspend_context *context)
+{
+ context->scratch = csr_read(CSR_SCRATCH);
+ context->tvec = csr_read(CSR_TVEC);
+ context->ie = csr_read(CSR_IE);
+
+ /*
+ * No need to save/restore IP CSR (i.e. MIP or SIP) because:
+ *
+ * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
+ * external devices (such as interrupt controller, timer, etc).
+ * 2. For MMU (S-mode) kernel, the bits in SIP are set by
+ * M-mode firmware and external devices (such as interrupt
+ * controller, etc).
+ */
+
+#ifdef CONFIG_MMU
+ context->satp = csr_read(CSR_SATP);
+#endif
+}
+
+static void suspend_restore_csrs(struct suspend_context *context)
+{
+ csr_write(CSR_SCRATCH, context->scratch);
+ csr_write(CSR_TVEC, context->tvec);
+ csr_write(CSR_IE, context->ie);
+
+#ifdef CONFIG_MMU
+ csr_write(CSR_SATP, context->satp);
+#endif
+}
+
+int cpu_suspend(unsigned long arg,
+ int (*finish)(unsigned long arg,
+ unsigned long entry,
+ unsigned long context))
+{
+ int rc = 0;
+ struct suspend_context context = { 0 };
+
+ /* Finisher should be non-NULL */
+ if (!finish)
+ return -EINVAL;
+
+ /* Save additional CSRs */
+ suspend_save_csrs(&context);
+
+ /*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka finishers), hence disable
+ * graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ /* Save context on stack */
+ if (__cpu_suspend_enter(&context)) {
+ /* Call the finisher */
+ rc = finish(arg, __pa_symbol(__cpu_resume_enter),
+ (ulong)&context);
+
+ /*
+ * Should never reach here, unless the suspend finisher
+ * fails. Successful cpu_suspend() should return from
+ * __cpu_resume_enter()
+ */
+ if (!rc)
+ rc = -EOPNOTSUPP;
+ }
+
+ /* Enable function graph tracer */
+ unpause_graph_tracing();
+
+ /* Restore additional CSRs */
+ suspend_restore_csrs(&context);
+
+ return rc;
+}
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
new file mode 100644
index 00000000000000..4b07b809a2b8c4
--- /dev/null
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+ .text
+ .altmacro
+ .option norelax
+
+ENTRY(__cpu_suspend_enter)
+ /* Save registers (except A0 and T0-T6) */
+ REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+ REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+ REG_S gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+ REG_S tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+ REG_S s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+ REG_S s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+ REG_S a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+ REG_S a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+ REG_S a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+ REG_S a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+ REG_S a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+ REG_S a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+ REG_S a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+ REG_S s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+ REG_S s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+ REG_S s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+ REG_S s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+ REG_S s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+ REG_S s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+ REG_S s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+ REG_S s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+ REG_S s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+ REG_S s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+ /* Save CSRs */
+ csrr t0, CSR_EPC
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+ csrr t0, CSR_STATUS
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ csrr t0, CSR_TVAL
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+ csrr t0, CSR_CAUSE
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+
+ /* Return non-zero value */
+ li a0, 1
+
+ /* Return to C code */
+ ret
+END(__cpu_suspend_enter)
+
+ENTRY(__cpu_resume_enter)
+ /* Load the global pointer */
+ .option push
+ .option norelax
+ la gp, __global_pointer$
+ .option pop
+
+#ifdef CONFIG_MMU
+ /* Save A0 and A1 */
+ add t0, a0, zero
+ add t1, a1, zero
+
+ /* Enable MMU */
+ la a0, swapper_pg_dir
+ XIP_FIXUP_OFFSET a0
+ call relocate_enable_mmu
+
+ /* Restore A0 and A1 */
+ add a0, t0, zero
+ add a1, t1, zero
+#endif
+
+ /* Make A0 point to suspend context */
+ add a0, a1, zero
+
+ /* Restore CSRs */
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+ csrw CSR_EPC, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ csrw CSR_STATUS, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+ csrw CSR_TVAL, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+ csrw CSR_CAUSE, t0
+
+ /* Restore registers (except A0 and T0-T6) */
+ REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+ REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+ REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+ REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+ REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+ REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+ REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+ REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+ REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+ REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+ REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+ REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+ REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+ REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+ REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+ REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+ REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+ REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+ REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+ REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+ REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+ REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+ REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+ /* Return zero value */
+ add a0, zero, zero
+
+ /* Return to C code */
+ ret
+END(__cpu_resume_enter)
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c0aeedd66f022a..ff71dd662880d9 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -47,6 +47,10 @@ config CPU_IDLE_GOV_HALTPOLL
config DT_IDLE_STATES
bool
+config DT_IDLE_GENPD
+ depends on PM_GENERIC_DOMAINS_OF
+ bool
+
menu "ARM CPU Idle Drivers"
depends on ARM || ARM64
source "drivers/cpuidle/Kconfig.arm"
@@ -62,6 +66,11 @@ depends on PPC
source "drivers/cpuidle/Kconfig.powerpc"
endmenu
+menu "RISC-V CPU Idle Drivers"
+depends on RISCV
+source "drivers/cpuidle/Kconfig.riscv"
+endmenu
+
config HALTPOLL_CPUIDLE
tristate "Halt poll cpuidle driver"
depends on X86 && KVM_GUEST
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 15d6c46c0a4777..be7f512109f793 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -27,6 +27,7 @@ config ARM_PSCI_CPUIDLE_DOMAIN
bool "PSCI CPU idle Domain"
depends on ARM_PSCI_CPUIDLE
depends on PM_GENERIC_DOMAINS_OF
+ select DT_IDLE_GENPD
default y
help
Select this to enable the PSCI based CPUidle driver to use PM domains,
diff --git a/drivers/cpuidle/Kconfig.riscv b/drivers/cpuidle/Kconfig.riscv
new file mode 100644
index 00000000000000..78518c26af74ca
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.riscv
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RISC-V CPU Idle drivers
+#
+
+config RISCV_SBI_CPUIDLE
+ bool "RISC-V SBI CPU idle Driver"
+ depends on RISCV_SBI
+ select DT_IDLE_STATES
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ select DT_IDLE_GENPD if PM_GENERIC_DOMAINS_OF
+ help
+ Select this option to enable RISC-V SBI firmware based CPU idle
+ driver for RISC-V systems. This driver also supports hierarchical
+ DT based layout of the idle states.
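In practice (matching the defconfig changes earlier in this merge), enabling the driver on an SBI platform amounts to a config along these lines; on SOC_VIRT kernels the last symbol is selected automatically once CPU_IDLE is on, per the Kconfig.socs hunk above:

CONFIG_PM=y
CONFIG_CPU_IDLE=y
CONFIG_RISCV_SBI_CPUIDLE=y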
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 26bbc5e7412366..d103342b7cfc21 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,6 +6,7 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
+obj-$(CONFIG_DT_IDLE_GENPD) += dt_idle_genpd.o
obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o
obj-$(CONFIG_HALTPOLL_CPUIDLE) += cpuidle-haltpoll.o
@@ -34,3 +35,7 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
+
+###############################################################################
+# RISC-V drivers
+obj-$(CONFIG_RISCV_SBI_CPUIDLE) += cpuidle-riscv-sbi.o
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index ff2c3f8e4668ae..755bbdfc5b82ff 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -47,73 +47,14 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
return 0;
}
-static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
- int state_count)
-{
- int i, ret;
- u32 psci_state, *psci_state_buf;
-
- for (i = 0; i < state_count; i++) {
- ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
- &psci_state);
- if (ret)
- goto free_state;
-
- psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
- if (!psci_state_buf) {
- ret = -ENOMEM;
- goto free_state;
- }
- *psci_state_buf = psci_state;
- states[i].data = psci_state_buf;
- }
-
- return 0;
-
-free_state:
- i--;
- for (; i >= 0; i--)
- kfree(states[i].data);
- return ret;
-}
-
-static int psci_pd_parse_states(struct device_node *np,
- struct genpd_power_state **states, int *state_count)
-{
- int ret;
-
- /* Parse the domain idle states. */
- ret = of_genpd_parse_idle_states(np, states, state_count);
- if (ret)
- return ret;
-
- /* Fill out the PSCI specifics for each found state. */
- ret = psci_pd_parse_state_nodes(*states, *state_count);
- if (ret)
- kfree(*states);
-
- return ret;
-}
-
-static void psci_pd_free_states(struct genpd_power_state *states,
- unsigned int state_count)
-{
- int i;
-
- for (i = 0; i < state_count; i++)
- kfree(states[i].data);
- kfree(states);
-}
-
static int psci_pd_init(struct device_node *np, bool use_osi)
{
struct generic_pm_domain *pd;
struct psci_pd_provider *pd_provider;
struct dev_power_governor *pd_gov;
- struct genpd_power_state *states = NULL;
int ret = -ENOMEM, state_count = 0;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ pd = dt_idle_pd_alloc(np, psci_dt_parse_state_node);
if (!pd)
goto out;
@@ -121,22 +62,6 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
if (!pd_provider)
goto free_pd;
- pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
- if (!pd->name)
- goto free_pd_prov;
-
- /*
- * Parse the domain idle states and let genpd manage the state selection
- * for those being compatible with "domain-idle-state".
- */
- ret = psci_pd_parse_states(np, &states, &state_count);
- if (ret)
- goto free_name;
-
- pd->free_states = psci_pd_free_states;
- pd->name = kbasename(pd->name);
- pd->states = states;
- pd->state_count = state_count;
pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
/* Allow power off when OSI has been successfully enabled. */
@@ -149,10 +74,8 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
ret = pm_genpd_init(pd, pd_gov, false);
- if (ret) {
- psci_pd_free_states(states, state_count);
- goto free_name;
- }
+ if (ret)
+ goto free_pd_prov;
ret = of_genpd_add_provider_simple(np, pd);
if (ret)
@@ -166,12 +89,10 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
remove_pd:
pm_genpd_remove(pd);
-free_name:
- kfree(pd->name);
free_pd_prov:
kfree(pd_provider);
free_pd:
- kfree(pd);
+ dt_idle_pd_free(pd);
out:
pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
return ret;
@@ -195,30 +116,6 @@ static void psci_pd_remove(void)
}
}
-static int psci_pd_init_topology(struct device_node *np)
-{
- struct device_node *node;
- struct of_phandle_args child, parent;
- int ret;
-
- for_each_child_of_node(np, node) {
- if (of_parse_phandle_with_args(node, "power-domains",
- "#power-domain-cells", 0, &parent))
- continue;
-
- child.np = node;
- child.args_count = 0;
- ret = of_genpd_add_subdomain(&parent, &child);
- of_node_put(parent.np);
- if (ret) {
- of_node_put(node);
- return ret;
- }
- }
-
- return 0;
-}
-
static bool psci_pd_try_set_osi_mode(void)
{
int ret;
@@ -282,7 +179,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
goto no_pd;
/* Link genpd masters/subdomains to model the CPU topology. */
- ret = psci_pd_init_topology(np);
+ ret = dt_idle_pd_init_topology(np);
if (ret)
goto remove_pd;
@@ -314,28 +211,3 @@ static int __init psci_idle_init_domains(void)
return platform_driver_register(&psci_cpuidle_domain_driver);
}
subsys_initcall(psci_idle_init_domains);
-
-struct device *psci_dt_attach_cpu(int cpu)
-{
- struct device *dev;
-
- dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
- if (IS_ERR_OR_NULL(dev))
- return dev;
-
- pm_runtime_irq_safe(dev);
- if (cpu_online(cpu))
- pm_runtime_get_sync(dev);
-
- dev_pm_syscore_device(dev, true);
-
- return dev;
-}
-
-void psci_dt_detach_cpu(struct device *dev)
-{
- if (IS_ERR_OR_NULL(dev))
- return;
-
- dev_pm_domain_detach(dev, false);
-}
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index d8e925e84c27af..4e132640ed6485 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -10,8 +10,19 @@ void psci_set_domain_state(u32 state);
int psci_dt_parse_state_node(struct device_node *np, u32 *state);
#ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
-struct device *psci_dt_attach_cpu(int cpu);
-void psci_dt_detach_cpu(struct device *dev);
+
+#include "dt_idle_genpd.h"
+
+static inline struct device *psci_dt_attach_cpu(int cpu)
+{
+ return dt_idle_attach_cpu(cpu, "psci");
+}
+
+static inline void psci_dt_detach_cpu(struct device *dev)
+{
+ dt_idle_detach_cpu(dev);
+}
+
#else
static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
static inline void psci_dt_detach_cpu(struct device *dev) { }
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
new file mode 100644
index 00000000000000..b459eda2cd375f
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RISC-V SBI CPU idle driver.
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu_cooling.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#include "dt_idle_states.h"
+#include "dt_idle_genpd.h"
+
+struct sbi_cpuidle_data {
+ u32 *states;
+ struct device *dev;
+};
+
+struct sbi_domain_state {
+ bool available;
+ u32 state;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
+static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
+static bool sbi_cpuidle_use_osi;
+static bool sbi_cpuidle_use_cpuhp;
+static bool sbi_cpuidle_pd_allow_domain_state;
+
+static inline void sbi_set_domain_state(u32 state)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ data->available = true;
+ data->state = state;
+}
+
+static inline u32 sbi_get_domain_state(void)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ return data->state;
+}
+
+static inline void sbi_clear_domain_state(void)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ data->available = false;
+}
+
+static inline bool sbi_is_domain_state_available(void)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ return data->available;
+}
+
+static int sbi_suspend_finisher(unsigned long suspend_type,
+ unsigned long resume_addr,
+ unsigned long opaque)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
+ suspend_type, resume_addr, opaque, 0, 0, 0);
+
+ return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
+}
+
+static int sbi_suspend(u32 state)
+{
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return cpu_suspend(state, sbi_suspend_finisher);
+ else
+ return sbi_suspend_finisher(state, 0, 0);
+}
+
+static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
+
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
+}
+
+static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
+{
+ struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
+ u32 *states = data->states;
+ struct device *pd_dev = data->dev;
+ u32 state;
+ int ret;
+
+ ret = cpu_pm_enter();
+ if (ret)
+ return -1;
+
+ /* Do runtime PM to manage a hierarchical CPU topology. */
+ rcu_irq_enter_irqson();
+ if (s2idle)
+ dev_pm_genpd_suspend(pd_dev);
+ else
+ pm_runtime_put_sync_suspend(pd_dev);
+ rcu_irq_exit_irqson();
+
+ if (sbi_is_domain_state_available())
+ state = sbi_get_domain_state();
+ else
+ state = states[idx];
+
+ ret = sbi_suspend(state) ? -1 : idx;
+
+ rcu_irq_enter_irqson();
+ if (s2idle)
+ dev_pm_genpd_resume(pd_dev);
+ else
+ pm_runtime_get_sync(pd_dev);
+ rcu_irq_exit_irqson();
+
+ cpu_pm_exit();
+
+ /* Clear the domain state to start fresh when back from idle. */
+ sbi_clear_domain_state();
+ return ret;
+}
+
+static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ return __sbi_enter_domain_idle_state(dev, drv, idx, false);
+}
+
+static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int idx)
+{
+ return __sbi_enter_domain_idle_state(dev, drv, idx, true);
+}
+
+static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+ if (pd_dev)
+ pm_runtime_get_sync(pd_dev);
+
+ return 0;
+}
+
+static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+ if (pd_dev) {
+ pm_runtime_put_sync(pd_dev);
+ /* Clear domain state to start fresh at next online. */
+ sbi_clear_domain_state();
+ }
+
+ return 0;
+}
+
+static void sbi_idle_init_cpuhp(void)
+{
+ int err;
+
+ if (!sbi_cpuidle_use_cpuhp)
+ return;
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+ "cpuidle/sbi:online",
+ sbi_cpuidle_cpuhp_up,
+ sbi_cpuidle_cpuhp_down);
+ if (err)
+ pr_warn("Failed %d while setup cpuhp state\n", err);
+}
+
+static const struct of_device_id sbi_cpuidle_state_match[] = {
+ { .compatible = "riscv,idle-state",
+ .data = sbi_cpuidle_enter_state },
+ { },
+};
+
+static bool sbi_suspend_state_is_valid(u32 state)
+{
+ if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
+ state < SBI_HSM_SUSPEND_RET_PLATFORM)
+ return false;
+ if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
+ state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
+ return false;
+ return true;
+}
+
+static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+ int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
+
+ if (err) {
+ pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
+ return err;
+ }
+
+ if (!sbi_suspend_state_is_valid(*state)) {
+ pr_warn("Invalid SBI suspend state %#x\n", *state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
+ struct sbi_cpuidle_data *data,
+ unsigned int state_count, int cpu)
+{
+ /* Currently limit the hierarchical topology to be used in OSI mode. */
+ if (!sbi_cpuidle_use_osi)
+ return 0;
+
+ data->dev = dt_idle_attach_cpu(cpu, "sbi");
+ if (IS_ERR_OR_NULL(data->dev))
+ return PTR_ERR_OR_ZERO(data->dev);
+
+ /*
+ * Using the deepest state for the CPU to trigger a potential selection
+ * of a shared state for the domain, assumes the domain states are all
+ * deeper states.
+ */
+ drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
+ drv->states[state_count - 1].enter_s2idle =
+ sbi_enter_s2idle_domain_idle_state;
+ sbi_cpuidle_use_cpuhp = true;
+
+ return 0;
+}
+
+static int sbi_cpuidle_dt_init_states(struct device *dev,
+ struct cpuidle_driver *drv,
+ unsigned int cpu,
+ unsigned int state_count)
+{
+ struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+ struct device_node *state_node;
+ struct device_node *cpu_node;
+ u32 *states;
+ int i, ret;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
+
+ states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
+ if (!states) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* Parse SBI specific details from state DT nodes */
+ for (i = 1; i < state_count; i++) {
+ state_node = of_get_cpu_state_node(cpu_node, i - 1);
+ if (!state_node)
+ break;
+
+ ret = sbi_dt_parse_state_node(state_node, &states[i]);
+ of_node_put(state_node);
+
+ if (ret)
+ return ret;
+
+ pr_debug("sbi-state %#x index %d\n", states[i], i);
+ }
+ if (i != state_count) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* Initialize optional data, used for the hierarchical topology. */
+ ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
+ if (ret < 0)
+ return ret;
+
+ /* Store states in the per-cpu struct. */
+ data->states = states;
+
+fail:
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
+static void sbi_cpuidle_deinit_cpu(int cpu)
+{
+ struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+
+ dt_idle_detach_cpu(data->dev);
+ sbi_cpuidle_use_cpuhp = false;
+}
+
+static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
+{
+ struct cpuidle_driver *drv;
+ unsigned int state_count = 0;
+ int ret = 0;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->name = "sbi_cpuidle";
+ drv->owner = THIS_MODULE;
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /* RISC-V architectural WFI to be represented as state index 0. */
+ drv->states[0].enter = sbi_cpuidle_enter_state;
+ drv->states[0].exit_latency = 1;
+ drv->states[0].target_residency = 1;
+ drv->states[0].power_usage = UINT_MAX;
+ strcpy(drv->states[0].name, "WFI");
+ strcpy(drv->states[0].desc, "RISC-V WFI");
+
+ /*
+ * If no DT idle states are detected (ret == 0) let the driver
+ * initialization fail accordingly since there is no reason to
+ * initialize the idle driver if only wfi is supported: the
+ * default architectural back-end already executes wfi
+ * on idle entry.
+ */
+ ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
+ if (ret <= 0) {
+ pr_debug("HART%ld: failed to parse DT idle states\n",
+ cpuid_to_hartid_map(cpu));
+ return ret ? : -ENODEV;
+ }
+ state_count = ret + 1; /* Include WFI state as well */
+
+ /* Initialize idle states from DT. */
+ ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
+ if (ret) {
+ pr_err("HART%ld: failed to init idle states\n",
+ cpuid_to_hartid_map(cpu));
+ return ret;
+ }
+
+ ret = cpuidle_register(drv, NULL);
+ if (ret)
+ goto deinit;
+
+ cpuidle_cooling_register(drv);
+
+ return 0;
+deinit:
+ sbi_cpuidle_deinit_cpu(cpu);
+ return ret;
+}
+
+static void sbi_cpuidle_domain_sync_state(struct device *dev)
+{
+ /*
+ * All devices have now been attached/probed to the PM domain
+ * topology, hence it's fine to allow domain states to be picked.
+ */
+ sbi_cpuidle_pd_allow_domain_state = true;
+}
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
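+/*
+ * genpd ->power_off() callback for a CPU PM domain: when the last CPU in the
+ * domain enters idle, record the selected domain state so the idle entry path
+ * can pass it on via SBI HSM suspend.
+ */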
+static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
+{
+ struct genpd_power_state *state = &pd->states[pd->state_idx];
+ u32 *pd_state;
+
+ if (!state->data)
+ return 0;
+
+ if (!sbi_cpuidle_pd_allow_domain_state)
+ return -EBUSY;
+
+ /* OSI mode is enabled, set the corresponding domain state. */
+ pd_state = state->data;
+ sbi_set_domain_state(*pd_state);
+
+ return 0;
+}
+
+struct sbi_pd_provider {
+ struct list_head link;
+ struct device_node *node;
+};
+
+static LIST_HEAD(sbi_pd_providers);
+
+static int sbi_pd_init(struct device_node *np)
+{
+ struct generic_pm_domain *pd;
+ struct sbi_pd_provider *pd_provider;
+ struct dev_power_governor *pd_gov;
+ int ret = -ENOMEM;
+
+ pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
+ if (!pd)
+ goto out;
+
+ pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
+ goto free_pd;
+
+ pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+ /* Allow power off when OSI is available. */
+ if (sbi_cpuidle_use_osi)
+ pd->power_off = sbi_cpuidle_pd_power_off;
+ else
+ pd->flags |= GENPD_FLAG_ALWAYS_ON;
+
+ /* Use the CPU PM domain governor if the domain has states to manage. */
+ pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;
+
+ ret = pm_genpd_init(pd, pd_gov, false);
+ if (ret)
+ goto free_pd_prov;
+
+ ret = of_genpd_add_provider_simple(np, pd);
+ if (ret)
+ goto remove_pd;
+
+ pd_provider->node = of_node_get(np);
+ list_add(&pd_provider->link, &sbi_pd_providers);
+
+ pr_debug("init PM domain %s\n", pd->name);
+ return 0;
+
+remove_pd:
+ pm_genpd_remove(pd);
+free_pd_prov:
+ kfree(pd_provider);
+free_pd:
+ dt_idle_pd_free(pd);
+out:
+ pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+ return ret;
+}
+
+static void sbi_pd_remove(void)
+{
+ struct sbi_pd_provider *pd_provider, *it;
+ struct generic_pm_domain *genpd;
+
+ list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
+ of_genpd_del_provider(pd_provider->node);
+
+ genpd = of_genpd_remove_last(pd_provider->node);
+ if (!IS_ERR(genpd))
+ kfree(genpd);
+
+ of_node_put(pd_provider->node);
+ list_del(&pd_provider->link);
+ kfree(pd_provider);
+ }
+}
+
+static int sbi_genpd_probe(struct device_node *np)
+{
+ struct device_node *node;
+ int ret = 0, pd_count = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ /*
+ * Parse child nodes for the "#power-domain-cells" property and
+ * initialize a genpd/genpd-of-provider pair when it's found.
+ */
+ for_each_child_of_node(np, node) {
+ if (!of_find_property(node, "#power-domain-cells", NULL))
+ continue;
+
+ ret = sbi_pd_init(node);
+ if (ret)
+ goto put_node;
+
+ pd_count++;
+ }
+
+ /* Bail out if not using the hierarchical CPU topology. */
+ if (!pd_count)
+ goto no_pd;
+
+ /* Link genpd masters/subdomains to model the CPU topology. */
+ ret = dt_idle_pd_init_topology(np);
+ if (ret)
+ goto remove_pd;
+
+ return 0;
+
+put_node:
+ of_node_put(node);
+remove_pd:
+ sbi_pd_remove();
+ pr_err("failed to create CPU PM domains ret=%d\n", ret);
+no_pd:
+ return ret;
+}
+
+#else
+
+static inline int sbi_genpd_probe(struct device_node *np)
+{
+ return 0;
+}
+
+#endif
+
+static int sbi_cpuidle_probe(struct platform_device *pdev)
+{
+ int cpu, ret;
+ struct cpuidle_driver *drv;
+ struct cpuidle_device *dev;
+ struct device_node *np, *pds_node;
+
+ /*
+ * Detect OSI support based on CPU DT nodes: the hierarchical (OSI)
+ * mode is only used when every CPU node references a power domain.
+ */
+ sbi_cpuidle_use_osi = true;
+ for_each_possible_cpu(cpu) {
+ np = of_cpu_device_node_get(cpu);
+ if (np &&
+ of_find_property(np, "power-domains", NULL) &&
+ of_find_property(np, "power-domain-names", NULL)) {
+ of_node_put(np);
+ continue;
+ } else {
+ of_node_put(np);
+ sbi_cpuidle_use_osi = false;
+ break;
+ }
+ }
+
+ /* Populate generic power domains from DT nodes */
+ pds_node = of_find_node_by_path("/cpus/power-domains");
+ if (pds_node) {
+ ret = sbi_genpd_probe(pds_node);
+ of_node_put(pds_node);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize CPU idle driver for each CPU */
+ for_each_possible_cpu(cpu) {
+ ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
+ if (ret) {
+ pr_debug("HART%ld: idle driver init failed\n",
+ cpuid_to_hartid_map(cpu));
+ goto out_fail;
+ }
+ }
+
+ /* Set up CPU hotplug notifiers */
+ sbi_idle_init_cpuhp();
+
+ pr_info("idle driver registered for all CPUs\n");
+
+ return 0;
+
+out_fail:
+ while (--cpu >= 0) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
+ cpuidle_unregister(drv);
+ sbi_cpuidle_deinit_cpu(cpu);
+ }
+
+ return ret;
+}
+
+static struct platform_driver sbi_cpuidle_driver = {
+ .probe = sbi_cpuidle_probe,
+ .driver = {
+ .name = "sbi-cpuidle",
+ .sync_state = sbi_cpuidle_domain_sync_state,
+ },
+};
+
+static int __init sbi_cpuidle_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ /*
+ * The SBI HSM suspend function is only available when:
+ * 1) SBI version is 0.3 or higher
+ * 2) SBI HSM extension is available
+ */
+ if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+ sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+ pr_info("HSM suspend not available\n");
+ return 0;
+ }
+
+ ret = platform_driver_register(&sbi_cpuidle_driver);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple("sbi-cpuidle",
+ -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&sbi_cpuidle_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(sbi_cpuidle_init);
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
new file mode 100644
index 00000000000000..b37165514d4e71
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_genpd.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PM domains for CPUs via genpd.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "dt-idle-genpd: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "dt_idle_genpd.h"
+
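+/*
+ * For each domain idle state, invoke the provider-supplied parse_state()
+ * callback and stash the returned parameter in states[i].data, where the
+ * provider's genpd ->power_off() hook can pick it up later.
+ */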
+static int pd_parse_state_nodes(
+ int (*parse_state)(struct device_node *, u32 *),
+ struct genpd_power_state *states, int state_count)
+{
+ int i, ret;
+ u32 state, *state_buf;
+
+ for (i = 0; i < state_count; i++) {
+ ret = parse_state(to_of_node(states[i].fwnode), &state);
+ if (ret)
+ goto free_state;
+
+ state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!state_buf) {
+ ret = -ENOMEM;
+ goto free_state;
+ }
+ *state_buf = state;
+ states[i].data = state_buf;
+ }
+
+ return 0;
+
+free_state:
+ i--;
+ for (; i >= 0; i--)
+ kfree(states[i].data);
+ return ret;
+}
+
+static int pd_parse_states(struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *),
+ struct genpd_power_state **states,
+ int *state_count)
+{
+ int ret;
+
+ /* Parse the domain idle states. */
+ ret = of_genpd_parse_idle_states(np, states, state_count);
+ if (ret)
+ return ret;
+
+ /* Fill out the dt specifics for each found state. */
+ ret = pd_parse_state_nodes(parse_state, *states, *state_count);
+ if (ret)
+ kfree(*states);
+
+ return ret;
+}
+
+static void pd_free_states(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ int i;
+
+ for (i = 0; i < state_count; i++)
+ kfree(states[i].data);
+ kfree(states);
+}
+
+void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+ pd_free_states(pd->states, pd->state_count);
+ kfree(pd->name);
+ kfree(pd);
+}
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *))
+{
+ struct generic_pm_domain *pd;
+ struct genpd_power_state *states = NULL;
+ int ret, state_count = 0;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ goto out;
+
+ pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+ if (!pd->name)
+ goto free_pd;
+
+ /*
+ * Parse the domain idle states and let genpd manage the state selection
+ * for those being compatible with "domain-idle-state".
+ */
+ ret = pd_parse_states(np, parse_state, &states, &state_count);
+ if (ret)
+ goto free_name;
+
+ pd->free_states = pd_free_states;
+ pd->name = kbasename(pd->name);
+ pd->states = states;
+ pd->state_count = state_count;
+
+ pr_debug("alloc PM domain %s\n", pd->name);
+ return pd;
+
+free_name:
+ kfree(pd->name);
+free_pd:
+ kfree(pd);
+out:
+ pr_err("failed to alloc PM domain %pOF\n", np);
+ return NULL;
+}
+
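+/*
+ * Link each child power domain to the parent referenced by its
+ * "power-domains" phandle, modelling the CPU topology as genpd subdomains.
+ */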
+int dt_idle_pd_init_topology(struct device_node *np)
+{
+ struct device_node *node;
+ struct of_phandle_args child, parent;
+ int ret;
+
+ for_each_child_of_node(np, node) {
+ if (of_parse_phandle_with_args(node, "power-domains",
+ "#power-domain-cells", 0, &parent))
+ continue;
+
+ child.np = node;
+ child.args_count = 0;
+ ret = of_genpd_add_subdomain(&parent, &child);
+ of_node_put(parent.np);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
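+/*
+ * Attach the CPU device to its PM domain by name, make runtime PM usable
+ * from the IRQ-safe idle path, and mark the device as syscore so the PM
+ * core does not touch it during system-wide suspend/resume.
+ */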
+struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+ struct device *dev;
+
+ dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
+ if (IS_ERR_OR_NULL(dev))
+ return dev;
+
+ pm_runtime_irq_safe(dev);
+ if (cpu_online(cpu))
+ pm_runtime_get_sync(dev);
+
+ dev_pm_syscore_device(dev, true);
+
+ return dev;
+}
+
+void dt_idle_detach_cpu(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ dev_pm_domain_detach(dev, false);
+}
diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
new file mode 100644
index 00000000000000..a95483d08a02a8
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_genpd.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_IDLE_GENPD
+#define __DT_IDLE_GENPD
+
+struct device_node;
+struct generic_pm_domain;
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+void dt_idle_pd_free(struct generic_pm_domain *pd);
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *));
+
+int dt_idle_pd_init_topology(struct device_node *np);
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name);
+
+void dt_idle_detach_cpu(struct device *dev);
+
+#else
+
+static inline void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+}
+
+static inline struct generic_pm_domain *dt_idle_pd_alloc(
+ struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *))
+{
+ return NULL;
+}
+
+static inline int dt_idle_pd_init_topology(struct device_node *np)
+{
+ return 0;
+}
+
+static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+ return NULL;
+}
+
+static inline void dt_idle_detach_cpu(struct device *dev)
+{
+}
+
+#endif
+
+#endif