aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPavel <pavel@ucw.cz>2018-10-28 12:47:32 +0100
committerPavel <pavel@ucw.cz>2019-01-07 11:22:23 +0100
commit83aaa653bd6a7f7140b8293309a8b1c838fdb995 (patch)
tree0079287e0f4797e79c728a1a27e25734b0fd8eb3
parent6847e2a900404d3d66e7b6380254f7c10a5a3f09 (diff)
downloadlinux-k-83aaa653bd6a7f7140b8293309a8b1c838fdb995.tar.gz
rowhammer: add attempts at rowhammer protection.
-rw-r--r--arch/x86/events/intel/core.c7
-rw-r--r--kernel/events/Kconfig9
-rw-r--r--kernel/events/Makefile2
-rw-r--r--kernel/events/nohammer.c166
-rw-r--r--tools/rowhammer.c25
5 files changed, 208 insertions, 1 deletions
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 40e12cfc87f62e..d8eabb61831744 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -26,13 +26,18 @@
*/
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
+ /* __XX event num
+ XX__ umask value ?
+ See http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-system-programming-manual-325384.pdf
+ */
[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
[PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
+ [PERF_COUNT_HW_BUS_CYCLES] = 0xc06f, /* Non halted bus cycles: 0x013c */
+ /* Hmm. manual says event 0x70, mask 0xc0? */
[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
};
diff --git a/kernel/events/Kconfig b/kernel/events/Kconfig
new file mode 100644
index 00000000000000..ccdf500811e98f
--- /dev/null
+++ b/kernel/events/Kconfig
@@ -0,0 +1,9 @@
+config NOHAMMER
+ tristate "Rowhammer protection"
+ help
+ Enable rowhammer attack prevention. Will degrade system
+ performance under attack so much that attack should not
+ be feasible. This will degrade performance when enabled.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nohammer.
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 3c022e33c10916..3cdf6f766279f4 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -5,6 +5,8 @@ endif
obj-y := core.o ring_buffer.o callchain.o
+obj-$(CONFIG_NOHAMMER) += nohammer.o
+
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_UPROBES) += uprobes.o
diff --git a/kernel/events/nohammer.c b/kernel/events/nohammer.c
new file mode 100644
index 00000000000000..1f84f2845fe794
--- /dev/null
+++ b/kernel/events/nohammer.c
@@ -0,0 +1,166 @@
+/*
+ * Attempt to prevent rowhammer attack.
+ *
+ * On many new DRAM chips, repeated read access to nearby cells can cause
+ * victim cell to flip bits. Unfortunately, that can be used to gain root
+ * on affected machine, or to execute native code from javascript, escaping
+ * the sandbox.
+ *
+ * Fortunately, a lot of memory accesses are needed between DRAM refresh
+ * cycles. This is a rather unusual workload, and we can detect it and
+ * prevent the DRAM accesses before bit flips happen.
+ *
+ * Thanks to Peter Zijlstra <peterz@infradead.org>.
+ * Thanks to presentation at blackhat:
+ * https://www.blackhat.com/docs/us-15/materials/us-15-Herath-These-Are-Not-Your-Grand-Daddys-CPU-Performance-Counters-CPU-Hardware-Performance-Counters-For-Security.pdf
+ * Thanks to Pavel Troller.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+
+/*
+ * Perf event template for the per-CPU counters: counts hardware bus
+ * cycles (used here as a proxy for DRAM activity) and delivers an
+ * overflow callback every sample_period events.  The period below is a
+ * placeholder; rh_module_init() recomputes it from the configured
+ * limits before the counters are created.
+ */
+static struct perf_event_attr rh_attr = {
+ .type = PERF_TYPE_HARDWARE,
+// .config = PERF_COUNT_HW_CACHE_MISSES,
+ .config = PERF_COUNT_HW_BUS_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1, /* keep the event on the PMU at all times */
+ .sample_period = 10000, /* overwritten in rh_module_init() */
+};
+
+/*
+ * How often is the DRAM refreshed. Setting it too low is safe.
+ */
+static int dram_refresh_msec = 64;
+
+static DEFINE_PER_CPU(struct perf_event *, rh_event); /* counter installed on this CPU */
+static DEFINE_PER_CPU(u64, rh_timestamp); /* time of the previous overflow, ns */
+static DEFINE_PER_CPU(int, rh_cpu); /* this CPU's id, for the debug printk */
+
+/*
+ * Counter-overflow handler, invoked (in NMI context) each time a CPU
+ * performs rh_attr.sample_period bus cycles.  If overflows arrive
+ * faster than the DRAM refresh interval, the CPU may be hammering
+ * DRAM: stall it with mdelay() so no row can be activated often enough
+ * between two refreshes to flip bits in its neighbours.
+ */
+static void rh_overflow(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
+{
+	u64 *ts = this_cpu_ptr(&rh_timestamp); /* this is NMI context */
+	u64 now = ktime_get_mono_fast_ns();
+	s64 delta = now - *ts;
+	static int verbose;
+
+	*ts = now;
+
+	if (verbose < 10) {
+		u64 delta2 = delta; /* do_div() needs an unsigned 64-bit lvalue */
+
+		verbose++; /* was never incremented, so the 10-message cap never engaged */
+		do_div(delta2, NSEC_PER_MSEC);
+		/*
+		 * NOTE(review): plain printk from NMI context can deadlock
+		 * on the console lock on older kernels -- consider
+		 * printk_deferred(); confirm against the target kernel.
+		 */
+		printk("rh_overflow, cpu %d, delta is %d msec\n",
+		       *this_cpu_ptr(&rh_cpu), (int) delta2);
+	}
+
+	/* Overflow came within one refresh window: throttle this CPU. */
+	if (delta < dram_refresh_msec * NSEC_PER_MSEC) {
+		mdelay(dram_refresh_msec);
+		now = ktime_get_mono_fast_ns();
+		*ts = now;
+	}
+}
+
+/*
+ * Module init: derive a per-CPU sample period from the estimated peak
+ * DRAM access rate and the refresh interval, then install a pinned
+ * hardware counter with rh_overflow() as its overflow handler on every
+ * online CPU.  Always returns 0; CPUs whose counter cannot be created
+ * are reported and simply left unprotected.
+ */
+static __init int rh_module_init(void)
+{
+	int cpu;
+
+/*
+ * DRAM refresh is every 64 msec. That is not enough to prevent rowhammer.
+ * Some vendors doubled the refresh rate to 32 msec, that helps a lot, but
+ * does not close the attack completely. 8 msec refresh would probably do
+ * that on almost all chips.
+ *
+ * Thinkpad X60 can produce circa 12,200,000 cache misses a second, that's
+ * 780,800 cache misses per 64 msec window.
+ *
+ * X60 is from a generation that is not yet vulnerable to rowhammer, and
+ * is a pretty slow machine. That means that this limit is probably very
+ * safe on newer machines.
+ */
+	//int cache_misses_per_second = 12200000;
+
+//	int cache_misses_per_second = 166000000; /* FIXME: bus cycles */
+	int cache_misses_per_second = 566000000; /* FIXME: bus cycles */
+
+/*
+ * Maximum permitted utilization of DRAM. Setting this to f will mean that
+ * when more than 1/f of maximum cache-miss performance is used, delay will
+ * be inserted, and will have similar effect on rowhammer as refreshing memory
+ * f times more often.
+ *
+ * Setting this to 8 should prevent the rowhammer attack.
+ */
+	int dram_max_utilization_factor = 1;
+
+	/*
+	 * Hardware should be able to do approximately this many
+	 * misses per refresh
+	 */
+	u64 cache_miss_per_refresh = ((u64) cache_misses_per_second * dram_refresh_msec);
+	int cache_miss_limit;
+
+	/*
+	 * DRAM is shared between CPUs, but these performance counters are
+	 * per-CPU.
+	 */
+	int max_attacking_cpus = 1;
+
+	do_div(cache_miss_per_refresh, 1000);
+
+	/*
+	 * So we do not want more than this many accesses to DRAM per
+	 * refresh.
+	 */
+	cache_miss_limit = cache_miss_per_refresh / dram_max_utilization_factor;
+
+	/*
+	 * We ignore counter overflows "too far away", but some of the
+	 * events might have actually occurred recently. Thus the additional
+	 * factor of 2.
+	 */
+	rh_attr.sample_period = cache_miss_limit / (2*max_attacking_cpus);
+
+	printk("Rowhammer protection limit is set to %d cache misses per %d msec\n",
+	       (int) rh_attr.sample_period, dram_refresh_msec);
+
+	/* XXX broken vs hotplug: CPUs brought online later get no counter */
+
+	for_each_online_cpu(cpu) {
+		struct perf_event *event;
+
+		event = perf_event_create_kernel_counter(&rh_attr, cpu, NULL, rh_overflow, NULL);
+		if (IS_ERR(event)) {
+			/*
+			 * perf_event_create_kernel_counter() reports failure
+			 * with an ERR_PTR(), never NULL: the previous
+			 * "if (!event)" test could never fire, and the
+			 * ERR_PTR stored in rh_event would later be handed
+			 * to perf_event_release_kernel() on module unload.
+			 * Store NULL for failed CPUs instead.
+			 */
+			per_cpu(rh_event, cpu) = NULL;
+			pr_err("Not enough resources to initialize nohammer on cpu %d\n", cpu);
+			continue;
+		}
+		per_cpu(rh_event, cpu) = event;
+		per_cpu(rh_cpu, cpu) = cpu;
+		pr_info("Nohammer initialized on cpu %d\n", cpu);
+	}
+	return 0;
+}
+
+/*
+ * Module exit: release the counters created in rh_module_init().
+ *
+ * A slot may hold an ERR_PTR (the init error path checked for NULL,
+ * but perf_event_create_kernel_counter() fails with an ERR_PTR) or
+ * NULL; passing either to perf_event_release_kernel() would crash,
+ * so skip them.  Like init, this only walks the currently-online
+ * CPUs -- still not hotplug safe.
+ */
+static __exit void rh_module_exit(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		struct perf_event *event = per_cpu(rh_event, cpu);
+
+		if (event && !IS_ERR(event))
+			perf_event_release_kernel(event);
+	}
+}
+
+module_init(rh_module_init);
+module_exit(rh_module_exit);
+
+MODULE_DESCRIPTION("Rowhammer protection");
+//MODULE_LICENSE("GPL v2+"); /* not a recognized license ident in module.h */
+MODULE_LICENSE("GPL"); /* per module.h, "GPL" means GPL v2 or later */
diff --git a/tools/rowhammer.c b/tools/rowhammer.c
new file mode 100644
index 00000000000000..578ec82c7fee2f
--- /dev/null
+++ b/tools/rowhammer.c
@@ -0,0 +1,25 @@
+/*
+ * gcc -O2 rowhammer.c -o rowhammer
+ */
+
+char pad[1024]; /* presumably isolates `foo` from neighbouring data
+                   (own cache line / DRAM row) -- TODO confirm intent */
+long long foo;  /* the memory location being hammered */
+char pad2[1024];
+
+/*
+ * Hammer `foo`: flush it from the cache once, then issue a billion
+ * non-temporal stores to it.  movnti bypasses the cache, so every
+ * store reaches DRAM and re-activates the row holding `foo`.
+ * x86-only (inline asm).  The stale value in %eax is irrelevant --
+ * only the DRAM traffic matters.
+ */
+int main(void)	/* was "void main": non-standard; C requires int main */
+{
+	long long i;
+
+	/* "memory" clobber added: the asm writes memory, and without it
+	   the compiler was free to reorder or cache around the stores. */
+	asm volatile(
+		"mov $foo, %%edi \n\
+		clflush (%%edi)" ::: "%edi", "memory");
+
+	for (i = 0; i < 1000000000; i++) {
+#if 1
+		asm volatile(
+			"mov $foo, %%edi \n\
+			movnti %%eax, (%%edi)" ::: "%edi", "memory");
+#endif
+
+		//	asm volatile( "" );
+	}
+	return 0;
+}