author	Andy Lutomirski <luto@kernel.org>	2015-04-23 15:25:50 -0700
committer	Andy Lutomirski <luto@kernel.org>	2015-04-23 15:25:50 -0700
commit	3609a4c498a91389a9acb5d5fc81b5781028f789 (patch)
tree	42350edb77bba57b3fc11cd05b9b16522e111102
parent	1c236b677e1a83e2751c8882dd3cd0ffef29dd1c (diff)
download	misc-tests-3609a4c498a91389a9acb5d5fc81b5781028f789.tar.gz
sysret_ss_attrs: Remove segmentation and add 64-bit code
The test now terminates after a bunch of successful iterations, it no
longer depends on segmentation (that was a false alarm), and it tests
the 64-bit case.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
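For context, a minimal standalone sketch (not part of this commit; read_ss() is
an illustrative helper) of why simply inspecting the SS selector around a
syscall cannot catch the failure mode this test targets: on affected AMD CPUs
only the hidden, cached SS descriptor attributes go stale after SYSRET, so the
selector reads back unchanged and the test has to actually touch the stack from
32-bit code instead.

#include <stdio.h>
#include <unistd.h>

/* Read the current SS selector value (illustrative helper). */
static unsigned short read_ss(void)
{
	unsigned short ss;
	asm volatile ("mov %%ss, %0" : "=r" (ss));
	return ss;
}

int main(void)
{
	unsigned short before = read_ss();
	usleep(2);			/* the return path may use SYSRET */
	unsigned short after = read_ss();

	/* Expect identical values even when the cached descriptor is stale. */
	printf("SS selector before/after syscall: 0x%hx / 0x%hx\n", before, after);
	return 0;
}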
-rw-r--r--	Makefile	  2
-rw-r--r--	sysret_ss_attrs.c	119
2 files changed, 61 insertions, 60 deletions
diff --git a/Makefile b/Makefile
index 5afc43a..82382aa 100644
--- a/Makefile
+++ b/Makefile
@@ -40,3 +40,5 @@ $(SPLIT_CC_TARGETS:%=%_64): %_64: %.cc
syscall32_from_64: syscall32_from_64.c thunks.S
gcc -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
+
+sysret_ss_attrs_64: thunks.S
diff --git a/sysret_ss_attrs.c b/sysret_ss_attrs.c
index 9d80edd..cc19383 100644
--- a/sysret_ss_attrs.c
+++ b/sysret_ss_attrs.c
@@ -21,86 +21,85 @@
#include <errno.h>
#include <pthread.h>
-static unsigned short GDT3(int idx)
-{
- return (idx << 3) | 3;
-}
-
-static int force_set_thread_area(struct user_desc *u_info)
-{
-#ifdef __x86_64__
- int ret;
- struct user_desc *low = mmap(NULL, sizeof(struct user_desc),
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT,
- -1, 0);
- memcpy(low, u_info, sizeof(struct user_desc));
- /* Call the 32-bit set_thread_area (nr 243) using int80 */
- asm volatile ("int $0x80"
- : "=a" (ret) : "a" (243), "b" (low)
- : "memory");
- if (ret < 0) {
- munmap(low, sizeof(struct user_desc));
- errno = -ret;
- return -1;
- }
- memcpy(u_info, low, sizeof(struct user_desc));
- munmap(low, sizeof(struct user_desc));
- return ret;
-#else
- return syscall(SYS_set_thread_area, u_info);
-#endif
-}
-
static void *threadproc(void *ctx)
{
- struct user_desc desc = {
- .entry_number = -1,
- .base_addr = 0,
- .limit = 1,
- .seg_32bit = 1,
- .contents = 0, /* Data, grow-up */
- .read_exec_only = 0,
- .limit_in_pages = 0,
- .seg_not_present = 0,
- .useable = 0,
- };
-
- if (force_set_thread_area(&desc) != 0)
- err(1, "set_thread_area");
-
- asm volatile (
- "mov %%ax, %%ss\n\t"
- "1:\n\t"
- /*
- "movl $0xbfffffff, %%eax\n\t"
- "int $0x80\n\t"
- */
- "jmp 1b"
- : : "a" (GDT3(desc.entry_number))
- );
+ /*
+ * Do our best to cause sleeps on this CPU to exit the kernel and
+ * re-enter with SS = 0.
+ */
+ while (true)
+ ;
return NULL;
}
+#ifdef __x86_64__
+extern unsigned long call32_from_64(void *stack, void (*function)(void));
+asm (".pushsection .text\n\t"
+ ".code32\n\t"
+ "test_ss:\n\t"
+ "pushl $0\n\t"
+ "popl %eax\n\t"
+ "ret\n\t"
+ ".code64");
+extern void test_ss(void);
+#endif
int main()
{
+ /*
+ * Start a busy-looping thread on the same CPU we're on.
+ * For simplicity, just stick everything to CPU 0. This will
+ * fail in some containers, but that's probably okay.
+ */
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
- sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
+ printf("[WARN]\tsched_setaffinity failed\n");
pthread_t thread;
if (pthread_create(&thread, 0, threadproc, 0) != 0)
err(1, "pthread_create");
- pause();
+#ifdef __x86_64__
+ unsigned char *stack32 = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (stack32 == MAP_FAILED)
+ err(1, "mmap");
+#endif
+
+ printf("[RUN]\tSyscalls followed by SS validation\n");
- while (1) {
- usleep(1);
+ for (int i = 0; i < 1000; i++) {
+ /*
+ * Go to sleep and return using sysret (if we're 64-bit
+ * or we're 32-bit on AMD on a 64-bit kernel). On AMD CPUs,
+ * SYSRET doesn't fix up the cached SS descriptor, so the
+ * kernel needs some kind of workaround to make sure that we
+ * end the system call with a valid stack segment. This
+ * can be a confusing failure because the SS *selector*
+ * is the same regardless.
+ */
+ usleep(2);
+
+#ifdef __x86_64__
+ /*
+ * On 32-bit, just doing a syscall through glibc is enough
+ * to cause a crash if our cached SS descriptor is invalid.
+ * On 64-bit, it's not, so try extra hard.
+ */
+ call32_from_64(stack32 + 4088, test_ss);
+#endif
}
+ printf("[OK]\tWe survived\n");
+
+#ifdef __x86_64__
+ munmap(stack32, 4096);
+#endif
+
return 0;
}
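As an aside on the affinity comment above: if sched_setaffinity() is rejected
(for example inside a restricted container), a cheap sanity check is to compare
sched_getcpu() in both threads. A minimal sketch with illustrative names
(report_cpu) that are not part of this test:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <pthread.h>

/* Report which CPU the would-be spinner thread landed on. */
static void *report_cpu(void *ctx)
{
	printf("helper thread on CPU %d\n", sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_t t;

	printf("main thread on CPU %d\n", sched_getcpu());
	if (pthread_create(&t, NULL, report_cpu, NULL) != 0)
		return 1;
	pthread_join(t, NULL);
	return 0;
}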