.. SPDX-License-Identifier: GPL-2.0

=====================================
Virtually Mapped Kernel Stack Support
=====================================

:Author: Shuah Khan <skhan@linuxfoundation.org>

.. contents:: :local:
]”h"]”h$]”h&]”uh1j+hj(ubj,)”}”(hhh]”hê)”}”(hhh]”hô)”}”(hhh]”hŒStack overflow handling”…””}”(hjÝhžhhŸNh Nubah}”(h]”Œid6”ah ]”h"]”h$]”h&]”Œrefid”Œstack-overflow-handling”uh1hóhjÚubah}”(h]”h ]”h"]”h$]”h&]”uh1héhj×ubah}”(h]”h ]”h"]”h$]”h&]”uh1j+hj(ubj,)”}”(hhh]”hê)”}”(hhh]”hô)”}”(hhh]”hŒ(Testing VMAP allocation with guard pages”…””}”(hjÿhžhhŸNh Nubah}”(h]”Œid7”ah ]”h"]”h$]”h&]”Œrefid”Œ(testing-vmap-allocation-with-guard-pages”uh1hóhjüubah}”(h]”h ]”h"]”h$]”h&]”uh1héhjùubah}”(h]”h ]”h"]”h$]”h&]”uh1j+hj(ubj,)”}”(hhh]”hê)”}”(hhh]”hô)”}”(hhh]”hŒ Conclusions”…””}”(hj!hžhhŸNh Nubah}”(h]”Œid8”ah ]”h"]”h$]”h&]”Œrefid”Œ conclusions”uh1hóhjubah}”(h]”h ]”h"]”h$]”h&]”uh1héhjubah}”(h]”h ]”h"]”h$]”h&]”uh1j+hj(ubeh}”(h]”h ]”h"]”h$]”h&]”uh1j&hj#hžhhŸNh Nubah}”(h]”Œcontents”ah ]”(Œcontents”Œlocal”eh"]”Œcontents”ah$]”h&]”uh1j!hŸh³h K hh¶hžhubhµ)”}”(hhh]”(hº)”}”(hŒOverview”h]”hŒOverview”…””}”(hjPhžhhŸNh Nubah}”(h]”h ]”h"]”h$]”h&]”Œrefid”j<uh1h¹hjMhžhhŸh³h K ubhê)”}”(hŒ¬This is a compilation of information from the code and original patch series that introduced the `Virtually Mapped Kernel Stacks feature `”h]”(hŒaThis is a compilation of information from the code and original patch series that introduced the ”…””}”(hj_hžhhŸNh NubhŒtitle_reference”“”)”}”(hŒK`Virtually Mapped Kernel Stacks feature `”h]”hŒIVirtually Mapped Kernel Stacks feature ”…””}”(hjihžhhŸNh Nubah}”(h]”h ]”h"]”h$]”h&]”uh1jghj_ubeh}”(h]”h ]”h"]”h$]”h&]”uh1héhŸh³h KhjMhžhubeh}”(h]”jBah ]”h"]”Œoverview”ah$]”h&]”uh1h´hh¶hžhhŸh³h K ubhµ)”}”(hhh]”(hº)”}”(hŒ Introduction”h]”hŒ Introduction”…””}”(hj‡hžhhŸNh Nubah}”(h]”h ]”h"]”h$]”h&]”j^j^uh1h¹hj„hžhhŸh³h Kubhê)”}”(hŒ±Kernel stack overflows are often hard to debug and make the kernel susceptible to exploits. Problems could show up at a later time making it difficult to isolate and root-cause.”h]”hŒ±Kernel stack overflows are often hard to debug and make the kernel susceptible to exploits. Problems could show up at a later time making it difficult to isolate and root-cause.”…””}”(hj•hžhhŸNh Nubah}”(h]”h ]”h"]”h$]”h&]”uh1héhŸh³h Khj„hžhubhê)”}”(hŒœVirtually mapped kernel stacks with guard pages cause kernel stack overflows to be caught immediately rather than causing difficult to diagnose corruptions.”h]”hŒœVirtually mapped kernel stacks with guard pages cause kernel stack overflows to be caught immediately rather than causing difficult to diagnose corruptions.”…””}”(hj£hžhhŸNh Nubah}”(h]”h ]”h"]”h$]”h&]”uh1héhŸh³h Khj„hžhubhê)”}”(hX#HAVE_ARCH_VMAP_STACK and VMAP_STACK configuration options enable support for virtually mapped stacks with guard pages. This feature causes reliable faults when the stack overflows. The usability of the stack trace after overflow and response to the overflow itself is architecture dependent.”h]”hX#HAVE_ARCH_VMAP_STACK and VMAP_STACK configuration options enable support for virtually mapped stacks with guard pages. This feature causes reliable faults when the stack overflows. 
.. note::
   As of this writing, arm64, powerpc, riscv, s390, um, and x86 have
   support for VMAP_STACK.

HAVE_ARCH_VMAP_STACK
--------------------

Architectures that can support Virtually Mapped Kernel Stacks should
enable this bool configuration option. The requirements are:

- vmalloc space must be large enough to hold many kernel stacks. This
  may rule out many 32-bit architectures.
- Stacks in vmalloc space need to work reliably. For example, if vmap
  page tables are created on demand, either this mechanism needs to work
  while the stack points to a virtual address with unpopulated page
  tables, or arch code (switch_to() and switch_mm(), most likely) needs
  to ensure that the stack's page table entries are populated before
  running on a possibly unpopulated stack.
- If the stack overflows into a guard page, something reasonable should
  happen. The definition of "reasonable" is flexible, but instantly
  rebooting without logging anything would be unfriendly.
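An architecture opts in by selecting the option from its own Kconfig. A
minimal sketch in Kconfig syntax (the x86 select below is believed to
match the current tree; see arch/Kconfig and the architecture's Kconfig
for the authoritative definitions)::

   config X86
           ...
           select HAVE_ARCH_VMAP_STACK if X86_64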
VMAP_STACK
----------

When enabled, the VMAP_STACK bool configuration option allocates
virtually mapped task stacks. This option depends on
HAVE_ARCH_VMAP_STACK.

- Enable this if you want to use virtually mapped kernel stacks with
  guard pages. This causes kernel stack overflows to be caught
  immediately rather than causing difficult-to-diagnose corruption.

.. note::
   Using this feature with KASAN requires architecture support for
   backing virtual mappings with real shadow memory, and KASAN_VMALLOC
   must be enabled.

.. note::
   With VMAP_STACK enabled, it is not possible to run DMA on stack
   allocated data (see the sketch at the end of this section).

Kernel configuration options and dependencies keep changing. Refer to
the latest code base: `Kconfig`.
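Expanding on the DMA note above: with VMAP_STACK, on-stack buffers live
in vmalloc space, which the streaming DMA API cannot translate. A
minimal sketch of the broken pattern (the helper name and device
argument are hypothetical)::

   #include <linux/dma-mapping.h>

   /* Illustrative only: never DMA to or from on-stack buffers. */
   static void dma_from_stack_is_broken(struct device *dev)
   {
           u8 buf[64];     /* with VMAP_STACK, a vmalloc address */

           /*
            * WRONG: dma_map_single() translates the CPU address with
            * virt_to_page()-style logic, which is invalid for vmalloc
            * addresses. Use a kmalloc'ed (physically contiguous)
            * buffer instead.
            */
           dma_map_single(dev, buf, sizeof(buf), DMA_FROM_DEVICE);
   }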
Allocation
----------

When a new kernel thread is created, a thread stack is allocated from
virtually contiguous memory pages obtained from the page level
allocator. These pages are mapped into contiguous kernel virtual space
with PAGE_KERNEL protections.

alloc_thread_stack_node() calls __vmalloc_node_range() to allocate the
stack with PAGE_KERNEL protections.

- Allocated stacks are cached and later reused by new threads, so memcg
  accounting is performed manually on assigning/releasing stacks to
  tasks. Hence, __vmalloc_node_range is called without __GFP_ACCOUNT.
- vm_struct is cached so that the stack can be found when a thread free
  is initiated in interrupt context; free_thread_stack() can be called
  in interrupt context.
- On arm64, all VMAP'd stacks need to have the same alignment to ensure
  that VMAP'd stack overflow detection works correctly. The arch
  specific vmap stack allocator takes care of this detail.
- This does not address interrupt stacks, according to the original
  patch series.

Thread stack allocation is initiated from clone(), fork(), vfork() and
kernel_thread() via kernel_clone(). These are a few hints for searching
the code base to understand when and how a thread stack is allocated.

The bulk of the code is in `kernel/fork.c`.

The stack_vm_area pointer in task_struct keeps track of the virtually
allocated stack, and a non-null stack_vm_area pointer serves as an
indication that the virtually mapped kernel stacks are enabled::

        struct vm_struct *stack_vm_area;
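For orientation, here is a condensed sketch of this allocation path,
loosely following kernel/fork.c (exact helpers, return types and
signatures vary across kernel versions)::

   #include <linux/sched/task_stack.h>
   #include <linux/vmalloc.h>

   static int alloc_thread_stack_node(struct task_struct *tsk, int node)
   {
           void *stack;

           /*
            * Allocate THREAD_SIZE bytes of virtually contiguous memory
            * with PAGE_KERNEL protections. __GFP_ACCOUNT is cleared
            * because memcg accounting is done manually when a (possibly
            * cached) stack is assigned to a task.
            */
           stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                        VMALLOC_START, VMALLOC_END,
                                        THREADINFO_GFP & ~__GFP_ACCOUNT,
                                        PAGE_KERNEL, 0, node,
                                        __builtin_return_address(0));
           if (!stack)
                   return -ENOMEM;

           /*
            * Cache the vm_struct so free_thread_stack() can find it
            * even when invoked from interrupt context.
            */
           tsk->stack_vm_area = find_vm_area(stack);
           tsk->stack = stack;
           return 0;
   }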
Stack overflow handling
-----------------------

Leading and trailing guard pages help detect stack overflows. When the
stack overflows into the guard pages, handlers have to be careful not
to overflow the stack again. When handlers are called, it is likely
that very little stack space is left.

On x86, this is done by handling the page fault that indicates the
kernel stack overflow on the double-fault stack.

Testing VMAP allocation with guard pages
----------------------------------------

How do we ensure that VMAP_STACK is actually allocating stacks with a
leading and a trailing guard page? The following lkdtm tests can help
detect any regressions::

        void lkdtm_STACK_GUARD_PAGE_LEADING()
        void lkdtm_STACK_GUARD_PAGE_TRAILING()
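With CONFIG_LKDTM enabled, these tests can typically be triggered
through the provoke-crash debugfs interface; each write below should
fault on the corresponding guard page and oops, rather than silently
corrupting a neighboring allocation::

        echo STACK_GUARD_PAGE_LEADING > /sys/kernel/debug/provoke-crash/DIRECT
        echo STACK_GUARD_PAGE_TRAILING > /sys/kernel/debug/provoke-crash/DIRECT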
Conclusions
-----------

- A percpu cache of vmalloced stacks appears to be a bit faster than a
  high-order stack allocation, at least when the cache hits.
- THREAD_INFO_IN_TASK gets rid of arch-specific thread_info entirely
  and simply embeds the thread_info (containing only flags) and
  'int cpu' into task_struct.
- The thread stack can be freed as soon as the task is dead (without
  waiting for RCU), and then, if vmapped stacks are in use, the entire
  stack can be cached for reuse on the same cpu.