Diffstat (limited to 'drivers/gpu/arm/mali/mali_kbase_mem.h')
 drivers/gpu/arm/mali/mali_kbase_mem.h | 835 +++++++++++++++++++++++---------------
 1 file changed, 521 insertions(+), 314 deletions(-)
diff --git a/drivers/gpu/arm/mali/mali_kbase_mem.h b/drivers/gpu/arm/mali/mali_kbase_mem.h
index cabf77b8ce5889..d8b5c387e4d85d 100644
--- a/drivers/gpu/arm/mali/mali_kbase_mem.h
+++ b/drivers/gpu/arm/mali/mali_kbase_mem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,6 +37,8 @@
#include "mali_kbase_defs.h"
/* Required for kbase_mem_evictable_unmake */
#include "mali_kbase_mem_linux.h"
+#include "mali_kbase_mem_migrate.h"
+#include "mali_kbase_refcount_defs.h"
static inline void kbase_process_page_usage_inc(struct kbase_context *kctx,
int pages);
@@ -60,6 +62,186 @@ static inline void kbase_process_page_usage_inc(struct kbase_context *kctx,
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
+/* Free region */
+#define KBASE_REG_FREE (1ul << 0)
+/* CPU write access */
+#define KBASE_REG_CPU_WR (1ul << 1)
+/* GPU write access */
+#define KBASE_REG_GPU_WR (1ul << 2)
+/* No eXecute flag */
+#define KBASE_REG_GPU_NX (1ul << 3)
+/* Is CPU cached? */
+#define KBASE_REG_CPU_CACHED (1ul << 4)
+/* Is GPU cached?
+ * Some components within the GPU might only be able to access memory that is
+ * GPU cacheable. Refer to the specific GPU implementation for more details.
+ */
+#define KBASE_REG_GPU_CACHED (1ul << 5)
+
+#define KBASE_REG_GROWABLE (1ul << 6)
+/* Can grow on pf? */
+#define KBASE_REG_PF_GROW (1ul << 7)
+
+/* Allocation doesn't straddle the 4GB boundary in GPU virtual space */
+#define KBASE_REG_GPU_VA_SAME_4GB_PAGE (1ul << 8)
+
+/* inner shareable coherency */
+#define KBASE_REG_SHARE_IN (1ul << 9)
+/* inner & outer shareable coherency */
+#define KBASE_REG_SHARE_BOTH (1ul << 10)
+
+#if MALI_USE_CSF
+/* Space for 8 different zones */
+#define KBASE_REG_ZONE_BITS 3
+#else
+/* Space for 4 different zones */
+#define KBASE_REG_ZONE_BITS 2
+#endif
+
+/* The bits 11-13 (inclusive) of the kbase_va_region flag are reserved
+ * for information about the zone in which it was allocated.
+ */
+#define KBASE_REG_ZONE_SHIFT (11ul)
+#define KBASE_REG_ZONE_MASK (((1 << KBASE_REG_ZONE_BITS) - 1ul) << KBASE_REG_ZONE_SHIFT)
+
+#if KBASE_REG_ZONE_MAX > (1 << KBASE_REG_ZONE_BITS)
+#error "Too many zones for the number of zone bits defined"
+#endif
+
+/* GPU read access */
+#define KBASE_REG_GPU_RD (1ul << 14)
+/* CPU read access */
+#define KBASE_REG_CPU_RD (1ul << 15)
+
+/* Index of chosen MEMATTR for this region (0..7) */
+#define KBASE_REG_MEMATTR_MASK (7ul << 16)
+#define KBASE_REG_MEMATTR_INDEX(x) (((x)&7) << 16)
+#define KBASE_REG_MEMATTR_VALUE(x) (((x)&KBASE_REG_MEMATTR_MASK) >> 16)
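For illustration only (not part of the patch), the MEMATTR field occupies bits 16..18 of the region flags, so an index round-trips through the two macros above; the index value 2 is arbitrary:

	unsigned long flags = KBASE_REG_GPU_RD | KBASE_REG_MEMATTR_INDEX(2ul);
	unsigned long idx = KBASE_REG_MEMATTR_VALUE(flags); /* idx == 2 */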
+
+#define KBASE_REG_PROTECTED (1ul << 19)
+
+/* Region belongs to a shrinker.
+ *
+ * This can either mean that it is part of the JIT/Ephemeral or tiler heap
+ * shrinker paths. Should be removed only after making sure that there are
+ * no references remaining to it in these paths, as it may cause the physical
+ * backing of the region to disappear during use.
+ */
+#define KBASE_REG_DONT_NEED (1ul << 20)
+
+/* Imported buffer is padded? */
+#define KBASE_REG_IMPORT_PAD (1ul << 21)
+
+#if MALI_USE_CSF
+/* CSF event memory */
+#define KBASE_REG_CSF_EVENT (1ul << 22)
+/* Bit 23 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags
+ */
+#define KBASE_REG_RESERVED_BIT_23 (1ul << 23)
+#else
+/* Bit 22 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags
+ */
+#define KBASE_REG_RESERVED_BIT_22 (1ul << 22)
+/* The top of the initial commit is aligned to extension pages.
+ * Extent must be a power of 2
+ */
+#define KBASE_REG_TILER_ALIGN_TOP (1ul << 23)
+#endif /* MALI_USE_CSF */
+
+/* Bit 24 is currently unused and is available for use for a new flag */
+
+/* Memory has permanent kernel side mapping */
+#define KBASE_REG_PERMANENT_KERNEL_MAPPING (1ul << 25)
+
+/* GPU VA region has been freed by the userspace, but still remains allocated
+ * due to the reference held by CPU mappings created on the GPU VA region.
+ *
+ * A region with this flag set has had kbase_gpu_munmap() called on it, but can
+ * still be looked-up in the region tracker as a non-free region. Hence must
+ * not create or update any more GPU mappings on such regions because they will
+ * not be unmapped when the region is finally destroyed.
+ *
+ * Since such regions are still present in the region tracker, new allocations
+ * attempted with BASE_MEM_SAME_VA might fail if their address intersects with
+ * a region with this flag set.
+ *
+ * In addition, this flag indicates the gpu_alloc member might no longer be
+ * valid e.g. in infinite cache simulation.
+ */
+#define KBASE_REG_VA_FREED (1ul << 26)
+
+/* If set, the heap info address points to a u32 holding the used size in bytes;
+ * otherwise it points to a u64 holding the lowest address of unused memory.
+ */
+#define KBASE_REG_HEAP_INFO_IS_SIZE (1ul << 27)
+
+/* Allocation is actively used for JIT memory */
+#define KBASE_REG_ACTIVE_JIT_ALLOC (1ul << 28)
+
+#if MALI_USE_CSF
+/* This flag only applies to allocations in the EXEC_FIXED_VA and FIXED_VA
+ * memory zones, and it determines whether they were created with a fixed
+ * GPU VA address requested by the user.
+ */
+#define KBASE_REG_FIXED_ADDRESS (1ul << 29)
+#else
+#define KBASE_REG_RESERVED_BIT_29 (1ul << 29)
+#endif
+
+#define KBASE_REG_ZONE_CUSTOM_VA_BASE (0x100000000ULL >> PAGE_SHIFT)
+
+#if MALI_USE_CSF
+/* only used with 32-bit clients */
+/* On a 32bit platform, custom VA should be wired from 4GB to 2^(43).
+ */
+#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 43) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
+#else
+/* only used with 32-bit clients */
+/* On a 32bit platform, custom VA should be wired from 4GB to the VA limit of the
+ * GPU. Unfortunately, the Linux mmap() interface limits us to 2^32 pages (2^44
+ * bytes, see mmap64 man page for reference). So we put the default limit to the
+ * maximum possible on Linux and shrink it down, if required by the GPU, during
+ * initialization.
+ */
+#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
+/* end 32-bit clients only */
+#endif
+
+/* The starting address and size of the GPU-executable zone are dynamic
+ * and depend on the platform and the number of pages requested by the
+ * user process, with an upper limit of 4 GB.
+ */
+#define KBASE_REG_ZONE_EXEC_VA_MAX_PAGES ((1ULL << 32) >> PAGE_SHIFT) /* 4 GB */
+#define KBASE_REG_ZONE_EXEC_VA_SIZE KBASE_REG_ZONE_EXEC_VA_MAX_PAGES
+
+#if MALI_USE_CSF
+#define KBASE_REG_ZONE_MCU_SHARED_BASE (0x04000000ULL >> PAGE_SHIFT)
+#define MCU_SHARED_ZONE_SIZE (((0x08000000ULL) >> PAGE_SHIFT) - KBASE_REG_ZONE_MCU_SHARED_BASE)
+
+/* For CSF GPUs, the EXEC_VA zone is always 4GB in size, and starts at 2^47 for 64-bit
+ * clients, and 2^43 for 32-bit clients.
+ */
+#define KBASE_REG_ZONE_EXEC_VA_BASE_64 ((1ULL << 47) >> PAGE_SHIFT)
+#define KBASE_REG_ZONE_EXEC_VA_BASE_32 ((1ULL << 43) >> PAGE_SHIFT)
+/* Executable zone supporting FIXED/FIXABLE allocations.
+ * It is always 4GB in size.
+ */
+#define KBASE_REG_ZONE_EXEC_FIXED_VA_SIZE KBASE_REG_ZONE_EXEC_VA_MAX_PAGES
+
+/* Non-executable zone supporting FIXED/FIXABLE allocations.
+ * It extends from (2^47) up to (2^48)-1 for 64-bit userspace clients, and from
+ * (2^43) up to (2^44)-1 for 32-bit userspace clients: 32-bit userspace cannot
+ * map addresses beyond 2^44, but 64-bit can, so the end of the FIXED_VA zone
+ * for 64-bit clients is (2^48)-1.
+ */
+#define KBASE_REG_ZONE_FIXED_VA_END_64 ((1ULL << 48) >> PAGE_SHIFT)
+#define KBASE_REG_ZONE_FIXED_VA_END_32 ((1ULL << 44) >> PAGE_SHIFT)
+
+#endif
+
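A quick worked example of the zone arithmetic above, assuming 4 KiB pages (PAGE_SHIFT == 12); illustration only:

/* KBASE_REG_ZONE_CUSTOM_VA_BASE    = 0x100000000 >> 12  = 0x100000 pages (the 4 GiB boundary)
 * KBASE_REG_ZONE_EXEC_VA_MAX_PAGES = (1ULL << 32) >> 12 = 0x100000 pages (4 GiB)
 * KBASE_REG_ZONE_EXEC_VA_BASE_64   = (1ULL << 47) >> 12 = 0x800000000 pages (CSF, 64-bit clients)
 */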
/*
* A CPU mapping
*/
@@ -183,7 +365,106 @@ struct kbase_mem_phy_alloc {
};
/**
- * The top bit of kbase_alloc_import_user_buf::current_mapping_usage_count is
+ * enum kbase_page_status - Status of a page used for page migration.
+ *
+ * @MEM_POOL: Stable state. Page is located in a memory pool and can safely
+ * be migrated.
+ * @ALLOCATE_IN_PROGRESS: Transitory state. A page is set to this status as
+ * soon as it leaves a memory pool.
+ * @SPILL_IN_PROGRESS: Transitory state. Corner case where pages in a memory
+ * pool of a dying context are being moved to the device
+ * memory pool.
+ * @NOT_MOVABLE: Stable state. Page has been allocated for an object that is
+ * not movable, but may return to be movable when the object
+ * is freed.
+ * @ALLOCATED_MAPPED: Stable state. Page has been allocated, mapped to GPU
+ * and has reference to kbase_mem_phy_alloc object.
+ * @PT_MAPPED: Stable state. Similar to ALLOCATED_MAPPED, but page doesn't
+ * reference kbase_mem_phy_alloc object. Used as a page in MMU
+ * page table.
+ * @FREE_IN_PROGRESS: Transitory state. A page is set to this status as soon as
+ * the driver manages to acquire a lock on the page while
+ * unmapping it. This status means that a memory release is
+ * happening and it's still not complete.
+ * @FREE_ISOLATED_IN_PROGRESS: Transitory state. This is a very particular corner case.
+ * A page is isolated while it is in ALLOCATED_MAPPED state,
+ * but then the driver tries to destroy the allocation.
+ * @FREE_PT_ISOLATED_IN_PROGRESS: Transitory state. This is a very particular corner case.
+ * A page is isolated while it is in PT_MAPPED state, but
+ * then the driver tries to destroy the allocation.
+ *
+ * Pages can only be migrated in stable states.
+ */
+enum kbase_page_status {
+ MEM_POOL = 0,
+ ALLOCATE_IN_PROGRESS,
+ SPILL_IN_PROGRESS,
+ NOT_MOVABLE,
+ ALLOCATED_MAPPED,
+ PT_MAPPED,
+ FREE_IN_PROGRESS,
+ FREE_ISOLATED_IN_PROGRESS,
+ FREE_PT_ISOLATED_IN_PROGRESS,
+};
+
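As a hedged sketch (the helper name is hypothetical, not from the patch), the "stable state" rule from the comment above could be expressed as:

static inline bool kbase_page_status_is_stable(enum kbase_page_status status)
{
	/* Only these four states are documented as safe for migration. */
	return status == MEM_POOL || status == NOT_MOVABLE ||
	       status == ALLOCATED_MAPPED || status == PT_MAPPED;
}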
+#define PGD_VPFN_LEVEL_MASK ((u64)0x3)
+#define PGD_VPFN_LEVEL_GET_LEVEL(pgd_vpfn_level) (pgd_vpfn_level & PGD_VPFN_LEVEL_MASK)
+#define PGD_VPFN_LEVEL_GET_VPFN(pgd_vpfn_level) (pgd_vpfn_level & ~PGD_VPFN_LEVEL_MASK)
+#define PGD_VPFN_LEVEL_SET(pgd_vpfn, level) \
+ ((pgd_vpfn & ~PGD_VPFN_LEVEL_MASK) | (level & PGD_VPFN_LEVEL_MASK))
+
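Illustrative round trip for the three macros above (values are arbitrary; the encoding assumes the two low bits of the PGD VPFN are free to hold the MMU level):

	u64 packed = PGD_VPFN_LEVEL_SET(0x1000ULL, 2ULL); /* == 0x1002 */
	u64 level = PGD_VPFN_LEVEL_GET_LEVEL(packed);      /* == 2 */
	u64 vpfn = PGD_VPFN_LEVEL_GET_VPFN(packed);        /* == 0x1000 */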
+/**
+ * struct kbase_page_metadata - Metadata for each page in kbase
+ *
+ * @kbdev: Pointer to kbase device.
+ * @dma_addr: DMA address mapped to page.
+ * @migrate_lock: A spinlock to protect the private metadata.
+ * @data: Member in union valid based on @status.
+ * @status: Status to keep track if page can be migrated at any
+ * given moment. MSB will indicate if page is isolated.
+ * Protected by @migrate_lock.
+ * @vmap_count: Counter of kernel mappings.
+ * @group_id: Memory group ID obtained at the time of page allocation.
+ *
+ * Each 4KB page will have a reference to this struct in the private field.
+ * This will be used to keep track of information required for Linux page
+ * migration functionality as well as address for DMA mapping.
+ */
+struct kbase_page_metadata {
+ dma_addr_t dma_addr;
+ spinlock_t migrate_lock;
+
+ union {
+ struct {
+ struct kbase_mem_pool *pool;
+ /* Pool could be terminated after page is isolated and therefore
+ * won't be able to get reference to kbase device.
+ */
+ struct kbase_device *kbdev;
+ } mem_pool;
+ struct {
+ struct kbase_va_region *reg;
+ struct kbase_mmu_table *mmut;
+ u64 vpfn;
+ } mapped;
+ struct {
+ struct kbase_mmu_table *mmut;
+ u64 pgd_vpfn_level;
+ } pt_mapped;
+ struct {
+ struct kbase_device *kbdev;
+ } free_isolated;
+ struct {
+ struct kbase_device *kbdev;
+ } free_pt_isolated;
+ } data;
+
+ u8 status;
+ u8 vmap_count;
+ u8 group_id;
+};
+
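For illustration, the metadata is carried in the page's private field (see kbase_page_private() later in this diff); a minimal attach helper might look like the sketch below, where the helper name is an assumption rather than part of the patch:

static inline void kbase_page_attach_metadata(struct page *p,
					      struct kbase_page_metadata *md)
{
	/* Store the metadata pointer in page->private for later retrieval. */
	SetPagePrivate(p);
	set_page_private(p, (unsigned long)md);
}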
+/* The top bit of kbase_alloc_import_user_buf::current_mapping_usage_count is
* used to signify that a buffer was pinned when it was imported. Since the
* reference count is limited by the number of atoms that can be submitted at
* once there should be no danger of overflowing into this bit.
@@ -205,6 +486,46 @@ enum kbase_jit_report_flags {
KBASE_JIT_REPORT_ON_ALLOC_OR_FREE = (1u << 0)
};
+/**
+ * kbase_zone_to_bits - Convert a memory zone @zone to the corresponding
+ * bitpattern, for ORing together with other flags.
+ * @zone: Memory zone
+ *
+ * Return: Bitpattern with the appropriate bits set.
+ */
+unsigned long kbase_zone_to_bits(enum kbase_memory_zone zone);
+
+/**
+ * kbase_bits_to_zone - Convert the bitpattern @zone_bits to the corresponding
+ * zone identifier
+ * @zone_bits: Memory allocation flag containing a zone pattern
+ *
+ * Return: Zone identifier for valid zone bitpatterns.
+ */
+enum kbase_memory_zone kbase_bits_to_zone(unsigned long zone_bits);
+
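A plausible implementation of the two converters declared above, shown only as a sketch (the real definitions live in the driver's .c files), simply shifts against the zone mask:

unsigned long kbase_zone_to_bits(enum kbase_memory_zone zone)
{
	return ((unsigned long)zone << KBASE_REG_ZONE_SHIFT) & KBASE_REG_ZONE_MASK;
}

enum kbase_memory_zone kbase_bits_to_zone(unsigned long zone_bits)
{
	return (enum kbase_memory_zone)((zone_bits & KBASE_REG_ZONE_MASK) >>
					KBASE_REG_ZONE_SHIFT);
}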
+/**
+ * kbase_reg_zone_get_name - Get the string name for a given memory zone
+ * @zone: Memory zone identifier
+ *
+ * Return: string for valid memory zone, NULL otherwise
+ */
+char *kbase_reg_zone_get_name(enum kbase_memory_zone zone);
+
+/**
+ * kbase_set_phy_alloc_page_status - Set the page migration status of the underlying
+ * physical allocation.
+ * @alloc: the physical allocation containing the pages whose metadata is going
+ * to be modified
+ * @status: the status the pages should end up in
+ *
+ * Note that this function does not go through all of the checking to ensure that
+ * proper states are set. Instead, it is only used when we change the allocation
+ * to NOT_MOVABLE or from NOT_MOVABLE to ALLOCATED_MAPPED.
+ */
+void kbase_set_phy_alloc_page_status(struct kbase_mem_phy_alloc *alloc,
+ enum kbase_page_status status);
+
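Hypothetical usage sketch (the caller and the alloc variable are assumptions): a kernel-side user that needs the backing pages to stay put could pin and later unpin them, matching the two transitions named in the note above:

	kbase_set_phy_alloc_page_status(alloc, NOT_MOVABLE);
	/* ... pages must not be migrated while they are in use here ... */
	kbase_set_phy_alloc_page_status(alloc, ALLOCATED_MAPPED);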
static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
{
KBASE_DEBUG_ASSERT(alloc);
@@ -305,8 +626,8 @@ static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_m
* @jit_usage_id: The last just-in-time memory usage ID for this region.
* @jit_bin_id: The just-in-time memory bin this region came from.
* @va_refcnt: Number of users of this region. Protected by reg_lock.
- * @no_user_free_refcnt: Number of users that want to prevent the region from
- * being freed by userspace.
+ * @no_user_free_count: Number of contexts that want to prevent the region
+ * from being freed by userspace.
* @heap_info_gpu_addr: Pointer to an object in GPU memory defining an end of
* an allocated region
* The object can be one of:
@@ -334,204 +655,6 @@ struct kbase_va_region {
size_t nr_pages;
size_t initial_commit;
size_t threshold_pages;
-
-/* Free region */
-#define KBASE_REG_FREE (1ul << 0)
-/* CPU write access */
-#define KBASE_REG_CPU_WR (1ul << 1)
-/* GPU write access */
-#define KBASE_REG_GPU_WR (1ul << 2)
-/* No eXecute flag */
-#define KBASE_REG_GPU_NX (1ul << 3)
-/* Is CPU cached? */
-#define KBASE_REG_CPU_CACHED (1ul << 4)
-/* Is GPU cached?
- * Some components within the GPU might only be able to access memory that is
- * GPU cacheable. Refer to the specific GPU implementation for more details.
- */
-#define KBASE_REG_GPU_CACHED (1ul << 5)
-
-#define KBASE_REG_GROWABLE (1ul << 6)
-/* Can grow on pf? */
-#define KBASE_REG_PF_GROW (1ul << 7)
-
-/* Allocation doesn't straddle the 4GB boundary in GPU virtual space */
-#define KBASE_REG_GPU_VA_SAME_4GB_PAGE (1ul << 8)
-
-/* inner shareable coherency */
-#define KBASE_REG_SHARE_IN (1ul << 9)
-/* inner & outer shareable coherency */
-#define KBASE_REG_SHARE_BOTH (1ul << 10)
-
-#if MALI_USE_CSF
-/* Space for 8 different zones */
-#define KBASE_REG_ZONE_BITS 3
-#else
-/* Space for 4 different zones */
-#define KBASE_REG_ZONE_BITS 2
-#endif
-
-#define KBASE_REG_ZONE_MASK (((1 << KBASE_REG_ZONE_BITS) - 1ul) << 11)
-#define KBASE_REG_ZONE(x) (((x) & ((1 << KBASE_REG_ZONE_BITS) - 1ul)) << 11)
-#define KBASE_REG_ZONE_IDX(x) (((x) & KBASE_REG_ZONE_MASK) >> 11)
-
-#if KBASE_REG_ZONE_MAX > (1 << KBASE_REG_ZONE_BITS)
-#error "Too many zones for the number of zone bits defined"
-#endif
-
-/* GPU read access */
-#define KBASE_REG_GPU_RD (1ul << 14)
-/* CPU read access */
-#define KBASE_REG_CPU_RD (1ul << 15)
-
-/* Index of chosen MEMATTR for this region (0..7) */
-#define KBASE_REG_MEMATTR_MASK (7ul << 16)
-#define KBASE_REG_MEMATTR_INDEX(x) (((x) & 7) << 16)
-#define KBASE_REG_MEMATTR_VALUE(x) (((x) & KBASE_REG_MEMATTR_MASK) >> 16)
-
-#define KBASE_REG_PROTECTED (1ul << 19)
-
-/* Region belongs to a shrinker.
- *
- * This can either mean that it is part of the JIT/Ephemeral or tiler heap
- * shrinker paths. Should be removed only after making sure that there are
- * no references remaining to it in these paths, as it may cause the physical
- * backing of the region to disappear during use.
- */
-#define KBASE_REG_DONT_NEED (1ul << 20)
-
-/* Imported buffer is padded? */
-#define KBASE_REG_IMPORT_PAD (1ul << 21)
-
-#if MALI_USE_CSF
-/* CSF event memory */
-#define KBASE_REG_CSF_EVENT (1ul << 22)
-#else
-/* Bit 22 is reserved.
- *
- * Do not remove, use the next unreserved bit for new flags
- */
-#define KBASE_REG_RESERVED_BIT_22 (1ul << 22)
-#endif
-
-#if !MALI_USE_CSF
-/* The top of the initial commit is aligned to extension pages.
- * Extent must be a power of 2
- */
-#define KBASE_REG_TILER_ALIGN_TOP (1ul << 23)
-#else
-/* Bit 23 is reserved.
- *
- * Do not remove, use the next unreserved bit for new flags
- */
-#define KBASE_REG_RESERVED_BIT_23 (1ul << 23)
-#endif /* !MALI_USE_CSF */
-
-/* Bit 24 is currently unused and is available for use for a new flag */
-
-/* Memory has permanent kernel side mapping */
-#define KBASE_REG_PERMANENT_KERNEL_MAPPING (1ul << 25)
-
-/* GPU VA region has been freed by the userspace, but still remains allocated
- * due to the reference held by CPU mappings created on the GPU VA region.
- *
- * A region with this flag set has had kbase_gpu_munmap() called on it, but can
- * still be looked-up in the region tracker as a non-free region. Hence must
- * not create or update any more GPU mappings on such regions because they will
- * not be unmapped when the region is finally destroyed.
- *
- * Since such regions are still present in the region tracker, new allocations
- * attempted with BASE_MEM_SAME_VA might fail if their address intersects with
- * a region with this flag set.
- *
- * In addition, this flag indicates the gpu_alloc member might no longer valid
- * e.g. in infinite cache simulation.
- */
-#define KBASE_REG_VA_FREED (1ul << 26)
-
-/* If set, the heap info address points to a u32 holding the used size in bytes;
- * otherwise it points to a u64 holding the lowest address of unused memory.
- */
-#define KBASE_REG_HEAP_INFO_IS_SIZE (1ul << 27)
-
-/* Allocation is actively used for JIT memory */
-#define KBASE_REG_ACTIVE_JIT_ALLOC (1ul << 28)
-
-#if MALI_USE_CSF
-/* This flag only applies to allocations in the EXEC_FIXED_VA and FIXED_VA
- * memory zones, and it determines whether they were created with a fixed
- * GPU VA address requested by the user.
- */
-#define KBASE_REG_FIXED_ADDRESS (1ul << 29)
-#else
-#define KBASE_REG_RESERVED_BIT_29 (1ul << 29)
-#endif
-
-#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0)
-
-#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(1)
-#define KBASE_REG_ZONE_CUSTOM_VA_BASE (0x100000000ULL >> PAGE_SHIFT)
-
-#if MALI_USE_CSF
-/* only used with 32-bit clients */
-/* On a 32bit platform, custom VA should be wired from 4GB to 2^(43).
- */
-#define KBASE_REG_ZONE_CUSTOM_VA_SIZE \
- (((1ULL << 43) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
-#else
-/* only used with 32-bit clients */
-/* On a 32bit platform, custom VA should be wired from 4GB to the VA limit of the
- * GPU. Unfortunately, the Linux mmap() interface limits us to 2^32 pages (2^44
- * bytes, see mmap64 man page for reference). So we put the default limit to the
- * maximum possible on Linux and shrink it down, if required by the GPU, during
- * initialization.
- */
-#define KBASE_REG_ZONE_CUSTOM_VA_SIZE \
- (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
-/* end 32-bit clients only */
-#endif
-
-/* The starting address and size of the GPU-executable zone are dynamic
- * and depend on the platform and the number of pages requested by the
- * user process, with an upper limit of 4 GB.
- */
-#define KBASE_REG_ZONE_EXEC_VA KBASE_REG_ZONE(2)
-#define KBASE_REG_ZONE_EXEC_VA_MAX_PAGES ((1ULL << 32) >> PAGE_SHIFT) /* 4 GB */
-
-#if MALI_USE_CSF
-#define KBASE_REG_ZONE_MCU_SHARED KBASE_REG_ZONE(3)
-#define KBASE_REG_ZONE_MCU_SHARED_BASE (0x04000000ULL >> PAGE_SHIFT)
-#define KBASE_REG_ZONE_MCU_SHARED_SIZE (((0x08000000ULL) >> PAGE_SHIFT) - \
- KBASE_REG_ZONE_MCU_SHARED_BASE)
-
-/* For CSF GPUs, the EXEC_VA zone is always 4GB in size, and starts at 2^47 for 64-bit
- * clients, and 2^43 for 32-bit clients.
- */
-#define KBASE_REG_ZONE_EXEC_VA_BASE_64 ((1ULL << 47) >> PAGE_SHIFT)
-#define KBASE_REG_ZONE_EXEC_VA_BASE_32 ((1ULL << 43) >> PAGE_SHIFT)
-#define KBASE_REG_ZONE_EXEC_VA_SIZE KBASE_REG_ZONE_EXEC_VA_MAX_PAGES
-
-/* Executable zone supporting FIXED/FIXABLE allocations.
- * It is always 4GB in size.
- */
-
-#define KBASE_REG_ZONE_EXEC_FIXED_VA KBASE_REG_ZONE(4)
-#define KBASE_REG_ZONE_EXEC_FIXED_VA_SIZE KBASE_REG_ZONE_EXEC_VA_MAX_PAGES
-
-/* Non-executable zone supporting FIXED/FIXABLE allocations.
- * It extends from (2^47) up to (2^48)-1, for 64-bit userspace clients, and from
- * (2^43) up to (2^44)-1 for 32-bit userspace clients.
- */
-#define KBASE_REG_ZONE_FIXED_VA KBASE_REG_ZONE(5)
-
-/* Again - 32-bit userspace cannot map addresses beyond 2^44, but 64-bit can - and so
- * the end of the FIXED_VA zone for 64-bit clients is (2^48)-1.
- */
-#define KBASE_REG_ZONE_FIXED_VA_END_64 ((1ULL << 48) >> PAGE_SHIFT)
-#define KBASE_REG_ZONE_FIXED_VA_END_32 ((1ULL << 44) >> PAGE_SHIFT)
-
-#endif
-
unsigned long flags;
size_t extension;
struct kbase_mem_phy_alloc *cpu_alloc;
@@ -567,25 +690,24 @@ struct kbase_va_region {
size_t used_pages;
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
- int va_refcnt;
- int no_user_free_refcnt;
+ kbase_refcount_t va_refcnt;
+ atomic_t no_user_free_count;
};
/**
- * kbase_is_ctx_reg_zone - determine whether a KBASE_REG_ZONE_<...> is for a
- * context or for a device
- * @zone_bits: A KBASE_REG_ZONE_<...> to query
+ * kbase_is_ctx_reg_zone - Determine whether a zone is associated with a
+ * context or with the device
+ * @zone: Zone identifier
*
- * Return: True if the zone for @zone_bits is a context zone, False otherwise
+ * Return: True if @zone is a context zone, False otherwise
*/
-static inline bool kbase_is_ctx_reg_zone(unsigned long zone_bits)
+static inline bool kbase_is_ctx_reg_zone(enum kbase_memory_zone zone)
{
- WARN_ON((zone_bits & KBASE_REG_ZONE_MASK) != zone_bits);
- return (zone_bits == KBASE_REG_ZONE_SAME_VA ||
#if MALI_USE_CSF
- zone_bits == KBASE_REG_ZONE_EXEC_FIXED_VA || zone_bits == KBASE_REG_ZONE_FIXED_VA ||
+ return !(zone == MCU_SHARED_ZONE);
+#else
+ return true;
#endif
- zone_bits == KBASE_REG_ZONE_CUSTOM_VA || zone_bits == KBASE_REG_ZONE_EXEC_VA);
}
/* Special marker for failed JIT allocations that still must be marked as
@@ -645,15 +767,12 @@ static inline void kbase_region_refcnt_free(struct kbase_device *kbdev,
static inline struct kbase_va_region *kbase_va_region_alloc_get(
struct kbase_context *kctx, struct kbase_va_region *region)
{
- lockdep_assert_held(&kctx->reg_lock);
+ WARN_ON(!kbase_refcount_read(&region->va_refcnt));
+ WARN_ON(kbase_refcount_read(&region->va_refcnt) == INT_MAX);
- WARN_ON(!region->va_refcnt);
- WARN_ON(region->va_refcnt == INT_MAX);
-
- /* non-atomic as kctx->reg_lock is held */
dev_dbg(kctx->kbdev->dev, "va_refcnt %d before get %pK\n",
- region->va_refcnt, (void *)region);
- region->va_refcnt++;
+ kbase_refcount_read(&region->va_refcnt), (void *)region);
+ kbase_refcount_inc(&region->va_refcnt);
return region;
}
@@ -661,17 +780,14 @@ static inline struct kbase_va_region *kbase_va_region_alloc_get(
static inline struct kbase_va_region *kbase_va_region_alloc_put(
struct kbase_context *kctx, struct kbase_va_region *region)
{
- lockdep_assert_held(&kctx->reg_lock);
-
- WARN_ON(region->va_refcnt <= 0);
+ WARN_ON(kbase_refcount_read(&region->va_refcnt) <= 0);
WARN_ON(region->flags & KBASE_REG_FREE);
- /* non-atomic as kctx->reg_lock is held */
- region->va_refcnt--;
- dev_dbg(kctx->kbdev->dev, "va_refcnt %d after put %pK\n",
- region->va_refcnt, (void *)region);
- if (!region->va_refcnt)
+ if (kbase_refcount_dec_and_test(&region->va_refcnt))
kbase_region_refcnt_free(kctx->kbdev, region);
+ else
+ dev_dbg(kctx->kbdev->dev, "va_refcnt %d after put %pK\n",
+ kbase_refcount_read(&region->va_refcnt), (void *)region);
return NULL;
}
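Hypothetical usage sketch for the get/put pair above: holding a reference keeps the region structure alive until the matching put, even if it is freed elsewhere in the meantime:

	reg = kbase_va_region_alloc_get(kctx, reg);
	/* ... reg remains safe to dereference here ... */
	kbase_va_region_alloc_put(kctx, reg);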
@@ -685,58 +801,44 @@ static inline struct kbase_va_region *kbase_va_region_alloc_put(
* Hence, callers cannot rely on this check alone to determine if a region might be shrunk
* by any part of kbase. Instead they should use kbase_is_region_shrinkable().
*
- * @kctx: Pointer to kbase context.
* @region: Pointer to region.
*
* Return: true if userspace cannot free the region, false if userspace can free the region.
*/
-static inline bool kbase_va_region_is_no_user_free(struct kbase_context *kctx,
- struct kbase_va_region *region)
+static inline bool kbase_va_region_is_no_user_free(struct kbase_va_region *region)
{
- lockdep_assert_held(&kctx->reg_lock);
- return region->no_user_free_refcnt > 0;
+ return atomic_read(&region->no_user_free_count) > 0;
}
/**
- * kbase_va_region_no_user_free_get - Increment "no user free" refcount for a region.
+ * kbase_va_region_no_user_free_inc - Increment "no user free" count for a region.
* Calling this function will prevent the region to be shrunk by parts of kbase that
- * don't own the region (as long as the refcount stays above zero). Refer to
+ * don't own the region (as long as the count stays above zero). Refer to
* kbase_va_region_is_no_user_free() for more information.
*
- * @kctx: Pointer to kbase context.
* @region: Pointer to region (not shrinkable).
*
* Return: the pointer to the region passed as argument.
*/
-static inline struct kbase_va_region *
-kbase_va_region_no_user_free_get(struct kbase_context *kctx, struct kbase_va_region *region)
+static inline void kbase_va_region_no_user_free_inc(struct kbase_va_region *region)
{
- lockdep_assert_held(&kctx->reg_lock);
-
WARN_ON(kbase_is_region_shrinkable(region));
- WARN_ON(region->no_user_free_refcnt == INT_MAX);
+ WARN_ON(atomic_read(&region->no_user_free_count) == INT_MAX);
/* non-atomic as kctx->reg_lock is held */
- region->no_user_free_refcnt++;
-
- return region;
+ atomic_inc(&region->no_user_free_count);
}
/**
- * kbase_va_region_no_user_free_put - Decrement "no user free" refcount for a region.
+ * kbase_va_region_no_user_free_dec - Decrement "no user free" count for a region.
*
- * @kctx: Pointer to kbase context.
* @region: Pointer to region (not shrinkable).
*/
-static inline void kbase_va_region_no_user_free_put(struct kbase_context *kctx,
- struct kbase_va_region *region)
+static inline void kbase_va_region_no_user_free_dec(struct kbase_va_region *region)
{
- lockdep_assert_held(&kctx->reg_lock);
+ WARN_ON(!kbase_va_region_is_no_user_free(region));
- WARN_ON(!kbase_va_region_is_no_user_free(kctx, region));
-
- /* non-atomic as kctx->reg_lock is held */
- region->no_user_free_refcnt--;
+ atomic_dec(&region->no_user_free_count);
}
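Hypothetical usage sketch: pinning a region so userspace cannot free it while a kernel-side user (for example a JIT allocation) still needs it:

	kbase_va_region_no_user_free_inc(reg);
	/* ... userspace frees of reg are refused while the count is non-zero ... */
	kbase_va_region_no_user_free_dec(reg);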
/* Common functions */
@@ -952,12 +1054,9 @@ static inline size_t kbase_mem_pool_config_get_max_size(
*
* Return: 0 on success, negative -errno on error
*/
-int kbase_mem_pool_init(struct kbase_mem_pool *pool,
- const struct kbase_mem_pool_config *config,
- unsigned int order,
- int group_id,
- struct kbase_device *kbdev,
- struct kbase_mem_pool *next_pool);
+int kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config,
+ unsigned int order, int group_id, struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool);
/**
* kbase_mem_pool_term - Destroy a memory pool
@@ -1037,6 +1136,9 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
* @pages: Pointer to array where the physical address of the allocated
* pages will be stored.
* @partial_allowed: If fewer pages allocated is allowed
+ * @page_owner: Pointer to the task that created the Kbase context for which
+ * the pages are being allocated. It can be NULL if the pages
+ * won't be associated with any Kbase context.
*
* Like kbase_mem_pool_alloc() but optimized for allocating many pages.
*
@@ -1053,7 +1155,8 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
* this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
*/
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
- struct tagged_addr *pages, bool partial_allowed);
+ struct tagged_addr *pages, bool partial_allowed,
+ struct task_struct *page_owner);
/**
* kbase_mem_pool_alloc_pages_locked - Allocate pages from memory pool
@@ -1165,13 +1268,17 @@ void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
* kbase_mem_pool_grow - Grow the pool
* @pool: Memory pool to grow
* @nr_to_grow: Number of pages to add to the pool
+ * @page_owner: Pointer to the task that created the Kbase context for which
+ * the memory pool is being grown. It can be NULL if the pages
+ * to be allocated won't be associated with any Kbase context.
*
* Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
* become larger than the maximum size specified.
*
 * Return: 0 on success, -ENOMEM if unable to allocate sufficient pages
*/
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
+ struct task_struct *page_owner);
/**
* kbase_mem_pool_trim - Grow or shrink the pool to a new size
@@ -1205,6 +1312,16 @@ void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool);
struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool);
/**
+ * kbase_mem_pool_free_page - Free a page from a memory pool.
+ * @pool: Memory pool to free a page from
+ * @p: Page to free
+ *
+ * This will free any associated data stored for the page and release
+ * the page back to the kernel.
+ */
+void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p);
+
+/**
* kbase_region_tracker_init - Initialize the region tracker data structure
* @kctx: kbase context
*
@@ -1249,18 +1366,19 @@ int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages
void kbase_region_tracker_term(struct kbase_context *kctx);
/**
- * kbase_region_tracker_term_rbtree - Free memory for a region tracker
+ * kbase_region_tracker_erase_rbtree - Free memory for a region tracker
*
* @rbtree: Region tracker tree root
*
* This will free all the regions within the region tracker
*/
-void kbase_region_tracker_term_rbtree(struct rb_root *rbtree);
+void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree);
struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
struct kbase_context *kctx, u64 gpu_addr);
struct kbase_va_region *kbase_find_region_enclosing_address(
struct rb_root *rbtree, u64 gpu_addr);
+void kbase_region_tracker_insert(struct kbase_va_region *new_reg);
/**
* kbase_region_tracker_find_region_base_address - Check that a pointer is
@@ -1277,8 +1395,11 @@ struct kbase_va_region *kbase_region_tracker_find_region_base_address(
struct kbase_va_region *kbase_find_region_base_address(struct rb_root *rbtree,
u64 gpu_addr);
-struct kbase_va_region *kbase_alloc_free_region(struct rb_root *rbtree,
- u64 start_pfn, size_t nr_pages, int zone);
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_reg_zone *zone, u64 start_pfn,
+ size_t nr_pages);
+struct kbase_va_region *kbase_ctx_alloc_free_region(struct kbase_context *kctx,
+ enum kbase_memory_zone id, u64 start_pfn,
+ size_t nr_pages);
void kbase_free_alloced_region(struct kbase_va_region *reg);
int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg,
u64 addr, size_t nr_pages, size_t align);
@@ -1289,6 +1410,32 @@ int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);
+static inline bool kbase_import_size_is_valid(struct kbase_device *kbdev, u64 va_pages)
+{
+ if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
+ dev_dbg(
+ kbdev->dev,
+ "Import attempted with va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",
+ (unsigned long long)va_pages);
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool kbase_alias_size_is_valid(struct kbase_device *kbdev, u64 va_pages)
+{
+ if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
+ dev_dbg(
+ kbdev->dev,
+ "Alias attempted with va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",
+ (unsigned long long)va_pages);
+ return false;
+ }
+
+ return true;
+}
+
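Hypothetical usage sketch for the two validators above, rejecting an oversized request before any region is created (the surrounding error handling is illustrative):

	if (!kbase_import_size_is_valid(kctx->kbdev, va_pages))
		return -EINVAL;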
/**
* kbase_check_alloc_sizes - check user space sizes parameters for an
* allocation
@@ -1616,7 +1763,7 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
*
* @prealloc_sa: Information about the partial allocation if the amount of memory requested
* is not a multiple of 2MB. One instance of struct kbase_sub_alloc must be
- * allocated by the caller iff CONFIG_MALI_2MB_ALLOC is enabled.
+ * allocated by the caller if kbdev->pagesize_2mb is enabled.
*
* Allocates @nr_pages_requested and updates the alloc object. This function does not allocate new
* pages from the kernel, and therefore will never trigger the OoM killer. Therefore, it can be
@@ -1644,7 +1791,7 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
* This ensures that the pool can be grown to the required size and that the allocation can
* complete without another thread using the newly grown pages.
*
- * If CONFIG_MALI_2MB_ALLOC is defined and the allocation is >= 2MB, then @pool must be one of the
+ * If kbdev->pagesize_2mb is enabled and the allocation is >= 2MB, then @pool must be one of the
* pools from alloc->imported.native.kctx->mem_pools.large[]. Otherwise it must be one of the
* mempools from alloc->imported.native.kctx->mem_pools.small[].
*
@@ -1694,7 +1841,7 @@ void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
struct kbase_mem_pool *pool, struct tagged_addr *pages,
size_t nr_pages_to_free);
-static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
+static inline void kbase_set_dma_addr_as_priv(struct page *p, dma_addr_t dma_addr)
{
SetPagePrivate(p);
if (sizeof(dma_addr_t) > sizeof(p->private)) {
@@ -1710,7 +1857,7 @@ static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
}
}
-static inline dma_addr_t kbase_dma_addr(struct page *p)
+static inline dma_addr_t kbase_dma_addr_as_priv(struct page *p)
{
if (sizeof(dma_addr_t) > sizeof(p->private))
return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;
@@ -1718,11 +1865,35 @@ static inline dma_addr_t kbase_dma_addr(struct page *p)
return (dma_addr_t)page_private(p);
}
-static inline void kbase_clear_dma_addr(struct page *p)
+static inline void kbase_clear_dma_addr_as_priv(struct page *p)
{
ClearPagePrivate(p);
}
+static inline struct kbase_page_metadata *kbase_page_private(struct page *p)
+{
+ return (struct kbase_page_metadata *)page_private(p);
+}
+
+static inline dma_addr_t kbase_dma_addr(struct page *p)
+{
+ if (kbase_is_page_migration_enabled())
+ return kbase_page_private(p)->dma_addr;
+
+ return kbase_dma_addr_as_priv(p);
+}
+
+static inline dma_addr_t kbase_dma_addr_from_tagged(struct tagged_addr tagged_pa)
+{
+ phys_addr_t pa = as_phys_addr_t(tagged_pa);
+ struct page *page = pfn_to_page(PFN_DOWN(pa));
+ dma_addr_t dma_addr = (is_huge(tagged_pa) || is_partial(tagged_pa)) ?
+ kbase_dma_addr_as_priv(page) :
+ kbase_dma_addr(page);
+
+ return dma_addr;
+}
+
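Illustration only (the device pointer and page array are assumptions): the helper above lets a caller resolve the DMA address for cache maintenance regardless of whether migration metadata is attached to the page:

	dma_sync_single_for_cpu(kbdev->dev, kbase_dma_addr_from_tagged(pages[i]),
				PAGE_SIZE, DMA_BIDIRECTIONAL);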
/**
* kbase_flush_mmu_wqs() - Flush MMU workqueues.
* @kbdev: Device pointer.
@@ -2275,75 +2446,95 @@ int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages,
unsigned int *target_page_nr, size_t offset);
/**
- * kbase_reg_zone_end_pfn - return the end Page Frame Number of @zone
- * @zone: zone to query
+ * kbase_ctx_reg_zone_get_nolock - Get a zone from @kctx where the caller does
+ * not have @kctx 's region lock
+ * @kctx: Pointer to kbase context
+ * @zone: Zone identifier
*
- * Return: The end of the zone corresponding to @zone
+ * This should only be used in performance-critical paths where the code is
+ * resilient to a race with the zone changing, and only when the zone is tracked
+ * by the @kctx.
+ *
+ * Return: The zone corresponding to @zone
*/
-static inline u64 kbase_reg_zone_end_pfn(struct kbase_reg_zone *zone)
+static inline struct kbase_reg_zone *kbase_ctx_reg_zone_get_nolock(struct kbase_context *kctx,
+ enum kbase_memory_zone zone)
{
- return zone->base_pfn + zone->va_size_pages;
+ WARN_ON(!kbase_is_ctx_reg_zone(zone));
+ return &kctx->reg_zone[zone];
}
/**
- * kbase_ctx_reg_zone_init - initialize a zone in @kctx
+ * kbase_ctx_reg_zone_get - Get a memory zone from @kctx
* @kctx: Pointer to kbase context
- * @zone_bits: A KBASE_REG_ZONE_<...> to initialize
+ * @zone: Zone identifier
+ *
+ * Note that the zone is not refcounted, so there is no corresponding operation to
+ * put the zone back.
+ *
+ * Return: The zone corresponding to @zone
+ */
+static inline struct kbase_reg_zone *kbase_ctx_reg_zone_get(struct kbase_context *kctx,
+ enum kbase_memory_zone zone)
+{
+ lockdep_assert_held(&kctx->reg_lock);
+ return kbase_ctx_reg_zone_get_nolock(kctx, zone);
+}
+
+/**
+ * kbase_reg_zone_init - Initialize a zone in @kctx
+ * @kbdev: Pointer to kbase device in order to initialize the VA region cache
+ * @zone: Memory zone
+ * @id: Memory zone identifier to facilitate lookups
* @base_pfn: Page Frame Number in GPU virtual address space for the start of
* the Zone
* @va_size_pages: Size of the Zone in pages
+ *
+ * Return:
+ * * 0 on success
+ * * -ENOMEM on error
*/
-static inline void kbase_ctx_reg_zone_init(struct kbase_context *kctx,
- unsigned long zone_bits,
- u64 base_pfn, u64 va_size_pages)
+static inline int kbase_reg_zone_init(struct kbase_device *kbdev, struct kbase_reg_zone *zone,
+ enum kbase_memory_zone id, u64 base_pfn, u64 va_size_pages)
{
- struct kbase_reg_zone *zone;
+ struct kbase_va_region *reg;
- lockdep_assert_held(&kctx->reg_lock);
- WARN_ON(!kbase_is_ctx_reg_zone(zone_bits));
+ *zone = (struct kbase_reg_zone){ .reg_rbtree = RB_ROOT,
+ .base_pfn = base_pfn,
+ .va_size_pages = va_size_pages,
+ .id = id,
+ .cache = kbdev->va_region_slab };
+
+ if (unlikely(!va_size_pages))
+ return 0;
- zone = &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
- *zone = (struct kbase_reg_zone){
- .base_pfn = base_pfn, .va_size_pages = va_size_pages,
- };
+ reg = kbase_alloc_free_region(zone, base_pfn, va_size_pages);
+ if (unlikely(!reg))
+ return -ENOMEM;
+
+ kbase_region_tracker_insert(reg);
+
+ return 0;
}
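Hypothetical usage sketch (zone_id, base_pfn and va_size_pages are placeholders): a zone is initialised once and, on teardown, released with kbase_reg_zone_term():

	int err = kbase_reg_zone_init(kbdev, &kctx->reg_zone[zone_id], zone_id,
				      base_pfn, va_size_pages);
	if (err)
		return err;
	/* ... */
	kbase_reg_zone_term(&kctx->reg_zone[zone_id]);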
/**
- * kbase_ctx_reg_zone_get_nolock - get a zone from @kctx where the caller does
- * not have @kctx 's region lock
- * @kctx: Pointer to kbase context
- * @zone_bits: A KBASE_REG_ZONE_<...> to retrieve
- *
- * This should only be used in performance-critical paths where the code is
- * resilient to a race with the zone changing.
+ * kbase_reg_zone_end_pfn - return the end Page Frame Number of @zone
+ * @zone: zone to query
*
- * Return: The zone corresponding to @zone_bits
+ * Return: The end of the zone corresponding to @zone
*/
-static inline struct kbase_reg_zone *
-kbase_ctx_reg_zone_get_nolock(struct kbase_context *kctx,
- unsigned long zone_bits)
+static inline u64 kbase_reg_zone_end_pfn(struct kbase_reg_zone *zone)
{
- WARN_ON(!kbase_is_ctx_reg_zone(zone_bits));
-
- return &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
+ return zone->base_pfn + zone->va_size_pages;
}
/**
- * kbase_ctx_reg_zone_get - get a zone from @kctx
- * @kctx: Pointer to kbase context
- * @zone_bits: A KBASE_REG_ZONE_<...> to retrieve
- *
- * The get is not refcounted - there is no corresponding 'put' operation
- *
- * Return: The zone corresponding to @zone_bits
+ * kbase_reg_zone_term - Terminate the memory zone tracker
+ * @zone: Memory zone
*/
-static inline struct kbase_reg_zone *
-kbase_ctx_reg_zone_get(struct kbase_context *kctx, unsigned long zone_bits)
+static inline void kbase_reg_zone_term(struct kbase_reg_zone *zone)
{
- lockdep_assert_held(&kctx->reg_lock);
- WARN_ON(!kbase_is_ctx_reg_zone(zone_bits));
-
- return &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
+ kbase_region_tracker_erase_rbtree(&zone->reg_rbtree);
}
/**
@@ -2362,6 +2553,22 @@ static inline bool kbase_mem_allow_alloc(struct kbase_context *kctx)
}
/**
+ * kbase_mem_mmgrab - Wrapper function to take reference on mm_struct of current process
+ */
+static inline void kbase_mem_mmgrab(void)
+{
+ /* This merely takes a reference on the memory descriptor structure
+ * i.e. mm_struct of current process and not on its address space and
+ * so won't block the freeing of address space on process exit.
+ */
+#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ atomic_inc(&current->mm->mm_count);
+#else
+ mmgrab(current->mm);
+#endif
+}
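For completeness, a hedged sketch of the matching release; the wrapper name is hypothetical, while mmdrop() itself predates mmgrab() and is available on all kernel versions the #if above covers:

static inline void kbase_mem_mmdrop(struct mm_struct *mm)
{
	/* Drop the mm_count reference taken by kbase_mem_mmgrab(). */
	mmdrop(mm);
}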
+
+/**
* kbase_mem_group_id_get - Get group ID from flags
* @flags: Flags to pass to base_mem_alloc
*