commit 5f3266590ca2c329eb8dab158a723bdc7ba8dd8a
tree feac3212161ec0340fddd13023225c54f1e354c2
parent dd55a3e7d52a4b1a083768f653c4fc2780e630a5
Author:     Oliver Upton <oliver.upton@linux.dev>
AuthorDate: 2024-04-26 19:34:32 +0000
Commit:     Oliver Upton <oliver.upton@linux.dev>
CommitDate: 2024-04-29 23:37:54 -0700
KVM: arm64: Use MMU gather for unmap TLB invalidations
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
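
Note: the patch replaces per-PTE TLB invalidations in the stage-2 unmap walker with an MMU gather: the walker records the address range it unmaps and issues one deferred invalidation at the end. Below is a minimal standalone sketch of that pattern as a reading aid before the diff; only struct kvm_s2_gather and its start/end fields mirror the real code, while the u64 typedef, the helper names, and the printf stand-in for the actual TLBI are illustrative assumptions.

/*
 * Sketch of the MMU gather pattern this commit adopts, reduced to
 * plain C so it runs standalone. Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct kvm_s2_gather {
	u64 start;	/* lowest address unmapped so far */
	u64 end;	/* one past the highest address unmapped so far */
};

static void gather_init(struct kvm_s2_gather *tlb)
{
	tlb->start = ~(u64)0;
	tlb->end = 0;
}

/* Record one unmapped mapping instead of flushing it immediately. */
static void gather_remove(struct kvm_s2_gather *tlb, u64 addr, u64 size)
{
	if (addr < tlb->start)
		tlb->start = addr;
	if (addr + size > tlb->end)
		tlb->end = addr + size;
}

/* One range-based invalidation for the whole accumulated span. */
static void gather_flush(struct kvm_s2_gather *tlb)
{
	if (tlb->start >= tlb->end)
		return;	/* nothing was unmapped */
	printf("TLBI range [%#llx, %#llx)\n",
	       (unsigned long long)tlb->start, (unsigned long long)tlb->end);
	gather_init(tlb);
}

int main(void)
{
	struct kvm_s2_gather tlb;

	gather_init(&tlb);
	/* Pretend the unmap walker visited three 4KiB mappings. */
	gather_remove(&tlb, 0x1000, 0x1000);
	gather_remove(&tlb, 0x3000, 0x1000);
	gather_remove(&tlb, 0x2000, 0x1000);
	gather_flush(&tlb);	/* single TLBI instead of three */
	return 0;
}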
 arch/arm64/include/asm/kvm_tlb.h | 14
 arch/arm64/kvm/hyp/pgtable.c     | 26
 2 files changed, 15 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_tlb.h b/arch/arm64/include/asm/kvm_tlb.h
index 3c3e079a594939..ed58e34a6e8c3d 100644
--- a/arch/arm64/include/asm/kvm_tlb.h
+++ b/arch/arm64/include/asm/kvm_tlb.h
@@ -48,6 +48,16 @@ static inline gpa_t __kvm_s2_tlb_stride(struct kvm_s2_gather *tlb)
 	return kvm_granule_size(ttl);
 }
 
+static inline void kvm_s2_tlb_flush(struct kvm_s2_gather *tlb)
+{
+	kvm_call_hyp(__kvm_s2_tlb_flush, tlb);
+}
+
+static inline bool __kvm_s2_can_defer_table_flush(void)
+{
+	return is_hyp_code();
+}
+
 static inline void kvm_s2_tlb_remove_pte(struct kvm_s2_gather *tlb,
 					 const struct kvm_pgtable_visit_ctx *ctx)
 {
@@ -61,6 +71,10 @@ static inline void kvm_s2_tlb_remove_pte(struct kvm_s2_gather *tlb,
 
 	tlb->start = min(tlb->start, ctx->addr);
 	tlb->end = max(tlb->end, ctx->addr + kvm_granule_size(ctx->level));
+
+	if (!stage2_has_fwb(tlb->mmu->pgt) ||
+	    (!__kvm_s2_can_defer_table_flush() && kvm_pte_table(ctx->old, ctx->level)))
+		kvm_s2_tlb_flush(tlb);
 }
 
 #endif /* __ARM64_KVM_TLB_H__ */
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index e0eb8af8afffc6..1fd262f762fb7e 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -853,25 +853,11 @@ struct stage2_unmap_data {
 	struct kvm_s2_gather *tlb;
 };
 
-static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
-{
-	/*
-	 * If FEAT_TLBIRANGE is implemented, defer the individual
-	 * TLB invalidations until the entire walk is finished, and
-	 * then use the range-based TLBI instructions to do the
-	 * invalidations. Condition deferred TLB invalidation on the
-	 * system supporting FWB as the optimization is entirely
-	 * pointless when the unmap walker needs to perform CMOs.
-	 */
-	return system_supports_tlb_range() && stage2_has_fwb(pgt);
-}
-
 static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
 				 struct kvm_s2_mmu *mmu,
 				 struct kvm_pgtable_mm_ops *mm_ops)
 {
 	struct stage2_unmap_data *data = ctx->arg;
-	struct kvm_pgtable *pgt = data->pgt;
 
 	/*
 	 * Clear the existing PTE, and perform break-before-make if it was
@@ -882,14 +868,6 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
 		kvm_clear_pte(ctx->ptep);
 		kvm_s2_tlb_remove_pte(data->tlb, ctx);
-
-		if (kvm_pte_table(ctx->old, ctx->level)) {
-			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
-				     TLBI_TTL_UNKNOWN);
-		} else if (!stage2_unmap_defer_tlb_flush(pgt)) {
-			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
-				     ctx->level);
-		}
 	}
 
 	mm_ops->put_page(ctx->ptep);
 }
@@ -1175,9 +1153,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
 	};
 
 	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
-	if (stage2_unmap_defer_tlb_flush(pgt))
-		/* Perform the deferred TLB invalidations */
-		kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
+	kvm_s2_tlb_flush(&tlb);
 
 	return ret;
 }
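
Note: for readers tracing the condition added to kvm_s2_tlb_remove_pte(), a flush is still issued eagerly when the system lacks FWB (the walker must perform CMOs, so deferral buys nothing, per the comment the patch deletes) or when a table PTE is removed outside hyp code. Below is that decision restated as a standalone predicate; the boolean parameters stand in for stage2_has_fwb(), __kvm_s2_can_defer_table_flush(), and kvm_pte_table(), and the function name is hypothetical.

/*
 * Restatement of the new flush condition as a pure predicate.
 * A reading aid under the assumptions above, not kernel code.
 */
#include <assert.h>
#include <stdbool.h>

static bool must_flush_now(bool has_fwb, bool can_defer_table, bool is_table_pte)
{
	/* Without FWB the walker performs CMOs, so deferral buys nothing. */
	if (!has_fwb)
		return true;
	/* Table PTEs are invalidated eagerly unless running at hyp. */
	if (!can_defer_table && is_table_pte)
		return true;
	/* Otherwise accumulate into the gather; flush once after the walk. */
	return false;
}

int main(void)
{
	assert(must_flush_now(false, true, false));	/* no FWB: flush now */
	assert(must_flush_now(true, false, true));	/* table PTE, not hyp: flush now */
	assert(!must_flush_now(true, false, false));	/* leaf PTE: defer */
	assert(!must_flush_now(true, true, true));	/* hyp code: defer */
	return 0;
}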