Date:      Mon, 25 Oct 2021 13:54:52 GMT
From:      Andrew Turner <andrew@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-branches@FreeBSD.org
Subject:   git: 39dbb494404f - stable/13 - Only demote when needed in the arm64 pmap_change_props_locked
Message-ID:  <202110251354.19PDsqVj000562@gitrepo.freebsd.org>

The branch stable/13 has been updated by andrew:

URL: https://cgit.FreeBSD.org/src/commit/?id=39dbb494404f511422a97274d5b55fae6ef4a6e2

commit 39dbb494404f511422a97274d5b55fae6ef4a6e2
Author:     Andrew Turner <andrew@FreeBSD.org>
AuthorDate: 2021-10-06 16:38:22 +0000
Commit:     Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2021-10-25 13:46:44 +0000

    Only demote when needed in the arm64 pmap_change_props_locked
    
    When changing page table properties, there is no need to demote a
    level 1 or level 2 block if we are changing the entire memory range
    that the block maps. In this case, just change the block directly.
    
    Reported by:    alc, kib, markj
    Sponsored by:   The FreeBSD Foundation
    Differential Revision: https://reviews.freebsd.org/D32339
    
    (cherry picked from commit 806a88e742002b0e82a4ea06f8e147f627947c2c)
---
 sys/arm64/arm64/pmap.c | 76 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 45 insertions(+), 31 deletions(-)

diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 3fa3a4b62711..176762b9bfff 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -6160,7 +6160,8 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
     int mode)
 {
 	vm_offset_t base, offset, tmpva;
-	pt_entry_t l3, *pte, *newpte;
+	vm_size_t pte_size;
+	pt_entry_t pte, *ptep, *newpte;
 	pt_entry_t bits, mask;
 	int lvl, rv;
 
@@ -6206,11 +6207,11 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
 	}
 
 	for (tmpva = base; tmpva < base + size; ) {
-		pte = pmap_pte(kernel_pmap, tmpva, &lvl);
-		if (pte == NULL)
+		ptep = pmap_pte(kernel_pmap, tmpva, &lvl);
+		if (ptep == NULL)
 			return (EINVAL);
 
-		if ((pmap_load(pte) & mask) == bits) {
+		if ((pmap_load(ptep) & mask) == bits) {
 			/*
 			 * We already have the correct attribute,
 			 * ignore this entry.
@@ -6237,47 +6238,60 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
 			default:
 				panic("Invalid DMAP table level: %d\n", lvl);
 			case 1:
-				newpte = pmap_demote_l1(kernel_pmap, pte,
+				if ((tmpva & L1_OFFSET) == 0 &&
+				    (base + size - tmpva) >= L1_SIZE) {
+					pte_size = L1_SIZE;
+					break;
+				}
+				newpte = pmap_demote_l1(kernel_pmap, ptep,
 				    tmpva & ~L1_OFFSET);
 				if (newpte == NULL)
 					return (EINVAL);
-				pte = pmap_l1_to_l2(pte, tmpva);
+				ptep = pmap_l1_to_l2(ptep, tmpva);
+				/* FALLTHROUGH */
 			case 2:
-				newpte = pmap_demote_l2(kernel_pmap, pte,
+				if ((tmpva & L2_OFFSET) == 0 &&
+				    (base + size - tmpva) >= L2_SIZE) {
+					pte_size = L2_SIZE;
+					break;
+				}
+				newpte = pmap_demote_l2(kernel_pmap, ptep,
 				    tmpva);
 				if (newpte == NULL)
 					return (EINVAL);
-				pte = pmap_l2_to_l3(pte, tmpva);
+				ptep = pmap_l2_to_l3(ptep, tmpva);
+				/* FALLTHROUGH */
 			case 3:
-				/* Update the entry */
-				l3 = pmap_load(pte);
-				l3 &= ~mask;
-				l3 |= bits;
+				pte_size = PAGE_SIZE;
+				break;
+			}
 
-				pmap_update_entry(kernel_pmap, pte, l3, tmpva,
-				    PAGE_SIZE);
+			/* Update the entry */
+			pte = pmap_load(ptep);
+			pte &= ~mask;
+			pte |= bits;
 
-				if (!VIRT_IN_DMAP(tmpva)) {
-					/*
-					 * Keep the DMAP memory in sync.
-					 */
-					rv = pmap_change_props_locked(
-					    PHYS_TO_DMAP(l3 & ~ATTR_MASK),
-					    L3_SIZE, prot, mode);
-					if (rv != 0)
-						return (rv);
-				}
+			pmap_update_entry(kernel_pmap, ptep, pte, tmpva,
+			    pte_size);
 
+			if (!VIRT_IN_DMAP(tmpva)) {
 				/*
-				 * If moving to a non-cacheable entry flush
-				 * the cache.
+				 * Keep the DMAP memory in sync.
 				 */
-				if (mode == VM_MEMATTR_UNCACHEABLE)
-					cpu_dcache_wbinv_range(tmpva, L3_SIZE);
-
-				break;
+				rv = pmap_change_props_locked(
+				    PHYS_TO_DMAP(pte & ~ATTR_MASK), pte_size,
+				    prot, mode);
+				if (rv != 0)
+					return (rv);
 			}
-			tmpva += PAGE_SIZE;
+
+			/*
+			 * If moving to a non-cacheable entry flush
+			 * the cache.
+			 */
+			if (mode == VM_MEMATTR_UNCACHEABLE)
+				cpu_dcache_wbinv_range(tmpva, pte_size);
+			tmpva += pte_size;
 		}
 	}
 
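For readers skimming the diff, the heart of the change is the coverage test
added at the top of the level 1 and level 2 cases. A minimal standalone
sketch of that test follows; the helper name change_covers_block and its
parameters are illustrative only and do not appear in the commit.

#include <sys/types.h>	/* vm_offset_t, vm_size_t on FreeBSD */
#include <stdbool.h>

/*
 * Hypothetical helper, not part of the commit: it restates the test the
 * patch adds before each demotion.  A level 1 or level 2 block mapping can
 * be updated in place only when the current virtual address sits on a
 * block boundary and the remainder of the requested range covers the whole
 * block; otherwise the block must be demoted to smaller mappings first.
 * The block_offset_mask/block_size pair stands in for L1_OFFSET/L1_SIZE or
 * L2_OFFSET/L2_SIZE from the arm64 pmap headers.
 */
static bool
change_covers_block(vm_offset_t tmpva, vm_offset_t base, vm_size_t size,
    vm_offset_t block_offset_mask, vm_size_t block_size)
{
	/* The address must be aligned to the start of the block... */
	if ((tmpva & block_offset_mask) != 0)
		return (false);
	/* ...and the tail of [base, base + size) must span the block. */
	return ((base + size - tmpva) >= block_size);
}

Called with L1_OFFSET/L1_SIZE (or L2_OFFSET/L2_SIZE), this corresponds to
the checks the patch inserts before pmap_demote_l1() and pmap_demote_l2():
when the test succeeds, pte_size is set and the block entry is updated in
place; when it fails, the code still demotes and falls through to the next
smaller level, as before.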


