Date:      Sun, 13 Jul 2014 16:27:58 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r268591 - in head/sys/powerpc: aim booke powerpc
Message-ID:  <201407131627.s6DGRwZe059179@svn.freebsd.org>

Author: alc
Date: Sun Jul 13 16:27:57 2014
New Revision: 268591
URL: http://svnweb.freebsd.org/changeset/base/268591

Log:
  Implement pmap_unwire().  See r268327 for the motivation behind this change.

Modified:
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/powerpc/powerpc/pmap_dispatch.c
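
For context, the machine-independent entry point implemented here for powerpc
is declared in vm/pmap.h, roughly as:

	void pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end);

A minimal caller sketch follows; the function name, the range variables, and
the KASSERT are illustrative placeholders and are not part of this commit:

	/*
	 * Illustrative only: clear the wired attribute on every valid
	 * mapping in [start, end).  Each valid mapping in the range must
	 * currently be wired, or the MD implementation panics.
	 */
	static void
	example_unwire_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
	{

		KASSERT(start < end, ("example_unwire_range: empty range"));
		pmap_unwire(pmap, start, end);
	}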

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Sun Jul 13 16:24:16 2014	(r268590)
+++ head/sys/powerpc/aim/mmu_oea.c	Sun Jul 13 16:27:57 2014	(r268591)
@@ -297,6 +297,7 @@ void moea_release(mmu_t, pmap_t);
 void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea_remove_all(mmu_t, vm_page_t);
 void moea_remove_write(mmu_t, vm_page_t);
+void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea_zero_page(mmu_t, vm_page_t);
 void moea_zero_page_area(mmu_t, vm_page_t, int, int);
 void moea_zero_page_idle(mmu_t, vm_page_t);
@@ -345,6 +346,7 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_remove_all,      	moea_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea_remove_write),
 	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
+	MMUMETHOD(mmu_unwire,		moea_unwire),
 	MMUMETHOD(mmu_zero_page,       	moea_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
@@ -1036,6 +1038,24 @@ moea_change_wiring(mmu_t mmu, pmap_t pm,
 }
 
 void
+moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+{
+	struct	pvo_entry key, *pvo;
+
+	PMAP_LOCK(pm);
+	key.pvo_vaddr = sva;
+	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+	    pvo != NULL && PVO_VADDR(pvo) < eva;
+	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+			panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
+		pvo->pvo_vaddr &= ~PVO_WIRED;
+		pm->pm_stats.wired_count--;
+	}
+	PMAP_UNLOCK(pm);
+}
+
+void
 moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
 {
 	vm_offset_t	dst;

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Sun Jul 13 16:24:16 2014	(r268590)
+++ head/sys/powerpc/aim/mmu_oea64.c	Sun Jul 13 16:27:57 2014	(r268591)
@@ -312,6 +312,7 @@ void moea64_remove(mmu_t, pmap_t, vm_off
 void moea64_remove_pages(mmu_t, pmap_t);
 void moea64_remove_all(mmu_t, vm_page_t);
 void moea64_remove_write(mmu_t, vm_page_t);
+void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_zero_page(mmu_t, vm_page_t);
 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
 void moea64_zero_page_idle(mmu_t, vm_page_t);
@@ -359,6 +360,7 @@ static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
 	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
+	MMUMETHOD(mmu_unwire,		moea64_unwire),
 	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
@@ -1076,6 +1078,41 @@ moea64_change_wiring(mmu_t mmu, pmap_t p
 	PMAP_UNLOCK(pm);
 }
 
+void
+moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+{
+	struct	pvo_entry key, *pvo;
+	uintptr_t pt;
+
+	LOCK_TABLE_RD();
+	PMAP_LOCK(pm);
+	key.pvo_vaddr = sva;
+	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+	    pvo != NULL && PVO_VADDR(pvo) < eva;
+	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
+			    pvo);
+		pvo->pvo_vaddr &= ~PVO_WIRED;
+		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
+			panic("moea64_unwire: pte %p is missing LPTE_WIRED",
+			    &pvo->pvo_pte.lpte);
+		pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
+		if ((pt = MOEA64_PVO_TO_PTE(mmu, pvo)) != -1) {
+			/*
+			 * The PTE's wired attribute is not a hardware
+			 * feature, so there is no need to invalidate any TLB
+			 * entries.
+			 */
+			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+			    pvo->pvo_vpn);
+		}
+		pm->pm_stats.wired_count--;
+	}
+	UNLOCK_TABLE_RD();
+	PMAP_UNLOCK(pm);
+}
+
 /*
  * This goes through and sets the physical address of our
  * special scratch PTE to the PA we want to zero or copy. Because

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Sun Jul 13 16:24:16 2014	(r268590)
+++ head/sys/powerpc/booke/pmap.c	Sun Jul 13 16:27:57 2014	(r268591)
@@ -306,6 +306,7 @@ static void		mmu_booke_release(mmu_t, pm
 static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 static void		mmu_booke_remove_all(mmu_t, vm_page_t);
 static void		mmu_booke_remove_write(mmu_t, vm_page_t);
+static void		mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 static void		mmu_booke_zero_page(mmu_t, vm_page_t);
 static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
 static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
@@ -361,6 +362,7 @@ static mmu_method_t mmu_booke_methods[] 
 	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
 	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
 	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
+	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
@@ -2435,6 +2437,36 @@ mmu_booke_change_wiring(mmu_t mmu, pmap_
 }
 
 /*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap.  Every valid mapping within that range must
+ * have the wired attribute set.  In contrast, invalid mappings cannot have
+ * the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature, so
+ * there is no need to invalidate any TLB entries.
+ */
+static void
+mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+	vm_offset_t va;
+	pte_t *pte;
+
+	PMAP_LOCK(pmap);
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		if ((pte = pte_find(mmu, pmap, va)) != NULL &&
+		    PTE_ISVALID(pte)) {
+			if (!PTE_ISWIRED(pte))
+				panic("mmu_booke_unwire: pte %p isn't wired",
+				    pte);
+			pte->flags &= ~PTE_WIRED;
+			pmap->pm_stats.wired_count--;
+		}
+	}
+	PMAP_UNLOCK(pmap);
+
+}
+
+/*
  * Return true if the pmap's pv is one of the first 16 pvs linked to from this
  * page.  This count may be changed upwards or downwards in the future; it is
  * only necessary that true be returned for a small subset of pmaps for proper
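
The contract documented in mmu_booke_unwire() above (every valid mapping in
the range must be wired; invalid mappings are skipped) means unwiring is not
idempotent.  A hypothetical misuse, for illustration only:

	pmap_unwire(pmap, sva, eva);	/* ok: every valid mapping is wired */
	pmap_unwire(pmap, sva, eva);	/* panics: "mmu_booke_unwire: pte %p isn't wired" */

Panicking, rather than silently skipping the mapping, keeps
pm_stats.wired_count from drifting out of sync with the actual number of
wired mappings.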

Modified: head/sys/powerpc/powerpc/mmu_if.m
==============================================================================
--- head/sys/powerpc/powerpc/mmu_if.m	Sun Jul 13 16:24:16 2014	(r268590)
+++ head/sys/powerpc/powerpc/mmu_if.m	Sun Jul 13 16:27:57 2014	(r268591)
@@ -628,6 +628,22 @@ METHOD void remove_pages {
 
 
 /**
+ * @brief Clear the wired attribute from the mappings for the specified range
+ * of addresses in the given pmap.
+ *
+ * @param _pmap		physical map
+ * @param _start	virtual range start
+ * @param _end		virtual range end
+ */
+METHOD void unwire {
+	mmu_t		_mmu;
+	pmap_t		_pmap;
+	vm_offset_t	_start;
+	vm_offset_t	_end;
+};
+
+
+/**
  * @brief Zero a physical page. It is not assumed that the page is mapped,
  * so a temporary (or direct) mapping may need to be used.
  *
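
The METHOD declaration added above is compiled by the kobj interface
generator (sys/tools/makeobjops.awk) into a typedef and an inline dispatch
stub in the generated mmu_if.h.  The exact output depends on the generator,
but it is roughly:

	typedef void mmu_unwire_t(mmu_t _mmu, pmap_t _pmap, vm_offset_t _start,
	    vm_offset_t _end);

	static __inline void MMU_UNWIRE(mmu_t _mmu, pmap_t _pmap,
	    vm_offset_t _start, vm_offset_t _end)
	{
		kobjop_t _m;

		KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_unwire);
		((mmu_unwire_t *)_m)(_mmu, _pmap, _start, _end);
	}

Each pmap module supplies its implementation through the
MMUMETHOD(mmu_unwire, ...) table entries added elsewhere in this commit, and
pmap_dispatch.c (below) calls MMU_UNWIRE() to reach whichever module is
active.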

Modified: head/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c	Sun Jul 13 16:24:16 2014	(r268590)
+++ head/sys/powerpc/powerpc/pmap_dispatch.c	Sun Jul 13 16:27:57 2014	(r268591)
@@ -361,6 +361,14 @@ pmap_remove_write(vm_page_t m)
 }
 
 void
+pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
+{
+
+	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
+	MMU_UNWIRE(mmu_obj, pmap, start, end);
+}
+
+void
 pmap_zero_page(vm_page_t m)
 {
 


