Date:      Sun, 24 Aug 2014 07:53:15 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r270439 - in stable/10/sys: amd64/amd64 arm/arm i386/i386 i386/xen ia64/ia64 mips/mips powerpc/aim powerpc/booke powerpc/include powerpc/powerpc sparc64/sparc64 vm
Message-ID:  <201408240753.s7O7rFxL051819@svn.freebsd.org>

Author: kib
Date: Sun Aug 24 07:53:15 2014
New Revision: 270439
URL: http://svnweb.freebsd.org/changeset/base/270439

Log:
  Merge the changes to pmap_enter(9) for sleep-less operation (requested
  by flag).  The ia64 pmap.c changes are a direct commit, since ia64 has
  been removed from head.
  
  MFC r269368 (by alc):
  Retire PVO_EXECUTABLE.
  
  MFC r269728:
  Change the pmap_enter(9) interface to take a flags parameter and a
  superpage mapping size (currently unused).
  
  MFC r269759 (by alc):
  Update the text of a KASSERT() to reflect the changes in r269728.
  
  MFC r269822 (by alc):
  Change {_,}pmap_allocpte() so that they look for the flag
  PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
  to sleep on page table page allocation.
  
  MFC r270151 (by alc):
  Replace KASSERT that no PV list locks are held with a conditional
  unlock.
  
  Reviewed by:	alc
  Approved by:	re (gjb)
  Sponsored by:	The FreeBSD Foundation
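
  For orientation, the interface change being merged can be summarized by
  contrasting the two prototypes; both are taken from the per-architecture
  hunks below, while the comments are editorial additions rather than part
  of the committed sources.

	/*
	 * Old interface: access bits and wiring are separate parameters,
	 * and the function may always sleep for a page table page, so it
	 * returns nothing.
	 */
	void	pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access,
		    vm_page_t m, vm_prot_t prot, boolean_t wired);

	/*
	 * New interface: "access" and "wired" are folded into "flags"
	 * together with PMAP_ENTER_NOSLEEP and PMAP_ENTER_WIRED, "psind"
	 * names a superpage mapping size (currently unused), and the
	 * function returns KERN_SUCCESS or, when PMAP_ENTER_NOSLEEP is set
	 * and a page table page cannot be allocated without sleeping,
	 * KERN_RESOURCE_SHORTAGE.
	 */
	int	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
		    vm_prot_t prot, u_int flags, int8_t psind);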

Modified:
  stable/10/sys/amd64/amd64/pmap.c
  stable/10/sys/arm/arm/pmap-v6.c
  stable/10/sys/arm/arm/pmap.c
  stable/10/sys/i386/i386/pmap.c
  stable/10/sys/i386/xen/pmap.c
  stable/10/sys/ia64/ia64/pmap.c
  stable/10/sys/mips/mips/pmap.c
  stable/10/sys/powerpc/aim/mmu_oea.c
  stable/10/sys/powerpc/aim/mmu_oea64.c
  stable/10/sys/powerpc/booke/pmap.c
  stable/10/sys/powerpc/include/pmap.h
  stable/10/sys/powerpc/powerpc/mmu_if.m
  stable/10/sys/powerpc/powerpc/pmap_dispatch.c
  stable/10/sys/sparc64/sparc64/pmap.c
  stable/10/sys/vm/pmap.h
  stable/10/sys/vm/vm_fault.c
  stable/10/sys/vm/vm_kern.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/10/sys/amd64/amd64/pmap.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/amd64/amd64/pmap.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -4116,9 +4116,9 @@ setpte:
  *	or lose information.  That is, this routine must actually
  *	insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
 	struct rwlock *lock;
 	pd_entry_t *pde;
@@ -4127,6 +4127,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	pv_entry_t pv;
 	vm_paddr_t opa, pa;
 	vm_page_t mpte, om;
+	boolean_t nosleep;
 
 	PG_A = pmap_accessed_bit(pmap);
 	PG_G = pmap_global_bit(pmap);
@@ -4143,18 +4144,18 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-		VM_OBJECT_ASSERT_WLOCKED(m->object);
+		VM_OBJECT_ASSERT_LOCKED(m->object);
 	pa = VM_PAGE_TO_PHYS(m);
 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
-	if ((access & VM_PROT_WRITE) != 0)
+	if ((flags & VM_PROT_WRITE) != 0)
 		newpte |= PG_M;
 	if ((prot & VM_PROT_WRITE) != 0)
 		newpte |= PG_RW;
 	KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
-	    ("pmap_enter: access includes VM_PROT_WRITE but prot doesn't"));
+	    ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		newpte |= pg_nx;
-	if (wired)
+	if ((flags & PMAP_ENTER_WIRED) != 0)
 		newpte |= PG_W;
 	if (va < VM_MAXUSER_ADDRESS)
 		newpte |= PG_U;
@@ -4196,7 +4197,16 @@ retry:
 		 * Here if the pte page isn't mapped, or if it has been
 		 * deallocated.
 		 */
-		mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
+		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+		mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
+		    nosleep ? NULL : &lock);
+		if (mpte == NULL && nosleep) {
+			if (lock != NULL)
+				rw_wunlock(lock);
+			rw_runlock(&pvh_global_lock);
+			PMAP_UNLOCK(pmap);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
 		goto retry;
 	} else
 		panic("pmap_enter: invalid page directory va=%#lx", va);
@@ -4328,6 +4338,7 @@ unchanged:
 		rw_wunlock(lock);
 	rw_runlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
+	return (KERN_SUCCESS);
 }
 
 /*

Modified: stable/10/sys/arm/arm/pmap-v6.c
==============================================================================
--- stable/10/sys/arm/arm/pmap-v6.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/arm/arm/pmap-v6.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -231,8 +231,8 @@ static boolean_t	pmap_pv_insert_section(
 static struct pv_entry	*pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
 static int		pmap_pvh_wired_mappings(struct md_page *, int);
 
-static void		pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
-    vm_page_t, vm_prot_t, boolean_t, int);
+static int		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int);
 static vm_paddr_t	pmap_extract_locked(pmap_t pmap, vm_offset_t va);
 static void		pmap_alloc_l1(pmap_t);
 static void		pmap_free_l1(pmap_t);
@@ -2944,35 +2944,38 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
  *	insert this page into the given map NOW.
  */
 
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
 	struct l2_bucket *l2b;
+	int rv;
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
-	pmap_enter_locked(pmap, va, access, m, prot, wired, M_WAITOK);
-	/*
-	 * If both the l2b_occupancy and the reservation are fully
-	 * populated, then attempt promotion.
-	 */
-	l2b = pmap_get_l2_bucket(pmap, va);
-	if ((l2b != NULL) && (l2b->l2b_occupancy == L2_PTE_NUM_TOTAL) &&
-	    sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
-	    vm_reserv_level_iffullpop(m) == 0)
-		pmap_promote_section(pmap, va);
-
+	rv = pmap_enter_locked(pmap, va, m, prot, flags);
+	if (rv == KERN_SUCCESS) {
+		/*
+		 * If both the l2b_occupancy and the reservation are fully
+		 * populated, then attempt promotion.
+		 */
+		l2b = pmap_get_l2_bucket(pmap, va);
+		if (l2b != NULL && l2b->l2b_occupancy == L2_PTE_NUM_TOTAL &&
+		    sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+		    vm_reserv_level_iffullpop(m) == 0)
+			pmap_promote_section(pmap, va);
+	}
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);
+	return (rv);
 }
 
 /*
  *	The pvh global and pmap locks must be held.
  */
-static void
-pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired, int flags)
+static int
+pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags)
 {
 	struct l2_bucket *l2b = NULL;
 	struct vm_page *om;
@@ -2990,9 +2993,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 		pa = systempage.pv_pa;
 		m = NULL;
 	} else {
-		KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-		    vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
-		    ("pmap_enter_locked: page %p is not busy", m));
+		if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+			VM_OBJECT_ASSERT_LOCKED(m->object);
 		pa = VM_PAGE_TO_PHYS(m);
 	}
 
@@ -3013,12 +3015,12 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 
 	if (prot & VM_PROT_WRITE)
 		nflags |= PVF_WRITE;
-	if (wired)
+	if ((flags & PMAP_ENTER_WIRED) != 0)
 		nflags |= PVF_WIRED;
 
 	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
-	    "prot = %x, wired = %x\n", (uint32_t) pmap, va, (uint32_t) m,
-	    prot, wired));
+	    "prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
+	    prot, flags));
 
 	if (pmap == pmap_kernel()) {
 		l2b = pmap_get_l2_bucket(pmap, va);
@@ -3028,7 +3030,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 do_l2b_alloc:
 		l2b = pmap_alloc_l2_bucket(pmap, va);
 		if (l2b == NULL) {
-			if (flags & M_WAITOK) {
+			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 				PMAP_UNLOCK(pmap);
 				rw_wunlock(&pvh_global_lock);
 				VM_WAIT;
@@ -3036,7 +3038,7 @@ do_l2b_alloc:
 				PMAP_LOCK(pmap);
 				goto do_l2b_alloc;
 			}
-			return;
+			return (KERN_RESOURCE_SHORTAGE);
 		}
 	}
 
@@ -3195,6 +3197,7 @@ validate:
 
 	if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
 		cpu_icache_sync_range(va, PAGE_SIZE);
+	return (KERN_SUCCESS);
 }
 
 /*
@@ -3216,13 +3219,12 @@ pmap_enter_object(pmap_t pmap, vm_offset
 	vm_offset_t va;
 	vm_page_t m;
 	vm_pindex_t diff, psize;
-	vm_prot_t access;
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
 	psize = atop(end - start);
 	m = m_start;
-	access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+	prot &= VM_PROT_READ | VM_PROT_EXECUTE;
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
@@ -3232,8 +3234,8 @@ pmap_enter_object(pmap_t pmap, vm_offset
 		    pmap_enter_section(pmap, va, m, prot))
 			m = &m[L1_S_SIZE / PAGE_SIZE - 1];
 		else
-			pmap_enter_locked(pmap, va, access, m, prot,
-			    FALSE, M_NOWAIT);
+			pmap_enter_locked(pmap, va, m, prot,
+			    PMAP_ENTER_NOSLEEP);
 		m = TAILQ_NEXT(m, listq);
 	}
 	PMAP_UNLOCK(pmap);
@@ -3252,12 +3254,11 @@ pmap_enter_object(pmap_t pmap, vm_offset
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
-	vm_prot_t access;
 
-	access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+	prot &= VM_PROT_READ | VM_PROT_EXECUTE;
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
-	pmap_enter_locked(pmap, va, access, m, prot, FALSE, M_NOWAIT);
+	pmap_enter_locked(pmap, va, m, prot, PMAP_ENTER_NOSLEEP);
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);
 }
@@ -3488,8 +3489,8 @@ pmap_pinit(pmap_t pmap)
 	pmap->pm_stats.resident_count = 1;
 	if (vector_page < KERNBASE) {
 		pmap_enter(pmap, vector_page,
-		    VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
-		    VM_PROT_READ, 1);
+		    PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
+		    PMAP_ENTER_WIRED, 0);
 	}
 	return (1);
 }

Modified: stable/10/sys/arm/arm/pmap.c
==============================================================================
--- stable/10/sys/arm/arm/pmap.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/arm/arm/pmap.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -199,8 +199,8 @@ extern int last_fault_code;
 static void pmap_free_pv_entry (pv_entry_t);
 static pv_entry_t pmap_get_pv_entry(void);
 
-static void		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-    vm_prot_t, boolean_t, int);
+static int		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int);
 static vm_paddr_t	pmap_extract_locked(pmap_t pmap, vm_offset_t va);
 static void		pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
 static void		pmap_alloc_l1(pmap_t);
@@ -3204,24 +3204,26 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
  *	insert this page into the given map NOW.
  */
 
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
+	int rv;
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
-	pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
+	rv = pmap_enter_locked(pmap, va, m, prot, flags);
 	rw_wunlock(&pvh_global_lock);
  	PMAP_UNLOCK(pmap);
+	return (rv);
 }
 
 /*
  *	The pvh global and pmap locks must be held.
  */
-static void
+static int
 pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired, int flags)
+    u_int flags)
 {
 	struct l2_bucket *l2b = NULL;
 	struct vm_page *opg;
@@ -3237,9 +3239,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 		pa = systempage.pv_pa;
 		m = NULL;
 	} else {
-		KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-		    vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
-		    ("pmap_enter_locked: page %p is not busy", m));
+		if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+			VM_OBJECT_ASSERT_LOCKED(m->object);
 		pa = VM_PAGE_TO_PHYS(m);
 	}
 	nflags = 0;
@@ -3247,10 +3248,10 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 		nflags |= PVF_WRITE;
 	if (prot & VM_PROT_EXECUTE)
 		nflags |= PVF_EXEC;
-	if (wired)
+	if ((flags & PMAP_ENTER_WIRED) != 0)
 		nflags |= PVF_WIRED;
 	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
-	    "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
+	    "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags));
 
 	if (pmap == pmap_kernel()) {
 		l2b = pmap_get_l2_bucket(pmap, va);
@@ -3260,7 +3261,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 do_l2b_alloc:
 		l2b = pmap_alloc_l2_bucket(pmap, va);
 		if (l2b == NULL) {
-			if (flags & M_WAITOK) {
+			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 				PMAP_UNLOCK(pmap);
 				rw_wunlock(&pvh_global_lock);
 				VM_WAIT;
@@ -3268,7 +3269,7 @@ do_l2b_alloc:
 				PMAP_LOCK(pmap);
 				goto do_l2b_alloc;
 			}
-			return;
+			return (KERN_RESOURCE_SHORTAGE);
 		}
 	}
 
@@ -3482,6 +3483,7 @@ do_l2b_alloc:
 		if (m)
 			pmap_fix_cache(m, pmap, va);
 	}
+	return (KERN_SUCCESS);
 }
 
 /*
@@ -3511,7 +3513,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
-		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
+		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP);
 		m = TAILQ_NEXT(m, listq);
 	}
 	rw_wunlock(&pvh_global_lock);
@@ -3534,7 +3536,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
 	rw_wlock(&pvh_global_lock);
  	PMAP_LOCK(pmap);
 	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    FALSE, M_NOWAIT);
+	    PMAP_ENTER_NOSLEEP);
 	rw_wunlock(&pvh_global_lock);
  	PMAP_UNLOCK(pmap);
 }
@@ -3746,9 +3748,8 @@ pmap_pinit(pmap_t pmap)
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 	pmap->pm_stats.resident_count = 1;
 	if (vector_page < KERNBASE) {
-		pmap_enter(pmap, vector_page,
-		    VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
-		    VM_PROT_READ, 1);
+		pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
+		    VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0);
 	}
 	return (1);
 }

Modified: stable/10/sys/i386/i386/pmap.c
==============================================================================
--- stable/10/sys/i386/i386/pmap.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/i386/i386/pmap.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -331,9 +331,9 @@ static void pmap_update_pde(pmap_t pmap,
     pd_entry_t newpde);
 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
@@ -1818,21 +1818,17 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
 {
 	vm_paddr_t ptepa;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Allocate a page table page.
 	 */
 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
-		if (flags & M_WAITOK) {
+		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
 			VM_WAIT;
@@ -1864,16 +1860,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepin
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
 	u_int ptepindex;
 	pd_entry_t ptepa;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Calculate pagetable page index
 	 */
@@ -1906,7 +1898,7 @@ retry:
 		 * been deallocated. 
 		 */
 		m = _pmap_allocpte(pmap, ptepindex, flags);
-		if (m == NULL && (flags & M_WAITOK))
+		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
 			goto retry;
 	}
 	return (m);
@@ -3458,9 +3450,9 @@ setpte:
  *	or lose information.  That is, this routine must actually
  *	insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind)
 {
 	pd_entry_t *pde;
 	pt_entry_t *pte;
@@ -3468,17 +3460,18 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	pv_entry_t pv;
 	vm_paddr_t opa, pa;
 	vm_page_t mpte, om;
-	boolean_t invlva;
+	boolean_t invlva, wired;
 
 	va = trunc_page(va);
+	mpte = NULL;
+	wired = (flags & PMAP_ENTER_WIRED) != 0;
+
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
 	    va));
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-		VM_OBJECT_ASSERT_WLOCKED(m->object);
-
-	mpte = NULL;
+		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
@@ -3489,7 +3482,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	 * resident, we are creating it here.
 	 */
 	if (va < VM_MAXUSER_ADDRESS) {
-		mpte = pmap_allocpte(pmap, va, M_WAITOK);
+		mpte = pmap_allocpte(pmap, va, flags);
+		if (mpte == NULL) {
+			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+			    ("pmap_allocpte failed with sleep allowed"));
+			sched_unpin();
+			rw_wunlock(&pvh_global_lock);
+			PMAP_UNLOCK(pmap);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
 	}
 
 	pde = pmap_pde(pmap, va);
@@ -3607,7 +3608,7 @@ validate:
 	 */
 	if ((origpte & ~(PG_M|PG_A)) != newpte) {
 		newpte |= PG_A;
-		if ((access & VM_PROT_WRITE) != 0)
+		if ((flags & VM_PROT_WRITE) != 0)
 			newpte |= PG_M;
 		if (origpte & PG_V) {
 			invlva = FALSE;
@@ -3652,6 +3653,7 @@ validate:
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
+	return (KERN_SUCCESS);
 }
 
 /*
@@ -3817,7 +3819,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 				mpte->wire_count++;
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex,
-				    M_NOWAIT);
+				    PMAP_ENTER_NOSLEEP);
 				if (mpte == NULL)
 					return (mpte);
 			}
@@ -4102,7 +4104,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 			 */
 			if ((ptetemp & PG_MANAGED) != 0) {
 				dstmpte = pmap_allocpte(dst_pmap, addr,
-				    M_NOWAIT);
+				    PMAP_ENTER_NOSLEEP);
 				if (dstmpte == NULL)
 					goto out;
 				dst_pte = pmap_pte_quick(dst_pmap, addr);

Modified: stable/10/sys/i386/xen/pmap.c
==============================================================================
--- stable/10/sys/i386/xen/pmap.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/i386/xen/pmap.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -298,9 +298,9 @@ static void pmap_remove_entry(struct pma
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
     vm_page_t m);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
@@ -1546,21 +1546,17 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
 {
 	vm_paddr_t ptema;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Allocate a page table page.
 	 */
 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
-		if (flags & M_WAITOK) {
+		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
 			VM_WAIT;
@@ -1595,16 +1591,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepin
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
 	u_int ptepindex;
 	pd_entry_t ptema;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Calculate pagetable page index
 	 */
@@ -1644,7 +1636,7 @@ retry:
 		CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
 		    pmap, va, flags);
 		m = _pmap_allocpte(pmap, ptepindex, flags);
-		if (m == NULL && (flags & M_WAITOK))
+		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
 			goto retry;
 
 		KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
@@ -2643,9 +2635,9 @@ retry:
  *	or lose information.  That is, this routine must actually
  *	insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
 	pd_entry_t *pde;
 	pt_entry_t *pte;
@@ -2653,19 +2645,21 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	pv_entry_t pv;
 	vm_paddr_t opa, pa;
 	vm_page_t mpte, om;
-	boolean_t invlva;
+	boolean_t invlva, wired;
 
-	CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
-	    pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
+	CTR5(KTR_PMAP,
+	    "pmap_enter: pmap=%08p va=0x%08x ma=0x%08x prot=0x%x flags=0x%x",
+	    pmap, va, VM_PAGE_TO_MACH(m), prot, flags);
 	va = trunc_page(va);
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
 	    va));
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-		VM_OBJECT_ASSERT_WLOCKED(m->object);
+		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	mpte = NULL;
+	wired = (flags & PMAP_ENTER_WIRED) != 0;
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
@@ -2676,7 +2670,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	 * resident, we are creating it here.
 	 */
 	if (va < VM_MAXUSER_ADDRESS) {
-		mpte = pmap_allocpte(pmap, va, M_WAITOK);
+		mpte = pmap_allocpte(pmap, va, flags);
+		if (mpte == NULL) {
+			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+			    ("pmap_allocpte failed with sleep allowed"));
+			sched_unpin();
+			rw_wunlock(&pvh_global_lock);
+			PMAP_UNLOCK(pmap);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
 	}
 
 	pde = pmap_pde(pmap, va);
@@ -2842,6 +2844,7 @@ validate:
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
+	return (KERN_SUCCESS);
 }
 
 /*
@@ -2996,7 +2999,7 @@ pmap_enter_quick_locked(multicall_entry_
 				mpte->wire_count++;
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex,
-				    M_NOWAIT);
+				    PMAP_ENTER_NOSLEEP);
 				if (mpte == NULL)
 					return (mpte);
 			}
@@ -3287,7 +3290,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 			 */
 			if ((ptetemp & PG_MANAGED) != 0) {
 				dstmpte = pmap_allocpte(dst_pmap, addr,
-				    M_NOWAIT);
+				    PMAP_ENTER_NOSLEEP);
 				if (dstmpte == NULL)
 					goto out;
 				dst_pte = pmap_pte_quick(dst_pmap, addr);

Modified: stable/10/sys/ia64/ia64/pmap.c
==============================================================================
--- stable/10/sys/ia64/ia64/pmap.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/ia64/ia64/pmap.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -1692,20 +1692,21 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
  *	or lose information.  That is, this routine must actually
  *	insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
 	pmap_t oldpmap;
 	vm_offset_t pa;
 	vm_offset_t opa;
 	struct ia64_lpte origpte;
 	struct ia64_lpte *pte;
-	boolean_t icache_inval, managed;
+	boolean_t icache_inval, managed, wired;
 
-	CTR6(KTR_PMAP, "pmap_enter(pm=%p, va=%#lx, acc=%#x, m=%p, prot=%#x, "
-	    "wired=%u)", pmap, va, access, m, prot, wired);
+	CTR5(KTR_PMAP, "pmap_enter(pm=%p, va=%#lx, m=%p, prot=%#x, "
+	    "flags=%u)", pmap, va, m, prot, flags);
 
+	wired = (flags & PMAP_ENTER_WIRED) != 0;
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
@@ -1722,6 +1723,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pmap);
 		rw_wunlock(&pvh_global_lock);
+		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+			return (KERN_RESOURCE_SHORTAGE);
 		VM_WAIT;
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pmap);
@@ -1815,6 +1818,7 @@ validate:
 	rw_wunlock(&pvh_global_lock);
 	pmap_switch(oldpmap);
 	PMAP_UNLOCK(pmap);
+	return (KERN_SUCCESS);
 }
 
 /*

Modified: stable/10/sys/mips/mips/pmap.c
==============================================================================
--- stable/10/sys/mips/mips/pmap.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/mips/mips/pmap.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -177,8 +177,8 @@ static void pmap_invalidate_all(pmap_t p
 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
-static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
 static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);
 
@@ -1094,20 +1094,16 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
 {
 	vm_offset_t pageva;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Find or fabricate a new pagetable page
 	 */
 	if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
-		if (flags & M_WAITOK) {
+		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
 			pmap_grow_direct_page_cache();
@@ -1164,16 +1160,12 @@ _pmap_allocpte(pmap_t pmap, unsigned pte
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
 	unsigned ptepindex;
 	pd_entry_t *pde;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Calculate pagetable page index
 	 */
@@ -1197,7 +1189,7 @@ retry:
 		 * deallocated.
 		 */
 		m = _pmap_allocpte(pmap, ptepindex, flags);
-		if (m == NULL && (flags & M_WAITOK))
+		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
 			goto retry;
 	}
 	return (m);
@@ -1994,9 +1986,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
  *	or lose information.  That is, this routine must actually
  *	insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
 	vm_paddr_t pa, opa;
 	pt_entry_t *pte;
@@ -2009,11 +2001,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m),
-	    ("pmap_enter: page %p is not busy", m));
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+		VM_OBJECT_ASSERT_LOCKED(m->object);
 	pa = VM_PAGE_TO_PHYS(m);
-	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot);
-	if (wired)
+	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
+	if ((flags & PMAP_ENTER_WIRED) != 0)
 		newpte |= PTE_W;
 	if (is_kernel_pmap(pmap))
 		newpte |= PTE_G;
@@ -2032,7 +2024,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	 * creating it here.
 	 */
 	if (va < VM_MAXUSER_ADDRESS) {
-		mpte = pmap_allocpte(pmap, va, M_WAITOK);
+		mpte = pmap_allocpte(pmap, va, flags);
+		if (mpte == NULL) {
+			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+			    ("pmap_allocpte failed with sleep allowed"));
+			rw_wunlock(&pvh_global_lock);
+			PMAP_UNLOCK(pmap);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
 	}
 	pte = pmap_pte(pmap, va);
 
@@ -2057,9 +2056,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 		 * are valid mappings in them. Hence, if a user page is
 		 * wired, the PT page will be also.
 		 */
-		if (wired && !pte_test(&origpte, PTE_W))
+		if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
 			pmap->pm_stats.wired_count++;
-		else if (!wired && pte_test(&origpte, PTE_W))
+		else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
+		    PTE_W))
 			pmap->pm_stats.wired_count--;
 
 		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
@@ -2123,7 +2123,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	/*
 	 * Increment counters
 	 */
-	if (wired)
+	if (pte_test(&newpte, PTE_W))
 		pmap->pm_stats.wired_count++;
 
 validate:
@@ -2170,6 +2170,7 @@ validate:
 	}
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
+	return (KERN_SUCCESS);
 }
 
 /*
@@ -2235,7 +2236,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 				mpte->wire_count++;
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex,
-				    M_NOWAIT);
+				    PMAP_ENTER_NOSLEEP);
 				if (mpte == NULL)
 					return (mpte);
 			}

Modified: stable/10/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- stable/10/sys/powerpc/aim/mmu_oea.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/powerpc/aim/mmu_oea.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -258,8 +258,8 @@ static struct	pte *moea_pvo_to_pte(const
 /*
  * Utility routines.
  */
-static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-			    vm_prot_t, boolean_t);
+static int		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+			    vm_prot_t, u_int, int8_t);
 static void		moea_syncicache(vm_offset_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
 static u_int		moea_clear_bit(vm_page_t, int);
@@ -274,7 +274,8 @@ void moea_clear_modify(mmu_t, vm_page_t)
 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+    int8_t);
 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -1104,16 +1105,25 @@ moea_zero_page_idle(mmu_t mmu, vm_page_t
  * target pmap with the protection requested.  If specified the page
  * will be wired down.
  */
-void
+int
 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-	   boolean_t wired)
+    u_int flags, int8_t psind)
 {
+	int error;
 
-	rw_wlock(&pvh_global_lock);
-	PMAP_LOCK(pmap);
-	moea_enter_locked(pmap, va, m, prot, wired);
-	rw_wunlock(&pvh_global_lock);
-	PMAP_UNLOCK(pmap);
+	for (;;) {
+		rw_wlock(&pvh_global_lock);
+		PMAP_LOCK(pmap);
+		error = moea_enter_locked(pmap, va, m, prot, flags, psind);
+		rw_wunlock(&pvh_global_lock);
+		PMAP_UNLOCK(pmap);
+		if (error != ENOMEM)
+			return (KERN_SUCCESS);
+		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+			return (KERN_RESOURCE_SHORTAGE);
+		VM_OBJECT_ASSERT_UNLOCKED(m->object);
+		VM_WAIT;
+	}
 }
 
 /*
@@ -1123,9 +1133,9 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_of
  *
  * The page queues and pmap must be locked.
  */
-static void
+static int
 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+    u_int flags, int8_t psind __unused)
 {
 	struct		pvo_head *pvo_head;
 	uma_zone_t	zone;
@@ -1167,10 +1177,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
 	} else
 		pte_lo |= PTE_BR;
 
-	if (prot & VM_PROT_EXECUTE)
-		pvo_flags |= PVO_EXECUTABLE;
-
-	if (wired)
+	if ((flags & PMAP_ENTER_WIRED) != 0)
 		pvo_flags |= PVO_WIRED;
 
 	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
@@ -1185,6 +1192,8 @@ moea_enter_locked(pmap_t pmap, vm_offset
 	if (pmap != kernel_pmap && error == ENOENT &&
 	    (pte_lo & (PTE_I | PTE_G)) == 0)
 		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
+
+	return (error);
 }
 
 /*
@@ -1214,7 +1223,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, 
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea_enter_locked(pm, start + ptoa(diff), m, prot &
-		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+		    (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
 		m = TAILQ_NEXT(m, listq);
 	}
 	rw_wunlock(&pvh_global_lock);
@@ -1229,7 +1238,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, v
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    FALSE);
+	    0, 0);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1725,8 +1734,6 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_of
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
-		if ((prot & VM_PROT_EXECUTE) == 0)
-			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
 
 		/*
 		 * Grab the PTE pointer before we diddle with the cached PTE
@@ -1968,8 +1975,6 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zon
 	pvo->pvo_pmap = pm;
 	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
 	pvo->pvo_vaddr &= ~ADDR_POFF;
-	if (flags & VM_PROT_EXECUTE)
-		pvo->pvo_vaddr |= PVO_EXECUTABLE;
 	if (flags & PVO_WIRED)
 		pvo->pvo_vaddr |= PVO_WIRED;
 	if (pvo_head != &moea_pvo_kunmanaged)

Modified: stable/10/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/10/sys/powerpc/aim/mmu_oea64.c	Sun Aug 24 02:07:34 2014	(r270438)
+++ stable/10/sys/powerpc/aim/mmu_oea64.c	Sun Aug 24 07:53:15 2014	(r270439)
@@ -267,7 +267,7 @@ int		moea64_large_page_shift = 0;
  * PVO calls.
  */
 static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
-		    vm_offset_t, vm_offset_t, uint64_t, int);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
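The sys/vm/pmap.h, vm_fault.c and vm_kern.c portions of the diff fall past
the truncation point above.  As a rough, illustrative sketch only (not the
actual vm_fault.c change, and with placeholder helper names), a caller that
cannot hold its locks across a sleep is expected to pass PMAP_ENTER_NOSLEEP
and handle the new failure return itself, along these lines:

	/*
	 * Hypothetical caller pattern.  drop_locks()/reacquire_locks() are
	 * placeholders for whatever locks the caller must not hold while
	 * sleeping; the retry-after-VM_WAIT policy mirrors what the
	 * per-architecture pmaps above do internally when sleeping is
	 * allowed.
	 */
	for (;;) {
		rv = pmap_enter(pmap, va, m, prot,
		    access | (wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_NOSLEEP, 0);
		if (rv == KERN_SUCCESS)
			break;
		KASSERT(rv == KERN_RESOURCE_SHORTAGE,
		    ("pmap_enter: unexpected return value %d", rv));
		drop_locks();		/* placeholder */
		VM_WAIT;		/* wait for free pages */
		reacquire_locks();	/* placeholder */
	}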


