From owner-svn-src-head@FreeBSD.ORG  Mon May 24 14:26:57 2010
From: Alan Cox <alc@FreeBSD.org>
Date: Mon, 24 May 2010 14:26:57 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-head@freebsd.org
Message-Id: <201005241426.o4OEQv92025582@svn.freebsd.org>
Subject: svn commit: r208504 - in head/sys: amd64/amd64 arm/arm i386/i386
	i386/xen ia64/ia64 kern mips/mips powerpc/aim powerpc/booke
	powerpc/powerpc sparc64/sparc64 sun4v/sun4v sys vm

Author: alc
Date: Mon May 24 14:26:57 2010
New Revision: 208504

URL: http://svn.freebsd.org/changeset/base/208504

Log:
  Roughly half of a typical pmap_mincore() implementation is machine-
  independent code.  Move this code into mincore(), and eliminate the
  page queues lock from pmap_mincore().

  Push down the page queues lock into pmap_clear_modify(),
  pmap_clear_reference(), and pmap_is_modified().  Assert that these
  functions are never passed an unmanaged page.

  Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m:
  Contrary to what the comment says, pmap_mincore() is not simply an
  optimization.  Without a complete pmap_mincore() implementation,
  mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED
  because only the pmap can provide this information.

  Eliminate the page queues lock from vfs_setdirty_locked_object(),
  vm_pageout_clean(), vm_object_page_collect_flush(), and
  vm_object_page_clean().  Generally speaking, these are all accesses
  to the page's dirty field, which are synchronized by the containing
  vm object's lock.

  Reduce the scope of the page queues lock in vm_object_madvise() and
  vm_page_dontneed().
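  For reference, the machine-independent half that moves into mincore()
  can be reconstructed from the per-pmap code deleted below.  This is a
  sketch only: the helper name is hypothetical, since the actual
  vm_mmap.c hunk lies past the truncation point of this message.  Here
  "val" carries the bits the pmap derived from the PTE, "m" is the
  managed page, and the caller holds both the page's lock and its
  object's lock, as the new assertions below require.

	/*
	 * Hypothetical helper; mirrors the "modified/referenced by
	 * someone else" checks removed from each pmap_mincore() below.
	 */
	static int
	mincore_mi_attrs(vm_page_t m, int val)
	{

		/* Dirtiness recorded by another pmap or by the VM itself. */
		if ((val & MINCORE_MODIFIED_OTHER) == 0 &&
		    (m->dirty != 0 || pmap_is_modified(m)))
			val |= MINCORE_MODIFIED_OTHER;
		/* Likewise for the reference state. */
		if ((val & MINCORE_REFERENCED_OTHER) == 0 &&
		    ((m->flags & PG_REFERENCED) != 0 || pmap_is_referenced(m)))
			val |= MINCORE_REFERENCED_OTHER;
		return (val);
	}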
  Reviewed by:	kib (an earlier version)

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/kern/vfs_bio.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/powerpc/powerpc/pmap_dispatch.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/sun4v/sun4v/pmap.c
  head/sys/sys/pcpu.h
  head/sys/vm/pmap.h
  head/sys/vm/vm_mmap.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/amd64/amd64/pmap.c	Mon May 24 14:26:57 2010	(r208504)
@@ -4128,12 +4128,25 @@ pmap_remove_pages(pmap_t pmap)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
+
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no PTEs can have PG_M set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
 		return (FALSE);
-	if (pmap_is_modified_pvh(&m->md))
-		return (TRUE);
-	return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+	vm_page_lock_queues();
+	rv = pmap_is_modified_pvh(&m->md) ||
+	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -4384,9 +4397,20 @@ pmap_clear_modify(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		pmap = PV_PMAP(pv);
@@ -4432,6 +4456,7 @@ pmap_clear_modify(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+	vm_page_unlock_queues();
 }
 
 /*
@@ -4449,9 +4474,9 @@ pmap_clear_reference(vm_page_t m)
 	pt_entry_t *pte;
 	vm_offset_t va;
 
-	if ((m->flags & PG_FICTITIOUS) != 0)
-		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		pmap = PV_PMAP(pv);
@@ -4488,6 +4513,7 @@ pmap_clear_reference(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+	vm_page_unlock_queues();
 }
 
 /*
@@ -4897,70 +4923,49 @@ pmap_change_attr_locked(vm_offset_t va, 
  * perform the pmap work for mincore
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	pd_entry_t *pdep;
 	pt_entry_t pte;
 	vm_paddr_t pa;
-	vm_page_t m;
-	int val = 0;
-	
+	int val;
+
 	PMAP_LOCK(pmap);
+retry:
 	pdep = pmap_pde(pmap, addr);
 	if (pdep != NULL && (*pdep & PG_V)) {
 		if (*pdep & PG_PS) {
 			pte = *pdep;
-			val = MINCORE_SUPER;
 			/* Compute the physical address of the 4KB page. */
 			pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 			    PG_FRAME;
+			val = MINCORE_SUPER;
 		} else {
 			pte = *pmap_pde_to_pte(pdep, addr);
 			pa = pte & PG_FRAME;
+			val = 0;
 		}
 	} else {
 		pte = 0;
 		pa = 0;
+		val = 0;
 	}
-	PMAP_UNLOCK(pmap);
-
-	if (pte != 0) {
+	if ((pte & PG_V) != 0) {
 		val |= MINCORE_INCORE;
-		if ((pte & PG_MANAGED) == 0)
-			return (val);
-
-		m = PHYS_TO_VM_PAGE(pa);
-
-		/*
-		 * Modified by us
-		 */
 		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
-			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-		else {
-			/*
-			 * Modified by someone else
-			 */
-			vm_page_lock_queues();
-			if (m->dirty || pmap_is_modified(m))
-				val |= MINCORE_MODIFIED_OTHER;
-			vm_page_unlock_queues();
-		}
-		/*
-		 * Referenced by us
-		 */
-		if (pte & PG_A)
-			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-		else {
-			/*
-			 * Referenced by someone else
-			 */
-			vm_page_lock_queues();
-			if ((m->flags & PG_REFERENCED) ||
-			    pmap_is_referenced(m))
-				val |= MINCORE_REFERENCED_OTHER;
-			vm_page_unlock_queues();
-		}
-	}
+			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+		if ((pte & PG_A) != 0)
+			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+	}
+	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
+		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+			goto retry;
+	} else
+		PA_UNLOCK_COND(*locked_pa);
+	PMAP_UNLOCK(pmap);
 	return (val);
 }
 
Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/arm/arm/pmap.c	Mon May 24 14:26:57 2010	(r208504)
@@ -4475,6 +4475,8 @@ boolean_t
 pmap_is_modified(vm_page_t m)
 {
 
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
 	if (m->md.pvh_attrs & PVF_MOD)
 		return (TRUE);
 
@@ -4489,8 +4491,23 @@ void
 pmap_clear_modify(vm_page_t m)
 {
 
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no mappings can be modified.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
+		return;
+	vm_page_lock_queues();
 	if (m->md.pvh_attrs & PVF_MOD)
 		pmap_clearbit(m, PVF_MOD);
+	vm_page_unlock_queues();
 }
 
 
@@ -4517,8 +4534,12 @@ void
 pmap_clear_reference(vm_page_t m)
 {
 
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	if (m->md.pvh_attrs & PVF_REF)
 		pmap_clearbit(m, PVF_REF);
+	vm_page_unlock_queues();
 }
 
 
@@ -4551,7 +4572,7 @@ pmap_remove_write(vm_page_t m)
  * perform the pmap work for mincore
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	printf("pmap_mincore()\n");
 
Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/i386/i386/pmap.c	Mon May 24 14:26:57 2010	(r208504)
@@ -4294,12 +4294,25 @@ pmap_remove_pages(pmap_t pmap)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
+
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no PTEs can have PG_M set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
 		return (FALSE);
-	if (pmap_is_modified_pvh(&m->md))
-		return (TRUE);
-	return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+	vm_page_lock_queues();
+	rv = pmap_is_modified_pvh(&m->md) ||
+	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -4563,9 +4576,20 @@ pmap_clear_modify(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	sched_pin();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@@ -4623,6 +4647,7 @@ pmap_clear_modify(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*
@@ -4640,9 +4665,9 @@ pmap_clear_reference(vm_page_t m)
 	pt_entry_t *pte;
 	vm_offset_t va;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & PG_FICTITIOUS) != 0)
-		return;
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	sched_pin();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@@ -4686,6 +4711,7 @@ pmap_clear_reference(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*
@@ -4955,72 +4981,51 @@ pmap_change_attr(vm_offset_t va, vm_size
  * perform the pmap work for mincore
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	pd_entry_t *pdep;
 	pt_entry_t *ptep, pte;
 	vm_paddr_t pa;
-	vm_page_t m;
-	int val = 0;
-	
+	int val;
+
 	PMAP_LOCK(pmap);
+retry:
 	pdep = pmap_pde(pmap, addr);
 	if (*pdep != 0) {
 		if (*pdep & PG_PS) {
 			pte = *pdep;
-			val = MINCORE_SUPER;
 			/* Compute the physical address of the 4KB page. */
 			pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 			    PG_FRAME;
+			val = MINCORE_SUPER;
 		} else {
 			ptep = pmap_pte(pmap, addr);
 			pte = *ptep;
 			pmap_pte_release(ptep);
 			pa = pte & PG_FRAME;
+			val = 0;
 		}
 	} else {
 		pte = 0;
 		pa = 0;
+		val = 0;
 	}
-	PMAP_UNLOCK(pmap);
-
-	if (pte != 0) {
+	if ((pte & PG_V) != 0) {
 		val |= MINCORE_INCORE;
-		if ((pte & PG_MANAGED) == 0)
-			return (val);
-
-		m = PHYS_TO_VM_PAGE(pa);
-
-		/*
-		 * Modified by us
-		 */
 		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
-			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-		else {
-			/*
-			 * Modified by someone else
-			 */
-			vm_page_lock_queues();
-			if (m->dirty || pmap_is_modified(m))
-				val |= MINCORE_MODIFIED_OTHER;
-			vm_page_unlock_queues();
-		}
-		/*
-		 * Referenced by us
-		 */
-		if (pte & PG_A)
-			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-		else {
-			/*
-			 * Referenced by someone else
-			 */
-			vm_page_lock_queues();
-			if ((m->flags & PG_REFERENCED) ||
-			    pmap_is_referenced(m))
-				val |= MINCORE_REFERENCED_OTHER;
-			vm_page_unlock_queues();
-		}
-	}
+			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+		if ((pte & PG_A) != 0)
+			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+	}
+	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
+		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+			goto retry;
+	} else
+		PA_UNLOCK_COND(*locked_pa);
+	PMAP_UNLOCK(pmap);
 	return (val);
 }
 
Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/i386/xen/pmap.c	Mon May 24 14:26:57 2010	(r208504)
@@ -3663,12 +3663,21 @@ pmap_is_modified(vm_page_t m)
 	pmap_t pmap;
 	boolean_t rv;
 
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
 	rv = FALSE;
-	if (m->flags & PG_FICTITIOUS)
-		return (rv);
 
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no PTEs can have PG_M set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
+		return (rv);
+	vm_page_lock_queues();
 	sched_pin();
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -3681,6 +3690,7 @@ pmap_is_modified(vm_page_t m)
 	if (*PMAP1)
 		PT_SET_MA(PADDR1, 0);
 	sched_unpin();
+	vm_page_unlock_queues();
 	return (rv);
 }
 
@@ -3887,9 +3897,20 @@ pmap_clear_modify(vm_page_t m)
 	pmap_t pmap;
 	pt_entry_t *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	sched_pin();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -3907,6 +3928,7 @@ pmap_clear_modify(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*
@@ -3921,9 +3943,9 @@ pmap_clear_reference(vm_page_t m)
 	pmap_t pmap;
 	pt_entry_t *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & PG_FICTITIOUS) != 0)
-		return;
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	sched_pin();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -3941,6 +3963,7 @@ pmap_clear_reference(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*
@@ -4133,60 +4156,36 @@ pmap_change_attr(va, size, mode)
  * perform the pmap work for mincore
 */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	pt_entry_t *ptep, pte;
-	vm_page_t m;
-	int val = 0;
+	vm_paddr_t pa;
+	int val;
 	
 	PMAP_LOCK(pmap);
+retry:
 	ptep = pmap_pte(pmap, addr);
 	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
 	pmap_pte_release(ptep);
-	PMAP_UNLOCK(pmap);
-
-	if (pte != 0) {
-		vm_paddr_t pa;
-
-		val = MINCORE_INCORE;
-		if ((pte & PG_MANAGED) == 0)
-			return val;
-
+	val = 0;
+	if ((pte & PG_V) != 0) {
+		val |= MINCORE_INCORE;
+		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+		if ((pte & PG_A) != 0)
+			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+	}
+	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
 		pa = pte & PG_FRAME;
-
-		m = PHYS_TO_VM_PAGE(pa);
-
-		/*
-		 * Modified by us
-		 */
-		if (pte & PG_M)
-			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-		else {
-			/*
-			 * Modified by someone else
-			 */
-			vm_page_lock_queues();
-			if (m->dirty || pmap_is_modified(m))
-				val |= MINCORE_MODIFIED_OTHER;
-			vm_page_unlock_queues();
-		}
-		/*
-		 * Referenced by us
-		 */
-		if (pte & PG_A)
-			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-		else {
-			/*
-			 * Referenced by someone else
-			 */
-			vm_page_lock_queues();
-			if ((m->flags & PG_REFERENCED) ||
-			    pmap_is_referenced(m))
-				val |= MINCORE_REFERENCED_OTHER;
-			vm_page_unlock_queues();
-		}
-	}
-	return val;
+		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+			goto retry;
+	} else
+		PA_UNLOCK_COND(*locked_pa);
+	PMAP_UNLOCK(pmap);
+	return (val);
 }
 
 void
 
Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/ia64/ia64/pmap.c	Mon May 24 14:26:57 2010	(r208504)
@@ -1981,10 +1981,20 @@ pmap_is_modified(vm_page_t m)
 	pv_entry_t pv;
 	boolean_t rv;
 
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
 	rv = FALSE;
-	if (m->flags & PG_FICTITIOUS)
-		return (rv);
 
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no PTEs can be dirty.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
+		return (rv);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		PMAP_LOCK(pv->pv_pmap);
 		oldpmap = pmap_switch(pv->pv_pmap);
@@ -1996,7 +2006,7 @@ pmap_is_modified(vm_page_t m)
 		if (rv)
 			break;
 	}
-
+	vm_page_unlock_queues();
 	return (rv);
 }
 
@@ -2058,9 +2068,20 @@ pmap_clear_modify(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return;
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
 
+	/*
+	 * If the page is not PG_WRITEABLE, then no PTEs can be modified.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
+		return;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		PMAP_LOCK(pv->pv_pmap);
 		oldpmap = pmap_switch(pv->pv_pmap);
@@ -2073,6 +2094,7 @@ pmap_clear_modify(vm_page_t m)
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
+	vm_page_unlock_queues();
 }
 
 /*
@@ -2087,9 +2109,9 @@ pmap_clear_reference(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return;
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		PMAP_LOCK(pv->pv_pmap);
 		oldpmap = pmap_switch(pv->pv_pmap);
@@ -2102,6 +2124,7 @@ pmap_clear_reference(vm_page_t m)
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
+	vm_page_unlock_queues();
 }
 
 /*
@@ -2178,13 +2201,15 @@ pmap_unmapdev(vm_offset_t va, vm_size_t 
  * perform the pmap work for mincore
 */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	pmap_t oldpmap;
 	struct ia64_lpte *pte, tpte;
-	int val = 0;
+	vm_paddr_t pa;
+	int val;
 
 	PMAP_LOCK(pmap);
+retry:
 	oldpmap = pmap_switch(pmap);
 	pte = pmap_find_vhpt(addr);
 	if (pte != NULL) {
@@ -2192,53 +2217,27 @@ pmap_mincore(pmap_t pmap, vm_offset_t ad
 		pte = &tpte;
 	}
 	pmap_switch(oldpmap);
-	PMAP_UNLOCK(pmap);
-
-	if (pte == NULL)
-		return 0;
-
-	if (pmap_present(pte)) {
-		vm_page_t m;
-		vm_offset_t pa;
-
-		val = MINCORE_INCORE;
-		if (!pmap_managed(pte))
-			return val;
-
+	if (pte == NULL || !pmap_present(pte)) {
+		val = 0;
+		goto out;
+	}
+	val = MINCORE_INCORE;
+	if (pmap_dirty(pte))
+		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+	if (pmap_accessed(pte))
+		val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+	    pmap_managed(pte)) {
 		pa = pmap_ppn(pte);
-
-		m = PHYS_TO_VM_PAGE(pa);
-
-		/*
-		 * Modified by us
-		 */
-		if (pmap_dirty(pte))
-			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-		else {
-			/*
-			 * Modified by someone
-			 */
-			vm_page_lock_queues();
-			if (pmap_is_modified(m))
-				val |= MINCORE_MODIFIED_OTHER;
-			vm_page_unlock_queues();
-		}
-		/*
-		 * Referenced by us
-		 */
-		if (pmap_accessed(pte))
-			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-		else {
-			/*
-			 * Referenced by someone
-			 */
-			vm_page_lock_queues();
-			if (pmap_is_referenced(m))
-				val |= MINCORE_REFERENCED_OTHER;
-			vm_page_unlock_queues();
-		}
-	}
-	return val;
+		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+			goto retry;
+	} else
+out:
+		PA_UNLOCK_COND(*locked_pa);
+	PMAP_UNLOCK(pmap);
+	return (val);
 }
 
 void
 
Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/kern/vfs_bio.c	Mon May 24 14:26:57 2010	(r208504)
@@ -2443,7 +2443,6 @@ vfs_setdirty_locked_object(struct buf *b
 	vm_offset_t boffset;
 	vm_offset_t eoffset;
 
-	vm_page_lock_queues();
 	/*
 	 * test the pages to see if they have been modified directly
 	 * by users through the VM system.
@@ -2469,7 +2468,6 @@ vfs_setdirty_locked_object(struct buf *b
 	}
 	eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
-	vm_page_unlock_queues();
 
 	/*
 	 * Fit it to the buffer.
 	 */
 
Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/mips/mips/pmap.c	Mon May 24 14:26:57 2010	(r208504)
@@ -2586,13 +2586,27 @@ pmap_ts_referenced(vm_page_t m)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
-	if (m->flags & PG_FICTITIOUS)
-		return FALSE;
+	boolean_t rv;
+
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
 
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no PTEs can have PTE_M set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
+		return (FALSE);
+	vm_page_lock_queues();
 	if (m->md.pv_flags & PV_TABLE_MOD)
-		return TRUE;
+		rv = TRUE;
 	else
-		return pmap_testbit(m, PTE_M);
+		rv = pmap_testbit(m, PTE_M);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /* N/C */
@@ -2625,13 +2639,26 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 void
 pmap_clear_modify(vm_page_t m)
 {
-	if (m->flags & PG_FICTITIOUS)
+
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_M set.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	if (m->md.pv_flags & PV_TABLE_MOD) {
 		pmap_changebit(m, PTE_M, FALSE);
 		m->md.pv_flags &= ~PV_TABLE_MOD;
 	}
+	vm_page_unlock_queues();
 }
 
 /*
@@ -2656,13 +2683,14 @@ pmap_is_referenced(vm_page_t m)
 void
 pmap_clear_reference(vm_page_t m)
 {
-	if (m->flags & PG_FICTITIOUS)
-		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	if (m->md.pv_flags & PV_TABLE_REF) {
 		m->md.pv_flags &= ~PV_TABLE_REF;
 	}
+	vm_page_unlock_queues();
 }
 
 /*
@@ -2733,51 +2761,47 @@ pmap_unmapdev(vm_offset_t va, vm_size_t 
  * perform the pmap work for mincore
 */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	pt_entry_t *ptep, pte;
 	vm_page_t m;
-	int val = 0;
+	vm_offset_t pa;
+	int val;
+	boolean_t managed;
 
 	PMAP_LOCK(pmap);
+retry:
 	ptep = pmap_pte(pmap, addr);
 	pte = (ptep != NULL) ? *ptep : 0;
-	PMAP_UNLOCK(pmap);
-
-	if (mips_pg_v(pte)) {
-		vm_offset_t pa;
-
-		val = MINCORE_INCORE;
-		pa = mips_tlbpfn_to_paddr(pte);
-		if (!page_is_managed(pa))
-			return val;
-
-		m = PHYS_TO_VM_PAGE(pa);
-
+	if (!mips_pg_v(pte)) {
+		val = 0;
+		goto out;
+	}
+	val = MINCORE_INCORE;
+	if ((pte & PTE_M) != 0)
+		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+	pa = mips_tlbpfn_to_paddr(pte);
+	managed = page_is_managed(pa);
+	if (managed) {
 		/*
-		 * Modified by us
+		 * This may falsely report the given address as
+		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
+		 * per-PTE reference information, it is impossible to
+		 * determine if the address is MINCORE_REFERENCED.
 		 */
-		if (pte & PTE_M)
-			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
-		/*
-		 * Modified by someone
-		 */
-		else {
-			vm_page_lock_queues();
-			if (m->dirty || pmap_is_modified(m))
-				val |= MINCORE_MODIFIED_OTHER;
-			vm_page_unlock_queues();
-		}
-		/*
-		 * Referenced by us or someone
-		 */
-		vm_page_lock_queues();
-		if ((m->flags & PG_REFERENCED) || pmap_is_referenced(m))
+		m = PHYS_TO_VM_PAGE(pa);
+		if ((m->flags & PG_REFERENCED) != 0)
 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
-		vm_page_unlock_queues();
 	}
-	return val;
+	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
+		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+			goto retry;
+	} else
+out:
+		PA_UNLOCK_COND(*locked_pa);
+	PMAP_UNLOCK(pmap);
+	return (val);
 }
 
 void
 
Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Mon May 24 13:44:39 2010	(r208503)
+++ head/sys/powerpc/aim/mmu_oea.c	Mon May 24 14:26:57 2010	(r208504)
@@ -1290,29 +1290,57 @@ moea_is_referenced(mmu_t mmu, vm_page_t 
 boolean_t
 moea_is_modified(mmu_t mmu, vm_page_t m)
 {
+	boolean_t rv;
 
-	if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0)
-		return (FALSE);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea_is_modified: page %p is not managed", m));
 
-	return (moea_query_bit(m, PTE_CHG));
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no PTEs can have PTE_CHG set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
+		return (FALSE);
+	vm_page_lock_queues();
+	rv = moea_query_bit(m, PTE_CHG);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 void
 moea_clear_reference(mmu_t mmu, vm_page_t m)
 {
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return;
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	moea_clear_bit(m, PTE_REF, NULL);
+	vm_page_unlock_queues();
 }
 
 void
 moea_clear_modify(mmu_t mmu, vm_page_t m)
 {
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("moea_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
+	 * set.  If the object containing the page is locked and the page is
+	 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
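
The remaining machine-independent changes (vm_mmap.c, vm_object.c,
vm_page.c, and vm_pageout.c) fall past the truncation point above.  For
orientation, a caller of the new pmap_mincore() interface follows
roughly the pattern below; this is a sketch, not the literal mincore()
code, and the vm object locking that the real code arranges around
these checks is elided for brevity:

	vm_paddr_t locked_pa;
	vm_page_t m;
	int mincoreinfo;

	locked_pa = 0;
	mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
	if (locked_pa != 0) {
		/*
		 * pmap_mincore() returned with the page locked, so
		 * "PHYS_TO_VM_PAGE(locked_pa)->object" is stable and the
		 * machine-independent dirty and reference state can be
		 * consulted for the MINCORE_*_OTHER bits.
		 */
		m = PHYS_TO_VM_PAGE(locked_pa);
		if (m->dirty != 0)
			mincoreinfo |= MINCORE_MODIFIED_OTHER;
		if ((m->flags & PG_REFERENCED) != 0)
			mincoreinfo |= MINCORE_REFERENCED_OTHER;
		vm_page_unlock(m);
	}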