From owner-svn-src-head@FreeBSD.ORG Fri Jun 29 18:15:57 2012
Message-Id: <201206291815.q5TIFv0A062702@svn.freebsd.org>
From: Alan Cox <alc@FreeBSD.org>
Date: Fri, 29 Jun 2012 18:15:57 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
X-SVN-Group: head
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Subject: svn commit: r237813 - head/sys/amd64/amd64
List-Id: SVN commit messages for the src tree for head/-current

Author: alc
Date: Fri Jun 29 18:15:56 2012
New Revision: 237813
URL: http://svn.freebsd.org/changeset/base/237813

Log:
  In r237592, I forgot that pmap_enter() might already hold a PV list lock at
  the point that it calls get_pv_entry().  Thus, pmap_enter()'s PV list lock
  pointer must be passed to get_pv_entry() for those rare occasions when
  get_pv_entry() calls reclaim_pv_chunk().

  Update some related comments.

Modified:
  head/sys/amd64/amd64/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Fri Jun 29 18:09:39 2012	(r237812)
+++ head/sys/amd64/amd64/pmap.c	Fri Jun 29 18:15:56 2012	(r237813)
@@ -264,7 +264,7 @@ static caddr_t crashdumpmap;
 
 static void	free_pv_chunk(struct pv_chunk *pc);
 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
-static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
+static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
 static int	popcnt_pc_map_elem(uint64_t elem);
 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
 static void	reserve_pv_entries(pmap_t pmap, int needed,
@@ -2102,6 +2102,8 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_
  * drastic measures to free some pages so we can allocate
  * another pv entry chunk.
  *
+ * Returns NULL if PV entries were reclaimed from the specified pmap.
+ *
  * We do not, however, unmap 2mpages because subsequent accesses will
  * allocate per-page pv entries until repromotion occurs, thereby
  * exacerbating the shortage of free pv entries.
@@ -2123,6 +2125,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
 
 	rw_assert(&pvh_global_lock, RA_LOCKED);
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
+	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
 	pmap = NULL;
 	free = m_pc = NULL;
 	TAILQ_INIT(&new_tail);
@@ -2287,16 +2290,19 @@ free_pv_chunk(struct pv_chunk *pc)
 }
 
 /*
- * get a new pv_entry, allocating a block from the system
- * when needed.
+ * Returns a new PV entry, allocating a new PV chunk from the system when
+ * needed.  If this PV chunk allocation fails and a PV list lock pointer was
+ * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
+ * returned.
+ *
+ * The given PV list lock may be released.
  */
 static pv_entry_t
-get_pv_entry(pmap_t pmap, boolean_t try)
+get_pv_entry(pmap_t pmap, struct rwlock **lockp)
 {
 	int bit, field;
 	pv_entry_t pv;
 	struct pv_chunk *pc;
-	struct rwlock *lock;
 	vm_page_t m;
 
 	rw_assert(&pvh_global_lock, RA_LOCKED);
@@ -2330,14 +2336,11 @@ retry:
 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED);
 	if (m == NULL) {
-		if (try) {
+		if (lockp == NULL) {
 			PV_STAT(pc_chunk_tryfail++);
 			return (NULL);
 		}
-		lock = NULL;
-		m = reclaim_pv_chunk(pmap, &lock);
-		if (lock != NULL)
-			rw_wunlock(lock);
+		m = reclaim_pv_chunk(pmap, lockp);
 		if (m == NULL)
 			goto retry;
 	}
@@ -2380,6 +2383,8 @@ popcnt_pc_map_elem(uint64_t elem)
 /*
  * Ensure that the number of spare PV entries in the specified pmap meets or
  * exceeds the given count, "needed".
+ *
+ * The given PV list lock may be released.
  */
 static void
 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
@@ -2391,6 +2396,7 @@ reserve_pv_entries(pmap_t pmap, int need
 
 	rw_assert(&pvh_global_lock, RA_LOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
 
 	/*
 	 * Newly allocated PV chunks must be stored in a private list until
@@ -2584,7 +2590,8 @@ pmap_pvh_free(struct md_page *pvh, pmap_
 }
 
 /*
- * Conditionally create a pv entry.
+ * Conditionally create the PV entry for a 4KB page mapping if the required
+ * memory can be allocated without resorting to reclamation.
  */
 static boolean_t
 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
@@ -2594,7 +2601,8 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm
 
 	rw_assert(&pvh_global_lock, RA_LOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
+	/* Pass NULL instead of the lock pointer to disable reclamation. */
+	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
 		pv->pv_va = va;
 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
@@ -2604,7 +2612,8 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm
 }
 
 /*
- * Create the pv entry for a 2MB page mapping.
+ * Conditionally create the PV entry for a 2MB page mapping if the required
+ * memory can be allocated without resorting to reclamation.
  */
 static boolean_t
 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
@@ -2614,7 +2623,9 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offse
 	pv_entry_t pv;
 
 	rw_assert(&pvh_global_lock, RA_LOCKED);
-	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	/* Pass NULL instead of the lock pointer to disable reclamation. */
+	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
 		pv->pv_va = va;
 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
 		pvh = pa_to_pvh(pa);
@@ -3513,7 +3524,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		if (pv == NULL)
-			pv = get_pv_entry(pmap, FALSE);
+			pv = get_pv_entry(pmap, &lock);
 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 		pv->pv_va = va;
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
@@ -3785,6 +3796,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 			mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
 			mpte->wire_count++;
 		} else {
+			/*
+			 * Pass NULL instead of the PV list lock
+			 * pointer, because we don't intend to sleep.
+			 */
 			mpte = _pmap_allocpte(pmap, ptepindex, NULL);
 			if (mpte == NULL)
 				return (mpte);
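
In short, the calling convention after this change is: get_pv_entry() takes the
caller's PV list lock pointer; a NULL pointer means "fail instead of
reclaiming", while a non-NULL pointer lets reclaim_pv_chunk() use, and possibly
release or switch, the lock the caller already holds.  Below is a minimal,
self-contained userland sketch of that convention.  The names (get_entry,
reclaim_entry) and the use of a pthread mutex in place of the kernel rwlock are
illustrative assumptions only; this is not code from pmap.c.

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

struct entry {
	int payload;
};

/*
 * Stand-in for reclaim_pv_chunk(): it may release *lockp and leave it
 * NULL, so the caller must not assume the lock it passed in is still
 * held on return.
 */
static struct entry *
reclaim_entry(pthread_mutex_t **lockp)
{

	if (*lockp != NULL) {
		pthread_mutex_unlock(*lockp);
		*lockp = NULL;		/* caller re-acquires as needed */
	}
	return (malloc(sizeof(struct entry)));	/* pretend reclamation worked */
}

/*
 * Stand-in for get_pv_entry(): lockp == NULL requests "try" semantics,
 * i.e. return NULL rather than reclaim; otherwise fall back to
 * reclamation, handing the caller's lock pointer down.
 */
static struct entry *
get_entry(pthread_mutex_t **lockp)
{
	struct entry *e;

	e = malloc(sizeof(struct entry));	/* the normal allocation path */
	if (e != NULL)
		return (e);
	if (lockp == NULL)
		return (NULL);			/* caller asked us not to reclaim */
	return (reclaim_entry(lockp));		/* may release *lockp */
}

int
main(void)
{
	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t *lock;
	struct entry *e;

	/* "Try" mode: no lock pointer, so no reclamation. */
	e = get_entry(NULL);
	free(e);

	/* pmap_enter()-style mode: pass the held lock down. */
	lock = &m;
	pthread_mutex_lock(lock);
	e = get_entry(&lock);
	if (lock != NULL)
		pthread_mutex_unlock(lock);
	free(e);
	return (0);
}

The two modes correspond to the callers in the diff above:
pmap_try_insert_pv_entry() and pmap_pv_insert_pde() pass NULL to keep the old
"try" semantics, while pmap_enter() passes &lock so that reclamation can
proceed with the PV list lock it may already hold.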