From owner-p4-projects@FreeBSD.ORG Thu Oct 21 04:25:48 2010
Date: Thu, 21 Oct 2010 04:25:47 GMT
Message-Id: <201010210425.o9L4PlxF001573@skunkworks.freebsd.org>
From: Oleksandr Tymoshenko <gonzo@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 184946 for review

http://p4web.freebsd.org/@@184946?ac=10

Change 184946 by gonzo@gonzo_figaro on 2010/10/21 04:25:44

	- Fix a pmap-related panic at the beginning of boot-up
	- Improve the shape of the AVR32 pmap by bringing over changes
	  from the MIPS code

Affected files ...

.. //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#24 edit

Differences ...
==== //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#24 (text+ko) ====

@@ -136,6 +136,9 @@
 	    bit_offset(SYS, MMUCR, S) |
 	    bit_offset(SYS, MMUCR, E) |
 	    bit_offset(SYS, MMUCR, I));
+	/*
+	 * TODO: check for I bit cleared instead of nops
+	 */
 	nop(); nop(); nop(); nop();
 	nop(); nop(); nop(); nop();
 }
@@ -208,6 +211,7 @@
 	PMAP_LOCK_INIT(pmap);
 
 	/* allocate the page directory page */
+	/* XXX: check the allocation */
 	ptdpg = vm_page_alloc(NULL, 512, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 
@@ -342,14 +346,21 @@
 
 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
-	return ((m->flags & PG_FICTITIOUS) == 0 &&
-	    (m->md.pv_flags & PV_TABLE_REF) != 0);
+	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
 }
 
 void
 pmap_clear_reference(vm_page_t m)
 {
-	avr32_impl();
+
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
+	if (m->md.pv_flags & PV_TABLE_REF) {
+		m->md.pv_flags &= ~PV_TABLE_REF;
+	}
+	vm_page_unlock_queues();
+
 }
 
 void
@@ -407,6 +418,7 @@
 
 	ent = pmap_pte(kernel_pmap, va);
 	*ent = 0;
+	pmap_invalidate_page(kernel_pmap, va);
 }
 
 /*
@@ -575,6 +587,7 @@
 {
 	// Not really sure what to do here, need to look better into it, but the
 	// kernel should have all the pages tables needed to grow within the P3 segment
+	panic("%s", __func__);
 }
 
 /*
@@ -724,9 +737,8 @@
 	}
 	if (prot & VM_PROT_WRITE) {
 		newpte |= PTE_PERM_WRITE;
-		// XXX: Check what's the problem with
-		// managed pages and PG_WRITEABLE flag
-		vm_page_flag_set(m, PG_WRITEABLE);
+		if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+			vm_page_flag_set(m, PG_WRITEABLE);
 	}
 	if (prot & VM_PROT_EXECUTE) {
 		newpte |= PTE_PERM_EXECUTE;
@@ -902,6 +914,9 @@
 		newpte |= PTE_MANAGED;
 	}
 	*pte = newpte;
+	/*
+	 * XXX: check for kernel pmap?
+	 */
 
 	return (mpte);
 }
@@ -1453,7 +1468,10 @@
 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
     vm_pindex_t pindex, vm_size_t size)
 {
-	avr32_impl();
+
+	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
+	    ("pmap_object_init_pt: non-device object"));
 }
 
 static vm_page_t
@@ -1487,6 +1505,7 @@
 		 */
 		return (NULL);
 	}
+	// XXX: Check if it is really required
 	if ((m->flags & PG_ZERO) == 0) {
 		pmap_zero_page(m);
 	}
@@ -1581,12 +1600,17 @@
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
-		if (pmap == pv->pv_pmap && va == pv->pv_va) {
-			break;
+	if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
+		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+			if (pmap == pv->pv_pmap && va == pv->pv_va)
+				break;
+		}
+	} else {
+		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
+			if (va == pv->pv_va)
+				break;
 		}
 	}
-
 	if (pv != NULL) {
 		TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 		pvh->pv_list_count--;
@@ -1594,6 +1618,7 @@
 	}
 	return (pv);
 }
+
 static void
 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 {
@@ -1638,6 +1663,7 @@
 static pv_entry_t
 get_pv_entry(pmap_t locked_pmap)
 {
+	printf("--> get_pv_entry\n");
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pv_entry_count++;
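
For reviewers who have not looked at the MIPS-derived pv layout, the sketch
below models the shorter-list lookup heuristic that the pmap_pvh_remove()
hunk above brings over: each pv_entry is linked both into its page's pv_list
and into its pmap's pm_pvlist, and the lookup walks whichever list is
expected to be shorter. This is a stand-alone user-space model only; the
struct layouts and the pvh_lookup() helper are simplified assumptions for
illustration, not the actual sys/avr32 definitions.

/*
 * Minimal model of the pv-entry lookup heuristic (compiles in user space).
 * The structures are simplified stand-ins, not the real kernel ones.
 */
#include <sys/queue.h>
#include <stdio.h>

typedef unsigned long vm_offset_t;

struct pmap;

typedef struct pv_entry {
	struct pmap	*pv_pmap;	/* pmap that owns the mapping */
	vm_offset_t	 pv_va;		/* virtual address of the mapping */
	TAILQ_ENTRY(pv_entry) pv_list;	/* link on the page's list */
	TAILQ_ENTRY(pv_entry) pv_plist;	/* link on the pmap's list */
} *pv_entry_t;

struct md_page {
	int	pv_list_count;			/* # of mappings of this page */
	TAILQ_HEAD(, pv_entry) pv_list;		/* all pv entries for the page */
};

struct pmap {
	TAILQ_HEAD(, pv_entry) pm_pvlist;	/* all pv entries in this pmap */
	struct {
		long	resident_count;		/* pages mapped by this pmap */
	} pm_stats;
};

static pv_entry_t
pvh_lookup(struct md_page *pvh, struct pmap *pmap, vm_offset_t va)
{
	pv_entry_t pv = NULL;

	if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
		/*
		 * The page has fewer mappings than the pmap has resident
		 * pages: walk the page's list, matching on pmap and va.
		 */
		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		/*
		 * Otherwise walk the pmap's list; within a single pmap the
		 * virtual address alone identifies the entry.
		 */
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			if (va == pv->pv_va)
				break;
		}
	}
	return (pv);	/* NULL if the walk finished without a match */
}

int
main(void)
{
	struct pmap pm = { .pm_stats = { .resident_count = 1 } };
	struct md_page page = { .pv_list_count = 1 };
	struct pv_entry pv = { .pv_pmap = &pm, .pv_va = 0x80001000UL };

	TAILQ_INIT(&pm.pm_pvlist);
	TAILQ_INIT(&page.pv_list);
	TAILQ_INSERT_TAIL(&page.pv_list, &pv, pv_list);
	TAILQ_INSERT_TAIL(&pm.pm_pvlist, &pv, pv_plist);

	/* pv_list_count == resident_count, so the per-pmap list is walked. */
	printf("found %p, expected %p\n",
	    (void *)pvh_lookup(&page, &pm, 0x80001000UL), (void *)&pv);
	return (0);
}

Because both counters are already maintained for other purposes, comparing
them costs nothing, which is what makes the heuristic attractive; the same
trade-off appears in the MIPS pmap code this change borrows from.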