Date:      Wed, 7 Jul 2010 23:35:19 GMT
From:      Oleksandr Tymoshenko <gonzo@FreeBSD.org>
To:        Perforce Change Reviews <perforce@FreeBSD.org>
Subject:   PERFORCE change 180619 for review
Message-ID:  <201007072335.o67NZJj3072224@repoman.freebsd.org>

http://p4web.freebsd.org/@@180619?ac=10

Change 180619 by gonzo@gonzo_figaro on 2010/07/07 23:34:35

	Unbreak the kernel build. I tried to merge alc's changes
	    from HEAD (locking, mostly), but the overall state of
	    AVR32's pmap is a wreck. This module needs more love.
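
For context, the merged locking changes follow the usual FreeBSD
convention of taking the page-queues lock before the per-pmap lock
around pv-list updates (see the pmap_enter_quick() and
pmap_enter_object() hunks below).  A minimal userland sketch of that
ordering, with pthread mutexes standing in for the kernel locks; this
is illustrative only, not part of the change:

	#include <pthread.h>

	/* Stand-ins for vm_page_queue_mtx and a per-pmap lock. */
	static pthread_mutex_t page_queues_mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t pmap_mtx = PTHREAD_MUTEX_INITIALIZER;

	static void
	enter_mapping(void)
	{
		/* Lock order: page queues first, then the pmap. */
		pthread_mutex_lock(&page_queues_mtx);
		pthread_mutex_lock(&pmap_mtx);
		/* ... PTE and pv-list updates would go here ... */
		pthread_mutex_unlock(&pmap_mtx);
		pthread_mutex_unlock(&page_queues_mtx);
	}

	int
	main(void)
	{
		enter_mapping();
		return (0);
	}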

Affected files ...

.. //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#23 edit
.. //depot/projects/avr32/src/sys/avr32/include/pmap.h#10 edit

Differences ...

==== //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#23 (text+ko) ====

@@ -43,13 +43,15 @@
 
 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
-static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
-static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va);
 static void free_pv_entry(pv_entry_t pv);
-static pv_entry_t get_pv_entry(void);
+static pv_entry_t get_pv_entry(pmap_t locked_pmap);
+static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
+static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
+    vm_offset_t va);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
     vm_page_t *free);
 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free);
+static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
 static __inline int pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
@@ -253,13 +255,22 @@
 {
 	pv_entry_t pv;
 	pt_entry_t *pte;
-	boolean_t rv;
+	boolean_t rv = FALSE;
+
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
 
-	rv = FALSE;
-        if (m->flags & PG_FICTITIOUS) {
-		return (rv);
-	}
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no PTEs can have PTE_M set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
+		return (FALSE);
 
+	vm_page_lock_queues();
 	sched_pin();
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -272,6 +283,8 @@
 		}
         }
 	sched_unpin();
+	vm_page_unlock_queues();
+
 	return (rv);
 }
 
@@ -281,11 +294,21 @@
 	pv_entry_t pv;
 	pt_entry_t *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (m->flags & PG_FICTITIOUS) {
-                return;
-	}
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_M set.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
+		return;
 
+	vm_page_lock_queues();
 	sched_pin();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -297,6 +320,7 @@
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 int
@@ -306,6 +330,22 @@
 	return (0);
 }
 
+/*
+ *	pmap_is_referenced:
+ *
+ *	Return whether or not the specified physical page was referenced
+ *	in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_referenced: page %p is not managed", m));
+
+	return ((m->flags & PG_FICTITIOUS) == 0 &&
+		(m->md.pv_flags & PV_TABLE_REF) != 0);
+}
+
 void
 pmap_clear_reference(vm_page_t m)
 {
@@ -515,8 +555,18 @@
 pmap_page_init(vm_page_t m)
 {
 	TAILQ_INIT(&m->md.pv_list);
+	m->md.pv_list_count = 0;
+	m->md.pv_flags = 0;
+}
+
+
+void
+pmap_release(pmap_t pmap)
+{
+	avr32_impl();
 }
 
+
 /*
  * The pmap_growkernel() function grows the kernel virtual address space to the virtual address addr.
  */
@@ -560,6 +610,7 @@
 	vm_offset_t pa, opa;
 	pt_entry_t *pte;
 	pt_entry_t origpte, newpte;
+	pv_entry_t pv;
 	vm_page_t mpte, om;
 	boolean_t invalidate;
 
@@ -567,6 +618,9 @@
 	mpte = NULL;
 	va &= ~PAGE_MASK;
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+	    (m->oflags & VPO_BUSY) != 0, 
+	    ("pmap_enter: page %p is not busy", m));
 
 	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
@@ -621,6 +675,8 @@
 		goto update;
 	}
 
+	pv = NULL;
+
 	/* Mapping has changed */
 	if (opa) {
 		if (origpte & PTE_WIRED) {
@@ -628,7 +684,7 @@
 		}
 		if (origpte & PTE_MANAGED) {
 			om = PHYS_TO_VM_PAGE(opa);
-			pmap_remove_entry(pmap, om, va);
+			pv = pmap_pvh_remove(&om->md, pmap, va);
 		}
 		if (mpte != NULL) {
 			mpte->wire_count--;
@@ -644,9 +700,17 @@
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 			("pmap_enter: managed mapping within the clean submap"));
-		pmap_insert_entry(pmap, va, m);
+		if (pv == NULL)
+			pv = get_pv_entry(pmap);
+		pv->pv_va = va;
+		pv->pv_pmap = pmap;
+		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
+		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count++;
 		pa |= PTE_MANAGED;
-	}
+	} else if (pv != NULL)
+		free_pv_entry(pv);
+
 
 	/* Increment counters */
 	if (wired) {
@@ -660,6 +724,8 @@
 	}
 	if (prot & VM_PROT_WRITE) {
 		newpte |= PTE_PERM_WRITE;
+		/* XXX: Check what the problem is with managed
+		 * pages and the PG_WRITEABLE flag. */
 		vm_page_flag_set(m, PG_WRITEABLE);
 	}
 	if (prot & VM_PROT_EXECUTE) {
@@ -733,9 +799,11 @@
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 	PMAP_UNLOCK(pmap);
+	vm_page_unlock_queues();
 }
 
 static vm_page_t
@@ -862,12 +930,14 @@
 	psize = atop(end - start);
 	mpte = NULL;
 	m = m_start;
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
 		    prot, mpte);
 		m = TAILQ_NEXT(m, listq);
 	}
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -933,7 +1003,7 @@
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	sched_pin();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -964,11 +1034,13 @@
 		pmap_free_zero_pages(free);
 		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count--;
 		PMAP_UNLOCK(pv->pv_pmap);
 		free_pv_entry(pv);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 void
@@ -1016,6 +1088,7 @@
 		/* Remove from lists and free */
 		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count--;
 		if (TAILQ_EMPTY(&m->md.pv_list)) {
 			vm_page_flag_clear(m, PG_WRITEABLE);
 		}
@@ -1128,12 +1201,21 @@
 	pv_entry_t pv;
 	pt_entry_t oldpte, *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & PG_FICTITIOUS) != 0 ||
-	    (m->flags & PG_WRITEABLE) == 0) {
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_write: page %p is not managed", m));
+
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no page table entries need updating.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
 		return;
-	}
+
 
+	vm_page_lock_queues();
 	sched_pin();
         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -1155,6 +1237,7 @@
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*
@@ -1324,6 +1407,11 @@
 	PMAP_UNLOCK(pmap);
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -1440,6 +1528,7 @@
 	return (page);
 }
 
+#if 0
 static void
 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 {
@@ -1448,7 +1537,7 @@
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 
-	pv = get_pv_entry();
+	pv = get_pv_entry(pmap);
 	if (pv == NULL) {
 		panic("no pv entries: increase vm.pmap.shpgperproc");
 	}
@@ -1457,8 +1546,11 @@
 
 	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	m->md.pv_list_count++;
 }
 
+#endif
+
 /*
  * Conditionally create a pv entry.
  */
@@ -1470,39 +1562,61 @@
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (pv_entry_count < pv_entry_high_water &&
-	    (pv = get_pv_entry()) != NULL) {
+	    (pv = get_pv_entry(pmap)) != NULL) {
 		pv->pv_va = va;
 		pv->pv_pmap = pmap;
 		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count++;
 		return (TRUE);
 	} else {
 		return (FALSE);
 	}
 }
 
-static void
-pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
+static pv_entry_t
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 {
-	pv_entry_t pv;
+	pv_entry_t pv = NULL;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
 			break;
 		}
 	}
 
-	KASSERT(pv != NULL, ("pmap_remove_entry: pv not found"));
-	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
-		vm_page_flag_clear(m, PG_WRITEABLE);
+	if (pv != NULL) {
+		TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+		pvh->pv_list_count--;
+		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
 	}
+	return (pv);
+}
+static void
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+	pv_entry_t pv;
+
+	pv = pmap_pvh_remove(pvh, pmap, va);
+
+	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
+	    (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)),
+	    (u_long)va));
+
 	free_pv_entry(pv);
 }
 
+static void
+pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
+{
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_pvh_free(&m->md, pmap, va);
+	if (TAILQ_EMPTY(&m->md.pv_list))
+		vm_page_flag_clear(m, PG_WRITEABLE);
+}
+
 /*
  * free the pv_entry back to the free list
  */
@@ -1522,7 +1636,7 @@
  * because of the possibility of allocations at interrupt time.
  */
 static pv_entry_t
-get_pv_entry(void)
+get_pv_entry(pmap_t locked_pmap)
 {
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 

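As a rough illustration of the pv-entry bookkeeping this change moves
to (pmap_pvh_remove() unlinks an entry and keeps pv_list_count in
sync), here is a minimal, self-contained userland sketch.  The
structures are simplified stand-ins for the kernel's pv_entry and
md_page, not code from the change itself:

	#include <sys/queue.h>
	#include <assert.h>
	#include <stdlib.h>

	struct pv_entry {
		unsigned long		pv_va;	 /* mapped virtual address */
		TAILQ_ENTRY(pv_entry)	pv_list; /* per-page linkage */
	};

	struct md_page {
		int			pv_list_count;
		TAILQ_HEAD(, pv_entry)	pv_list;
	};

	/* Analogue of pmap_pvh_remove(): unlink and return the entry. */
	static struct pv_entry *
	pvh_remove(struct md_page *pvh, unsigned long va)
	{
		struct pv_entry *pv;

		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
			if (pv->pv_va == va) {
				TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
				pvh->pv_list_count--;	/* keep count in sync */
				return (pv);
			}
		}
		return (NULL);	/* caller decides whether this is fatal */
	}

	int
	main(void)
	{
		struct md_page md;
		struct pv_entry *pv;

		TAILQ_INIT(&md.pv_list);
		md.pv_list_count = 0;

		if ((pv = malloc(sizeof(*pv))) == NULL)
			return (1);
		pv->pv_va = 0x1000;
		TAILQ_INSERT_TAIL(&md.pv_list, pv, pv_list);
		md.pv_list_count++;

		pv = pvh_remove(&md, 0x1000);
		assert(pv != NULL && md.pv_list_count == 0);
		free(pv);
		return (0);
	}

The split between pmap_pvh_remove(), which returns the entry without
asserting, and pmap_pvh_free(), which insists the entry exists, is what
lets pmap_enter() recycle the pv entry of a replaced mapping instead of
freeing and reallocating it.
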
==== //depot/projects/avr32/src/sys/avr32/include/pmap.h#10 (text+ko) ====

@@ -131,6 +131,10 @@
 	int			pm_active;
 	pd_entry_t		*pm_pd;		/**< KVA of Page directory */
 	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
+	uint32_t		pm_gen_count;	/* generation count
+						   (pmap lock dropped) */
+	u_int			pm_retries;
+
 } *pmap_t;
 
 #ifdef _KERNEL
@@ -149,9 +153,15 @@
 #endif
 
 struct md_page {
+	int			pv_list_count;
+	int			pv_flags;
 	TAILQ_HEAD(,pv_entry)	pv_list;
 };
 
+#define	PV_TABLE_MOD		0x01	/* modified */
+#define	PV_TABLE_REF		0x02	/* referenced */
+
+
 #define	ASID_MAX		256
 
 /*

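The new PV_TABLE_MOD/PV_TABLE_REF bits follow the same scheme as
FreeBSD's MIPS pmap: modified/referenced state is cached in the
machine-dependent page structure's pv_flags, so pmap_is_referenced()
can answer with a simple flag test instead of walking page tables.  A
tiny userland sketch of that test (simplified names, not code from the
change):

	#include <stdio.h>

	#define	PV_TABLE_MOD	0x01	/* modified */
	#define	PV_TABLE_REF	0x02	/* referenced */

	struct md_page_flags {
		int	pv_flags;
	};

	/* Analogue of the PV_TABLE_REF test in pmap_is_referenced(). */
	static int
	page_is_referenced(const struct md_page_flags *md)
	{
		return ((md->pv_flags & PV_TABLE_REF) != 0);
	}

	int
	main(void)
	{
		struct md_page_flags md = { .pv_flags = PV_TABLE_REF };

		printf("referenced: %d\n", page_is_referenced(&md));
		md.pv_flags &= ~PV_TABLE_REF; /* pmap_clear_reference() analogue */
		printf("referenced: %d\n", page_is_referenced(&md));
		return (0);
	}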