Skip site navigation (1)Skip section navigation (2)
Date:      Tue, 25 Mar 2014 08:41:15 -0700
From:      Marcel Moolenaar <xcllnt@mac.com>
To:        mexas@bris.ac.uk
Cc:        freebsd-ia64@freebsd.org
Subject:   Re: ia64/183227: panic: uma_zfree: Freeing to non free bucket index;  in r262690
Message-ID:  <AB8B3D35-4E9F-4D1E-BCBF-F1260C6B942C@mac.com>
In-Reply-To: <201403251221.s2PCLNNc066759@mech-cluster241.men.bris.ac.uk>
References:  <201403251221.s2PCLNNc066759@mech-cluster241.men.bris.ac.uk>

next in thread | previous in thread | raw e-mail | index | archive | help

--Apple-Mail=_1D14D3A5-939A-4EBD-B387-717A9A26ADA4
Content-Type: multipart/mixed;
	boundary="Apple-Mail=_3E57AF10-9CFE-4F3C-8985-B31BBDFA1830"


--Apple-Mail=_3E57AF10-9CFE-4F3C-8985-B31BBDFA1830
Content-Transfer-Encoding: quoted-printable
Content-Type: text/plain;
	charset=us-ascii


On Mar 25, 2014, at 5:21 AM, Anton Shterenlikht <mexas@bris.ac.uk> wrote:

> Hi Marcel
> 
> Just to let you know that the panic
> ia64/183227: panic: uma_zfree: Freeing to non free bucket index
> ( http://www.freebsd.org/cgi/query-pr.cgi?pr=183227 )
> 
> for which you provided a fix,
> just appeared again in r262690.
> I updated the PR.
> 
> I think it might be related to
> http://www.freebsd.org/cgi/query-pr.cgi?pr=187816
> because both happened while building the same
> 2 moc related ports.
> 

Thanks,

I ran into it myself. I'm running with the attached patch.
It makes PTE updates more atomic. It appears that this is
really what's causing some of the "weird" panics.

Note that I have still observed page faults, even when
running with the patch. I can easily reproduce it when
trying to run X. The key is that X actually doesn't run
and gets restarted continuously. And it's that that
triggers the panic. Such panic looks like:

#13 0x9ffc000000a47bb0 in trap (vector=<value optimized out>, tf=0xa0000000be813000) at ../../../ia64/ia64/trap.c:562
#14 0x9ffc000000008a00 in ivt_Data_TLB ()
#15 0x9ffc0000009b9c11 in uma_zalloc_arg (zone=0x0, udata=0x0, flags=0) at ../../../vm/uma_core.c:2152
#16 0x9ffc0000005b5e80 in malloc (size=<value optimized out>, mtp=0x9ffc000000d40598, flags=2) at uma.h:336
#17 0x9ffc0000006825f0 in cloneuio (uiop=0xa0000000be813360) at ../../../kern/subr_uio.c:384
#18 0x9ffc0000007520b0 in vn_io_fault (fp=0xe000000015b17450, uio=0xa0000000be813360, active_cred=0xe000000014fc8400, flags=0, td=0xe000000012a7e4b0) at ../../../kern/vfs_vnops.c:969
#19 0x9ffc0000006985a0 in dofilewrite (td=0xe000000012a7e4b0, fd=2, fp=0xe000000015b17450, auio=0xa0000000be813360, offset=-1, flags=0) at file.h:307
#20 0x9ffc000000698a50 in kern_writev (td=0xe000000012a7e4b0, fd=2, auio=0xa0000000be813360) at ../../../kern/sys_generic.c:467
#21 0x9ffc000000698d10 in sys_write (td=0xe000000012a7e4b0, uap=0xa0000000be8134e8) at ../../../kern/sys_generic.c:382
#22 0x9ffc000000a46bd0 in syscall (tf=<value optimized out>) at subr_syscall.c:133

I've added or revived KTR tracing to help get a clear
picture of the events...

FYI,

-- 
Marcel Moolenaar
xcllnt@mac.com



--Apple-Mail=_3E57AF10-9CFE-4F3C-8985-B31BBDFA1830
Content-Disposition: attachment;
	filename=ia64.diff
Content-Type: application/octet-stream;
	name="ia64.diff"
Content-Transfer-Encoding: 7bit

Index: sys/ia64/ia64/pmap.c
===================================================================
--- sys/ia64/ia64/pmap.c	(revision 262683)
+++ sys/ia64/ia64/pmap.c	(working copy)
@@ -132,13 +132,14 @@
 #define	pmap_prot(lpte)			(((lpte)->pte & PTE_PROT_MASK) >> 56)
 #define	pmap_wired(lpte)		((lpte)->pte & PTE_WIRED)
 
-#define	pmap_clear_accessed(lpte)	(lpte)->pte &= ~PTE_ACCESSED
-#define	pmap_clear_dirty(lpte)		(lpte)->pte &= ~PTE_DIRTY
-#define	pmap_clear_present(lpte)	(lpte)->pte &= ~PTE_PRESENT
-#define	pmap_clear_wired(lpte)		(lpte)->pte &= ~PTE_WIRED
+#define pmap_clr_accessed(lpte) atomic_clear_64(&((lpte)->pte), PTE_ACCESSED)
+#define pmap_clr_dirty(lpte)    atomic_clear_64(&((lpte)->pte), PTE_DIRTY)
+#define pmap_clr_present(lpte)  atomic_clear_64(&((lpte)->pte), PTE_PRESENT)
+#define pmap_clr_wired(lpte)    atomic_clear_64(&((lpte)->pte), PTE_WIRED)
+ 
+#define pmap_set_present(lpte)  atomic_set_64(&((lpte)->pte), PTE_PRESENT)
+#define pmap_set_wired(lpte)    atomic_set_64(&((lpte)->pte), PTE_WIRED)
 
-#define	pmap_set_wired(lpte)		(lpte)->pte |= PTE_WIRED
-
 /*
  * Individual PV entries are stored in per-pmap chunks.  This saves
  * space by eliminating the need to record the pmap within every PV
@@ -1254,11 +1255,11 @@
 	if (va < VM_MAXUSER_ADDRESS)
 		uma_zfree(ptezone, pte);
 	else
-		pmap_clear_present(pte);
+		pmap_clr_present(pte);
 }
 
-static PMAP_INLINE void
-pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)
+static PMAP_INLINE uint64_t
+pmap_pte_prot(pmap_t pm, uint64_t pte, vm_prot_t prot)
 {
 	static long prot2ar[4] = {
 		PTE_AR_R,		/* VM_PROT_NONE */
@@ -1267,21 +1268,35 @@
 		PTE_AR_RWX|PTE_ED	/* VM_PROT_WRITE|VM_PROT_EXECUTE */
 	};
 
-	pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED);
-	pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
-	pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
+	pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED);
+	pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
+	pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
 	    ? PTE_PL_KERN : PTE_PL_USER;
-	pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
+	pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
+	return (pte);
 }
 
-static PMAP_INLINE void
-pmap_pte_attr(struct ia64_lpte *pte, vm_memattr_t ma)
+static PMAP_INLINE uint64_t
+pmap_pte_attr(uint64_t pte, vm_memattr_t ma)
 {
 
-	pte->pte &= ~PTE_MA_MASK;
-	pte->pte |= (ma & PTE_MA_MASK);
+	pte &= ~PTE_MA_MASK;
+	pte |= (ma & PTE_MA_MASK);
+	return (pte);
 }
 
+static PMAP_INLINE uint64_t
+pmap_pte_xlat(uint64_t pte, vm_offset_t pa, boolean_t wired, boolean_t managed)
+{
+
+	pte &= PTE_PROT_MASK | PTE_MA_MASK | PTE_PL_MASK |
+	    PTE_AR_MASK | PTE_ED;
+	pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
+	pte |= (wired) ? PTE_WIRED : 0;
+	pte |= pa & PTE_PPN_MASK;
+	return (pte);
+}
+
 /*
  * Set a pte to contain a valid mapping and enter it in the VHPT. If
  * the pte was orginally valid, then its assumed to already be in the
@@ -1290,22 +1305,19 @@
  * that those have been set correctly prior to calling this function.
  */
 static void
-pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
-    boolean_t wired, boolean_t managed)
+pmap_set_pte(struct ia64_lpte *pte, uint64_t p, vm_offset_t va, vm_size_t ps)
 {
 
-	pte->pte &= PTE_PROT_MASK | PTE_MA_MASK | PTE_PL_MASK |
-	    PTE_AR_MASK | PTE_ED;
-	pte->pte |= PTE_PRESENT;
-	pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
-	pte->pte |= (wired) ? PTE_WIRED : 0;
-	pte->pte |= pa & PTE_PPN_MASK;
+	critical_enter();
 
-	pte->itir = PAGE_SHIFT << 2;
-
+	pte->pte = p;
+	pte->itir = ps << 2;
 	ia64_mf();
+	pte->tag = ia64_ttag(va);
+	ia64_mf();
+	pmap_set_present(pte);
 
-	pte->tag = ia64_ttag(va);
+	critical_exit();
 }
 
 /*
@@ -1417,6 +1429,7 @@
 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
 {
 	struct ia64_lpte *pte;
+	uint64_t p;
 	int i;
 
 	for (i = 0; i < count; i++) {
@@ -1425,9 +1438,10 @@
 			pmap_invalidate_page(va);
 		else
 			pmap_enter_vhpt(pte, va);
-		pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
-		pmap_pte_attr(pte, m[i]->md.memattr);
-		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
+		p = pmap_pte_prot(kernel_pmap, pte->pte, VM_PROT_ALL);
+		p = pmap_pte_attr(p, m[i]->md.memattr);
+		p = pmap_pte_xlat(p, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
+		pmap_set_pte(pte, p, va, PAGE_SHIFT);
 		va += PAGE_SIZE;
 	}
 }
@@ -1447,7 +1461,7 @@
 		if (pmap_present(pte)) {
 			pmap_remove_vhpt(va);
 			pmap_invalidate_page(va);
-			pmap_clear_present(pte);
+			pmap_clr_present(pte);
 		}
 		va += PAGE_SIZE;
 	}
@@ -1461,6 +1475,7 @@
 pmap_kenter(vm_offset_t va, vm_offset_t pa)
 {
 	struct ia64_lpte *pte;
+	uint64_t p;
 
 	pte = pmap_find_kpte(va);
 	if (pmap_present(pte))
@@ -1467,9 +1482,10 @@
 		pmap_invalidate_page(va);
 	else
 		pmap_enter_vhpt(pte, va);
-	pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
-	pmap_pte_attr(pte, VM_MEMATTR_DEFAULT);
-	pmap_set_pte(pte, va, pa, FALSE, FALSE);
+	p = pmap_pte_prot(kernel_pmap, pte->pte, VM_PROT_ALL);
+	p = pmap_pte_attr(p, VM_MEMATTR_DEFAULT);
+	p = pmap_pte_xlat(p, pa, FALSE, FALSE);
+	pmap_set_pte(pte, p, va, PAGE_SHIFT);
 }
 
 /*
@@ -1484,7 +1500,7 @@
 	if (pmap_present(pte)) {
 		pmap_remove_vhpt(va);
 		pmap_invalidate_page(va);
-		pmap_clear_present(pte);
+		pmap_clr_present(pte);
 	}
 }
 
@@ -1591,6 +1607,7 @@
 {
 	pmap_t oldpmap;
 	struct ia64_lpte *pte;
+	uint64_t p;
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		pmap_remove(pmap, sva, eva);
@@ -1622,13 +1639,14 @@
 			vm_page_t m = PHYS_TO_VM_PAGE(pa);
 
 			vm_page_dirty(m);
-			pmap_clear_dirty(pte);
+			pmap_clr_dirty(pte);
 		}
 
 		if (prot & VM_PROT_EXECUTE)
 			ia64_sync_icache(sva, PAGE_SIZE);
 
-		pmap_pte_prot(pmap, pte, prot);
+		p = pmap_pte_prot(pmap, pte->pte, prot);
+		atomic_store_rel_64(&pte->pte, p);
 		pmap_invalidate_page(sva);
 	}
 	pmap_switch(oldpmap);
@@ -1656,6 +1674,7 @@
 	vm_offset_t opa;
 	struct ia64_lpte origpte;
 	struct ia64_lpte *pte;
+	uint64_t p;
 	boolean_t icache_inval, managed;
 
 	rw_wlock(&pvh_global_lock);
@@ -1754,9 +1773,10 @@
 	 * Now validate mapping with desired protection/wiring. This
 	 * adds the pte to the VHPT if necessary.
 	 */
-	pmap_pte_prot(pmap, pte, prot);
-	pmap_pte_attr(pte, m->md.memattr);
-	pmap_set_pte(pte, va, pa, wired, managed);
+	p = pmap_pte_prot(pmap, pte->pte, prot);
+	p = pmap_pte_attr(p, m->md.memattr);
+	p = pmap_pte_xlat(p, pa, wired, managed);
+	pmap_set_pte(pte, p, va, PAGE_SHIFT);
 
 	/* Invalidate the I-cache when needed. */
 	if (icache_inval)
@@ -1833,6 +1853,7 @@
     vm_prot_t prot)
 {
 	struct ia64_lpte *pte;
+	uint64_t p;
 	boolean_t managed;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
@@ -1860,10 +1881,11 @@
 
 		/* Initialise with R/O protection and enter into VHPT. */
 		pmap_enter_vhpt(pte, va);
-		pmap_pte_prot(pmap, pte,
+		p = pmap_pte_prot(pmap, pte->pte,
 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE));
-		pmap_pte_attr(pte, m->md.memattr);
-		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
+		p = pmap_pte_attr(p, m->md.memattr);
+		p = pmap_pte_xlat(p, VM_PAGE_TO_PHYS(m), FALSE, managed);
+		pmap_set_pte(pte, p, va, PAGE_SHIFT);
 
 		if (prot & VM_PROT_EXECUTE)
 			ia64_sync_icache(va, PAGE_SIZE);
@@ -1912,7 +1934,7 @@
 		pmap_set_wired(pte);
 	} else if (!wired && pmap_wired(pte)) {
 		pmap->pm_stats.wired_count--;
-		pmap_clear_wired(pte);
+		pmap_clr_wired(pte);
 	}
 
 	pmap_switch(oldpmap);
@@ -2190,7 +2212,7 @@
 		KASSERT(pte != NULL, ("pte"));
 		if (pmap_accessed(pte)) {
 			count++;
-			pmap_clear_accessed(pte);
+			pmap_clr_accessed(pte);
 			pmap_invalidate_page(pv->pv_va);
 		}
 		pmap_switch(oldpmap);
@@ -2328,10 +2350,10 @@
 				m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
 				vm_page_dirty(m);
 			}
-			pmap_clear_dirty(pte);
+			pmap_clr_dirty(pte);
 		} else if (!pmap_accessed(pte))
 			continue;
-		pmap_clear_accessed(pte);
+		pmap_clr_accessed(pte);
 		pmap_invalidate_page(sva);
 	}
 	pmap_switch(oldpmap);
@@ -2369,7 +2391,7 @@
 		pte = pmap_find_vhpt(pv->pv_va);
 		KASSERT(pte != NULL, ("pte"));
 		if (pmap_dirty(pte)) {
-			pmap_clear_dirty(pte);
+			pmap_clr_dirty(pte);
 			pmap_invalidate_page(pv->pv_va);
 		}
 		pmap_switch(oldpmap);
@@ -2385,6 +2407,7 @@
 pmap_remove_write(vm_page_t m)
 {
 	struct ia64_lpte *pte;
+	uint64_t p;
 	pmap_t oldpmap, pmap;
 	pv_entry_t pv;
 	vm_prot_t prot;
@@ -2411,11 +2434,12 @@
 		if ((prot & VM_PROT_WRITE) != 0) {
 			if (pmap_dirty(pte)) {
 				vm_page_dirty(m);
-				pmap_clear_dirty(pte);
+				pmap_clr_dirty(pte);
 			}
 			prot &= ~VM_PROT_WRITE;
-			pmap_pte_prot(pmap, pte, prot);
-			pmap_pte_attr(pte, m->md.memattr);
+			p = pmap_pte_prot(pmap, pte->pte, prot);
+			p = pmap_pte_attr(p, m->md.memattr);
+			atomic_store_rel_64(&pte->pte, p);
 			pmap_invalidate_page(pv->pv_va);
 		}
 		pmap_switch(oldpmap);
@@ -2492,6 +2516,7 @@
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 	struct ia64_lpte *pte;
+	uint64_t p;
 	pmap_t oldpmap, pmap;
 	pv_entry_t pv;
 	void *va;
@@ -2504,7 +2529,8 @@
 		oldpmap = pmap_switch(pmap);
 		pte = pmap_find_vhpt(pv->pv_va);
 		KASSERT(pte != NULL, ("pte"));
-		pmap_pte_attr(pte, ma);
+		p = pmap_pte_attr(pte->pte, ma);
+		atomic_store_rel_64(&pte->pte, p);
 		pmap_invalidate_page(pv->pv_va);
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pmap);
@@ -2700,7 +2726,7 @@
 		if (!(res.pal_result[0] & 2))
 			buf.pte &= ~PTE_PL_MASK;
 		if (!(res.pal_result[0] & 4))
-			pmap_clear_dirty(&buf);
+			pmap_clr_dirty(&buf);
 		if (!(res.pal_result[0] & 8))
 			buf.pte &= ~PTE_MA_MASK;
 		db_printf("%d %06x %013lx %013lx %4s %d  %d  %d  %d %d %-3s "

--Apple-Mail=_3E57AF10-9CFE-4F3C-8985-B31BBDFA1830
Content-Transfer-Encoding: 7bit
Content-Type: text/plain;
	charset=us-ascii



--Apple-Mail=_3E57AF10-9CFE-4F3C-8985-B31BBDFA1830--

--Apple-Mail=_1D14D3A5-939A-4EBD-B387-717A9A26ADA4
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
	filename=signature.asc
Content-Type: application/pgp-signature;
	name=signature.asc
Content-Description: Message signed with OpenPGP using GPGMail

-----BEGIN PGP SIGNATURE-----
Comment: GPGTools - http://gpgtools.org

iEYEARECAAYFAlMxo5sACgkQpgWlLWHuifZ54QCfeUZ6oBlwFsNJGWq3EMOgIn5s
DpIAnR4s19G3gOnadQHHGnILHrWxg7ad
=ivb9
-----END PGP SIGNATURE-----

--Apple-Mail=_1D14D3A5-939A-4EBD-B387-717A9A26ADA4--



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?AB8B3D35-4E9F-4D1E-BCBF-F1260C6B942C>