Date:      Wed, 8 Jul 2009 10:07:31 -0500 (CDT)
From:      Mark Tinguely <tinguely@casselton.net>
To:        mih@semihalf.com, tinguely@casselton.net
Cc:        stas@FreeBSD.org, freebsd-arm@FreeBSD.org
Subject:   Re: pmap problem in FreeBSD current
Message-ID:  <200907081507.n68F7Vsu070524@casselton.net>
In-Reply-To: <4A54A6B2.1020400@semihalf.com>


>  Hello Mark,
>  Thank you for taking another look at r194459.
>
>  I've tried compiling the kernel with your patch containing the
>  PG_WRITEABLE and arm_unmap_nocache() changes, but it didn't help.
>
>  My problems look more like memory corruption, which
>  I've observed in uma_core (pointers had bad values and the
>  count of free items in a slab went negative).
>
>  I am still concerned about the LOR that appeared with
>  svn commit r194459 (I wrote about it in my first mail).
>  Could you take a look at it?
>
>  Thanks for your help.
>
>  Best regards,
>  Michał Hajduk


I don't think this is a corruption issue but rather an l2_bucket
allocation issue. The kernel map is locked to do the kenter; when a new
bucket is needed, UMA is called and the new page is kentered into the
map, which needs the same kmap lock again. In pmap_enter_pv() we already
turn off the kmap lock before allocating a pv_entry. I am not sure
whether the kmap lock should be turned off in pmap_alloc_l2_bucket() or
in its caller, but a quick hand-made kmap lock disable patch follows the
dashed line below.
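
First, a userspace sketch of the lock recursion being avoided (a sketch
only: kmap_lock, grow_kernel_map() and the call chain are stand-ins for
illustration, not the real kernel paths). Built with -pthread, the
program blocks against itself the same way the kenter path does:

/*
 * The kmap lock is held across an allocation that may need the same
 * (non-recursive) lock again to kenter the fresh page, so the thread
 * deadlocks against itself. The patch below avoids this by dropping
 * the lock around the allocation and retaking it afterwards.
 */
#include <pthread.h>

static pthread_mutex_t kmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for UMA kentering a freshly allocated page into the map. */
static void
grow_kernel_map(void)
{
	pthread_mutex_lock(&kmap_lock);	/* second acquisition: hangs */
	/* ... kenter the new page ... */
	pthread_mutex_unlock(&kmap_lock);
}

int
main(void)
{
	pthread_mutex_lock(&kmap_lock);	/* the kenter in progress */
	grow_kernel_map();		/* needs a new bucket via UMA */
	pthread_mutex_unlock(&kmap_lock);
	return (0);
}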

I still think the last "vm_page_flag_clear(pg, PG_WRITEABLE);" needs
to be removed from pmap_nuke_pv(). All dangling kenters need to be
kremoved ...
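
To make that argument concrete, here is a toy model (the struct and
names are made up for illustration, not the kernel's real data
structures): the flag may only be cleared once every mapping, managed
or not, is gone, which is why the dangling kenters have to be kremoved
first.

#include <assert.h>
#include <stdbool.h>

struct toy_page {
	int	pv_count;	/* managed (pv_entry) mappings */
	int	kenter_count;	/* unmanaged kernel (kenter) mappings */
	bool	writeable;	/* models PG_WRITEABLE */
};

static void
toy_nuke_pv(struct toy_page *pg)
{
	pg->pv_count--;
	/* Only safe once the dangling kenters have been kremoved too. */
	if (pg->pv_count == 0 && pg->kenter_count == 0)
		pg->writeable = false;
}

int
main(void)
{
	struct toy_page pg = { 1, 1, true };

	toy_nuke_pv(&pg);
	assert(pg.writeable);	/* a kenter mapping remains */
	return (0);
}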
		------------------------

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before* 
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;
+	int km = 0;	/* 1 if we dropped the kernel pmap lock */

	l1idx = L1_IDX(va);

	PMAP_ASSERT_LOCKED(pm);
+	/* Drop the kmap lock so a nested kenter cannot deadlock on it. */
+	if ((pm != pmap_kernel()) && PMAP_OWNED(pmap_kernel())) {
+		km = 1;
+		PMAP_UNLOCK(pmap_kernel());
+	}
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
again_l2table:
		PMAP_UNLOCK(pm);
		vm_page_unlock_queues();
		if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
			vm_page_lock_queues();
			PMAP_LOCK(pm);
+			if (km)
+				PMAP_LOCK(pmap_kernel());
			return (NULL);
		}
		vm_page_lock_queues();
		PMAP_LOCK(pm);
		if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
			PMAP_UNLOCK(pm);
			vm_page_unlock_queues();
			uma_zfree(l2table_zone, l2);
			vm_page_lock_queues();
			PMAP_LOCK(pm);
			l2 = pm->pm_l2[L2_IDX(l1idx)];
			if (l2 == NULL)
				goto again_l2table;
			/*
			 * Someone already allocated the l2_dtable while
			 * we were doing the same.
			 */
		} else {
			bzero(l2, sizeof(*l2));
			/*
			 * Link it into the parent pmap
			 */
			pm->pm_l2[L2_IDX(l1idx)] = l2;
		}
	} 

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
again_ptep:
		PMAP_UNLOCK(pm);
		vm_page_unlock_queues();
		ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
		vm_page_lock_queues();
		PMAP_LOCK(pm);
		if (l2b->l2b_kva != 0) {
			/* We lost the race. */
			PMAP_UNLOCK(pm);
			vm_page_unlock_queues();
			uma_zfree(l2zone, ptep);
			vm_page_lock_queues();
			PMAP_LOCK(pm);
			if (l2b->l2b_kva == 0)
				goto again_ptep;
+			if (km)
+				PMAP_LOCK(pmap_kernel());
			return (l2b);
		}
		if (ptep == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1idx)] = NULL;
				pmap_free_l2_dtable(l2);
			}
+			if (km)
+				PMAP_LOCK(pmap_kernel());
			return (NULL);
		}
		l2b->l2b_phys = vtophys(ptep);

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
	}

+	if (km)
+		PMAP_LOCK(pmap_kernel());
	return (l2b);
}
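
For comparison, the caller-side alternative mentioned above would look
roughly like this (a sketch only; every caller of pmap_alloc_l2_bucket()
in pmap.c would need the same treatment, and the failure path shown is
made up):

	int km = 0;

	if ((pm != pmap_kernel()) && PMAP_OWNED(pmap_kernel())) {
		km = 1;
		PMAP_UNLOCK(pmap_kernel());
	}
	l2b = pmap_alloc_l2_bucket(pm, va);
	if (km)
		PMAP_LOCK(pmap_kernel());
	if (l2b == NULL)
		return;		/* caller's allocation-failure path */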


