Skip site navigation (1)Skip section navigation (2)
Date:      Sat, 27 Feb 2016 20:39:36 +0000 (UTC)
From:      Justin Hibbits <jhibbits@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r296142 - in head/sys/powerpc: booke include powerpc
Message-ID:  <201602272039.u1RKdaCw027811@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: jhibbits
Date: Sat Feb 27 20:39:36 2016
New Revision: 296142
URL: https://svnweb.freebsd.org/changeset/base/296142

Log:
  Implement pmap_change_attr() for PowerPC (Book-E only for now)
  
  Summary:
  Some drivers have special memory requirements.  X86 solves this with a
  pmap_change_attr() API, which DRM uses for changing the mapping of the GART and
  other memory regions.  Implement the same function for PowerPC.  AIM currently
  does not need this, but will in the future for DRM, so a default is added for
  that, for business as usual.  Book-E has some drivers coming down that do
  require non-default memory coherency.  In this case, the Datapath Acceleration
  Architecture (DPAA) based ethernet controller has 2 regions for the buffer
  portals: cache-inhibited, and cache-enabled.  By default, device memory is
  cache-inhibited.  If the cache-enabled memory regions are mapped
  cache-inhibited, an alignment exception is thrown on access.
  
  Test Plan:
  Tested with a new driver to be added after this (DPAA dTSEC ethernet driver).
  No alignment exceptions thrown, driver works as expected with this.
  
  Reviewed By:	nwhitehorn
  Sponsored by:	Alex Perez/Inertial Computing
  Differential Revision: https://reviews.freebsd.org/D5471

Modified:
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/include/pmap.h
  head/sys/powerpc/include/tlb.h
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/powerpc/powerpc/pmap_dispatch.c

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Sat Feb 27 16:27:48 2016	(r296141)
+++ head/sys/powerpc/booke/pmap.c	Sat Feb 27 20:39:36 2016	(r296142)
@@ -340,6 +340,8 @@ static void		mmu_booke_dumpsys_unmap(mmu
 static void		mmu_booke_scan_init(mmu_t);
 static vm_offset_t	mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
 static void		mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
+static int		mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
+    vm_size_t sz, vm_memattr_t mode);
 
 static mmu_method_t mmu_booke_methods[] = {
 	/* pmap dispatcher interface */
@@ -392,6 +394,7 @@ static mmu_method_t mmu_booke_methods[] 
 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
 /*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
+	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
 
 	/* dumpsys() support */
 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
@@ -419,6 +422,8 @@ tlb_calc_wimg(vm_paddr_t pa, vm_memattr_
 			return (MAS2_I);
 		case VM_MEMATTR_WRITE_THROUGH:
 			return (MAS2_W | MAS2_M);
+		case VM_MEMATTR_CACHEABLE:
+			return (MAS2_M);
 		}
 	}
 
@@ -2900,6 +2905,63 @@ mmu_booke_mincore(mmu_t mmu, pmap_t pmap
 	return (0);
 }
 
+/*
+ * Change the memory attributes (WIMGE bits) of an existing kernel
+ * virtual address range.  The range must be fully mapped, either by
+ * whole TLB1 entries or by valid kernel-pmap PTEs.
+ *
+ * Returns 0 on success, or EINVAL if any part of the range is
+ * unmapped or a TLB1 mapping would be only partially modified.
+ */
+static int
+mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
+    vm_memattr_t mode)
+{
+	vm_offset_t va;
+	pte_t *pte;
+	int i, j;
+
+	/* Check TLB1 mappings */
+	for (i = 0; i < tlb1_idx; i++) {
+		if (!(tlb1[i].mas1 & MAS1_VALID))
+			continue;
+		if (addr >= tlb1[i].virt && addr < tlb1[i].virt + tlb1[i].size)
+			break;
+	}
+	if (i < tlb1_idx) {
+		/* Only allow full mappings to be modified for now. */
+		/*
+		 * Validate the range.  Bound j to the live entries so we
+		 * never read past tlb1[tlb1_idx - 1].
+		 */
+		for (j = i, va = addr; va < addr + sz; va += tlb1[j].size, j++) {
+			if (j >= tlb1_idx || va != tlb1[j].virt ||
+			    (sz - (va - addr) < tlb1[j].size))
+				return (EINVAL);
+		}
+		for (va = addr; va < addr + sz; va += tlb1[i].size, i++) {
+			tlb1[i].mas2 &= ~MAS2_WIMGE_MASK;
+			tlb1[i].mas2 |= tlb_calc_wimg(tlb1[i].phys, mode);
+
+			/*
+			 * Write it out to the TLB.  Should really re-sync with other
+			 * cores.
+			 */
+			tlb1_write_entry(i);
+		}
+		return (0);
+	}
+
+	/* Not in TLB1, try through pmap */
+	/* First validate the range. */
+	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
+		pte = pte_find(mmu, kernel_pmap, va);
+		if (pte == NULL || !PTE_ISVALID(pte))
+			return (EINVAL);
+	}
+
+	mtx_lock_spin(&tlbivax_mutex);
+	tlb_miss_lock();
+	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
+		pte = pte_find(mmu, kernel_pmap, va);
+		*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
+		/*
+		 * Shift the computed WIMGE bits into the PTE's MAS2 field.
+		 * (Shifting 'mode' itself before the lookup would corrupt
+		 * the attribute selection.)
+		 */
+		*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
+		tlb0_flush_entry(va);
+	}
+	tlb_miss_unlock();
+	mtx_unlock_spin(&tlbivax_mutex);
+
+	/* Errno-style contract: 0 on success, not a physical address. */
+	return (0);
+}
+
 /**************************************************************************/
 /* TID handling */
 /**************************************************************************/

Modified: head/sys/powerpc/include/pmap.h
==============================================================================
--- head/sys/powerpc/include/pmap.h	Sat Feb 27 16:27:48 2016	(r296141)
+++ head/sys/powerpc/include/pmap.h	Sat Feb 27 20:39:36 2016	(r296142)
@@ -238,6 +238,7 @@ void		*pmap_mapdev(vm_paddr_t, vm_size_t
 void		*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
 void		pmap_unmapdev(vm_offset_t, vm_size_t);
 void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+int		pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 void		pmap_deactivate(struct thread *);
 vm_paddr_t	pmap_kextract(vm_offset_t);
 int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);

Modified: head/sys/powerpc/include/tlb.h
==============================================================================
--- head/sys/powerpc/include/tlb.h	Sat Feb 27 16:27:48 2016	(r296141)
+++ head/sys/powerpc/include/tlb.h	Sat Feb 27 20:39:36 2016	(r296142)
@@ -74,6 +74,7 @@
 #define	MAS2_M			0x00000004
 #define	MAS2_G			0x00000002
 #define	MAS2_E			0x00000001
+#define	MAS2_WIMGE_MASK		0x0000001F
 
 #define	MAS3_RPN		0xFFFFF000
 #define	MAS3_RPN_SHIFT		12

Modified: head/sys/powerpc/powerpc/mmu_if.m
==============================================================================
--- head/sys/powerpc/powerpc/mmu_if.m	Sat Feb 27 16:27:48 2016	(r296141)
+++ head/sys/powerpc/powerpc/mmu_if.m	Sat Feb 27 20:39:36 2016	(r296142)
@@ -124,6 +124,12 @@ CODE {
 	{
 		return;
 	}
+
+	static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
+	    vm_size_t sz, vm_memattr_t mode)
+	{
+		return (0);
+	}
 };
 
 
@@ -956,3 +962,20 @@ METHOD void quick_remove_page {
 	vm_offset_t	_va;
 };
 
+/**
+ * @brief Change the specified virtual address range's memory type.
+ *
+ * @param _va		The virtual base address to change
+ *
+ * @param _sz		Size of the region to change
+ *
+ * @param _mode		New mode to set on the VA range
+ *
+ * @retval error	0 on success, EINVAL or ENOMEM on error.
+ */
+METHOD int change_attr {
+	mmu_t		_mmu;
+	vm_offset_t	_va;
+	vm_size_t	_sz;
+	vm_memattr_t	_mode;
+} DEFAULT mmu_null_change_attr;

Modified: head/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c	Sat Feb 27 16:27:48 2016	(r296141)
+++ head/sys/powerpc/powerpc/pmap_dispatch.c	Sat Feb 27 20:39:36 2016	(r296142)
@@ -564,6 +564,13 @@ pmap_quick_remove_page(vm_offset_t addr)
 	MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
 }
 
+int
+pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
+{
+	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
+	return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
+}
+
 /*
  * MMU install routines. Highest priority wins, equal priority also
  * overrides allowing last-set to win.



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201602272039.u1RKdaCw027811>