Date:      Thu, 26 Mar 2015 14:51:25 +0000 (UTC)
From:      Ruslan Bukin <br@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r280691 - in head/sys/mips: include mips
Message-ID:  <201503261451.t2QEpP1N013001@svn.freebsd.org>

Author: br
Date: Thu Mar 26 14:51:24 2015
New Revision: 280691
URL: https://svnweb.freebsd.org/changeset/base/280691

Log:
  Add 64-byte line size cache flushing routines for the L1 instruction,
  L1 data, and L2 data caches.
  
  Sponsored by:	HEIF5

Modified:
  head/sys/mips/include/cache_mipsNN.h
  head/sys/mips/include/cache_r4k.h
  head/sys/mips/mips/cache.c
  head/sys/mips/mips/cache_mipsNN.c
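
All of the new 64-byte routines share one skeleton: round the end of the
range up to a line boundary with round_line64(), truncate the start down
with trunc_line64(), process 32 lines (0x800 bytes) per loop iteration,
and finish the tail one line at a time.  A minimal host-side sketch of
that shape, with a printf stub standing in for the MIPS cache
instructions (illustrative names, not part of the commit):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same arithmetic as the round_line64()/trunc_line64() macros below. */
#define	round_line64(x)	(((x) + 63) & ~(uintptr_t)63)
#define	trunc_line64(x)	((x) & ~(uintptr_t)63)

/* Stub standing in for the cache instructions; illustration only. */
static void
cache_op(uintptr_t va, int nlines)
{
	printf("op at %#lx (%d line(s))\n", (unsigned long)va, nlines);
}

/* Skeleton shared by the new *_range_64() routines. */
static void
range_op_64(uintptr_t va, size_t size)
{
	uintptr_t eva = round_line64(va + size);

	va = trunc_line64(va);
	while ((eva - va) >= (32 * 64)) {	/* bulk: 32 lines at a time */
		cache_op(va, 32);
		va += (32 * 64);
	}
	while (va < eva) {			/* tail: single lines */
		cache_op(va, 1);
		va += 64;
	}
}

int
main(void)
{
	range_op_64(0x80000123, 0x1000);
	return (0);
}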

Modified: head/sys/mips/include/cache_mipsNN.h
==============================================================================
--- head/sys/mips/include/cache_mipsNN.h	Thu Mar 26 13:31:04 2015	(r280690)
+++ head/sys/mips/include/cache_mipsNN.h	Thu Mar 26 14:51:24 2015	(r280691)
@@ -43,37 +43,50 @@ void	mipsNN_cache_init(struct mips_cpuin
 
 void	mipsNN_icache_sync_all_16(void);
 void	mipsNN_icache_sync_all_32(void);
+void	mipsNN_icache_sync_all_64(void);
+void	mipsNN_icache_sync_all_128(void);
 void	mipsNN_icache_sync_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_icache_sync_range_32(vm_offset_t, vm_size_t);
+void	mipsNN_icache_sync_range_64(vm_offset_t, vm_size_t);
+void	mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_icache_sync_range_index_16(vm_offset_t, vm_size_t);
 void	mipsNN_icache_sync_range_index_32(vm_offset_t, vm_size_t);
+void	mipsNN_icache_sync_range_index_64(vm_offset_t, vm_size_t);
+void	mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wbinv_all_16(void);
 void	mipsNN_pdcache_wbinv_all_32(void);
+void	mipsNN_pdcache_wbinv_all_64(void);
+void	mipsNN_pdcache_wbinv_all_128(void);
 void	mipsNN_pdcache_wbinv_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wbinv_range_32(vm_offset_t, vm_size_t);
+void	mipsNN_pdcache_wbinv_range_64(vm_offset_t, vm_size_t);
+void	mipsNN_pdcache_wbinv_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wbinv_range_index_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
+void	mipsNN_pdcache_wbinv_range_index_64(vm_offset_t, vm_size_t);
+void	mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_inv_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
+void	mipsNN_pdcache_inv_range_64(vm_offset_t, vm_size_t);
+void	mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
-void	mipsNN_icache_sync_all_128(void);
-void	mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
-void	mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
-void	mipsNN_pdcache_wbinv_all_128(void);
-void	mipsNN_pdcache_wbinv_range_128(vm_offset_t, vm_size_t);
-void	mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
-void	mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
+void	mipsNN_pdcache_wb_range_64(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wbinv_all_32(void);
-void	mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
-void	mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
-void	mipsNN_sdcache_inv_range_32(vm_offset_t, vm_size_t);
-void	mipsNN_sdcache_wb_range_32(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_wbinv_all_64(void);
 void	mipsNN_sdcache_wbinv_all_128(void);
+void	mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_wbinv_range_64(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wbinv_range_128(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_wbinv_range_index_64(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_inv_range_32(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_inv_range_64(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_inv_range_128(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_wb_range_32(vm_offset_t, vm_size_t);
+void	mipsNN_sdcache_wb_range_64(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wb_range_128(vm_offset_t, vm_size_t);
 
 #endif	/* _MACHINE_CACHE_MIPSNN_H_ */

Modified: head/sys/mips/include/cache_r4k.h
==============================================================================
--- head/sys/mips/include/cache_r4k.h	Thu Mar 26 13:31:04 2015	(r280690)
+++ head/sys/mips/include/cache_r4k.h	Thu Mar 26 14:51:24 2015	(r280691)
@@ -114,6 +114,25 @@ do {									\
 } while (/*CONSTCOND*/0)
 
 /*
+ * cache_r4k_op_8lines_64:
+ *
+ *	Perform the specified cache operation on 8 64-byte cache lines.
+ */
+#define	cache_r4k_op_8lines_64(va, op)					\
+do {									\
+	__asm __volatile(						\
+		".set noreorder					\n\t"	\
+		"cache %1, 0x000(%0); cache %1, 0x040(%0)	\n\t"	\
+		"cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n\t"	\
+		"cache %1, 0x100(%0); cache %1, 0x140(%0)	\n\t"	\
+		"cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n\t"	\
+		".set reorder"						\
+	    :								\
+	    : "r" (va), "i" (op)					\
+	    : "memory");						\
+} while (/*CONSTCOND*/0)
+
+/*
  * cache_r4k_op_32lines_16:
  *
  *	Perform the specified cache operation on 32 16-byte
@@ -178,6 +197,38 @@ do {									\
 } while (/*CONSTCOND*/0)
 
 /*
+ * cache_r4k_op_32lines_64:
+ *
+ *	Perform the specified cache operation on 32 64-byte
+ *	cache lines.
+ */
+#define	cache_r4k_op_32lines_64(va, op)					\
+do {									\
+	__asm __volatile(						\
+		".set noreorder					\n\t"	\
+		"cache %1, 0x000(%0); cache %1, 0x040(%0);	\n\t"	\
+		"cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\n\t"	\
+		"cache %1, 0x100(%0); cache %1, 0x140(%0);	\n\t"	\
+		"cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\n\t"	\
+		"cache %1, 0x200(%0); cache %1, 0x240(%0);	\n\t"	\
+		"cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\n\t"	\
+		"cache %1, 0x300(%0); cache %1, 0x340(%0);	\n\t"	\
+		"cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\n\t"	\
+		"cache %1, 0x400(%0); cache %1, 0x440(%0);	\n\t"	\
+		"cache %1, 0x480(%0); cache %1, 0x4c0(%0);	\n\t"	\
+		"cache %1, 0x500(%0); cache %1, 0x540(%0);	\n\t"	\
+		"cache %1, 0x580(%0); cache %1, 0x5c0(%0);	\n\t"	\
+		"cache %1, 0x600(%0); cache %1, 0x640(%0);	\n\t"	\
+		"cache %1, 0x680(%0); cache %1, 0x6c0(%0);	\n\t"	\
+		"cache %1, 0x700(%0); cache %1, 0x740(%0);	\n\t"	\
+		"cache %1, 0x780(%0); cache %1, 0x7c0(%0);	\n\t"	\
+		".set reorder"						\
+	    :								\
+	    : "r" (va), "i" (op)					\
+	    : "memory");						\
+} while (/*CONSTCOND*/0)
+
+/*
  * cache_r4k_op_32lines_128:
  *
  *	Perform the specified cache operation on 32 128-byte

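cache_r4k_op_8lines_64() touches 0x200 bytes per invocation and exists for
the indexed routines in cache_mipsNN.c, which have to repeat every step
once per cache way by advancing a per-way stride; cache_r4k_op_32lines_64()
covers 0x800 bytes for the bulk of the hit-type range operations.  A hedged
sketch of that per-way iteration, with a stub in place of the inline-asm
macro and illustrative stride/loopcount values:

#include <stdio.h>
#include <stdint.h>

/* Stub standing in for cache_r4k_op_8lines_64(); illustration only. */
static void
index_op_8lines(uintptr_t va)
{
	printf("index op, 8 lines at %#lx\n", (unsigned long)va);
}

int
main(void)
{
	/* Illustrative values: a 4-way cache with 8 KB per way. */
	int loopcount = 4;		/* one pass per way */
	uintptr_t stride = 8 * 1024;	/* distance between ways in index space */
	uintptr_t va = 0x80000000, eva = va + 2 * 8 * 64;
	uintptr_t tmpva;
	int i;

	while ((eva - va) >= (8 * 64)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			index_op_8lines(tmpva);
		va += 8 * 64;
	}
	return (0);
}
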
Modified: head/sys/mips/mips/cache.c
==============================================================================
--- head/sys/mips/mips/cache.c	Thu Mar 26 13:31:04 2015	(r280690)
+++ head/sys/mips/mips/cache.c	Thu Mar 26 14:51:24 2015	(r280691)
@@ -104,6 +104,13 @@ mips_config_cache(struct mips_cpuinfo * 
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_32;
 		break;
+	case 64:
+		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_64;
+		mips_cache_ops.mco_icache_sync_range =
+		    mipsNN_icache_sync_range_64;
+		mips_cache_ops.mco_icache_sync_range_index =
+		    mipsNN_icache_sync_range_index_64;
+		break;
 	case 128:
 		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_128;
 		mips_cache_ops.mco_icache_sync_range =
@@ -170,6 +177,21 @@ mips_config_cache(struct mips_cpuinfo * 
 		    mipsNN_pdcache_wb_range_32;
 #endif
 		break;
+	case 64:
+		mips_cache_ops.mco_pdcache_wbinv_all =
+		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
+		    mipsNN_pdcache_wbinv_all_64;
+		mips_cache_ops.mco_pdcache_wbinv_range =
+		    mipsNN_pdcache_wbinv_range_64;
+		mips_cache_ops.mco_pdcache_wbinv_range_index =
+		    mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
+		    mipsNN_pdcache_wbinv_range_index_64;
+		mips_cache_ops.mco_pdcache_inv_range =
+		    mipsNN_pdcache_inv_range_64;
+		mips_cache_ops.mco_pdcache_wb_range =
+		    mips_cache_ops.mco_intern_pdcache_wb_range =
+		    mipsNN_pdcache_wb_range_64;
+		break;
 	case 128:
 		mips_cache_ops.mco_pdcache_wbinv_all =
 		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
@@ -275,6 +297,18 @@ mips_config_cache(struct mips_cpuinfo * 
 		mips_cache_ops.mco_sdcache_wb_range =
 			mipsNN_sdcache_wb_range_32;
 		break;
+	case 64:
+		mips_cache_ops.mco_sdcache_wbinv_all =
+			mipsNN_sdcache_wbinv_all_64;
+		mips_cache_ops.mco_sdcache_wbinv_range =
+			mipsNN_sdcache_wbinv_range_64;
+		mips_cache_ops.mco_sdcache_wbinv_range_index =
+			mipsNN_sdcache_wbinv_range_index_64;
+		mips_cache_ops.mco_sdcache_inv_range =
+			mipsNN_sdcache_inv_range_64;
+		mips_cache_ops.mco_sdcache_wb_range =
+			mipsNN_sdcache_wb_range_64;
+		break;
 	case 128:
 		mips_cache_ops.mco_sdcache_wbinv_all =
 			mipsNN_sdcache_wbinv_all_128;

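mips_config_cache() selects the routines at boot from the line sizes the
CPU reports; the new case 64 arms simply slot the *_64 variants into the
same mips_cache_ops function-pointer table used by the 16-, 32-, and
128-byte cases.  A cut-down sketch of that dispatch pattern (hypothetical
struct and function names, not the real mips_cache_ops layout):

#include <stdio.h>

/* Hypothetical, cut-down ops table; the real one is struct mips_cache_ops. */
struct cache_ops {
	void (*icache_sync_all)(void);
};

static void sync_all_32(void) { printf("32-byte I-cache sync\n"); }
static void sync_all_64(void) { printf("64-byte I-cache sync\n"); }

static struct cache_ops ops;

static void
config_cache(int picache_line_size)
{
	switch (picache_line_size) {
	case 32:
		ops.icache_sync_all = sync_all_32;
		break;
	case 64:		/* the case this commit adds */
		ops.icache_sync_all = sync_all_64;
		break;
	default:
		/* unsupported line size */
		break;
	}
}

int
main(void)
{
	config_cache(64);
	ops.icache_sync_all();
	return (0);
}
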
Modified: head/sys/mips/mips/cache_mipsNN.c
==============================================================================
--- head/sys/mips/mips/cache_mipsNN.c	Thu Mar 26 13:31:04 2015	(r280690)
+++ head/sys/mips/mips/cache_mipsNN.c	Thu Mar 26 14:51:24 2015	(r280691)
@@ -52,6 +52,9 @@ __FBSDID("$FreeBSD$");
 #define	round_line32(x)		(((x) + 31) & ~31)
 #define	trunc_line32(x)		((x) & ~31)
 
+#define	round_line64(x)		(((x) + 63) & ~63)
+#define	trunc_line64(x)		((x) & ~63)
+
 #define	round_line128(x)	(((x) + 127) & ~127)
 #define	trunc_line128(x)	((x) & ~127)
 
@@ -213,6 +216,29 @@ mipsNN_icache_sync_all_32(void)
 }
 
 void
+mipsNN_icache_sync_all_64(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + picache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	mips_intern_dcache_wbinv_all();
+
+	while (va < eva) {
+		cache_r4k_op_32lines_64(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += (32 * 64);
+	}
+
+	SYNC;
+}
+
+void
 mipsNN_icache_sync_range_16(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva;
@@ -259,6 +285,29 @@ mipsNN_icache_sync_range_32(vm_offset_t 
 }
 
 void
+mipsNN_icache_sync_range_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line64(va + size);
+	va = trunc_line64(va);
+
+	mips_intern_dcache_wb_range(va, (eva - va));
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += 64;
+	}
+
+	SYNC;
+}
+
+void
 mipsNN_icache_sync_range_index_16(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva, tmpva;
@@ -345,6 +394,49 @@ mipsNN_icache_sync_range_index_32(vm_off
 }
 
 void
+mipsNN_icache_sync_range_index_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+	eva = round_line64(va + size);
+	va = trunc_line64(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = picache_stride;
+	loopcount = picache_loopcount;
+
+	mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+	while ((eva - va) >= (8 * 64)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_8lines_64(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 8 * 64;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 64;
+	}
+}
+
+void
 mipsNN_pdcache_wbinv_all_16(void)
 {
 	vm_offset_t va, eva;
@@ -389,6 +481,28 @@ mipsNN_pdcache_wbinv_all_32(void)
 }
 
 void
+mipsNN_pdcache_wbinv_all_64(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + pdcache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	while (va < eva) {
+		cache_r4k_op_32lines_64(va,
+		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 64);
+	}
+
+	SYNC;
+}
+
+void
 mipsNN_pdcache_wbinv_range_16(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva;
@@ -433,6 +547,28 @@ mipsNN_pdcache_wbinv_range_32(vm_offset_
 }
 
 void
+mipsNN_pdcache_wbinv_range_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line64(va + size);
+	va = trunc_line64(va);
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va,
+		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += 64;
+	}
+
+	SYNC;
+}
+
+void
 mipsNN_pdcache_wbinv_range_index_16(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva, tmpva;
@@ -513,6 +649,47 @@ mipsNN_pdcache_wbinv_range_index_32(vm_o
 		va += 32;
 	}
 }
+
+void
+mipsNN_pdcache_wbinv_range_index_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+	eva = round_line64(va + size);
+	va = trunc_line64(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = pdcache_stride;
+	loopcount = pdcache_loopcount;
+
+	while ((eva - va) >= (8 * 64)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_8lines_64(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 8 * 64;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 64;
+	}
+}
  
 void
 mipsNN_pdcache_inv_range_16(vm_offset_t va, vm_size_t size)
@@ -557,6 +734,27 @@ mipsNN_pdcache_inv_range_32(vm_offset_t 
 }
 
 void
+mipsNN_pdcache_inv_range_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line64(va + size);
+	va = trunc_line64(va);
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += 64;
+	}
+
+	SYNC;
+}
+
+void
 mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva;
@@ -598,6 +796,26 @@ mipsNN_pdcache_wb_range_32(vm_offset_t v
 	SYNC;
 }
 
+void
+mipsNN_pdcache_wb_range_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line64(va + size);
+	va = trunc_line64(va);
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += 64;
+	}
+
+	SYNC;
+}
 
 #ifdef CPU_CNMIPS
 
@@ -882,6 +1100,19 @@ mipsNN_sdcache_wbinv_all_32(void)
 }
 
 void
+mipsNN_sdcache_wbinv_all_64(void)
+{
+	vm_offset_t va = MIPS_PHYS_TO_KSEG0(0);
+	vm_offset_t eva = va + sdcache_size;
+
+	while (va < eva) {
+		cache_r4k_op_32lines_64(va,
+		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 64);
+	}
+}
+
+void
 mipsNN_sdcache_wbinv_range_32(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva = round_line32(va + size);
@@ -901,6 +1132,25 @@ mipsNN_sdcache_wbinv_range_32(vm_offset_
 }
 
 void
+mipsNN_sdcache_wbinv_range_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva = round_line64(va + size);
+
+	va = trunc_line64(va);
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va,
+		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
+		va += 64;
+	}
+}
+
+void
 mipsNN_sdcache_wbinv_range_index_32(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva;
@@ -929,6 +1179,34 @@ mipsNN_sdcache_wbinv_range_index_32(vm_o
 }
 
 void
+mipsNN_sdcache_wbinv_range_index_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & (sdcache_size - 1));
+
+	eva = round_line64(va + size);
+	va = trunc_line64(va);
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va,
+		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
+		va += 64;
+	}
+}
+
+void
 mipsNN_sdcache_inv_range_32(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva = round_line32(va + size);
@@ -947,6 +1225,24 @@ mipsNN_sdcache_inv_range_32(vm_offset_t 
 }
 
 void
+mipsNN_sdcache_inv_range_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva = round_line64(va + size);
+
+	va = trunc_line64(va);
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
+		va += 64;
+	}
+}
+
+void
 mipsNN_sdcache_wb_range_32(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t eva = round_line32(va + size);
@@ -965,6 +1261,24 @@ mipsNN_sdcache_wb_range_32(vm_offset_t v
 }
 
 void
+mipsNN_sdcache_wb_range_64(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva = round_line64(va + size);
+
+	va = trunc_line64(va);
+
+	while ((eva - va) >= (32 * 64)) {
+		cache_r4k_op_32lines_64(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
+		va += (32 * 64);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
+		va += 64;
+	}
+}
+
+void
 mipsNN_sdcache_wbinv_all_128(void)
 {
 	vm_offset_t va = MIPS_PHYS_TO_KSEG0(0);

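The *_range_index_64() routines differ from the plain range routines in
that the supplied address may not be mapped at all; only its low bits
matter, because they select the cache index, so the routines mask them out
and alias them into KSEG0 before issuing INDEX ops.  A hedged sketch of
that step, assuming the conventional 32-bit KSEG0 base of 0x80000000 and
an illustrative way mask:

#include <stdio.h>
#include <stdint.h>

/* Assumption: conventional 32-bit MIPS KSEG0 base. */
#define	KSEG0_BASE		0x80000000u
#define	PHYS_TO_KSEG0(pa)	((uintptr_t)(pa) | KSEG0_BASE)

int
main(void)
{
	uint32_t way_mask = 0x3fff;	/* illustrative: 16 KB per way */
	uint32_t va = 0xc1234567;	/* arbitrary, possibly unmapped, address */

	/* Keep only the index-selecting bits and alias them into KSEG0,
	 * as the *_range_index_64() routines do before issuing INDEX ops. */
	uintptr_t idxva = PHYS_TO_KSEG0(va & way_mask);

	printf("index alias = %#lx\n", (unsigned long)idxva);
	return (0);
}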

