Date:      Sun, 29 Mar 2015 17:42:33 +0000 (UTC)
From:      Andrew Turner <andrew@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r280811 - in head/sys/arm: arm include
Message-ID:  <201503291742.t2THgX1k040261@svn.freebsd.org>

Author: andrew
Date: Sun Mar 29 17:42:32 2015
New Revision: 280811
URL: https://svnweb.freebsd.org/changeset/base/280811

Log:
  Remove unused arm10_* functions. The remaining functions are only used in
  Marvell (mv) kernel configs.
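  
  For reference, the arm10_setttb routine removed below cleaned and
  invalidated the caches, loaded the new translation table base, and then
  invalidated the TLBs.  A rough C-level sketch of that sequence (not part
  of the commit; the wrapper function name and the inline-asm form are
  illustrative only) would be:
  
	#include <stdint.h>
  
	/* Declared in cpufunc.h prior to this commit. */
	extern void arm10_idcache_wbinv_all(void);
  
	static inline void
	arm10_setttb_sketch(uint32_t ttb)
	{
		/*
		 * Clean and invalidate the I/D caches: they hold data
		 * mapped through the old translation tables.
		 */
		arm10_idcache_wbinv_all();
  
		/* Load the new translation table base (CP15 c2). */
		__asm __volatile("mcr p15, 0, %0, c2, c0, 0"
		    : : "r" (ttb) : "memory");
  
		/* Invalidate the I+D TLBs (CP15 c8). */
		__asm __volatile("mcr p15, 0, %0, c8, c7, 0"
		    : : "r" (ttb) : "memory");
	}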

Modified:
  head/sys/arm/arm/cpufunc_asm_arm10.S
  head/sys/arm/include/cpufunc.h

Modified: head/sys/arm/arm/cpufunc_asm_arm10.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_arm10.S	Sun Mar 29 17:33:03 2015	(r280810)
+++ head/sys/arm/arm/cpufunc_asm_arm10.S	Sun Mar 29 17:42:32 2015	(r280811)
@@ -36,23 +36,6 @@
 __FBSDID("$FreeBSD$");
 
 /*
- * Functions to set the MMU Translation Table Base register
- *
- * We need to clean and flush the cache as it uses virtual
- * addresses that are about to change.
- */
-ENTRY(arm10_setttb)
-	stmfd	sp!, {r0, lr}
-	bl	_C_LABEL(arm10_idcache_wbinv_all)
-	ldmfd	sp!, {r0, lr}
-
-	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */
-
-	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
-	bx	lr
-END(arm10_setttb)
-
-/*
  * TLB functions
  */
 ENTRY(arm10_tlb_flushID_SE)
@@ -66,168 +49,6 @@ ENTRY(arm10_tlb_flushI_SE)
 	bx	lr
 END(arm10_tlb_flushI_SE)
 
-/*
- * Cache operations.  For the entire cache we use the set/index
- * operations.
- */
-	s_max	.req r0
-	i_max	.req r1
-	s_inc	.req r2
-	i_inc	.req r3
-
-ENTRY_NP(arm10_icache_sync_range)
-	ldr	ip, .Larm10_line_size
-	cmp	r1, #0x4000
-	bcs	.Larm10_icache_sync_all
-	ldr	ip, [ip]
-	sub	r3, ip, #1
-	and	r2, r0, r3
-	add	r1, r1, r2
-	bic	r0, r0, r3
-.Larm10_sync_next:
-	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
-	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
-	add	r0, r0, ip
-	subs	r1, r1, ip
-	bhi	.Larm10_sync_next
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	bx	lr
-END(arm10_icache_sync_range)
-
-ENTRY_NP(arm10_icache_sync_all)
-.Larm10_icache_sync_all:
-	/*
-	 * We assume that the code here can never be out of sync with the
-	 * dcache, so that we can safely flush the Icache and fall through
-	 * into the Dcache cleaning code.
-	 */
-	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
-	/* Fall through to clean Dcache. */
-
-.Larm10_dcache_wb:
-	ldr	ip, .Larm10_cache_data
-	ldmia	ip, {s_max, i_max, s_inc, i_inc}
-.Lnext_set:
-	orr	ip, s_max, i_max
-.Lnext_index:
-	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
-	subs	ip, ip, i_inc
-	bhs	.Lnext_index		/* Next index */
-	subs	s_max, s_max, s_inc
-	bhs	.Lnext_set		/* Next set */
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	bx	lr
-END(arm10_icache_sync_all)
-
-.Larm10_line_size:
-	.word	_C_LABEL(arm_pdcache_line_size)
-
-ENTRY(arm10_dcache_wb_range)
-	ldr	ip, .Larm10_line_size
-	cmp	r1, #0x4000
-	bcs	.Larm10_dcache_wb
-	ldr	ip, [ip]
-	sub	r3, ip, #1
-	and	r2, r0, r3
-	add	r1, r1, r2
-	bic	r0, r0, r3
-.Larm10_wb_next:
-	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
-	add	r0, r0, ip
-	subs	r1, r1, ip
-	bhi	.Larm10_wb_next
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	bx	lr
-END(arm10_dcache_wb_range)
-	
-ENTRY(arm10_dcache_wbinv_range)
-	ldr	ip, .Larm10_line_size
-	cmp	r1, #0x4000
-	bcs	.Larm10_dcache_wbinv_all
-	ldr	ip, [ip]
-	sub	r3, ip, #1
-	and	r2, r0, r3
-	add	r1, r1, r2
-	bic	r0, r0, r3
-.Larm10_wbinv_next:
-	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
-	add	r0, r0, ip
-	subs	r1, r1, ip
-	bhi	.Larm10_wbinv_next
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	bx	lr
-END(arm10_dcache_wbinv_range)
-	
-/*
- * Note, we must not invalidate everything.  If the range is too big we
- * must use wb-inv of the entire cache.
- */
-ENTRY(arm10_dcache_inv_range)
-	ldr	ip, .Larm10_line_size
-	cmp	r1, #0x4000
-	bcs	.Larm10_dcache_wbinv_all
-	ldr	ip, [ip]
-	sub	r3, ip, #1
-	and	r2, r0, r3
-	add	r1, r1, r2
-	bic	r0, r0, r3
-.Larm10_inv_next:
-	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
-	add	r0, r0, ip
-	subs	r1, r1, ip
-	bhi	.Larm10_inv_next
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	bx	lr
-END(arm10_dcache_inv_range)
-
-ENTRY(arm10_idcache_wbinv_range)
-	ldr	ip, .Larm10_line_size
-	cmp	r1, #0x4000
-	bcs	.Larm10_idcache_wbinv_all
-	ldr	ip, [ip]
-	sub	r3, ip, #1
-	and	r2, r0, r3
-	add	r1, r1, r2
-	bic	r0, r0, r3
-.Larm10_id_wbinv_next:
-	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
-	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
-	add	r0, r0, ip
-	subs	r1, r1, ip
-	bhi	.Larm10_id_wbinv_next
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	bx	lr
-END(arm10_idcache_wbinv_range)
-
-ENTRY_NP(arm10_idcache_wbinv_all)
-.Larm10_idcache_wbinv_all:
-	/*
-	 * We assume that the code here can never be out of sync with the
-	 * dcache, so that we can safely flush the Icache and fall through
-	 * into the Dcache purging code.
-	 */
-	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
-	/* Fall through to purge Dcache. */
-
-EENTRY(arm10_dcache_wbinv_all)
-.Larm10_dcache_wbinv_all:
-	ldr	ip, .Larm10_cache_data
-	ldmia	ip, {s_max, i_max, s_inc, i_inc}
-.Lnext_set_inv:
-	orr	ip, s_max, i_max
-.Lnext_index_inv:
-	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
-	subs	ip, ip, i_inc
-	bhs	.Lnext_index_inv		/* Next index */
-	subs	s_max, s_max, s_inc
-	bhs	.Lnext_set_inv		/* Next set */
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	bx	lr
-EEND(arm10_dcache_wbinv_all)
-END(arm10_idcache_wbinv_all)
-
-.Larm10_cache_data:
-	.word	_C_LABEL(arm10_dcache_sets_max)
 
 /*
  * Context switch.
@@ -253,24 +74,3 @@ ENTRY(arm10_context_switch)
 	nop
 	bx	lr
 END(arm10_context_switch)
-
-	.bss
-
-/* XXX The following macros should probably be moved to asm.h */
-#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
-#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))
-
-/*
- * Parameters for the cache cleaning code.  Note that the order of these
- * four variables is assumed in the code above.  Hence the reason for
- * declaring them in the assembler file.
- */
-	.align 2
-C_OBJECT(arm10_dcache_sets_max)
-	.space	4
-C_OBJECT(arm10_dcache_index_max)
-	.space	4
-C_OBJECT(arm10_dcache_sets_inc)
-	.space	4
-C_OBJECT(arm10_dcache_index_inc)
-	.space	4

Modified: head/sys/arm/include/cpufunc.h
==============================================================================
--- head/sys/arm/include/cpufunc.h	Sun Mar 29 17:33:03 2015	(r280810)
+++ head/sys/arm/include/cpufunc.h	Sun Mar 29 17:42:32 2015	(r280811)
@@ -334,31 +334,13 @@ extern unsigned arm9_dcache_index_inc;
 #endif
 
 #if defined(CPU_ARM9E)
-void	arm10_setttb		(u_int);
-
 void	arm10_tlb_flushID_SE	(u_int);
 void	arm10_tlb_flushI_SE	(u_int);
 
-void	arm10_icache_sync_all	(void);
-void	arm10_icache_sync_range	(vm_offset_t, vm_size_t);
-
-void	arm10_dcache_wbinv_all	(void);
-void	arm10_dcache_wbinv_range (vm_offset_t, vm_size_t);
-void	arm10_dcache_inv_range	(vm_offset_t, vm_size_t);
-void	arm10_dcache_wb_range	(vm_offset_t, vm_size_t);
-
-void	arm10_idcache_wbinv_all	(void);
-void	arm10_idcache_wbinv_range (vm_offset_t, vm_size_t);
-
 void	arm10_context_switch	(void);
 
 void	arm10_setup		(char *string);
 
-extern unsigned arm10_dcache_sets_max;
-extern unsigned arm10_dcache_sets_inc;
-extern unsigned arm10_dcache_index_max;
-extern unsigned arm10_dcache_index_inc;
-
 u_int	sheeva_control_ext 		(u_int, u_int);
 void	sheeva_cpu_sleep		(int);
 void	sheeva_setttb			(u_int);


