Skip site navigation (1)Skip section navigation (2)
Date:      Wed, 24 Aug 2011 07:49:18 +0000 (UTC)
From:      Grzegorz Bernacki <gber@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r225134 - in projects/armv6/sys: arm/arm arm/include conf
Message-ID:  <201108240749.p7O7nIVZ039310@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: gber
Date: Wed Aug 24 07:49:18 2011
New Revision: 225134
URL: http://svn.freebsd.org/changeset/base/225134

Log:
  Initial support for armv6/v7.
  
  Obtained from: Marvell, Semihalf

Added:
  projects/armv6/sys/arm/arm/cpufunc_asm_armv7.S
  projects/armv6/sys/arm/arm/cpufunc_asm_pj4b.S
  projects/armv6/sys/arm/arm/mp_machdep.c
  projects/armv6/sys/arm/arm/pmap-v6.c
Modified:
  projects/armv6/sys/arm/arm/bus_space_asm_generic.S
  projects/armv6/sys/arm/arm/cpufunc.c
  projects/armv6/sys/arm/arm/cpufunc_asm.S
  projects/armv6/sys/arm/arm/elf_trampoline.c
  projects/armv6/sys/arm/arm/identcpu.c
  projects/armv6/sys/arm/arm/locore.S
  projects/armv6/sys/arm/arm/swtch.S
  projects/armv6/sys/arm/include/armreg.h
  projects/armv6/sys/arm/include/cpuconf.h
  projects/armv6/sys/arm/include/cpufunc.h
  projects/armv6/sys/arm/include/pmap.h
  projects/armv6/sys/arm/include/pte.h
  projects/armv6/sys/arm/include/smp.h
  projects/armv6/sys/conf/Makefile.arm
  projects/armv6/sys/conf/files.arm
  projects/armv6/sys/conf/options.arm

Modified: projects/armv6/sys/arm/arm/bus_space_asm_generic.S
==============================================================================
--- projects/armv6/sys/arm/arm/bus_space_asm_generic.S	Wed Aug 24 07:28:04 2011	(r225133)
+++ projects/armv6/sys/arm/arm/bus_space_asm_generic.S	Wed Aug 24 07:49:18 2011	(r225134)
@@ -51,7 +51,7 @@ ENTRY(generic_bs_r_1)
 	ldrb	r0, [r1, r2]
 	RET
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_r_2)
 	ldrh	r0, [r1, r2]
 	RET
@@ -69,7 +69,7 @@ ENTRY(generic_bs_w_1)
 	strb	r3, [r1, r2]
 	RET
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_w_2)
 	strh	r3, [r1, r2]
 	RET
@@ -97,7 +97,7 @@ ENTRY(generic_bs_rm_1)
 
 	RET
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_rm_2)
 	add	r0, r1, r2
 	mov	r1, r3
@@ -145,7 +145,7 @@ ENTRY(generic_bs_wm_1)
 
 	RET
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_wm_2)
 	add	r0, r1, r2
 	mov	r1, r3
@@ -193,7 +193,7 @@ ENTRY(generic_bs_rr_1)
 
 	RET
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_rr_2)
 	add	r0, r1, r2
 	mov	r1, r3
@@ -241,7 +241,7 @@ ENTRY(generic_bs_wr_1)
 
 	RET
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_wr_2)
 	add	r0, r1, r2
 	mov	r1, r3
@@ -288,7 +288,7 @@ ENTRY(generic_bs_sr_1)
 
 	RET
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_sr_2)
 	add	r0, r1, r2
 	mov	r1, r3
@@ -320,7 +320,7 @@ ENTRY(generic_bs_sr_4)
  * copy region
  */
 
-#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
 ENTRY(generic_armv4_bs_c_2)
 	add	r0, r1, r2
 	ldr	r2, [sp, #0]

Modified: projects/armv6/sys/arm/arm/cpufunc.c
==============================================================================
--- projects/armv6/sys/arm/arm/cpufunc.c	Wed Aug 24 07:28:04 2011	(r225133)
+++ projects/armv6/sys/arm/arm/cpufunc.c	Wed Aug 24 07:49:18 2011	(r225134)
@@ -98,6 +98,10 @@ int	arm_pcache_unified;
 int	arm_dcache_align;
 int	arm_dcache_align_mask;
 
+u_int	arm_cache_level;
+u_int	arm_cache_type[14];
+u_int	arm_cache_loc;
+
 /* 1 == use cpu_sleep(), 0 == don't */
 int cpu_do_powersave;
 int ctrl;
@@ -472,6 +476,126 @@ struct cpu_functions arm10_cpufuncs = {
 };
 #endif /* CPU_ARM10 */
 
+#ifdef CPU_MV_PJ4B
+struct cpu_functions pj4bv7_cpufuncs = {
+	/* CPU functions */
+
+	cpufunc_id,			/* id			*/
+	arm11_drain_writebuf,		/* cpwait		*/
+
+	/* MMU functions */
+
+	cpufunc_control,		/* control		*/
+	cpufunc_domains,		/* Domain		*/
+	pj4b_setttb,			/* Setttb		*/
+	cpufunc_faultstatus,		/* Faultstatus		*/
+	cpufunc_faultaddress,		/* Faultaddress		*/
+
+	/* TLB functions */
+
+	armv7_tlb_flushID,		/* tlb_flushID		*/
+	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
+	armv7_tlb_flushID,		/* tlb_flushI		*/
+	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
+	armv7_tlb_flushID,		/* tlb_flushD		*/
+	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/
+
+	/* Cache operations */
+	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
+	armv7_icache_sync_range,	/* icache_sync_range	*/
+
+	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
+	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
+	armv7_dcache_inv_range,		/* dcache_inv_range	*/
+	armv7_dcache_wb_range,		/* dcache_wb_range	*/
+
+	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
+	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/
+
+	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
+	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
+	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
+	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
+
+	/* Other functions */
+
+	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
+	arm11_drain_writebuf,		/* drain_writebuf	*/
+	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
+	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/
+
+	pj4b_sleep,			/* sleep		*/
+
+	/* Soft functions */
+
+	cpufunc_null_fixup,		/* dataabt_fixup	*/
+	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
+
+	arm11_context_switch,		/* context_switch	*/
+
+	pj4bv7_setup			/* cpu setup		*/
+};
+
+struct cpu_functions pj4bv6_cpufuncs = {
+	/* CPU functions */
+
+	cpufunc_id,			/* id			*/
+	arm11_drain_writebuf,		/* cpwait		*/
+
+	/* MMU functions */
+
+	cpufunc_control,		/* control		*/
+	cpufunc_domains,		/* Domain		*/
+	pj4b_setttb,			/* Setttb		*/
+	cpufunc_faultstatus,		/* Faultstatus		*/
+	cpufunc_faultaddress,		/* Faultaddress		*/
+
+	/* TLB functions */
+
+	arm11_tlb_flushID,		/* tlb_flushID		*/
+	arm11_tlb_flushID_SE,		/* tlb_flushID_SE	*/
+	arm11_tlb_flushI,		/* tlb_flushI		*/
+	arm11_tlb_flushI_SE,		/* tlb_flushI_SE	*/
+	arm11_tlb_flushD,		/* tlb_flushD		*/
+	arm11_tlb_flushD_SE,		/* tlb_flushD_SE	*/
+
+	/* Cache operations */
+	armv6_icache_sync_all,		/* icache_sync_all	*/
+	pj4b_icache_sync_range,		/* icache_sync_range	*/
+
+	armv6_dcache_wbinv_all,		/* dcache_wbinv_all	*/
+	pj4b_dcache_wbinv_range,	/* dcache_wbinv_range	*/
+	pj4b_dcache_inv_range,		/* dcache_inv_range	*/
+	pj4b_dcache_wb_range,		/* dcache_wb_range	*/
+
+	armv6_idcache_wbinv_all,	/* idcache_wbinv_all	*/
+	pj4b_idcache_wbinv_range,	/* idcache_wbinv_range	*/
+
+	pj4b_l2cache_wbinv_all,		/* l2cache_wbinv_all	*/
+	pj4b_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
+	pj4b_l2cache_inv_range,		/* l2cache_inv_range	*/
+	pj4b_l2cache_wb_range,		/* l2cache_wb_range	*/
+
+	/* Other functions */
+
+	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
+	arm11_drain_writebuf,		/* drain_writebuf	*/
+	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
+	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/
+
+	pj4b_sleep,			/* sleep		*/
+
+	/* Soft functions */
+
+	cpufunc_null_fixup,		/* dataabt_fixup	*/
+	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
+
+	arm11_context_switch,		/* context_switch	*/
+
+	pj4bv6_setup			/* cpu setup		*/
+};
+#endif /* CPU_MV_PJ4B */
+
 #ifdef CPU_SA110
 struct cpu_functions sa110_cpufuncs = {
 	/* CPU functions */
@@ -854,10 +978,10 @@ u_int cputype;
 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
 
 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||	\
-  defined (CPU_ARM9E) || defined (CPU_ARM10) ||				\
+  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM11) ||	\
   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
-  defined(CPU_FA526) || defined(CPU_FA626TE) ||				\
+  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
 
 static void get_cachetype_cp15(void);
@@ -871,12 +995,15 @@ static int	arm_dcache_l2_linesize;
 static void
 get_cachetype_cp15()
 {
-	u_int ctype, isize, dsize;
+	u_int ctype, isize, dsize, cpuid;
+	u_int clevel, csize, i, sel;
 	u_int multiplier;
+	u_char type;
 
 	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
 		: "=r" (ctype));
 
+	cpuid = cpufunc_id();
 	/*
 	 * ...and thus spake the ARM ARM:
 	 *
@@ -884,57 +1011,86 @@ get_cachetype_cp15()
 	 * reserved ID register is encountered, the System Control
 	 * processor returns the value of the main ID register.
 	 */
-	if (ctype == cpufunc_id())
+	if (ctype == cpuid)
 		goto out;
 
-	if ((ctype & CPU_CT_S) == 0)
-		arm_pcache_unified = 1;
+	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
+		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
+		    : "=r" (clevel));
+		arm_cache_level = clevel;
+		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level) + 1;
+		i = 0;
+		while ((type = (clevel & 0x7)) && i < 7) {
+			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
+			    type == CACHE_SEP_CACHE) {
+				sel = i << 1;
+				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
+				    : : "r" (sel));
+				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
+				    : "=r" (csize));
+				arm_cache_type[sel] = csize;
+			}
+			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
+				sel = (i << 1) | 1;
+				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
+				    : : "r" (sel));
+				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
+				    : "=r" (csize));
+				arm_cache_type[sel] = csize;
+			}
+			i++;
+			clevel >>= 3;
+		}
+	} else {
+		if ((ctype & CPU_CT_S) == 0)
+			arm_pcache_unified = 1;
 
-	/*
-	 * If you want to know how this code works, go read the ARM ARM.
-	 */
+		/*
+		 * If you want to know how this code works, go read the ARM ARM.
+		 */
 
-	arm_pcache_type = CPU_CT_CTYPE(ctype);
+		arm_pcache_type = CPU_CT_CTYPE(ctype);
 
-	if (arm_pcache_unified == 0) {
-		isize = CPU_CT_ISIZE(ctype);
-		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
-		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
-		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
-			if (isize & CPU_CT_xSIZE_M)
-				arm_picache_line_size = 0; /* not present */
+		if (arm_pcache_unified == 0) {
+			isize = CPU_CT_ISIZE(ctype);
+			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
+			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
+			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
+				if (isize & CPU_CT_xSIZE_M)
+					arm_picache_line_size = 0; /* not present */
+				else
+					arm_picache_ways = 1;
+			} else {
+				arm_picache_ways = multiplier <<
+				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
+			}
+			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
+		}
+
+		dsize = CPU_CT_DSIZE(ctype);
+		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
+		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
+		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
+			if (dsize & CPU_CT_xSIZE_M)
+				arm_pdcache_line_size = 0; /* not present */
 			else
-				arm_picache_ways = 1;
+				arm_pdcache_ways = 1;
 		} else {
-			arm_picache_ways = multiplier <<
-			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
+			arm_pdcache_ways = multiplier <<
+			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
 		}
-		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
-	}
-
-	dsize = CPU_CT_DSIZE(ctype);
-	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
-	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
-	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
-		if (dsize & CPU_CT_xSIZE_M)
-			arm_pdcache_line_size = 0; /* not present */
-		else
-			arm_pdcache_ways = 1;
-	} else {
-		arm_pdcache_ways = multiplier <<
-		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
-	}
-	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
+		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
 
-	arm_dcache_align = arm_pdcache_line_size;
+		arm_dcache_align = arm_pdcache_line_size;
 
-	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
-	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
-	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
-	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
+		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
+		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
+		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
+		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
 
- out:
-	arm_dcache_align_mask = arm_dcache_align - 1;
+	out:
+		arm_dcache_align_mask = arm_dcache_align - 1;
+	}
 }
 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
 
@@ -1108,6 +1264,30 @@ set_cpufuncs()
 		goto out;
 	}
 #endif /* CPU_ARM10 */
+#if defined(CPU_MV_PJ4B)
+	if (cputype == CPU_ID_MV88SV581X_V6 ||
+	    cputype == CPU_ID_MV88SV581X_V7 ||
+	    cputype == CPU_ID_ARM_88SV581X_V6 ||
+	    cputype == CPU_ID_ARM_88SV581X_V7) {
+		if (cpu_pfr(0) & ARM_PFR0_THUMBEE_MASK)
+			cpufuncs = pj4bv7_cpufuncs;
+		else
+			cpufuncs = pj4bv6_cpufuncs;
+
+		pj4b_config();
+		get_cachetype_cp15();
+		pmap_pte_init_mmu_v6();
+		goto out;
+	} else if (cputype == CPU_ID_ARM_88SV584X ||
+	    cputype == CPU_ID_MV88SV584X) {
+		cpufuncs = pj4bv6_cpufuncs;
+		pj4b_config();
+		get_cachetype_cp15();
+		pmap_pte_init_mmu_v6();
+		goto out;
+	}
+
+#endif /* CPU_MV_PJ4B */
 #ifdef CPU_SA110
 	if (cputype == CPU_ID_SA110) {
 		cpufuncs = sa110_cpufuncs;
@@ -1978,6 +2158,75 @@ arm11_setup(args)
 }
 #endif	/* CPU_ARM11 */
 
+#ifdef CPU_MV_PJ4B
+void
+pj4bv6_setup(char *args)
+{
+	int cpuctrl;
+
+	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
+	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+	    | CPU_CONTROL_BPRD_ENABLE  | CPU_CONTROL_V6_EXTPAGE
+	    | CPU_CONTROL_L2_ENABLE ;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+#ifdef __ARMEB__
+	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+	if (vector_page == ARM_VECTORS_HIGH)
+		cpuctrl |= CPU_CONTROL_VECRELOC;
+
+	/* Clear out the cache */
+	cpu_idcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
+
+	/* Now really make sure they are clean.  */
+	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
+
+	/* Set the control register */
+	ctrl = cpuctrl;
+	cpu_control(0xffffffff, cpuctrl);
+
+	/* And again. */
+	cpu_idcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
+}
+
+void
+pj4bv7_setup(args)
+	char *args;
+{
+	int cpuctrl;
+
+	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
+	    | (0xf << 3) | CPU_CONTROL_BPRD_ENABLE
+	    | CPU_CONTROL_IC_ENABLE | (0x5 << 16) | (1 << 22)
+	    | CPU_CONTROL_V6_EXTPAGE;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+	if (vector_page == ARM_VECTORS_HIGH)
+		cpuctrl |= CPU_CONTROL_VECRELOC;
+
+	/* Clear out the cache */
+	cpu_idcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
+
+	/* Set the control register */
+	ctrl = cpuctrl;
+	cpu_control(0xFFFFFFFF, cpuctrl);
+
+	/* And again. */
+	cpu_idcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
+}
+#endif /* CPU_MV_PJ4B */
+
 #ifdef CPU_SA110
 struct cpu_option sa110_options[] = {
 #ifdef COMPAT_12

Modified: projects/armv6/sys/arm/arm/cpufunc_asm.S
==============================================================================
--- projects/armv6/sys/arm/arm/cpufunc_asm.S	Wed Aug 24 07:28:04 2011	(r225133)
+++ projects/armv6/sys/arm/arm/cpufunc_asm.S	Wed Aug 24 07:49:18 2011	(r225134)
@@ -65,6 +65,10 @@ ENTRY(cpufunc_id)
 	mrc	p15, 0, r0, c0, c0, 0
 	RET
 
+ENTRY(cpufunc_cpuid)
+	mrc	p15, 0, r0, c0, c0, 0
+	RET
+
 ENTRY(cpu_get_control)
 	mrc	p15, 0, r0, c1, c0, 0
 	RET

Added: projects/armv6/sys/arm/arm/cpufunc_asm_armv7.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/armv6/sys/arm/arm/cpufunc_asm_armv7.S	Wed Aug 24 07:49:18 2011	(r225134)
@@ -0,0 +1,194 @@
+/*-
+ * Copyright (C) 2011 MARVELL INTERNATIONAL LTD.
+ * All rights reserved.
+ *
+ * Developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of MARVELL nor the names of contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#define isb	mcr	p15, 0, r0, c7, c5, 4
+#define dsb	mcr	p15, 0, r0, c7, c10, 4
+#define TTB (0x59)
+
+.Lcoherency_level:
+	.word	_C_LABEL(arm_cache_loc)
+.Lcache_type:
+	.word	_C_LABEL(arm_cache_type)
+.Lway_mask:
+	.word	0x3ff
+.Lmax_index:
+	.word	0x7fff
+.Lpage_mask:
+	.word	0xfff
+
+ENTRY(armv7_setttb)
+	bic	r0, r0, #0x18
+	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */
+	mcr	p15, 0, r0, c7, c5, 4	/* ISB */
+	RET
+
+ENTRY(armv7_tlb_flushID)
+	mcr	p15, 0, r0, c7, c10, 4	/* DSB */
+	mcr	p15, 0, r0, c8, c7, 0	/* flush I+D tlb */
+	mcr	p15, 0, r0, c7, c5, 6	/* flush BTB */
+	mcr	p15, 0, r0, c7, c10, 4	/* DSB */
+	mov	pc, lr
+
+ENTRY(armv7_tlb_flushID_SE)
+	ldr	r1, .Lpage_mask
+	bic	r0, r0, r1	
+	mcr	p15, 0, r0, c8, c7, 1	/* flush D tlb single entry */
+	mcr	p15, 0, r0, c7, c5, 6	/* flush BTB */
+	mcr	p15, 0, r0, c7, c10, 4	/* DSB */
+	mov	pc, lr
+
+/* Based on algorithm from ARM Architecture Reference Manual */
+ENTRY(armv7_dcache_wbinv_all)
+	stmdb	sp!, {r4, r5, r6, r7, r8, r9}
+
+	/* Get cache level */
+	ldr	r0, .Lcoherency_level
+	ldr	r3, [r0]
+	cmp	r3, #0
+	beq	Finished
+	/* For each cache level */
+	mov	r8, #0
+Loop1:
+	/* Get cache type for given level */
+	mov	r2, r8, lsl #2
+	ldr	r0, .Lcache_type
+	ldr	r1, [r0, r2]	
+
+	/* Get line size */
+	and	r2, r1, #7
+	add	r2, r2, #4
+
+	/* Get number of ways */
+	ldr	r4, .Lway_mask
+	ands	r4, r4, r1, lsr #3
+	clz	r5, r4
+	
+	/* Get max index */
+	ldr	r7, .Lmax_index
+	ands	r7, r7, r1, lsr #13
+Loop2:
+	mov	r9, r4
+Loop3:
+	mov	r6, r8, lsl #1
+	orr	r6, r6, r9, lsl r5
+	orr	r6, r6, r7, lsl r2
+
+	/* Clean and invalidate data cache by way/index */
+	mcr	p15, 0, r6, c7, c14, 2
+	subs	r9, r9, #1
+	bge	Loop3
+	subs	r7, r7, #1
+	bge	Loop2
+Skip:
+	add	r8, r8, #1
+	cmp	r3, r8
+	bne Loop1
+Finished:
+	mcr	p15, 0, r0, c7, c5, 4
+	mcr	p15, 0, r0, c7, c5, 5
+	ldmia	sp!, {r4, r5, r6, r7, r8, r9}
+RET
+
+ENTRY(armv7_idcache_wbinv_all)
+	stmdb	sp!, {lr}
+	bl armv7_dcache_wbinv_all
+	mcr	p15, 0, r0, c7, c5, 0	/* Invalidate entire I cache */
+	mcr	p15, 0, r0, c7, c10, 4
+	mcr	p15, 0, r0, c7, c5, 4
+	ldmia	sp!, {lr}
+RET
+
+/* XXX Temporary set it to 32 for MV cores, however this value should be
+ * get from Cache Type register
+ */
+.Larmv7_line_size:
+	.word	32
+
+ENTRY(armv7_dcache_wb_range)
+	ldr	ip, .Larmv7_line_size
+.Larmv7_wb_next:
+	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bhi	.Larmv7_wb_next
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	bx	lr
+
+	
+ENTRY(armv7_dcache_wbinv_range)
+	ldr	ip, .Larmv7_line_size
+.Larmv7_wbinv_next:
+	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bhi	.Larmv7_wbinv_next
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	bx	lr
+	
+/*
+ * Note, we must not invalidate everything.  If the range is too big we
+ * must use wb-inv of the entire cache.
+ */
+ENTRY(armv7_dcache_inv_range)
+	ldr	ip, .Larmv7_line_size
+.Larmv7_inv_next:
+	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bhi	.Larmv7_inv_next
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	bx	lr
+
+ENTRY(armv7_idcache_wbinv_range)
+	ldr	ip, .Larmv7_line_size
+.Larmv7_id_wbinv_next:
+	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
+	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bhi	.Larmv7_id_wbinv_next
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	bx	lr
+
+ENTRY_NP(armv7_icache_sync_range)
+	ldr	ip, .Larmv7_line_size
+.Larmv7_sync_next:
+	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
+	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bhi	.Larmv7_sync_next
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	bx	lr
+

Added: projects/armv6/sys/arm/arm/cpufunc_asm_pj4b.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/armv6/sys/arm/arm/cpufunc_asm_pj4b.S	Wed Aug 24 07:49:18 2011	(r225134)
@@ -0,0 +1,190 @@
+/*-
+ * Copyright (C) 2011 MARVELL INTERNATIONAL LTD.
+ * All rights reserved.
+ *
+ * Developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of MARVELL nor the names of contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include <machine/param.h>
+
+.Lpj4b_cache_line_size:
+	.word	_C_LABEL(arm_pdcache_line_size)
+
+ENTRY(pj4b_setttb)
+	/* Cache synchronization is not required as this core has PIPT caches */
+	mcr	p15, 0, r1, c7, c10, 4	/* drain the write buffer */
+	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */
+	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
+	RET
+
+ENTRY_NP(armv6_icache_sync_all)
+	/*
+	 * We assume that the code here can never be out of sync with the
+	 * dcache, so that we can safely flush the Icache and fall through
+	 * into the Dcache cleaning code.
+	 */
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0	/* Invalidate ICache */
+	mcr	p15, 0, r0, c7, c10, 0	/* Clean (don't invalidate) DCache */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_icache_sync_range)
+	sub	r1, r1, #1
+	add	r1, r0, r1
+	mcrr	p15, 0, r1, r0, c5	/* invalidate IC range */
+	mcrr	p15, 0, r1, r0, c12	/* clean DC range */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_dcache_inv_range)
+	sub	r1, r1, #1
+	add	r1, r0, r1
+	mcrr	p15, 0, r1, r0, c6	/* invalidate DC range */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_dcache_wb_range)
+	sub	r1, r1, #1
+	add	r1, r0, r1
+	mcrr	p15, 0, r1, r0, c12	/* clean DC range */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_dcache_wbinv_range)
+	sub	r1, r1, #1
+	add	r1, r0, r1
+	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate DC range */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(armv6_idcache_wbinv_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0	/* invalidate ICache */
+	mcr	p15, 0, r0, c7, c14, 0	/* clean and invalidate DCache */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(armv6_dcache_wbinv_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c14, 0	/* clean and invalidate DCache */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_idcache_wbinv_range)
+	sub	r1, r1, #1
+	add	r1, r0, r1
+	mcrr	p15, 0, r1, r0, c5	/* invalidate IC range */
+	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate DC range */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_l2cache_wbinv_all)
+	mcr	p15, 1, r0, c7, c11, 0	/* L2C clean all */
+	mcr	p15, 1, r0, c7, c7, 0	/* L2C invalidate all */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_l2cache_wbinv_range)
+	ldr	ip, .Lpj4b_cache_line_size
+	ldr	ip, [ip]
+	sub	r1, r1, #1		/* Don't overrun */
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+1:
+	mcr	p15, 1, r0, c7, c15, 1	/* L2C clean and invalidate entry */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	1b
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_l2cache_wb_range)
+	ldr	ip, .Lpj4b_cache_line_size
+	ldr	ip, [ip]
+	sub	r1, r1, #1		/* Don't overrun */
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+1:
+	mcr	p15, 1, r0, c7, c11, 1	/* L2C clean single entry by MVA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	1b
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_l2cache_inv_range)
+	ldr	ip, .Lpj4b_cache_line_size
+	ldr	ip, [ip]
+	sub	r1, r1, #1		/* Don't overrun */
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+1:
+	mcr	p15, 1, r0, c7, c7, 1	/* L2C invalidate single entry by MVA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	1b
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	RET
+
+ENTRY(pj4b_drain_readbuf)
+	mcr	p15, 0, r0, c7, c5, 4	/* flush prefetch buffers */
+	RET
+
+ENTRY(pj4b_flush_brnchtgt_all)
+	mcr	p15, 0, r0, c7, c5, 6	/* flush entire branch target cache */
+	RET
+
+ENTRY(pj4b_flush_brnchtgt_va)
+	mcr	p15, 0, r0, c7, c5, 7	/* flush branch target cache by VA */
+	RET
+
+ENTRY(pj4b_sleep)
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	mcr	p15, 0, r0, c7, c0, 4	/* wait for interrupt */
+	RET
+
+ENTRY(pj4b_config)
+	/* Set Auxiliary Debug Modes Control 2 register */
+	mrc	p15, 1, r0, c15, c1, 2
+	bic	r0, r0, #(1 << 23)
+	orr	r0, r0, #(1 << 25)
+	orr	r0, r0, #(1 << 27)
+	orr	r0, r0, #(1 << 29)
+	orr	r0, r0, #(1 << 30)
+	mcr	p15, 1, r0, c15, c1, 2
+
+	RET

Modified: projects/armv6/sys/arm/arm/elf_trampoline.c
==============================================================================
--- projects/armv6/sys/arm/arm/elf_trampoline.c	Wed Aug 24 07:28:04 2011	(r225133)
+++ projects/armv6/sys/arm/arm/elf_trampoline.c	Wed Aug 24 07:49:18 2011	(r225134)
@@ -72,7 +72,13 @@ void __startC(void);
 #define cpu_idcache_wbinv_all	xscale_cache_purgeID
 #elif defined(CPU_XSCALE_81342)
 #define cpu_idcache_wbinv_all	xscalec3_cache_purgeID
+#elif defined(CPU_MV_PJ4B)
+#if !defined(SOC_MV_ARMADAXP)
+#define cpu_idcache_wbinv_all	armv6_idcache_wbinv_all
+#else
+#define cpu_idcache_wbinv_all()	armadaxp_idcache_wbinv_all()
 #endif
+#endif /* CPU_MV_PJ4B */
 #ifdef CPU_XSCALE_81342
 #define cpu_l2cache_wbinv_all	xscalec3_l2cache_purge
 #elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
@@ -81,6 +87,7 @@ void __startC(void);
 #define cpu_l2cache_wbinv_all()	
 #endif
 
+static void armadaxp_idcache_wbinv_all(void);
 
 int     arm_picache_size;
 int     arm_picache_line_size;
@@ -306,7 +313,18 @@ arm9_setup(void)
 	arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
 }
 
+static void
+armadaxp_idcache_wbinv_all(void)
+{
+	uint32_t feat;
 
+	__asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (feat));
+	if (feat & ARM_PFR0_THUMBEE_MASK)
+		armv7_idcache_wbinv_all();
+	else
+		armv6_idcache_wbinv_all();
+
+}
 #ifdef KZIP
 static  unsigned char *orig_input, *i_input, *i_output;
 

Modified: projects/armv6/sys/arm/arm/identcpu.c
==============================================================================
--- projects/armv6/sys/arm/arm/identcpu.c	Wed Aug 24 07:28:04 2011	(r225133)
+++ projects/armv6/sys/arm/arm/identcpu.c	Wed Aug 24 07:49:18 2011	(r225134)
@@ -301,8 +301,17 @@ const struct cpuidtab cpuids[] = {
 
 	{ CPU_ID_MV88FR571_VD,	CPU_CLASS_MARVELL,	"Feroceon 88FR571-VD",
 	  generic_steppings },
-
-	{ CPU_ID_MV88FR571_41,	CPU_CLASS_MARVELL,	"Early Feroceon 88FR571",
+	{ CPU_ID_MV88SV581X_V6,	CPU_CLASS_MARVELL,	"Sheeva 88SV581x",
+	  generic_steppings },
+	{ CPU_ID_ARM_88SV581X_V6, CPU_CLASS_MARVELL,	"Sheeva 88SV581x",
+	  generic_steppings },
+	{ CPU_ID_MV88SV581X_V7,	CPU_CLASS_MARVELL,	"Sheeva 88SV581x",
+	  generic_steppings },
+	{ CPU_ID_ARM_88SV581X_V7, CPU_CLASS_MARVELL,	"Sheeva 88SV581x",
+	  generic_steppings },
+	{ CPU_ID_MV88SV584X,	CPU_CLASS_MARVELL,	"Sheeva 88SV584x",
+	  generic_steppings },
+	{ CPU_ID_ARM_88SV584X,	CPU_CLASS_MARVELL,	"Sheeva 88SV584x",
 	  generic_steppings },
 
 	{ 0, CPU_CLASS_NONE, NULL, NULL }
@@ -358,13 +367,81 @@ static const char * const wtnames[] = {
 	"**unknown 15**",
 };
 
+static void
+print_enadis(int enadis, char *s)
+{
+
+	printf(" %s %sabled", s, (enadis == 0) ? "dis" : "en");
+}
 
 extern int ctrl;
 enum cpu_class cpu_class = CPU_CLASS_NONE;
+
+u_int cpu_pfr(int num)
+{
+	u_int feat;
+
+	switch (num) {
+	case 0:
+		__asm __volatile("mrc p15, 0, %0, c0, c1, 0"
+		    : "=r" (feat));
+		break;
+	case 1:
+		__asm __volatile("mrc p15, 0, %0, c0, c1, 1"
+		    : "=r" (feat));
+		break;
+	default:
+		panic("Processor Feature Register %d not implemented", num);
+		break;
+	}
+
+	return (feat);
+}
+
+static
+void identify_armv7(void)
+{
+	u_int feature;
+
+	printf("Supported features:");
+	/* Get Processor Feature Register 0 */
+	feature = cpu_pfr(0);
+
+	if (feature & ARM_PFR0_ARM_ISA_MASK)
+		printf(" ARM_ISA");
+
+	if (feature & ARM_PFR0_THUMB2)
+		printf(" THUMB2");
+	else if (feature & ARM_PFR0_THUMB)
+		printf(" THUMB");
+
+	if (feature & ARM_PFR0_JAZELLE_MASK)
+		printf(" JAZELLE");

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201108240749.p7O7nIVZ039310>