Skip site navigation (1)Skip section navigation (2)
Date:      Wed, 28 Dec 2011 15:15:00 +0000 (UTC)
From:      Grzegorz Bernacki <gber@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r228930 - in projects/armv6/sys/arm: arm include
Message-ID:  <201112281515.pBSFF0K5011940@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: gber
Date: Wed Dec 28 15:15:00 2011
New Revision: 228930
URL: http://svn.freebsd.org/changeset/base/228930

Log:
  Make low level code SMP aware.
  
  - implement crude TLB broadcasting
  - implement RFO in cache operations
  - use SHARED bit in page descriptor for SMP
  - create defines for different cache/memory model setup
  - cleanup a little
  
  Obtained from:	Marvell, Semihalf

Modified:
  projects/armv6/sys/arm/arm/cpufunc.c
  projects/armv6/sys/arm/arm/cpufunc_asm_pj4b.S
  projects/armv6/sys/arm/arm/pmap-v6.c
  projects/armv6/sys/arm/include/cpufunc.h
  projects/armv6/sys/arm/include/pmap.h
  projects/armv6/sys/arm/include/pte.h

Modified: projects/armv6/sys/arm/arm/cpufunc.c
==============================================================================
--- projects/armv6/sys/arm/arm/cpufunc.c	Wed Dec 28 15:03:34 2011	(r228929)
+++ projects/armv6/sys/arm/arm/cpufunc.c	Wed Dec 28 15:15:00 2011	(r228930)
@@ -571,10 +571,10 @@ struct cpu_functions pj4bv6_cpufuncs = {
 	armv6_idcache_wbinv_all,	/* idcache_wbinv_all	*/
 	pj4b_idcache_wbinv_range,	/* idcache_wbinv_all	*/
 
-	pj4b_l2cache_wbinv_all,		/* l2cache_wbinv_all	*/
-	pj4b_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
-	pj4b_l2cache_inv_range,		/* l2cache_inv_range	*/
-	pj4b_l2cache_wb_range,		/* l2cache_wb_range	*/
+	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
+	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
+	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
+	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
 
 	/* Other functions */
 
@@ -1298,7 +1298,7 @@ set_cpufuncs()
 		pmap_pte_init_generic();
 		goto out;
 	} else if (cputype == CPU_ID_ARM926EJS  || cputype == CPU_ID_ARM926ES ||
-	    cputype == CPU_ID_ARM1026EJS)
+	    cputype == CPU_ID_ARM1026EJS) {
 		cpufuncs = armv5_ec_cpufuncs;
 		get_cachetype_cp15();
 		pmap_pte_init_generic();
@@ -2241,34 +2241,33 @@ pj4bv6_setup(char *args)
 {
 	int cpuctrl;
 
-	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
-	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
-	    | CPU_CONTROL_BPRD_ENABLE  | CPU_CONTROL_V6_EXTPAGE
-	    | CPU_CONTROL_L2_ENABLE ;
-
+	cpuctrl = CPU_CONTROL_MMU_ENABLE;
 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 #endif
-
+	cpuctrl |= CPU_CONTROL_DC_ENABLE;
+	cpuctrl |= (0xf << 3);
 #ifdef __ARMEB__
 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 #endif
-
+	cpuctrl |= CPU_CONTROL_SYST_ENABLE;
+	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
+	cpuctrl |= CPU_CONTROL_IC_ENABLE;
 	if (vector_page == ARM_VECTORS_HIGH)
 		cpuctrl |= CPU_CONTROL_VECRELOC;
+	cpuctrl |= (0x5 << 16);
+	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
+	/* XXX not yet */
+	/* cpuctrl |= CPU_CONTROL_L2_ENABLE; */
 
-	/* Clear out the cache */
+	/* Make sure caches are clean.  */
 	cpu_idcache_wbinv_all();
 	cpu_l2cache_wbinv_all();
 
-	/* Now really make sure they are clean.  */
-	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
-
 	/* Set the control register */
 	ctrl = cpuctrl;
 	cpu_control(0xffffffff, cpuctrl);
 
-	/* And again. */
 	cpu_idcache_wbinv_all();
 	cpu_l2cache_wbinv_all();
 }
@@ -2279,20 +2278,21 @@ pj4bv7_setup(args)
 {
 	int cpuctrl;
 
-	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
-	    | (0xf << 3) | CPU_CONTROL_BPRD_ENABLE
-	    | CPU_CONTROL_IC_ENABLE | (0x5 << 16) | (1 < 22)
-	    | CPU_CONTROL_V6_EXTPAGE;
-
+	cpuctrl = CPU_CONTROL_MMU_ENABLE;
 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 #endif
+	cpuctrl |= CPU_CONTROL_DC_ENABLE;
+	cpuctrl |= (0xf << 3);
+	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
+	cpuctrl |= CPU_CONTROL_IC_ENABLE;
 	if (vector_page == ARM_VECTORS_HIGH)
 		cpuctrl |= CPU_CONTROL_VECRELOC;
+	cpuctrl |= (0x5 << 16) | (1 < 22);
+	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
 
 	/* Clear out the cache */
 	cpu_idcache_wbinv_all();
-	cpu_l2cache_wbinv_all();
 
 	/* Set the control register */
 	ctrl = cpuctrl;
@@ -2300,7 +2300,6 @@ pj4bv7_setup(args)
 
 	/* And again. */
 	cpu_idcache_wbinv_all();
-	cpu_l2cache_wbinv_all();
 }
 #endif /* CPU_MV_PJ4B */
 

Modified: projects/armv6/sys/arm/arm/cpufunc_asm_pj4b.S
==============================================================================
--- projects/armv6/sys/arm/arm/cpufunc_asm_pj4b.S	Wed Dec 28 15:03:34 2011	(r228929)
+++ projects/armv6/sys/arm/arm/cpufunc_asm_pj4b.S	Wed Dec 28 15:15:00 2011	(r228930)
@@ -65,23 +65,18 @@ ENTRY(pj4b_icache_sync_range)
 	RET
 
 ENTRY(pj4b_dcache_inv_range)
-	sub	r1, r1, #1
-	add	r1, r0, r1
-	mcrr	p15, 0, r1, r0, c6	/* invalidate DC range */
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	RET
-
-ENTRY(pj4b_dcache_wb_range)
-	sub	r1, r1, #1
-	add	r1, r0, r1
-	mcrr	p15, 0, r1, r0, c12	/* clean DC range */
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	RET
-
-ENTRY(pj4b_dcache_wbinv_range)
-	sub	r1, r1, #1
-	add	r1, r0, r1
-	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate DC range */
+	ldr	ip, .Lpj4b_cache_line_size
+	ldr	ip, [ip]
+	sub	r1, r1, #1		/* Don't overrun */
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+1:
+	mcr	p15, 0, r0, c7, c6, 1
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	1b
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
 
@@ -101,18 +96,6 @@ ENTRY(armv6_dcache_wbinv_all)
 ENTRY(pj4b_idcache_wbinv_range)
 	sub	r1, r1, #1
 	add	r1, r0, r1
-	mcrr	p15, 0, r1, r0, c5	/* invalidate IC range */
-	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate DC range */
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	RET
-
-ENTRY(pj4b_l2cache_wbinv_all)
-	mcr	p15, 1, r0, c7, c11, 0	/* L2C clean all */
-	mcr	p15, 1, r0, c7, c7, 0	/* L2C invalidate all */
-	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	RET
-
-ENTRY(pj4b_l2cache_wbinv_range)
 	ldr	ip, .Lpj4b_cache_line_size
 	ldr	ip, [ip]
 	sub	r1, r1, #1		/* Don't overrun */
@@ -121,14 +104,20 @@ ENTRY(pj4b_l2cache_wbinv_range)
 	add	r1, r1, r2
 	bic	r0, r0, r3
 1:
-	mcr	p15, 1, r0, c7, c15, 1	/* L2C clean and invalidate entry */
+#ifdef SMP
+	/* Request for ownership */
+	ldr	r2, [r0]
+	str	r2, [r0]
+#endif
+	mcr	p15, 0, r0, c7, c5, 1
+	mcr	p15, 0, r0, c7, c14, 1	/* L2C clean and invalidate entry */
 	add	r0, r0, ip
 	subs	r1, r1, ip
 	bpl	1b
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
 
-ENTRY(pj4b_l2cache_wb_range)
+ENTRY(pj4b_dcache_wbinv_range)
 	ldr	ip, .Lpj4b_cache_line_size
 	ldr	ip, [ip]
 	sub	r1, r1, #1		/* Don't overrun */
@@ -137,14 +126,19 @@ ENTRY(pj4b_l2cache_wb_range)
 	add	r1, r1, r2
 	bic	r0, r0, r3
 1:
-	mcr	p15, 1, r0, c7, c11, 1	/* L2C clean single entry by MVA */
+#ifdef SMP
+	/* Request for ownership */
+	ldr	r2, [r0]
+	str	r2, [r0]
+#endif
+	mcr	p15, 0, r0, c7, c14, 1
 	add	r0, r0, ip
 	subs	r1, r1, ip
 	bpl	1b
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
 
-ENTRY(pj4b_l2cache_inv_range)
+ENTRY(pj4b_dcache_wb_range)
 	ldr	ip, .Lpj4b_cache_line_size
 	ldr	ip, [ip]
 	sub	r1, r1, #1		/* Don't overrun */
@@ -153,7 +147,12 @@ ENTRY(pj4b_l2cache_inv_range)
 	add	r1, r1, r2
 	bic	r0, r0, r3
 1:
-	mcr	p15, 1, r0, c7, c7, 1	/* L2C invalidate single entry by MVA */
+#ifdef SMP
+	/* Request for ownership */
+	ldr	r2, [r0]
+	str	r2, [r0]
+#endif
+	mcr	p15, 0, r0, c7, c10, 1	/* L2C clean single entry by MVA */
 	add	r0, r0, ip
 	subs	r1, r1, ip
 	bpl	1b
@@ -208,5 +207,10 @@ ENTRY(pj4b_config)
 	orr	r0, r0, #(1 << 29)
 	orr	r0, r0, #(1 << 30)
 	mcr	p15, 1, r0, c15, c1, 2
-
+#if defined(SMP)
+	/* Set SMP mode in Auxiliary Control Register */
+	mrc	p15, 0, r0, c1, c0, 1
+	orr	r0, r0, #(1 << 5)
+	mcr	p15, 0, r0, c1, c0, 1
+#endif
 	RET

Modified: projects/armv6/sys/arm/arm/pmap-v6.c
==============================================================================
--- projects/armv6/sys/arm/arm/pmap-v6.c	Wed Dec 28 15:03:34 2011	(r228929)
+++ projects/armv6/sys/arm/arm/pmap-v6.c	Wed Dec 28 15:15:00 2011	(r228930)
@@ -476,13 +476,15 @@ void
 pmap_pte_init_mmu_v6(void)
 {
 
-	pte_l1_s_cache_mode = ARM_L1S_NRML_IWT_OWT;
-	pte_l2_l_cache_mode = ARM_L2L_NRML_IWT_OWT;
-	pte_l2_s_cache_mode = ARM_L2S_NRML_IWT_OWT;
-
-	pte_l1_s_cache_mode_pt = ARM_L1S_NRML_IWT_OWT;
-	pte_l2_l_cache_mode_pt = ARM_L2L_NRML_IWT_OWT;
-	pte_l2_s_cache_mode_pt = ARM_L2S_NRML_IWT_OWT;
+	if (PTE_PAGETABLE >= 3)
+		pmap_needs_pte_sync = 1;
+	pte_l1_s_cache_mode = l1_mem_types[PTE_CACHE];
+	pte_l2_l_cache_mode = l2l_mem_types[PTE_CACHE];
+	pte_l2_s_cache_mode = l2s_mem_types[PTE_CACHE];
+
+	pte_l1_s_cache_mode_pt = l1_mem_types[PTE_PAGETABLE];
+	pte_l2_l_cache_mode_pt = l2l_mem_types[PTE_PAGETABLE];
+	pte_l2_s_cache_mode_pt = l2s_mem_types[PTE_PAGETABLE];
 
 }
 
@@ -2065,11 +2067,16 @@ pmap_kenter_internal(vm_offset_t va, vm_
 		if (opte == 0)
 			l2b->l2b_occupancy++;
 	}
-	*pte = L2_S_PROTO | pa;
-	pmap_set_prot(pte, VM_PROT_READ | VM_PROT_WRITE, flags & KENTER_USER);
 
-	if (flags & KENTER_CACHE)
-		*pte |= pte_l2_s_cache_mode;
+	if (flags & KENTER_CACHE) {
+		*pte = L2_S_PROTO | pa | pte_l2_s_cache_mode;
+		pmap_set_prot(pte, VM_PROT_READ | VM_PROT_WRITE,
+		    flags & KENTER_USER);
+	} else {
+		*pte = L2_S_PROTO | pa;
+		pmap_set_prot(pte, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
+		    0);
+	}
 
 	PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
 	    (uint32_t) pte, opte, *pte));
@@ -2615,6 +2622,9 @@ do_l2b_alloc:
 		 *   so no need to re-do referenced emulation here.
 		 */
 		npte |= L2_S_PROTO;
+#ifdef SMP
+		npte |= L2_SHARED;
+#endif
 
 		nflags |= PVF_REF;
 

Modified: projects/armv6/sys/arm/include/cpufunc.h
==============================================================================
--- projects/armv6/sys/arm/include/cpufunc.h	Wed Dec 28 15:03:34 2011	(r228929)
+++ projects/armv6/sys/arm/include/cpufunc.h	Wed Dec 28 15:15:00 2011	(r228930)
@@ -176,6 +176,8 @@ extern u_int cputype;
 #define cpu_faultstatus()	cpufuncs.cf_faultstatus()
 #define cpu_faultaddress()	cpufuncs.cf_faultaddress()
 
+#ifndef SMP
+
 #define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 #define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
 #define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
@@ -183,6 +185,45 @@ extern u_int cputype;
 #define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
 #define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)
 
+#else
+void tlb_broadcast(int);
+
+#define	cpu_tlb_flushID() do { \
+	cpufuncs.cf_tlb_flushID(); \
+	tlb_broadcast(7); \
+} while(0)
+
+#define	cpu_tlb_flushID_SE(e) do { \
+	cpufuncs.cf_tlb_flushID_SE(e); \
+	tlb_broadcast(7); \
+} while(0)
+
+
+#define	cpu_tlb_flushI() do { \
+	cpufuncs.cf_tlb_flushI(); \
+	tlb_broadcast(7); \
+} while(0)
+
+
+#define	cpu_tlb_flushI_SE(e) do { \
+	cpufuncs.cf_tlb_flushI_SE(e); \
+	tlb_broadcast(7); \
+} while(0)
+
+
+#define	cpu_tlb_flushD() do { \
+	cpufuncs.cf_tlb_flushD(); \
+	tlb_broadcast(7); \
+} while(0)
+
+
+#define	cpu_tlb_flushD_SE(e) do { \
+	cpufuncs.cf_tlb_flushD_SE(e); \
+	tlb_broadcast(7); \
+} while(0)
+
+#endif
+
 #define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
 #define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
 
@@ -442,11 +483,6 @@ void	pj4b_dcache_wb_range		(vm_offset_t,
 
 void	pj4b_idcache_wbinv_range	(vm_offset_t, vm_size_t);
 
-void	pj4b_l2cache_wbinv_range	(vm_offset_t, vm_size_t);
-void	pj4b_l2cache_inv_range		(vm_offset_t, vm_size_t);
-void	pj4b_l2cache_wb_range		(vm_offset_t, vm_size_t);
-void	pj4b_l2cache_wbinv_all		(void);
-
 void	pj4b_drain_readbuf		(void);
 void	pj4b_flush_brnchtgt_all		(void);
 void	pj4b_flush_brnchtgt_va		(u_int);

Modified: projects/armv6/sys/arm/include/pmap.h
==============================================================================
--- projects/armv6/sys/arm/include/pmap.h	Wed Dec 28 15:03:34 2011	(r228929)
+++ projects/armv6/sys/arm/include/pmap.h	Wed Dec 28 15:15:00 2011	(r228930)
@@ -55,8 +55,12 @@
 /*
  * Pte related macros
  */
+#ifdef SMP
+#define PTE_NOCACHE	2
+#else
 #define PTE_NOCACHE	1
-#define PTE_CACHE	6
+#endif
+#define PTE_CACHE	4
 #define PTE_PAGETABLE	4
 
 enum mem_type {
@@ -351,14 +355,21 @@ extern int pmap_needs_pte_sync;
 #define	L2_S_PROT_MASK		(L2_S_PROT_U|L2_S_PROT_R)
 #define	L2_S_WRITABLE(pte)	(!(pte & L2_APX))
 
+#ifndef SMP
 #define	L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C)
 #define	L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C)
 #define	L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C)
+#else
+#define	L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
+#define	L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
+#define	L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
+#endif  /* SMP */
 
 #define	L1_S_PROTO		(L1_TYPE_S)
 #define	L1_C_PROTO		(L1_TYPE_C)
 #define	L2_S_PROTO		(L2_TYPE_S)
 
+#ifndef SMP
 #define ARM_L1S_STRONG_ORD	(0)
 #define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
 #define ARM_L1S_DEVICE_SHARE	(L1_S_B)
@@ -382,7 +393,31 @@ extern int pmap_needs_pte_sync;
 #define ARM_L2S_NRML_IWT_OWT	(L2_C)
 #define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B)
 #define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B)
+#else
+#define ARM_L1S_STRONG_ORD	(0)
+#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
+#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
+#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1)|L1_SHARED)
+#define ARM_L1S_NRML_IWT_OWT	(L1_S_C|L1_SHARED)
+#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B|L1_SHARED)
+#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)
 
+#define ARM_L2L_STRONG_ORD	(0)
+#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
+#define ARM_L2L_DEVICE_SHARE	(L2_B)
+#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1)|L2_SHARED)
+#define ARM_L2L_NRML_IWT_OWT	(L2_C|L2_SHARED)
+#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
+#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)
+
+#define ARM_L2S_STRONG_ORD	(0)
+#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
+#define ARM_L2S_DEVICE_SHARE	(L2_B)
+#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1)|L2_SHARED)
+#define ARM_L2S_NRML_IWT_OWT	(L2_C|L2_SHARED)
+#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
+#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
+#endif /* SMP */
 #endif /* ARM_NMMUS > 1 */
 
 #if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)

Modified: projects/armv6/sys/arm/include/pte.h
==============================================================================
--- projects/armv6/sys/arm/include/pte.h	Wed Dec 28 15:03:34 2011	(r228929)
+++ projects/armv6/sys/arm/include/pte.h	Wed Dec 28 15:15:00 2011	(r228930)
@@ -37,32 +37,13 @@
 #ifndef _MACHINE_PTE_H_
 #define _MACHINE_PTE_H_
 
-#define PDSHIFT		20		/* LOG2(NBPDR) */
-#define NBPD		(1 << PDSHIFT)	/* bytes/page dir */
-#define NPTEPD		(NBPD / PAGE_SIZE)
-
 #ifndef LOCORE
 typedef	uint32_t	pd_entry_t;		/* page directory entry */
 typedef	uint32_t	pt_entry_t;		/* page table entry */
 #endif
 
-#define PD_MASK		0xfff00000	/* page directory address bits */
-#define PT_MASK		0x000ff000	/* page table address bits */
-
 #define PG_FRAME	0xfffff000
 
-/* The PT_SIZE definition is misleading... A page table is only 0x400
- * bytes long. But since VM mapping can only be done to 0x1000 a single
- * 1KB blocks cannot be steered to a va by itself. Therefore the
- * pages tables are allocated in blocks of 4. i.e. if a 1 KB block
- * was allocated for a PT then the other 3KB would also get mapped
- * whenever the 1KB was mapped.
- */
- 
-#define PT_RSIZE	0x0400		/* Real page table size */
-#define PT_SIZE		0x1000
-#define PD_SIZE		0x4000
-
 /* Page table types and masks */
 #define L1_PAGE		0x01	/* L1 page table mapping */
 #define L1_SECTION	0x02	/* L1 section mapping */
@@ -73,27 +54,6 @@ typedef	uint32_t	pt_entry_t;		/* page ta
 #define L2_MASK		0x03	/* Mask for L2 entry type */
 #define L2_INVAL	0x00	/* L2 invalid type */
 
-/* PTE construction macros */
-#define	L2_LPTE(p, a, f)	((p) | PT_AP(a) | L2_LPAGE | (f))
-#define L2_SPTE(p, a, f)	((p) | PT_AP(a) | L2_SPAGE | (f))
-#define L2_PTE(p, a)		L2_SPTE((p), (a), PT_CACHEABLE)
-#define L2_PTE_NC(p, a)		L2_SPTE((p), (a), PT_B)
-#define L2_PTE_NC_NB(p, a)	L2_SPTE((p), (a), 0)
-#define L1_SECPTE(p, a, f)	((p) | ((a) << AP_SECTION_SHIFT) | (f) \
-				| L1_SECTION | PT_U)
-
-#define L1_PTE(p)	((p) | 0x00 | L1_PAGE | PT_U)
-#define L1_SEC(p, c)	L1_SECPTE((p), AP_KRW, (c))
-
-#define L1_SEC_SIZE	(1 << PDSHIFT)
-#define L2_LPAGE_SIZE	(NBPG * 16)
-
-/* Domain types */
-#define DOMAIN_FAULT		0x00
-#define DOMAIN_CLIENT		0x01
-#define DOMAIN_RESERVED		0x02
-#define DOMAIN_MANAGER		0x03
-
 /* L1 and L2 address masks */
 #define L1_ADDR_MASK		0xfffffc00
 #define L2_ADDR_MASK		0xfffff000
@@ -260,6 +220,7 @@ typedef	uint32_t	pt_entry_t;		/* page ta
 #define	L2_AP2(x)	((x) << 8)	/* access permissions (sp 2) */
 #define	L2_AP3(x)	((x) << 10)	/* access permissions (sp 3) */
 
+#define	L2_SHARED	(1 << 10)
 #define	L2_APX		(1 << 9)
 #define	L2_XN		(1 << 0)
 #define	L2_L_TEX_MASK	(0x7 << 12)	/* Type Extension */
@@ -324,7 +285,7 @@ typedef	uint32_t	pt_entry_t;		/* page ta
  *
  * Cache attributes with L2 present, S = 0
  * T E X C B   L1 i-cache L1 d-cache L1 DC WP  L2 cacheable write coalesce
- * 0 0 0 0 0 	N	  N 		- 	N		N 
+ * 0 0 0 0 0	N	  N		-	N		N
  * 0 0 0 0 1	N	  N		-	N		Y
  * 0 0 0 1 0	Y	  Y		WT	N		Y
  * 0 0 0 1 1	Y	  Y		WB	Y		Y
@@ -351,7 +312,7 @@ typedef	uint32_t	pt_entry_t;		/* page ta
  *
   * Cache attributes with L2 present, S = 1
  * T E X C B   L1 i-cache L1 d-cache L1 DC WP  L2 cacheable write coalesce
- * 0 0 0 0 0 	N	  N 		- 	N		N 
+ * 0 0 0 0 0	N	  N		-	N		N
  * 0 0 0 0 1	N	  N		-	N		Y
  * 0 0 0 1 0	Y	  Y		-	N		Y
  * 0 0 0 1 1	Y	  Y		WT	Y		Y



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201112281515.pBSFF0K5011940>