Date:      Sun, 5 Mar 2006 02:34:36 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 92762 for review
Message-ID:  <200603050234.k252Yax3008472@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=92762

Change 92762 by kmacy@kmacy_storage:sun4v_work on 2006/03/05 02:34:18

	remove unused code to simplify future additions

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#14 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#14 (text+ko) ====

@@ -766,443 +766,7 @@
 	.align	128
 	.endm
 
-#ifdef notyet
-/* SUN4V_FIXME */	
-ENTRY(tl0_immu_miss_set_ref)
-	/*
-	 * Set the reference bit.
-	 */
-	TTE_SET_REF(%g4, %g2, %g3)
-
-	/*
-	 * May have become invalid during casxa, in which case start over.
-	 */
-	brgez,pn %g2, 1f
-	 nop
-
-	/*
-	 * Load the tte tag and data into the tlb and retry the instruction.
-	 */
-	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
-	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
-1:	retry
-END(tl0_immu_miss_set_ref)
-
-
-ENTRY(tl0_immu_miss_trap)
-	/*
-	 * Put back the contents of the tag access register, in case we
-	 * faulted.
-	 */
-	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
-	membar	#Sync
-
-	/*
-	 * Switch to alternate globals.
-	 */
-	LOAD_ALT
-
-	/*
-	 * Reload the tag access register.
-	 */
-	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
-
-	/*
-	 * Save the tag access register, and call common trap code.
-	 */
-	tl0_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	ba	%xcc, tl0_utrap
-	 mov	T_INSTRUCTION_MISS, %o0
-END(tl0_immu_miss_trap)
-
-	.macro	tl0_dmmu_miss
-	/*
-	 * Load the virtual page number and context from the tag access
-	 * register.  We ignore the context.
-	 */
-	wr	%g0, ASI_DMMU, %asi
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
-
-	/*
-	 * Initialize the page size walker.
-	 */
-tl1_dmmu_miss_user:
-	mov	TS_MIN, %g2
-
-	/*
-	 * Loop over all supported page sizes.
-	 */
-
-	/*
-	 * Compute the page shift for the page size we are currently looking
-	 * for.
-	 */
-1:	add	%g2, %g2, %g3
-	add	%g3, %g2, %g3
-	add	%g3, PAGE_SHIFT, %g3
-
-	/*
-	 * Extract the virtual page number from the contents of the tag
-	 * access register.
-	 */
-	srlx	%g1, %g3, %g3
-
-	/*
-	 * Compute the tte bucket address.
-	 */
-	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
-	and	%g3, TSB_BUCKET_MASK, %g4
-	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
-	add	%g4, %g5, %g4
-
-	/*
-	 * Compute the tte tag target.
-	 */
-	sllx	%g3, TV_SIZE_BITS, %g3
-	or	%g3, %g2, %g3
-
-	/*
-	 * Loop over the ttes in this bucket.
-	 */
-
-	/*
-	 * Load the tte.  Note that this instruction may fault, clobbering
-	 * the contents of the tag access register, %g5, %g6, and %g7.  We
-	 * do not use %g5, and %g6 and %g7 are not used until this instruction
-	 * completes successfully.
-	 */
-2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
-
-	/*
-	 * Check that it's valid and that the virtual page numbers match.
-	 */
-	brgez,pn %g7, 3f
-	 cmp	%g3, %g6
-	bne,pn	%xcc, 3f
-	 EMPTY
-
-	/*
-	 * We matched a tte, load the tlb.
-	 */
-
-	/*
-	 * Set the reference bit, if it's currently clear.
-	 */
-	 andcc	%g7, TD_REF, %g0
-	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
-	 nop
-
-	/*
-	 * Load the tte tag and data into the tlb and retry the instruction.
-	 */
-	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
-	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
-	retry
-
-	/*
-	 * Advance to the next tte in this bucket, and check the low bits
-	 * of the bucket pointer to see if we've finished the bucket.
-	 */
-3:	add	%g4, 1 << TTE_SHIFT, %g4
-	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
-	bnz,pt	%xcc, 2b
-	 EMPTY
 
-	/*
-	 * See if we just checked the largest page size, and advance to the
-	 * next one if not.
-	 */
-	 cmp	%g2, TS_MAX
-	bne,pt	%xcc, 1b
-	 add	%g2, 1, %g2
-
-	/*
-	 * Not in user tsb, call c code.
-	 */
-	ba,a	%xcc, tl0_dmmu_miss_trap
-	.align	128
-	.endm
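
For reference while reading the walker deleted above: the three back-to-back
add instructions compute shift = 3 * ts + PAGE_SHIFT, so each step of the
page-size loop grows the page by a factor of 8 (8K, 64K, 512K, 4M on sparc64).
A minimal C sketch of the same probe arithmetic follows; the constant values
are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real constants live in the sparc64/sun4v
 * headers and differ. */
#define PAGE_SHIFT       13   /* 8K base pages */
#define TS_MIN           0
#define TS_MAX           3    /* 8K, 64K, 512K, 4M */
#define TSB_BUCKET_SHIFT 2
#define TTE_SHIFT        5
#define TSB_BUCKET_MASK  0x1ff
#define TV_SIZE_BITS     3

/*
 * For one page-size index ts, derive the values the assembly computes:
 * the page shift (3 * ts + PAGE_SHIFT, via the three add instructions),
 * the virtual page number, the byte offset of the TSB bucket, and the
 * tag target to compare against the loaded TTE tag.
 */
static void
tsb_probe_params(uint64_t tag_access, int ts)
{
	int shift = 3 * ts + PAGE_SHIFT;
	uint64_t vpn = tag_access >> shift;
	uint64_t bucket = (vpn & TSB_BUCKET_MASK) <<
	    (TSB_BUCKET_SHIFT + TTE_SHIFT);
	uint64_t tag = (vpn << TV_SIZE_BITS) | (uint64_t)ts;

	printf("ts=%d shift=%d vpn=%#jx bucket=%#jx tag=%#jx\n",
	    ts, shift, (uintmax_t)vpn, (uintmax_t)bucket, (uintmax_t)tag);
}

int
main(void)
{
	/* Walk all supported page sizes, as the 1: loop does. */
	for (int ts = TS_MIN; ts <= TS_MAX; ts++)
		tsb_probe_params(0x40002000UL, ts);
	return (0);
}
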
-
-ENTRY(tl0_dmmu_miss_set_ref)
-	/*
-	 * Set the reference bit.
-	 */
-	TTE_SET_REF(%g4, %g2, %g3)
-
-	/*
-	 * May have become invalid during casxa, in which case start over.
-	 */
-	brgez,pn %g2, 1f
-	 nop
-
-	/*
-	 * Load the tte tag and data into the tlb and retry the instruction.
-	 */
-	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
-	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
-1:	retry
-END(tl0_dmmu_miss_set_ref)
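
TTE_SET_REF itself (defined elsewhere) is a casxa compare-and-swap loop, and
the brgez that follows it re-tests the sign bit because the valid bit of a
TTE is bit 63: if another CPU invalidated the entry during the swap, it must
not be loaded into the TLB. A hedged C11 analogue of that protocol, with a
made-up position for TD_REF:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TD_V   (1ULL << 63)  /* valid bit: a valid TTE is "negative" */
#define TD_REF (1ULL << 50)  /* placeholder position for the ref bit */

/*
 * Set the reference bit with a compare-and-swap loop, then report whether
 * the entry is still valid -- the equivalent of the brgez re-check after
 * TTE_SET_REF.  On false, the caller retries the instruction without
 * loading the TLB (the "1: retry" path).
 */
static bool
tte_set_ref(_Atomic uint64_t *tte_data, uint64_t *out)
{
	uint64_t old = atomic_load(tte_data);
	uint64_t new;

	do {
		new = old | TD_REF;
	} while (!atomic_compare_exchange_weak(tte_data, &old, new));
	*out = new;
	return ((new & TD_V) != 0);
}
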
-
-ENTRY(tl0_dmmu_miss_trap)
-	/*
-	 * Put back the contents of the tag access register, in case we
-	 * faulted.
-	 */
-	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
-	membar	#Sync
-
-	/*
-	 * Switch to alternate globals.
-	 */
-	LOAD_ALT
-
-	/*
-	 * Check if we actually came from the kernel.
-	 */
-	rdpr	%tl, %g1
-	cmp	%g1, 1
-	bgt,a,pn %xcc, 1f
-	 nop
-
-	/*
-	 * Reload the tag access register.
-	 */
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-
-	/*
-	 * Save the tag access register and call common trap code.
-	 */
-	tl0_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	ba	%xcc, tl0_utrap
-	 mov	T_DATA_MISS, %o0
-
-	/*
-	 * Handle faults during window spill/fill.
-	 */
-1:	RESUME_SPILLFILL_MMU
-
-	/*
-	 * Reload the tag access register.
-	 */
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-
-	tl1_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	ba	%xcc, tl1_trap
-	 mov	T_DATA_MISS | T_KERNEL, %o0
-END(tl0_dmmu_miss_trap)
-
-	.macro	tl0_dmmu_prot
-	ba,a	%xcc, tl0_dmmu_prot_1
-	 nop
-	.align	128
-	.endm
-
-ENTRY(tl0_dmmu_prot_1)
-	/*
-	 * Load the virtual page number and context from the tag access
-	 * register.  We ignore the context.
-	 */
-	wr	%g0, ASI_DMMU, %asi
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
-
-	/*
-	 * Initialize the page size walker.
-	 */
-tl1_dmmu_prot_user:
-	mov	TS_MIN, %g2
-
-	/*
-	 * Loop over all supported page sizes.
-	 */
-
-	/*
-	 * Compute the page shift for the page size we are currently looking
-	 * for.
-	 */
-1:	add	%g2, %g2, %g3
-	add	%g3, %g2, %g3
-	add	%g3, PAGE_SHIFT, %g3
-
-	/*
-	 * Extract the virtual page number from the contents of the tag
-	 * access register.
-	 */
-	srlx	%g1, %g3, %g3
-
-	/*
-	 * Compute the tte bucket address.
-	 */
-	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
-	and	%g3, TSB_BUCKET_MASK, %g4
-	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
-	add	%g4, %g5, %g4
-
-	/*
-	 * Compute the tte tag target.
-	 */
-	sllx	%g3, TV_SIZE_BITS, %g3
-	or	%g3, %g2, %g3
-
-	/*
-	 * Loop over the ttes in this bucket.
-	 */
-
-	/*
-	 * Load the tte.  Note that this instruction may fault, clobbering
-	 * the contents of the tag access register, %g5, %g6, and %g7.  We
-	 * do not use %g5, and %g6 and %g7 are not used until this instruction
-	 * completes successfully.
-	 */
-2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
-
-	/*
-	 * Check that it's valid and writable and that the virtual page
-	 * numbers match.
-	 */
-	brgez,pn %g7, 4f
-	 andcc	%g7, TD_SW, %g0
-	bz,pn	%xcc, 4f
-	 cmp	%g3, %g6
-	bne,pn	%xcc, 4f
-	 nop
-
-	/*
-	 * Set the hardware write bit.
-	 */
-	TTE_SET_W(%g4, %g2, %g3)
-
-	/*
-	 * Delete the old TLB entry and clear the sfsr.
-	 */
-	srlx	%g1, PAGE_SHIFT, %g3
-	sllx	%g3, PAGE_SHIFT, %g3
-	stxa	%g0, [%g3] ASI_DMMU_DEMAP
-	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
-	membar	#Sync
-
-	/*
-	 * May have become invalid during casxa, in which case start over.
-	 */
-	brgez,pn %g2, 3f
-	 or	%g2, TD_W, %g2
-
-	/*
-	 * Load the tte data into the tlb and retry the instruction.
-	 */
-	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
-	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
-3:	retry
-
-	/*
-	 * Check the low bits to see if we've finished the bucket.
-	 */
-4:	add	%g4, 1 << TTE_SHIFT, %g4
-	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
-	bnz,pt	%xcc, 2b
-	 EMPTY
-
-	/*
-	 * See if we just checked the largest page size, and advance to the
-	 * next one if not.
-	 */
-	 cmp	%g2, TS_MAX
-	bne,pt	%xcc, 1b
-	 add	%g2, 1, %g2
-
-	/*
-	 * Not in user tsb, call c code.
-	 */
-	ba,a	%xcc, tl0_dmmu_prot_trap
-	 nop
-END(tl0_dmmu_prot_1)
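
Summarizing the fast path deleted above: a protection fault is resolved
inline only when the TTE is valid and marked software-writable (TD_SW); the
handler then sets the hardware write bit, demaps the stale read-only TLB
entry, clears the SFSR, and reloads. A sketch in C with placeholder bit
positions and stubbed MMU primitives:

#include <stdbool.h>
#include <stdint.h>

#define TD_V  (1ULL << 63)
#define TD_SW (1ULL << 57)   /* placeholder: software says writable */
#define TD_W  (1ULL << 56)   /* placeholder: hardware write enable */
#define PAGE_SHIFT 13

/* Stubs standing in for the ASI_DMMU_DEMAP and AA_DMMU_SFSR stores. */
static void dtlb_demap_page(uint64_t va) { (void)va; }
static void dmmu_clear_sfsr(void) { }
static void dtlb_load(uint64_t tte_data) { (void)tte_data; }

/*
 * Returns true if the fault was handled inline (the retry path); false
 * means fall through to the C trap code (tl0_dmmu_prot_trap).
 */
static bool
dmmu_prot_fast_path(uint64_t *tte_data, uint64_t va)
{
	uint64_t d = *tte_data;

	if (!(d & TD_V) || !(d & TD_SW))
		return (false);

	d |= TD_W;                 /* TTE_SET_W */
	*tte_data = d;

	dtlb_demap_page(va >> PAGE_SHIFT << PAGE_SHIFT); /* page-align, demap */
	dmmu_clear_sfsr();

	/*
	 * In the real code TTE_SET_W is a casxa loop and the entry can be
	 * invalidated concurrently, so the assembly re-checks the valid bit
	 * here and retries without loading the TLB if it was cleared.
	 */
	if (!(d & TD_V))
		return (true);
	dtlb_load(d);
	return (true);
}
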
-
-ENTRY(tl0_dmmu_prot_trap)
-	/*
-	 * Put back the contents of the tag access register, in case we
-	 * faulted.
-	 */
-	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
-	membar	#Sync
-
-	/*
-	 * Switch to alternate globals.
-	 */
-	LOAD_ALT
-
-	/*
-	 * Check if we actually came from the kernel.
-	 */
-	rdpr	%tl, %g1
-	cmp	%g1, 1
-	bgt,a,pn %xcc, 1f
-	 nop
-
-	/*
-	 * Load the tar, sfar and sfsr.
-	 */
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
-	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
-	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
-	membar	#Sync
-
-	/*
-	 * Save the mmu registers and call common trap code.
-	 */
-	tl0_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	mov	%g3, %o4
-	mov	%g4, %o5
-	ba	%xcc, tl0_utrap
-	 mov	T_DATA_PROTECTION, %o0
-
-	/*
-	 * Handle faults during window spill/fill.
-	 */
-1:	RESUME_SPILLFILL_MMU_CLR_SFSR
-
-	/*
-	 * Load the sfar, sfsr and tar.  Clear the sfsr.
-	 */
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
-	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
-	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
-	membar	#Sync
-
-	tl1_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	mov	%g3, %o4
-	mov	%g4, %o5
-	ba	%xcc, tl1_trap
-	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
-END(tl0_dmmu_prot_trap)
-
-#endif /* notyet */
-ENTRY(tl0_sftrap)
-	rdpr	%tstate, %g1
-	and	%g1, TSTATE_CWP_MASK, %g1
-	wrpr	%g1, 0, %cwp
-	tl0_split
-	clr	%o1
-	set	trap, %o2
-	ba	%xcc, tl0_trap
-	 mov	%g2, %o0
-END(tl0_sftrap)
-
 	.macro	tl0_spill_bad	count
 	.rept	\count
 	sir
@@ -1252,449 +816,8 @@
 	.macro	tl1_insn_excptn
 	.align	32
 	.endm
-#if 0
-ENTRY(tl1_insn_exceptn_trap)
-	tl1_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g3, %o4
-	mov	%g4, %o5
-	ba	%xcc, tl1_trap
-	 mov	%g2, %o0
-END(tl1_insn_exceptn_trap)
-
-	.macro	tl1_fp_disabled
-	ba,a	%xcc, tl1_fp_disabled_1
-	 nop
-	.align	32
-	.endm
-
-ENTRY(tl1_fp_disabled_1)
-	rdpr	%tpc, %g1
-	set	fpu_fault_begin, %g2
-	sub	%g1, %g2, %g1
-	cmp	%g1, fpu_fault_size
-	bgeu,a,pn %xcc, 1f
-	 nop
-
-	GET_PCB(PCB_REG)
-	wr	%g0, FPRS_FEF, %fprs
-	wr	%g0, ASI_BLK_S, %asi
-	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
-	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
-	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
-	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
-	membar	#Sync
-	retry
-
-1:	tl1_split
-	clr	%o1
-	set	trap, %o2
-	ba	%xcc, tl1_trap
-	 mov	T_FP_DISABLED | T_KERNEL, %o0
-END(tl1_fp_disabled_1)
-
-	.macro	tl1_data_excptn
-	LOAD_ALT
-	ba,a	%xcc, tl1_data_excptn_trap
-	 nop
-	.align	32
-	.endm
-
-ENTRY(tl1_data_excptn_trap)
-
-END(tl1_data_excptn_trap)
-
-	.macro	tl1_align
-	ba,a	%xcc, tl1_align_trap
-	 nop
-	.align	32
-	.endm
-
-ENTRY(tl1_align_trap)
-END(tl1_align_trap)
-
-ENTRY(tl1_sfsr_trap)
-END(tl1_sfsr_trap)
-#endif
-	.macro	tl1_intr level, mask
-	tl1_split
-	set	\mask, %o1
-	ba	%xcc, tl1_intr
-	 mov	\level, %o0
-	.align	32
-	.endm
-
-	.macro	tl1_intr_level
-	INTR_LEVEL(1)
-	.endm
-#ifdef notyet
-/* SUN4V_FIXME */
-	.macro	tl1_immu_miss
-	/*
-	 * Load the context and the virtual page number from the tag access
-	 * register.  We ignore the context.
-	 */
-	wr	%g0, ASI_IMMU, %asi
-	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5
-
-	/*
-	 * Compute the address of the tte.  The tsb mask and address of the
-	 * tsb are patched at startup.
-	 */
-	.globl	tl1_immu_miss_patch_1
-tl1_immu_miss_patch_1:
-	sethi	%hi(TSB_KERNEL_MASK), %g6
-	or	%g6, %lo(TSB_KERNEL_MASK), %g6
-	sethi	%hi(TSB_KERNEL), %g7
-
-	srlx	%g5, TAR_VPN_SHIFT, %g5
-	and	%g5, %g6, %g6
-	sllx	%g6, TTE_SHIFT, %g6
-	add	%g6, %g7, %g6
-
-	/*
-	 * Load the tte.
-	 */
-	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
-
-	/*
-	 * Check that it's valid and executable and that the virtual page
-	 * numbers match.
-	 */
-	brgez,pn %g7, tl1_immu_miss_trap
-	 andcc	%g7, TD_EXEC, %g0
-	bz,pn	%xcc, tl1_immu_miss_trap
-	 srlx	%g6, TV_SIZE_BITS, %g6
-	cmp	%g5, %g6
-	bne,pn	%xcc, tl1_immu_miss_trap
-	 EMPTY
-
-	/*
-	 * Set the reference bit if it's currently clear.
-	 */
-	 andcc	%g7, TD_REF, %g0
-	bz,a,pn	%xcc, tl1_immu_miss_set_ref
-	 nop
-
-	/*
-	 * Load the tte data into the TLB and retry the instruction.
-	 */
-	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
-	retry
-	.align	128
-	.endm
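
Unlike the user TSB's bucket walk over page sizes, the kernel TSB above is
probed with a single direct index, and the mask and base are literals patched
into the sethi/or instructions at boot (tl1_immu_miss_patch_1 marks the patch
site). The indexing reduces to a few lines of C; the shift values here are
illustrative:

#include <stdint.h>

#define TAR_VPN_SHIFT 13  /* illustrative: bits below this are page offset/context */
#define TTE_SHIFT     5   /* illustrative: size of one tag/data pair */

/*
 * tsb_base and tsb_mask are the TSB_KERNEL / TSB_KERNEL_MASK values that
 * get patched into the instruction stream at startup.
 */
static inline uint64_t *
kernel_tsb_slot(uint64_t tag_access, uint64_t tsb_base, uint64_t tsb_mask)
{
	uint64_t vpn = tag_access >> TAR_VPN_SHIFT;

	return ((uint64_t *)(uintptr_t)(tsb_base +
	    ((vpn & tsb_mask) << TTE_SHIFT)));
}
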
-
-ENTRY(tl1_immu_miss_set_ref)
-	/*
-	 * Recompute the tte address, which we clobbered loading the tte.  The
-	 * tsb mask and address of the tsb are patched at startup.
-	 */
-	.globl	tl1_immu_miss_patch_2
-tl1_immu_miss_patch_2:
-	sethi	%hi(TSB_KERNEL_MASK), %g6
-	or	%g6, %lo(TSB_KERNEL_MASK), %g6
-	sethi	%hi(TSB_KERNEL), %g7
-
-	and	%g5, %g6, %g5
-	sllx	%g5, TTE_SHIFT, %g5
-	add	%g5, %g7, %g5
-
-	/*
-	 * Set the reference bit.
-	 */
-	TTE_SET_REF(%g5, %g6, %g7)
-
-	/*
-	 * May have become invalid during casxa, in which case start over.
-	 */
-	brgez,pn %g6, 1f
-	 nop
-
-	/*
-	 * Load the tte data into the TLB and retry the instruction.
-	 */
-	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
-1:	retry
-END(tl1_immu_miss_set_ref)
-
-ENTRY(tl1_immu_miss_trap)
-	/*
-	 * Switch to alternate globals.
-	 */
-	LOAD_ALT
-
-	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
-
-	tl1_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	ba	%xcc, tl1_trap
-	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
-END(tl1_immu_miss_trap)
-
-	.macro	tl1_dmmu_miss
-	/*
-	 * Load the context and the virtual page number from the tag access
-	 * register.
-	 */
-	wr	%g0, ASI_DMMU, %asi
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
-
-	/*
-	 * Extract the context from the contents of the tag access register.
-	 * If it's non-zero, this is a fault on a user address.  Note that the
-	 * faulting address is passed in %g1.
-	 */
-	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
-	brnz,a,pn %g6, tl1_dmmu_miss_user
-	 mov	%g5, %g1
-
-	/*
-	 * Check for the direct mapped physical region.  These addresses have
-	 * the high bit set so they are negative.
-	 */
-	brlz,pn %g5, tl1_dmmu_miss_direct
-	 EMPTY
-
-	/*
-	 * Compute the address of the tte.  The tsb mask and address of the
-	 * tsb are patched at startup.
-	 */
-	.globl	tl1_dmmu_miss_patch_1
-tl1_dmmu_miss_patch_1:
-	sethi	%hi(TSB_KERNEL_MASK), %g6
-	or	%g6, %lo(TSB_KERNEL_MASK), %g6
-	sethi	%hi(TSB_KERNEL), %g7
-
-	srlx	%g5, TAR_VPN_SHIFT, %g5
-	and	%g5, %g6, %g6
-	sllx	%g6, TTE_SHIFT, %g6
-	add	%g6, %g7, %g6
-
-	/*
-	 * Load the tte.
-	 */
-	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
-
-	/*
-	 * Check that it's valid and that the virtual page numbers match.
-	 */
-	brgez,pn %g7, tl1_dmmu_miss_trap
-	 srlx	%g6, TV_SIZE_BITS, %g6
-	cmp	%g5, %g6
-	bne,pn %xcc, tl1_dmmu_miss_trap
-	 EMPTY
-
-	/*
-	 * Set the reference bit if it's currently clear.
-	 */
-	 andcc	%g7, TD_REF, %g0
-	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
-	 nop
-
-	/*
-	 * Load the tte data into the TLB and retry the instruction.
-	 */
-	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
-	retry
-	.align	128
-	.endm
-
-ENTRY(tl1_dmmu_miss_set_ref)
-	/*
-	 * Recompute the tte address, which we clobbered loading the tte.  The
-	 * tsb mask and address of the tsb are patched at startup.
-	 */
-	.globl	tl1_dmmu_miss_patch_2
-tl1_dmmu_miss_patch_2:
-	sethi	%hi(TSB_KERNEL_MASK), %g6
-	or	%g6, %lo(TSB_KERNEL_MASK), %g6
-	sethi	%hi(TSB_KERNEL), %g7
-
-	and	%g5, %g6, %g5
-	sllx	%g5, TTE_SHIFT, %g5
-	add	%g5, %g7, %g5
-
-	/*
-	 * Set the reference bit.
-	 */
-	TTE_SET_REF(%g5, %g6, %g7)
-
-	/*
-	 * May have become invalid during casxa, in which case start over.
-	 */
-	brgez,pn %g6, 1f
-	 nop
 
-	/*
-	 * Load the tte data into the TLB and retry the instruction.
-	 */
-	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
-1:	retry
-END(tl1_dmmu_miss_set_ref)
 
-ENTRY(tl1_dmmu_miss_trap)
-	/*
-	 * Switch to alternate globals.
-	 */
-	LOAD_ALT
-
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-
-	KSTACK_CHECK
-
-	tl1_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	ba	%xcc, tl1_trap
-	 mov	T_DATA_MISS | T_KERNEL, %o0
-END(tl1_dmmu_miss_trap)
-
-ENTRY(tl1_dmmu_miss_direct)
-	/*
-	 * Mask off the high bits of the virtual address to get the physical
-	 * address, and or in the tte bits.  The virtual address bits that
-	 * correspond to the tte valid and page size bits are left set, so
-	 * they don't have to be included in the tte bits below.  We know they
-	 * are set because the virtual address is in the upper va hole.
-	 */
-	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
-	and	%g5, %g6, %g5
-	or	%g5, TD_CP | TD_CV | TD_W, %g5
-
-	/*
-	 * Load the tte data into the TLB and retry the instruction.
-	 */
-	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
-	retry
-END(tl1_dmmu_miss_direct)
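
The direct-mapped path above never consults a TSB: the faulting address lies
in the upper VA hole (negative as a signed 64-bit value), so the physical
address is recovered by masking and fixed attribute bits are OR'd in to
synthesize TTE data on the spot. Roughly, with placeholder mask and bit
values:

#include <stdint.h>

#define TLB_DIRECT_TO_TTE_MASK 0x800000ffffffe000ULL /* placeholder value */
#define TD_CP (1ULL << 9)   /* placeholder: cacheable (physical) */
#define TD_CV (1ULL << 8)   /* placeholder: cacheable (virtual) */
#define TD_W  (1ULL << 1)   /* placeholder: writable */

/*
 * Synthesize TTE data for a direct-mapped kernel virtual address.  The
 * valid and page-size bits are already present in the masked VA (the
 * address is in the upper VA hole), which is why they are not OR'd in.
 */
static inline uint64_t
direct_map_tte(uint64_t va)
{
	return ((va & TLB_DIRECT_TO_TTE_MASK) | TD_CP | TD_CV | TD_W);
}
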
-
-	.macro	tl1_dmmu_prot
-	ba,a	%xcc, tl1_dmmu_prot_1
-	 nop
-	.align	128
-	.endm
-
-ENTRY(tl1_dmmu_prot_1)
-	/*
-	 * Load the context and the virtual page number from the tag access
-	 * register.
-	 */
-	wr	%g0, ASI_DMMU, %asi
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
-
-	/*
-	 * Extract the context from the contents of the tag access register.
-	 * If it's non-zero, this is a fault on a user address.  Note that the
-	 * faulting address is passed in %g1.
-	 */
-	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
-	brnz,a,pn %g6, tl1_dmmu_prot_user
-	 mov	%g5, %g1
-
-	/*
-	 * Compute the address of the tte.  The tsb mask and address of the
-	 * tsb are patched at startup.
-	 */
-	.globl	tl1_dmmu_prot_patch_1
-tl1_dmmu_prot_patch_1:
-	sethi	%hi(TSB_KERNEL_MASK), %g6
-	or	%g6, %lo(TSB_KERNEL_MASK), %g6
-	sethi	%hi(TSB_KERNEL), %g7
-
-	srlx	%g5, TAR_VPN_SHIFT, %g5
-	and	%g5, %g6, %g6
-	sllx	%g6, TTE_SHIFT, %g6
-	add	%g6, %g7, %g6
-
-	/*
-	 * Load the tte.
-	 */
-	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
-
-	/*
-	 * Check that it's valid and writable and that the virtual page
-	 * numbers match.
-	 */
-	brgez,pn %g7, tl1_dmmu_prot_trap
-	 andcc	%g7, TD_SW, %g0
-	bz,pn	%xcc, tl1_dmmu_prot_trap
-	 srlx	%g6, TV_SIZE_BITS, %g6
-	cmp	%g5, %g6
-	bne,pn	%xcc, tl1_dmmu_prot_trap
-	 EMPTY
-
-	/*
-	 * Delete the old TLB entry and clear the sfsr.
-	 */
-	 sllx	%g5, TAR_VPN_SHIFT, %g6
-	or	%g6, TLB_DEMAP_NUCLEUS, %g6
-	stxa	%g0, [%g6] ASI_DMMU_DEMAP
-	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
-	membar	#Sync
-
-	/*
-	 * Recompute the tte address, which we clobbered loading the tte.  The
-	 * tsb mask and address of the tsb are patched at startup.
-	 */
-	.globl	tl1_dmmu_prot_patch_2
-tl1_dmmu_prot_patch_2:
-	sethi	%hi(TSB_KERNEL_MASK), %g6
-	or	%g6, %lo(TSB_KERNEL_MASK), %g6
-	sethi	%hi(TSB_KERNEL), %g7
-
-	and	%g5, %g6, %g5
-	sllx	%g5, TTE_SHIFT, %g5
-	add	%g5, %g7, %g5
-
-	/*
-	 * Set the hardware write bit.
-	 */
-	TTE_SET_W(%g5, %g6, %g7)
-
-	/*
-	 * May have become invalid during casxa, in which case start over.
-	 */
-	brgez,pn %g6, 1f
-	 or	%g6, TD_W, %g6
-
-	/*
-	 * Load the tte data into the TLB and retry the instruction.
-	 */
-	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
-1:	retry
-END(tl1_dmmu_prot_1)
-
-ENTRY(tl1_dmmu_prot_trap)
-	/*
-	 * Switch to alternate globals.
-	 */
-	LOAD_ALT
-
-	/*
-	 * Load the sfar, sfsr and tar.  Clear the sfsr.
-	 */
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
-	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
-	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
-	membar	#Sync
-
-	tl1_split
-	clr	%o1
-	set	trap, %o2
-	mov	%g2, %o3
-	mov	%g3, %o4
-	mov	%g4, %o5
-	ba	%xcc, tl1_trap
-	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
-END(tl1_dmmu_prot_trap)
-#endif /* notyet */
-
-
 	.macro	tl1_soft	count
 	.rept	\count
 	tl1_gen	T_SOFT | T_KERNEL
@@ -1896,9 +1019,6 @@
 	tl1_reserved		256
 .globl tl1_end
 tl1_end:					
-/* 
- * no discretionary traps at TL 1 - leaving us with a 24k trap table 
- */
 
 spill_clean:
 	sethi	%hi(nwin_minus_one), %g5
@@ -1921,140 +1041,6 @@
 
 	
 	
-/*
- * User trap entry point.
- *
- * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
- *                u_long sfsr)
- *
- * This handles redirecting a trap back to usermode as a user trap.  The user
- * program must have first registered a trap handler with the kernel using
- * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
- * for it to return to the trapping code directly; it will not return through
- * the kernel.  The trap type is passed in %o0; all out registers must be
- * passed through to tl0_trap or to usermode untouched.  Note that the
- * parameters passed in out registers may be used by the user trap handler.
- * Do not change the registers they are passed in or you will break the ABI.
- *
- * If the trap type allows user traps, setup state to execute the user trap
- * handler and bounce back to usermode; otherwise branch to tl0_trap.
- */
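
For context, registering such a handler from userland would look roughly like
the sketch below. The request number, trap type, and struct layout are
hypothetical stand-ins; the real definitions live in <machine/utrap.h> and
<machine/sysarch.h>.

#include <stdio.h>

extern int sysarch(int number, void *args);

#define SPARC_UTRAP_INSTALL 1          /* hypothetical request number */
#define UT_FP_DISABLED      8          /* hypothetical trap type */

typedef void (*utrap_handler_t)(void);

struct utrap_install_args {            /* hypothetical layout */
	int             type;
	utrap_handler_t handler;
};

/* Runs entirely in usermode and returns straight to the trapping code. */
static void
fp_disabled_handler(void)
{
}

int
main(void)
{
	struct utrap_install_args uia = { UT_FP_DISABLED, fp_disabled_handler };

	if (sysarch(SPARC_UTRAP_INSTALL, &uia) == -1)
		perror("sysarch");
	return (0);
}
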
-#if 0
-ENTRY(tl0_utrap)
-	/*
-	 * Check if the trap type allows user traps.
-	 */
-	cmp	%o0, UT_MAX
-	bge,a,pt %xcc, tl0_trap
-	 nop
-
-	/*
-	 * Load the user trap handler from the utrap table.
-	 */
-	ldx	[PCPU(CURTHREAD)], %l0
-	ldx	[%l0 + TD_PROC], %l0
-	ldx	[%l0 + P_MD + MD_UTRAP], %l0
-	brz,pt	%l0, tl0_trap
-	 sllx	%o0, PTR_SHIFT, %l1
-	ldx	[%l0 + %l1], %l0
-	brz,a,pt %l0, tl0_trap
-	 nop
-
-	/*
-	 * If the save we did on entry to the kernel had to spill a window
-	 * to the pcb, pretend we took a spill trap instead.  Any windows
-	 * that are in the pcb must be copied out or the fill handler will
-	 * not be able to find them, since the user trap handler returns
-	 * directly to the trapping code.  Note that we only support precise
-	 * user traps, which implies that the condition that caused the trap
-	 * in the first place is still valid, so it will occur again when we
-	 * re-execute the trapping instruction.
-	 */
-#if 0		
-	ldx	[PCB_REG + PCB_NSAVED], %l1
-#endif
-	brnz,a,pn %l1, tl0_trap
-	 mov	T_SPILL, %o0
-
-	/*
-	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
-	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
-	 * it may be clobbered by an interrupt before the user trap code
-	 * can read it, and we must pass %tstate in order to restore %ccr
-	 * and %asi.  The %fsr must be stored to memory, so we use the
-	 * temporary stack for that.
-	 */
-	rd	%fprs, %l1
-	or	%l1, FPRS_FEF, %l2
-	wr	%l2, 0, %fprs
-#if 0
-	dec	8, ASP_REG
-	stx	%fsr, [ASP_REG]
-	ldx	[ASP_REG], %l4
-	inc	8, ASP_REG
-#endif
-	wr	%l1, 0, %fprs
-
-	rdpr	%tstate, %l5
-	rdpr	%tpc, %l6
-	rdpr	%tnpc, %l7
-
-	/*
-	 * Setup %tnpc to return to.

>>> TRUNCATED FOR MAIL (1000 lines) <<<


