Date:      Tue, 21 Feb 2006 07:33:44 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 92117 for review
Message-ID:  <200602210733.k1L7XiNf097883@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=92117

Change 92117 by kmacy@kmacy_storage:sun4v_work on 2006/02/21 07:33:10

	separate sun4v specific cpufunc into sun4v_cpufunc.h
	add page directory to pcb
	rename per-cpu pmap to curpmap
	make ptov more sensible by using pv_entries
	remove useless PMAP_STATS
	import Solaris' bad trap level defines
	add defines for TSB manipulation
	simplify tsb manipulation to straightforward get / set
	simplify tte manipulation with get/set/clear functions
	gut wstate.h - switch to Solaris' model of wstate usage
	add skeleton wbuf.S for spill / fill exceptions
	#ifdef out references to old WSTATE defines in exception.S
	import panic_bad_hcall into hcall.S
	shuffle calls in sparc64_init into safer order
	pmap.c is starting to fall into place
	  - where possible, import bits from i386's pmap.c
	  - basic tsb manipulation is in place
	  - tte manipulation appears somewhat sane
	add pmap related calls (invlpg, invlctx, invltlb, set_pdir_scratchpad, and load_real_dw) to support.S
	#ifdef out currently invalid bits in swtch.S
	enable ofw setting of trap table in trap.c
	re-write tsb.c from scratch; it currently only has get / set functions
	remove PMAP_STATS reference from vm_machdep.c

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/_types.h#2 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/cpufunc.h#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/param.h#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pcb.h#2 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pcpu.h#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pmap.h#5 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/trap.h#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tsb.h#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte.h#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/wstate.h#2 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#9 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/hcall.S#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/machdep.c#7 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#8 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/support.S#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/swtch.S#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/trap.c#5 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tsb.c#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte.c#1 add
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/vm_machdep.c#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/wbuf.S#1 add

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/_types.h#2 (text+ko) ====

@@ -55,7 +55,7 @@
  * Standard type definitions.
  */
 typedef	__int32_t	__clock_t;		/* clock()... */
-typedef	unsigned int	__cpumask_t;
+typedef	__uint64_t	__cpumask_t;
 typedef	__int64_t	__critical_t;
 typedef	double		__double_t;
 typedef	float		__float_t;

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/cpufunc.h#4 (text+ko) ====

@@ -224,33 +224,8 @@
 void ascopyfrom(u_long sasi, vm_offset_t src, caddr_t dst, size_t len);
 void ascopyto(caddr_t src, u_long dasi, vm_offset_t dst, size_t len);
 void aszero(u_long asi, vm_offset_t dst, size_t len);
-void set_mmfsa_scratchpad(vm_paddr_t mmfsa);
-void setwstate(u_long wstate);
-void set_tba(void *tba);
-/*
- * Ultrasparc II doesn't implement popc in hardware.  Suck.
- */
-#if 0
-#define	HAVE_INLINE_FFS
-/*
- * See page 202 of the SPARC v9 Architecture Manual.
- */
-static __inline int
-ffs(int mask)
-{
-	int result;
-	int neg;
-	int tmp;
 
-	__asm __volatile(
-	"	neg	%3, %1 ;	"
-	"	xnor	%3, %1, %2 ;	"
-	"	popc	%2, %0 ;	"
-	"	movrz	%3, %%g0, %0 ;	"
-	: "=r" (result), "=r" (neg), "=r" (tmp) : "r" (mask));
-	return (result);
-}
-#endif
+#include <machine/sun4v_cpufunc.h>
 
 #undef LDNC_GEN
 #undef STNC_GEN

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/param.h#4 (text+ko) ====

@@ -131,5 +131,10 @@
 
 #define	pgtok(x)		((unsigned long)(x) * (PAGE_SIZE / 1024))
 
+#define NPGPTD          1                          /* number of page table directory pages */
+#define NBPTD           (NPGPTD << PAGE_SHIFT)     /* number of bytes in a page table directory */
+#define NPDEPG          (PAGE_SIZE/(sizeof (vm_offset_t)))
+
+
 #endif /* !_MACHINE_PARAM_H_ */
 #endif /* !_NO_NAMESPACE_POLLUTION */
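
For reference, the arithmetic behind the new page-directory constants works out as below. This is a standalone illustration, not part of the change; the 8 KB page size (PAGE_SHIFT = 13) and the 8-byte entry type are assumptions restated here so the numbers can be checked in isolation.

/*
 * Standalone check of the page-directory sizing, assuming sun4v's
 * 8 KB pages (PAGE_SHIFT = 13) and 8-byte page-directory entries.
 */
#include <stdio.h>

#define	PAGE_SHIFT	13
#define	PAGE_SIZE	(1UL << PAGE_SHIFT)

#define	NPGPTD	1				/* directory pages */
#define	NBPTD	(NPGPTD << PAGE_SHIFT)		/* directory bytes */
#define	NPDEPG	(PAGE_SIZE / sizeof(unsigned long))	/* entries per page */

int
main(void)
{
	/* Expect NBPTD = 8192 bytes and NPDEPG = 1024 entries. */
	printf("NBPTD = %lu, NPDEPG = %lu\n",
	    (unsigned long)NBPTD, (unsigned long)NPDEPG);
	return (0);
}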

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pcb.h#2 (text+ko) ====

@@ -45,7 +45,8 @@
 	uint64_t pcb_nsaved;
 	uint64_t pcb_pc;
 	uint64_t pcb_sp;
-	uint64_t pcb_pad[4];
+	vm_paddr_t *pcb_pdir;
+	uint64_t pcb_pad[3];
 } __aligned(64);
 
 #ifdef _KERNEL

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pcpu.h#3 (text+ko) ====

@@ -49,8 +49,9 @@
 	struct	intr_request *pc_irhead;				\
 	struct	intr_request **pc_irtail;				\
 	struct	intr_request *pc_irfree;				\
-	struct 	pmap *pc_pmap;						\
+	struct 	pmap *pc_curpmap;					\
 	vm_offset_t pc_addr;						\
+	vm_offset_t pc_tsb;						\
 	vm_offset_t *pc_mondo_data;                                     \
         vm_offset_t *pc_cpu_list;                                       \
 	vm_offset_t *pc_cpu_q;                                          \

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pmap.h#5 (text+ko) ====

@@ -39,28 +39,31 @@
 #ifndef	_MACHINE_PMAP_H_
 #define	_MACHINE_PMAP_H_
 
+#include <sys/types.h>
 #include <sys/queue.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
 #include <machine/cache.h>
-#include <machine/tte.h>
 
 #define	PMAP_CONTEXT_MAX	8192
 
 typedef	struct pmap *pmap_t;
 
+struct pv_entry;
+
 struct md_page {
-	TAILQ_HEAD(, tte) tte_list;
-	struct	pmap *pmap;
-	uint32_t flags;
+	int pv_list_count;
+	TAILQ_HEAD(, pv_entry) pv_list;
 };
 
+
 struct pmap {
 	struct	mtx pm_mtx;
-	struct	tte *pm_tsb;
+	vm_paddr_t *pm_pdir;
+	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
 	vm_object_t pm_tsb_obj;
-	u_int	pm_active;
-	u_int	pm_context[MAXCPU];
+	cpumask_t pm_active;
+	uint16_t pm_context;
 	struct	pmap_statistics pm_stats;
 };
 
@@ -75,25 +78,34 @@
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
 
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
+ */
+
+typedef struct pv_entry {
+	pmap_t  pv_pmap;
+	vm_offset_t	pv_va;
+	TAILQ_ENTRY(pv_entry) pv_list;
+	TAILQ_ENTRY(pv_entry) pv_plist;
+} *pv_entry_t;
+
+#define pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
+
 void	pmap_bootstrap(vm_offset_t ekva);
 vm_paddr_t pmap_kextract(vm_offset_t va);
-void	pmap_kenter(vm_offset_t va, vm_page_t m);
+void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 void	pmap_kremove(vm_offset_t);
 void	pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags);
 void	pmap_kremove_flags(vm_offset_t va);
 
-int	pmap_cache_enter(vm_page_t m, vm_offset_t va);
-void	pmap_cache_remove(vm_page_t m, vm_offset_t va);
-
-int	pmap_remove_tte(struct pmap *pm1, struct pmap *pm2, struct tte *tp,
-			vm_offset_t va);
-int	pmap_protect_tte(struct pmap *pm1, struct pmap *pm2, struct tte *tp,
-			 vm_offset_t va);
-
 void	pmap_set_tsbs(void);
 
 void	pmap_clear_write(vm_page_t m);
 
+void    pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
+void    pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
+void    pmap_invalidate_all(pmap_t pmap);
 #define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
 
 extern	struct pmap kernel_pmap_store;
@@ -113,25 +125,4 @@
 		return (1);
 }
 
-#define pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.tte_list))
-
-#ifdef PMAP_STATS
-
-SYSCTL_DECL(_debug_pmap_stats);
-
-#define	PMAP_STATS_VAR(name) \
-	static long name; \
-	SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, name, CTLFLAG_RW, \
-	    &name, 0, "")
-
-#define	PMAP_STATS_INC(var) \
-	atomic_add_long(&var, 1)
-
-#else
-
-#define	PMAP_STATS_VAR(name)
-#define	PMAP_STATS_INC(var)
-
-#endif
-
 #endif /* !_MACHINE_PMAP_H_ */
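
The md_page/pv_entry pair above replaces the old per-page tte_list. A rough sketch of how a per-page operation would walk those lists, in the style of the i386 pmap this change borrows from, follows; the function name is hypothetical and locking is elided.

/*
 * Hypothetical helper (not from the change): walk every (pmap, va)
 * mapping of a page via its pv_list and invalidate each one.
 */
#include <sys/param.h>
#include <sys/queue.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/pmap.h>

static void
example_invalidate_page_mappings(vm_page_t m)
{
	pv_entry_t pv;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
}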

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/trap.h#3 (text+ko) ====

@@ -89,6 +89,23 @@
 
 #define	T_KERNEL			64
 
+#define	PTL1_BAD_DEBUG		0
+#define	PTL1_BAD_WTRAP		1
+#define	PTL1_BAD_KMISS		2
+#define	PTL1_BAD_KPROT_FAULT	3
+#define	PTL1_BAD_ISM		4
+#define	PTL1_BAD_MMUTRAP	5
+#define	PTL1_BAD_TRAP		6
+#define	PTL1_BAD_FPTRAP		7
+#define	PTL1_BAD_INTR_REQ	8
+#define	PTL1_BAD_TRACE_PTR	9
+#define	PTL1_BAD_STACK		10
+#define	PTL1_BAD_DTRACE_FLAGS	11
+#define	PTL1_BAD_CTX_STEAL	12
+#define	PTL1_BAD_ECC		13
+#define	PTL1_BAD_HCALL		14
+#define	PTL1_BAD_GL		15
+
 #ifndef LOCORE
 extern const char *trap_msg[];
 void trap_init(void);
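
The PTL1_BAD_* codes give a compact reason index for fatal trap-level-1 errors. As a sketch of how they might be consumed (the message table, the strings, and the handler below are illustrative assumptions, not part of the change):

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/trap.h>

/* Illustrative messages only; the strings are assumptions. */
static const char *ptl1_bad_msg[] = {
	[PTL1_BAD_DEBUG]	= "debug",
	[PTL1_BAD_WTRAP]	= "window trap",
	[PTL1_BAD_KMISS]	= "kernel tlb miss",
	[PTL1_BAD_KPROT_FAULT]	= "kernel protection fault",
	[PTL1_BAD_ISM]		= "ism",
	[PTL1_BAD_MMUTRAP]	= "mmu trap",
	[PTL1_BAD_TRAP]		= "generic bad trap",
	[PTL1_BAD_FPTRAP]	= "fp trap",
	[PTL1_BAD_INTR_REQ]	= "interrupt request",
	[PTL1_BAD_TRACE_PTR]	= "trace pointer",
	[PTL1_BAD_STACK]	= "stack overflow",
	[PTL1_BAD_DTRACE_FLAGS]	= "dtrace flags",
	[PTL1_BAD_CTX_STEAL]	= "context steal",
	[PTL1_BAD_ECC]		= "ecc error",
	[PTL1_BAD_HCALL]	= "unexpected hypervisor call error",
	[PTL1_BAD_GL]		= "bad global register level",
};

static void
ptl1_panic(int reason)
{
	panic("fatal trap at tl > 0: %s", ptl1_bad_msg[reason]);
}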

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tsb.h#3 (text+ko) ====

@@ -32,6 +32,8 @@
 #ifndef	_MACHINE_TSB_H_
 #define	_MACHINE_TSB_H_
 
+#define MAX_TSB_INFO                     2
+
 #define	TSB_PAGES_SHIFT			(4)
 #define	TSB_PAGES			(1 << TSB_PAGES_SHIFT)
 #define	TSB_BSHIFT			(TSB_PAGES_SHIFT + PAGE_SHIFT)
@@ -43,26 +45,43 @@
 	(TSB_BSHIFT - TSB_BUCKET_SHIFT - TTE_SHIFT)
 #define	TSB_BUCKET_MASK			((1 << TSB_BUCKET_ADDRESS_BITS) - 1)
 
-extern vm_size_t tsb_kernel_mask;
-extern vm_size_t tsb_kernel_size;
-extern vm_paddr_t tsb_kernel_phys;
+
+#define	TSB_ENTRY_SHIFT		4	/* each entry = 128 bits = 16 bytes */
+#define	TSB_ENTRY_SIZE		(1 << 4)
+#define	TSB_START_SIZE		9
+#define	TSB_ENTRIES(tsbsz)	(1 << (TSB_START_SIZE + tsbsz))
+#define	TSB_BYTES(tsbsz)	(TSB_ENTRIES(tsbsz) << TSB_ENTRY_SHIFT)
+#define	TSB_OFFSET_MASK(tsbsz)	(TSB_ENTRIES(tsbsz) - 1)
+
+
+/*
+ * Values for "tsb_ttesz_mask" bitmask.
+ */
+#define	TSB8K	(1 << TTE8K)
+#define	TSB64K  (1 << TTE64K)
+#define	TSB512K (1 << TTE512K)
+#define	TSB4M   (1 << TTE4M)
+#define	TSB32M  (1 << TTE32M)
+#define	TSB256M (1 << TTE256M)
 
 
-typedef int (tsb_callback_t)(struct pmap *, struct pmap *, struct tte *, vm_offset_t);
+typedef struct tsb_info {
+	vm_offset_t     tsb_va;
+	vm_paddr_t      tsb_pa;              /* tsb physical address */
+	uint16_t        tsb_szc;             /* tsb size code        */
+	uint16_t        tsb_flags;           
+	uint32_t        tsb_ttesz_mask;      /* page size masks      */
+} *tsb_info_t;
+	
 
-void tsb_set_tte(vm_paddr_t tsb_phys, vm_offset_t va, uint64_t tsb_tag, uint64_t tsb_data);
 
-struct tte *tsb_tte_lookup(pmap_t pm, vm_offset_t va);
 
-void tsb_tte_remove(struct tte *stp);
 
-struct tte *tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz,
-			   u_long data);
+void tsb_set_tte(tsb_info_t tsb, vm_offset_t va, vm_paddr_t pa, uint64_t flags, uint64_t ctx);
 
-void tsb_tte_local_remove(struct tte *tp);
+tte_t tsb_get_tte(tsb_info_t tsb, vm_offset_t va, uint64_t ctx);
 
-void tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
-		 tsb_callback_t *callback);
+void tsb_clear_tte(tsb_info_t tsb, vm_offset_t, uint64_t ctx);
 
 
 #endif /* !_MACHINE_TSB_H_ */
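
With tsb_foreach and the old tte-pointer interface gone, TSB access reduces to the three calls declared above. A minimal usage sketch follows; the kernel_tsb descriptor, the flag combination, and context 0 for kernel mappings are assumptions for illustration.

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/tte.h>
#include <machine/tsb.h>

extern struct tsb_info kernel_tsb;	/* assumed kernel TSB descriptor */

static void
example_kernel_mapping(vm_offset_t va, vm_paddr_t pa)
{
	tte_t tte;

	/* Install an 8K kernel mapping in context 0. */
	tsb_set_tte(&kernel_tsb, va, pa, TTE_KERNEL | VTD_8K, 0);

	/* Read it back; tte_val is set if the entry is present. */
	tte = tsb_get_tte(&kernel_tsb, va, 0);
	KASSERT(tte.tte_val, ("mapping was not entered"));

	/* Tear the mapping down again. */
	tsb_clear_tte(&kernel_tsb, va, 0);
}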

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte.h#3 (text+ko) ====

@@ -32,7 +32,7 @@
 #ifndef	_MACHINE_TTE_H_
 #define	_MACHINE_TTE_H_
 
-#define	TTE_SHIFT	(5)
+#define	TTE_SHIFT	(4)
 
 #define	TD_SIZE_SHIFT	(61)
 #define	TD_SOFT2_SHIFT	(50)
@@ -40,30 +40,25 @@
 #define	TD_PA_SHIFT	(13)
 #define	TD_SOFT_SHIFT	(7)
 
-#define	TD_SIZE_BITS	(2)
 #define	TD_SOFT2_BITS	(9)
 #define	TD_DIAG_BITS	(9)
 #define	TD_PA_BITS	(28)
 #define	TD_SOFT_BITS	(6)
 
-#define	TD_SIZE_MASK	((1UL << TD_SIZE_BITS) - 1)
 #define	TD_SOFT2_MASK	((1UL << TD_SOFT2_BITS) - 1)
 #define	TD_DIAG_MASK	((1UL << TD_DIAG_BITS) - 1)
 #define	TD_PA_MASK	((1UL << TD_PA_BITS) - 1)
 #define	TD_SOFT_MASK	((1UL << TD_SOFT_BITS) - 1)
 
-#define	TS_8K		(0UL)
-#define	TS_64K		(1UL)
-#define	TS_512K		(2UL)
-#define	TS_4M		(3UL)
-#define	TS_32M		(4UL)
-#define	TS_256M		(5UL)
-#define	TS_2G		(6UL)
-#define	TS_16G		(7UL)
+#define	TTE8K		(0UL)
+#define	TTE64K		(1UL)
+#define	TTE512K	        (2UL)
+#define	TTE4M		(3UL)
+#define	TTE32M		(4UL)
+#define	TTE256M	        (5UL)
+#define	TTE2G		(6UL)
+#define	TTE16G		(7UL)
 
-#define	TS_MIN		TS_8K
-#define	TS_MAX		TS_4M
-
 #define	TD_V		(1UL << 63)
 #define	TD_8K		(TS_8K << TD_SIZE_SHIFT)
 #define	TD_64K		(TS_64K << TD_SIZE_SHIFT)
@@ -87,8 +82,6 @@
 #define	TD_W		(1UL << 1)
 #define	TD_G		(1UL << 0)
 
-#define	TV_SIZE_BITS	(TD_SIZE_BITS)
-#define	TV_VPN(va, sz)	((((va) >> TTE_PAGE_SHIFT(sz)) << TV_SIZE_BITS) | sz)
 
 #define	TTE_SIZE_SPREAD	(3)
 #define	TTE_PAGE_SHIFT(sz) \
@@ -117,19 +110,8 @@
 	memset(tp, 0, sizeof(*tp))
 
 struct pmap;
-
-typedef struct tte {
-	u_long	tte_vpn;
-	u_long	tte_data;
-	TAILQ_ENTRY(tte) tte_link;
-} *tte_t;
-
-static __inline int
-tte_match(struct tte *tp, vm_offset_t va)
-{
-	return (((tp->tte_data & TD_V) != 0) &&
-	    (tp->tte_vpn == TV_VPN(va, TTE_GET_SIZE(tp))));
-}
+#define PTE_SHIFT       (3)
+#define PT_SHIFT        (PAGE_SHIFT - PTE_SHIFT)
 
 #define	VTD_SOFT_SHIFT	(56)
 
@@ -144,16 +126,112 @@
 #define	VTD_X		(1UL << 7)
 #define	VTD_W		(1UL << 6)
 
-#define	VTD_REF		((1UL << 3) << VTD_SOFT_SHIFT)
-#define	VTD_SW		((1UL << 1) << VTD_SOFT_SHIFT)
+#define	VTD_REF		((1UL << 3) << VTD_SOFT_SHIFT) /* XXX WRONG */
+#define	VTD_SW		((1UL << 1) << VTD_SOFT_SHIFT) /* XXX WRONG */
+#define	VTD_WIRED	((1UL << 0) << VTD_SOFT_SHIFT)
+
+
+
+#define	VTD_8K		TTE8K 
+#define	VTD_64K		TTE64K 
+#define	VTD_512K	TTE512K 
+#define	VTD_4M		TTE4M 
+#define	VTD_32M		TTE32M 
+#define	VTD_256M	TTE256M 
+
+/*
+ * default flags for kernel pages
+ */
+#define TTE_KERNEL      VTD_V | VTD_CP | VTD_CV | VTD_P | VTD_X | VTD_W
+
+
+typedef union {
+	struct tte {
+		unsigned int	v:1;		/* <63> valid */
+		unsigned int	nfo:1;		/* <62> non-fault only */
+		unsigned int	sw:4;	        /* <61:58> sw */
+		unsigned int    wired:1;        /* <57> wired */
+		unsigned int	lock:1;		/* <56> sw - locked */
+		unsigned long	pa:43;	        /* <55:13> pa */
+		unsigned int	ie:1;		/* <12> 1=invert endianness */
+		unsigned int	e:1;		/* <11> side effect */
+		unsigned int	cp:1;		/* <10> physically cache */
+		unsigned int	cv:1;		/* <9> virtually cache */
+		unsigned int	p:1;		/* <8> privilege required */
+		unsigned int	x:1;		/* <7> execute perm */
+		unsigned int	w:1;		/* <6> write perm */
+		unsigned int	ref:1;		/* <5> sw - ref */
+		unsigned int	wr_perm:1;	/* <4> sw - write perm */
+		unsigned int	rsvd:1;		/* <3> reserved */
+		unsigned int	sz:3;		/* <2:0> pagesize */
+	} tte_bit;
+	uint64_t		ll;
+} tte_t;
+
+#define	tte_val 	tte_bit.v		/* use < 0 check in asm */
+#define	tte_size	tte_bit.sz
+#define	tte_nfo		tte_bit.nfo
+#define	tte_ie		tte_bit.ie		
+#define	tte_wired       tte_bit.wired
+#define	tte_pa	        tte_bit.pa
+#define	tte_ref		tte_bit.ref
+#define	tte_wr_perm	tte_bit.wr_perm
+#define	tte_exec_perm	tte_bit.x
+#define	tte_lock	tte_bit.lock
+#define	tte_cp		tte_bit.cp
+#define	tte_cv		tte_bit.cv
+#define	tte_se		tte_bit.e
+#define	tte_priv	tte_bit.p
+#define	tte_hwwr	tte_bit.w
+
+#define	TTE_IS_VALID(ttep)	((ttep)->tte_inthi < 0)
+#define	TTE_SET_INVALID(ttep)	((ttep)->tte_val = 0)
+#define	TTE_IS_8K(ttep)		(TTE_CSZ(ttep) == TTE8K)
+#define	TTE_IS_WIRED(ttep)	((ttep)->tte_wired)
+#define	TTE_IS_WRITABLE(ttep)	((ttep)->tte_wr_perm)
+#define	TTE_IS_EXECUTABLE(ttep)	((ttep)->tte_exec_perm)
+#define	TTE_IS_PRIVILEGED(ttep)	((ttep)->tte_priv)
+#define	TTE_IS_NOSYNC(ttep)	((ttep)->tte_no_sync)
+#define	TTE_IS_LOCKED(ttep)	((ttep)->tte_lock)
+#define	TTE_IS_SIDEFFECT(ttep)	((ttep)->tte_se)
+#define	TTE_IS_NFO(ttep)	((ttep)->tte_nfo)
+
+#define	TTE_IS_REF(ttep)	((ttep)->tte_ref)
+#define	TTE_IS_MOD(ttep)	((ttep)->tte_hwwr)
+#define	TTE_IS_IE(ttep)		((ttep)->tte_ie)
+#define	TTE_SET_SUSPEND(ttep)	((ttep)->tte_suspend = 1)
+#define	TTE_CLR_SUSPEND(ttep)	((ttep)->tte_suspend = 0)
+#define	TTE_IS_SUSPEND(ttep)	((ttep)->tte_suspend)
+#define	TTE_SET_REF(ttep)	((ttep)->tte_ref = 1)
+#define	TTE_CLR_REF(ttep)	((ttep)->tte_ref = 0)
+#define	TTE_SET_LOCKED(ttep)	((ttep)->tte_lock = 1)
+#define	TTE_CLR_LOCKED(ttep)	((ttep)->tte_lock = 0)
+#define	TTE_SET_MOD(ttep)	((ttep)->tte_hwwr = 1)
+#define	TTE_CLR_MOD(ttep)	((ttep)->tte_hwwr = 0)
+#define	TTE_SET_RM(ttep)						\
+	(((ttep)->tte_intlo) =						\
+	(ttep)->tte_intlo | TTE_HWWR_INT | TTE_REF_INT)
+#define	TTE_CLR_RM(ttep)						\
+	(((ttep)->tte_intlo) =						\
+	(ttep)->tte_intlo & ~(TTE_HWWR_INT | TTE_REF_INT))
+
+#define	TTE_SET_WRT(ttep)	((ttep)->tte_wr_perm = 1)
+#define	TTE_CLR_WRT(ttep)	((ttep)->tte_wr_perm = 0)
+#define	TTE_SET_EXEC(ttep)	((ttep)->tte_exec_perm = 1)
+#define	TTE_CLR_EXEC(ttep)	((ttep)->tte_exec_perm = 0)
+#define	TTE_SET_PRIV(ttep)	((ttep)->tte_priv = 1)
+#define	TTE_CLR_PRIV(ttep)	((ttep)->tte_priv = 0)
+
+#define	TTE_BSZS_SHIFT(sz)	((sz) * 3)
 
+struct pmap;
 
-#define	VTD_8K		TS_8K 
-#define	VTD_64K		TS_64K 
-#define	VTD_512K	TS_512K 
-#define	VTD_4M		TS_4M 
-#define	VTD_32M		TS_32M 
-#define	VTD_256M	TS_256M 
+void tte_clear_phys_bit(vm_page_t m, uint64_t flags);
+void tte_set_phys_bit(vm_page_t m, uint64_t flags);
+boolean_t tte_get_phys_bit(vm_page_t m, uint64_t flags);
 
+void tte_clear_virt_bit(struct pmap *pmap, vm_offset_t va, uint64_t flags);
+void tte_set_virt_bit(struct pmap *pmap, vm_offset_t va, uint64_t flags);
+boolean_t tte_get_virt_bit(struct pmap *pmap, vm_offset_t va, uint64_t flags);
 
 #endif /* !_MACHINE_TTE_H_ */
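
The tte_t union lets the same 64-bit word be built field-by-field and then handed around as a plain uint64_t. A sketch of constructing a kernel 8K TTE with the new accessors follows; the helper name and the use of PAGE_SHIFT (13 on sun4v) for the pa field are assumptions.

#include <sys/param.h>

#include <machine/tte.h>

/* Hypothetical helper: build a valid, privileged, cacheable 8K TTE. */
static uint64_t
example_make_kernel_tte(vm_paddr_t pa)
{
	tte_t tte;

	tte.ll = 0;
	tte.tte_val = 1;			/* valid */
	tte.tte_priv = 1;			/* privilege required */
	tte.tte_cp = 1;				/* physically cacheable */
	tte.tte_cv = 1;				/* virtually cacheable */
	tte.tte_exec_perm = 1;			/* executable */
	tte.tte_hwwr = 1;			/* writable */
	tte.tte_pa = pa >> PAGE_SHIFT;		/* pa<55:13> */
	tte.tte_size = TTE8K;

	return (tte.ll);
}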

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/wstate.h#2 (text+ko) ====

@@ -1,91 +1,32 @@
-/*-
- * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Berkeley Software Design Inc's name may not be used to endorse or
- *    promote products derived from this software without specific prior
- *    written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	from: BSDI: wstate.h,v 1.4 1997/09/18 13:05:51 torek Exp
- * $FreeBSD: src/sys/sparc64/include/wstate.h,v 1.4 2002/02/25 18:37:17 jake Exp $
- */
 
 #ifndef	_MACHINE_WSTATE_H_
 #define	_MACHINE_WSTATE_H_
 
 /*
- * Window state register bits.
+ * Window State Register (WSTATE)
  *
- * There really are no bits per se, just the two fields WSTATE.NORMAL
- * and WSTATE.OTHER.  The rest is up to software.
- *
- * We use WSTATE_NORMAL to represent user mode or kernel mode saves
- * (whichever is currently in effect) and WSTATE_OTHER to represent
- * user mode saves (only).
- *
- * We use the low bit to suggest 32-bit mode, with the next bit set
- * once we succeed in saving in some mode.  That is, if the WSTATE_ASSUME
- * bit is set, the spill or fill handler we use will be one that makes
- * an assumption about the proper window-save mode.  If the spill or
- * fill fails with an alignment fault, the spill or fill op should
- * take the `assume' bit away retry the instruction that caused the
- * spill or fill.  This will use the new %wstate, which will test for
- * which mode to use.  The alignment fault code helps us out here by
- * resuming the spill vector at offset +70, where we are allowed to
- * execute two instructions (i.e., write to %wstate and RETRY).
- *
- * If the ASSUME bit is not set when the alignment fault occurs, the
- * given stack pointer is hopelessly wrong (and the spill, if it is a
- * spill, should be done as a sort of "panic spill") -- so those two
- * instructions will be a branch sequence.
- *
- * Note that locore.s assumes this same bit layout (since the translation
- * from "bits" to "{spill,fill}_N_{normal,other}" is done in hardware).
- *
- * The value 0 is preferred for unknown to make it easy to start in
- * unknown state and continue in whichever state unknown succeeds in --
- * a successful "other" save, for instance, can just set %wstate to
- * ASSUMExx << USERSHIFT and thus leave the kernel state "unknown".
- *
- * We also need values for managing the somewhat tricky transition from
- * user to kernel and back, so we use the one remaining free bit to mean
- * "although this looks like kernel mode, the window(s) involved are
- * user windows and should be saved ASI_AIUP".  Everything else is
- * otherwise the same, but we need not bother with assumptions in this
- * mode (we expect it to apply to at most one window spill or fill),
- * i.e., WSTATE_TRANSITION can ignore WSTATE_ASSUME if it likes.
+ *   |------------|
+ *   |OTHER|NORMAL|
+ *   |-----|------|
+ *    5	  3 2    0
  */
 
-#define	WSTATE_NORMAL_MASK	1	/* wstate normal minus transition */
-#define	WSTATE_OTHER_SHIFT	3	/* for wstate other / user */
-#define	WSTATE_OTHER_MASK		/* wstate other minus nested */ \
-	(WSTATE_NORMAL_MASK << WSTATE_OTHER_SHIFT)
+#define	WSTATE_BAD	0	/* unused */
+#define	WSTATE_U32	1	/* 32b stack */
+#define	WSTATE_U64	2	/* 64b stack */
+#define	WSTATE_CLEAN32	3	/* cleanwin workaround, 32b stack */
+#define	WSTATE_CLEAN64	4	/* cleanwin workaround, 64b stack */
+#define	WSTATE_K32	5	/* priv 32b stack */
+#define	WSTATE_K64	6	/* priv 64b stack */
+#define	WSTATE_KMIX	7	/* priv mixed stack */
 
-#define	WSTATE_KERNEL		0	/* normal kernel wstate */
-#define	WSTATE_USER_64		0	/* normal 64bit user wstate */
-#define	WSTATE_USER_32		1	/* normal 32bit user wstate */
+#define	WSTATE_CLEAN_OFFSET	2
+#define	WSTATE_SHIFT	3	/* normal-to-other shift */
+#define	WSTATE_MASK	7	/* mask for each set */
+#define	WSTATE(o, n)	(((o) << WSTATE_SHIFT) | (n))
 
-#define	WSTATE_TRANSITION	2	/* if set, force user window */
-#define	WSTATE_NESTED			/* if set, spill must not fault */ \
-	(WSTATE_TRANSITION << WSTATE_OTHER_SHIFT)
+#define	WSTATE_USER32	WSTATE(WSTATE_BAD, WSTATE_U32)
+#define	WSTATE_USER64	WSTATE(WSTATE_BAD, WSTATE_U64)
+#define	WSTATE_KERN	WSTATE(WSTATE_U32, WSTATE_K64)
 
 #endif /* !_MACHINE_WSTATE_H_ */
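
The new layout packs the OTHER field into bits 5:3 and the NORMAL field into bits 2:0, so the composite values can be verified by hand. A standalone check of the encoding follows (illustrative only, with the relevant constants repeated so it compiles on its own).

#include <stdio.h>

#define	WSTATE_BAD	0
#define	WSTATE_U32	1
#define	WSTATE_U64	2
#define	WSTATE_K64	6

#define	WSTATE_SHIFT	3	/* normal-to-other shift */
#define	WSTATE(o, n)	(((o) << WSTATE_SHIFT) | (n))

int
main(void)
{
	/* other = unused, normal = 32-bit user stack */
	printf("WSTATE_USER32 = 0x%x\n", WSTATE(WSTATE_BAD, WSTATE_U32));	/* 0x1 */
	/* other = unused, normal = 64-bit user stack */
	printf("WSTATE_USER64 = 0x%x\n", WSTATE(WSTATE_BAD, WSTATE_U64));	/* 0x2 */
	/* other = 32-bit user stack, normal = privileged 64-bit stack */
	printf("WSTATE_KERN   = 0x%x\n", WSTATE(WSTATE_U32, WSTATE_K64));	/* 0xe */
	return (0);
}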

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#9 (text+ko) ====

@@ -69,6 +69,8 @@
 
 #include "assym.s"
 
+#include <sun4v/sun4v/wbuf.S>
+
 #define	TSB_KERNEL_MASK	0x0
 #define	TSB_KERNEL	0x0
 
@@ -98,7 +100,8 @@
 	mov	SCRATCH_REG_PCB, %g0 ; \
 	ldxa	[%g0]ASI_SCRATCHPAD, PCB_REG
 
-	
+
+		
 /*
  * Macros for spilling and filling live windows.
  * Here we use the more complicated [regaddr] format which requires
@@ -204,7 +207,7 @@
 	ldx	[ASP_REG + 8], %g2 ; \
 	ldx	[ASP_REG + 0], %g1 ; \
 	inc	16, ASP_REG
-
+#if 0
 ENTRY(tl1_kstack_fault)
 	rdpr	%tl, %g1
 1:	cmp	%g1, 2
@@ -261,7 +264,8 @@
 	ba	%xcc, tl1_trap
 	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
 END(tl1_kstack_fault)
-
+#endif
+	
 /*
  * Magic to resume from a spill or fill trap.  If we get an alignment or an
  * mmu fault during a spill or a fill, this macro will detect the fault and
@@ -356,9 +360,9 @@
 	stx	%g2, [%g1 + KTR_PARM2]
 9:
 #endif
-
+#if 0
 	KSTACK_CHECK
-
+#endif
 	sir
 END(rsf_fatal)
 
@@ -425,9 +429,11 @@
 	 */
 
 	.macro	tl0_split
+#if 0
 	rdpr	%wstate, %g1
 	wrpr	%g1, WSTATE_TRANSITION, %wstate
 	save
+#endif
 	.endm
 
 	.macro	tl0_setup	type
@@ -455,13 +461,13 @@
 	tl0_gen	T_RESERVED
 	.endr
 	.endm
-
+#if 0
 	.macro	tl1_split
 	rdpr	%wstate, %g1
 	wrpr	%g1, WSTATE_NESTED, %wstate
 	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
 	.endm
-
+#endif
 	.macro	tl1_setup	type
 	tl1_split
 	clr	%o1
@@ -477,7 +483,9 @@
 
 	.macro	tl1_reserved	count
 	.rept	\count
+#if 0
 	tl1_gen	T_RESERVED
+#endif
 	.endr
 	.endm
 
@@ -504,7 +512,7 @@
 	.macro	tl0_align
 	.align	32
 	.endm
-
+	
 	.macro	cpu_mondo
 	.align	32
 	.endm
@@ -522,6 +530,7 @@
 	.endm
 
 
+#define ALIGN_128   .align  128
 	
 #define spill_32bit_asi(asi, target)		\
 	srl	%sp, 0, %sp	;		\
@@ -530,7 +539,7 @@
 	retry			; 		\
 	.skip (31-26)*4		; 		\
 	ba,a,pt %xcc, fault_32bit_##target ; \
-	.align  128
+	ALIGN_128
 
 #define spill_64bit_asi(asi, target)		\
 	SPILL(stxa, SPOFF, 8, asi) ; 		\
@@ -538,7 +547,7 @@
 	retry			   ;		\
 	.skip (31-25)*4		   ;		\
 	ba,a,pt %xcc, fault_64bit_##target ; \
-	.align  128
+	ALIGN_128	
 
 #define	spill_32clean(asi, target)		\
 	srl	%sp, 0, %sp	; 		\
@@ -547,7 +556,7 @@
 	  mov	WSTATE_USER32, %g7 ; 		\
 	.skip (31-26)*4		; 		\
 	ba,a,pt    %xcc, fault_32bit_##target ; \
-	.align	128
+	ALIGN_128	
 	
 #define	spill_64clean(asi, target)		\
 	SPILL(stxa, SPOFF, 8, asi) ; 		\
@@ -555,7 +564,7 @@
 	  mov	WSTATE_USER64, %g7 ; 		\
 	.skip (31-25)*4		   ;		\
 	ba,a,pt %xcc, fault_64bit_##target ; 	\
-	.align	128				
+	ALIGN_128	
 
 #define fill_32bit_asi(asi, target)		\
 	srl	%sp, 0, %sp	;		\
@@ -564,7 +573,7 @@
 	retry			; 		\
 	.skip (31-26)*4		; 		\
 	ba,a,pt %xcc, fault_32bit_##target ; \
-	.align  128
+	ALIGN_128	
 
 #define fill_64bit_asi(asi, target)		\
 	FILL(ldxa, SPOFF, 8, asi) ; 		\
@@ -572,7 +581,7 @@
 	retry			   ;		\
 	.skip (31-25)*4		   ;		\
 	ba,a,pt %xcc, fault_64bit_##target ; \
-	.align  128
+
 		
 	.macro	spill_32bit_primary_sn0
 	spill_32bit_asi(ASI_AIUP, sn0)
@@ -669,8 +678,11 @@
 	.macro	fill_mixed
 	.align	128
 	.endm
+
+	.macro	tl1_align
+	.align	32
+	.endm		
 		
-		
 ENTRY(tl0_sfsr_trap)
 	tl0_split
 	clr	%o1
@@ -1204,7 +1216,7 @@
 	.macro	tl1_insn_excptn
 	.align	32
 	.endm
-
+#if 0
 ENTRY(tl1_insn_exceptn_trap)
 	tl1_split
 	clr	%o1
@@ -1267,7 +1279,7 @@
 
 ENTRY(tl1_sfsr_trap)
 END(tl1_sfsr_trap)
-
+#endif
 	.macro	tl1_intr level, mask
 	tl1_split
 	set	\mask, %o1
@@ -1786,7 +1798,7 @@
 tl1_divide:
  	tl1_reserved	8				! 0x228-0x22f
 tl1_data_excptn:
- 	tl1_data_excptn					! 0x230
+ 	data_excptn					! 0x230
 	data_miss					! 0x231
  	tl1_reserved	2				! 0x232-0x233
 tl1_align:
@@ -1924,7 +1936,9 @@
 	 * Setup %wstate for return, clear WSTATE_TRANSITION.
 	 */
 	rdpr	%wstate, %l1
+	#if 0
 	and	%l1, WSTATE_NORMAL_MASK, %l1
+	#endif
 	wrpr	%l1, 0, %wstate
 
 	/*
@@ -1990,9 +2004,11 @@
 9:
 #endif
 
+#if 0
 1:	and	%l5, WSTATE_NORMAL_MASK, %l5
 	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
 	wrpr	%l5, WSTATE_KERNEL, %wstate
+#endif
 	rdpr	%canrestore, %l6
 	wrpr	%l6, 0, %otherwin
 	wrpr	%g0, 0, %canrestore
@@ -2080,9 +2096,11 @@
 	wrpr	%o0, 0, %pil
 	wr	%o1, 0, %clear_softint
 
+#if 0
 	and	%l5, WSTATE_NORMAL_MASK, %l5
 	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
 	wrpr	%l5, WSTATE_KERNEL, %wstate
+#endif
 	rdpr	%canrestore, %l6
 	wrpr	%l6, 0, %otherwin
 	wrpr	%g0, 0, %canrestore
@@ -2320,9 +2338,10 @@
 	 * set the transition bit so the restore will be handled specially
 	 * if it traps, use the xor feature of wrpr to do that.
 	 */
+#if 0
 	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
 	wrpr	%g3, WSTATE_TRANSITION, %wstate
-
+#endif
 	/*
 	 * Setup window management registers for return.  If not all user
 	 * windows were spilled in the kernel %otherwin will be non-zero,
@@ -2473,10 +2492,10 @@
 #endif
 
 	wrpr	%g0, 1, %tl
-
+#if 0
 	and	%l5, WSTATE_OTHER_MASK, %l5
 	wrpr	%l5, WSTATE_KERNEL, %wstate
-
+#endif
 	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
 	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
 	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
@@ -2616,10 +2635,10 @@
 	wr	%o1, 0, %clear_softint
 
 	wrpr	%g0, 1, %tl
-
+#if 0
 	and	%l5, WSTATE_OTHER_MASK, %l5
 	wrpr	%l5, WSTATE_KERNEL, %wstate
-
+#endif
 	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
 	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
 	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/hcall.S#3 (text+ko) ====

@@ -585,7 +585,7 @@
 	 */
 	ENTRY(hvio_intr_getvalid)
 	mov	%o1, %o2
-	mov	HVIO_INTR_GETVALID, %o5
+	mov	HVIO_INTR_GETENABLED, %o5
 	ta	FAST_TRAP
 	brz,a	%o0, 1f
 	stuw	%o1, [%o2]
@@ -600,7 +600,7 @@
 	 * ret0 - status
 	 */
 	ENTRY(hvio_intr_setvalid)
-	mov	HVIO_INTR_SETVALID, %o5
+	mov	HVIO_INTR_SETENABLED, %o5
 	ta	FAST_TRAP
 	retl
 	nop
@@ -1366,4 +1366,24 @@
 	nop
 	SET_SIZE(hv_hpriv)
 
+	/*
+	 * panic_bad_hcall is called when a hcall returns
+	 * unexpected error
+	 * %o0 error number
+	 * %o1 hcall number
+	 */
+
+	.text
+bad_hcall_error:
+	.asciz	"hypervisor call 0x%x returned an unexpected error %d"
+
+	ENTRY(panic_bad_hcall)
+	mov	%o0, %o2
+	sethi	%hi(bad_hcall_error), %o0
+	or	%o0, %lo(bad_hcall_error), %o0
+	mov	%o7, %o3
+	call	panic
+	mov	%o3, %o7
+	SET_SIZE(panic_bad_hcall)
+	
 #endif	/* lint || __lint */
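
panic_bad_hcall takes the error in %o0 and the hypercall number in %o1, so from C it reads as a two-argument function. A hypothetical C-side caller is sketched below; the wrapper name, its signature, and the hcall number constant are assumptions, not part of the change.

#include <sys/param.h>
#include <sys/systm.h>

#define	H_EOK			0	/* hypervisor "no error" status */
#define	EXAMPLE_HCALL_NUMBER	0x25	/* placeholder hcall number */

/* Assumed prototypes, for illustration only. */
extern uint64_t hv_example_hcall(uint64_t arg0, uint64_t arg1);
extern void panic_bad_hcall(uint64_t error, uint64_t hcall_number);

static void
example_must_succeed(uint64_t arg0, uint64_t arg1)
{
	uint64_t err;

	err = hv_example_hcall(arg0, arg1);
	if (err != H_EOK)
		panic_bad_hcall(err, EXAMPLE_HCALL_NUMBER);
}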

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/machdep.c#7 (text+ko) ====

@@ -347,15 +347,7 @@
 	mp_tramp = mp_tramp_alloc();
 #endif
 
-	/*
-	 * Initialize virtual memory and calculate physmem.
-	 */
-	pmap_bootstrap(end);
 
-	/*

>>> TRUNCATED FOR MAIL (1000 lines) <<<


