Date:      Mon, 30 May 2011 18:57:31 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r222506 - in projects/pseries/powerpc: aim include
Message-ID:  <201105301857.p4UIvVJo089322@svn.freebsd.org>

Author: nwhitehorn
Date: Mon May 30 18:57:31 2011
New Revision: 222506
URL: http://svn.freebsd.org/changeset/base/222506

Log:
  The POWER7, unlike previous CPUs, has only 32 SLB slots instead of 64,
  so make the SLB slot count dynamically configurable and set it to 32 on
  POWER7. This gets us to a mountroot prompt.

Modified:
  projects/pseries/powerpc/aim/machdep.c
  projects/pseries/powerpc/aim/slb.c
  projects/pseries/powerpc/aim/trap_subr64.S
  projects/pseries/powerpc/include/slb.h

Modified: projects/pseries/powerpc/aim/machdep.c
==============================================================================
--- projects/pseries/powerpc/aim/machdep.c	Mon May 30 18:57:01 2011	(r222505)
+++ projects/pseries/powerpc/aim/machdep.c	Mon May 30 18:57:31 2011	(r222506)
@@ -132,6 +132,7 @@ extern vm_offset_t ksym_start, ksym_end;
 
 int cold = 1;
 #ifdef __powerpc64__
+extern int n_slbs;
 int cacheline_size = 128;
 #else
 int cacheline_size = 32;
@@ -337,13 +338,13 @@ powerpc_init(vm_offset_t startkernel, vm
 
 	kdb_init();
 
-	/*
-	 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
-	 * them pretend they have a 32-byte cacheline. Turn this off
-	 * before we measure the cacheline size.
-	 */
-
+	/* Various very early CPU fix ups */
 	switch (mfpvr() >> 16) {
+		/*
+		 * PowerPC 970 CPUs have a misfeature requested by Apple that
+		 * makes them pretend they have a 32-byte cacheline. Turn this
+		 * off before we measure the cacheline size.
+		 */
 		case IBM970:
 		case IBM970FX:
 		case IBM970MP:
@@ -352,6 +353,12 @@ powerpc_init(vm_offset_t startkernel, vm
 			scratch &= ~HID5_970_DCBZ_SIZE_HI;
 			mtspr(SPR_HID5, scratch);
 			break;
+	#ifdef __powerpc64__
+		case IBMPOWER7:
+			/* XXX: get from ibm,slb-size in device tree */
+			n_slbs = 32;
+			break;
+	#endif
 	}
 
 	/*

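As the XXX comment in the hunk above notes, the 32-slot figure is hardcoded
for now; the authoritative value is published by firmware in the ibm,slb-size
property of each CPU node. A minimal sketch of reading it through FreeBSD's
Open Firmware interface follows; the CPU node path and the fallback value are
assumptions for illustration, not part of this commit.

#include <dev/ofw/openfirm.h>

extern int n_slbs;

static void
slb_size_from_devtree(void)
{
	phandle_t cpu;
	pcell_t slb_size;

	/* Hypothetical node path; real code would walk /cpus instead. */
	cpu = OF_finddevice("/cpus/cpu@0");
	if (cpu != -1 && OF_getprop(cpu, "ibm,slb-size", &slb_size,
	    sizeof(slb_size)) > 0)
		n_slbs = slb_size;	/* 32 on POWER7 */
	else
		n_slbs = 64;		/* pre-POWER7 default */
}
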
Modified: projects/pseries/powerpc/aim/slb.c
==============================================================================
--- projects/pseries/powerpc/aim/slb.c	Mon May 30 18:57:01 2011	(r222505)
+++ projects/pseries/powerpc/aim/slb.c	Mon May 30 18:57:31 2011	(r222506)
@@ -51,8 +51,9 @@ uintptr_t moea64_get_unique_vsid(void);
 void moea64_release_vsid(uint64_t vsid);
 static void slb_zone_init(void *);
 
-uma_zone_t slbt_zone;
-uma_zone_t slb_cache_zone;
+static uma_zone_t slbt_zone;
+static uma_zone_t slb_cache_zone;
+int n_slbs = 64;
 
 SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
 
@@ -426,16 +427,18 @@ slb_insert_kernel(uint64_t slbe, uint64_
 
 	/* Check for an unused slot, abusing the user slot as a full flag */
 	if (slbcache[USER_SLB_SLOT].slbe == 0) {
-		for (i = 0; i < USER_SLB_SLOT; i++) {
+		for (i = 0; i < n_slbs; i++) {
+			if (i == USER_SLB_SLOT)
+				continue;
 			if (!(slbcache[i].slbe & SLBE_VALID)) 
 				goto fillkernslb;
 		}
 
-		if (i == USER_SLB_SLOT)
+		if (i == n_slbs)
 			slbcache[USER_SLB_SLOT].slbe = 1;
 	}
 
-	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
+	for (i = mftb() % n_slbs, j = 0; j < n_slbs; j++, i = (i+1) % n_slbs) {
 		if (i == USER_SLB_SLOT)
 			continue;
 
@@ -443,9 +446,11 @@ slb_insert_kernel(uint64_t slbe, uint64_
 			break;
 	}
 
-	KASSERT(j < 64, ("All kernel SLB slots locked!"));
+	KASSERT(j < n_slbs, ("All kernel SLB slots locked!"));
 
 fillkernslb:
+	KASSERT(i != USER_SLB_SLOT,
+	    ("Filling user SLB slot with a kernel mapping"));
 	slbcache[i].slbv = slbv;
 	slbcache[i].slbe = slbe | (uint64_t)i;
 
@@ -466,11 +471,11 @@ slb_insert_user(pmap_t pm, struct slb *s
 
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 
-	if (pm->pm_slb_len < 64) {
+	if (pm->pm_slb_len < n_slbs) {
 		i = pm->pm_slb_len;
 		pm->pm_slb_len++;
 	} else {
-		i = mftb() % 64;
+		i = mftb() % n_slbs;
 	}
 
 	/* Note that this replacement is atomic with respect to trap_subr */
@@ -521,8 +526,9 @@ slb_zone_init(void *dummy)
 
 	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
-	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb *),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
+	slb_cache_zone = uma_zcreate("SLB cache",
+	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, UMA_ZONE_VM);
 
 	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
 		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);

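The replacement policy in slb_insert_kernel above now works in terms of
n_slbs: pick a pseudo-random starting slot from the timebase, then scan every
slot modulo n_slbs, always stepping over the slot reserved for the user
copyin/out segment. An illustrative C restatement, where slot_is_locked() is
a hypothetical stand-in for the lock test elided from the diff context:

static int
slb_pick_kernel_victim(void)
{
	int i, j;

	for (i = mftb() % n_slbs, j = 0; j < n_slbs;
	    j++, i = (i + 1) % n_slbs) {
		if (i == USER_SLB_SLOT)
			continue;
		if (!slot_is_locked(i))	/* hypothetical predicate */
			return (i);
	}
	return (-1);			/* all slots locked; the KASSERT fires */
}

Note also the slb_cache_zone sizing: it grows from 64 to n_slbs + 1 pointers,
presumably so the per-process user SLB list keeps room for the NULL
terminator that restore_usersrs stops on.
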
Modified: projects/pseries/powerpc/aim/trap_subr64.S
==============================================================================
--- projects/pseries/powerpc/aim/trap_subr64.S	Mon May 30 18:57:01 2011	(r222505)
+++ projects/pseries/powerpc/aim/trap_subr64.S	Mon May 30 18:57:31 2011	(r222506)
@@ -53,55 +53,53 @@
  * User SRs are loaded through a pointer to the current pmap.
  */
 restore_usersrs:
-	GET_CPUINFO(%r28);
-	ld	%r28,PC_USERSLB(%r28);
+	GET_CPUINFO(%r28)
+	ld	%r28,PC_USERSLB(%r28)
 	li	%r29, 0			/* Set the counter to zero */
 
 	slbia
 	slbmfee	%r31,%r29		
 	clrrdi	%r31,%r31,28
 	slbie	%r31
-instuserslb:
-	ld	%r31, 0(%r28);		/* Load SLB entry pointer */
-	cmpli	0, %r31, 0;		/* If NULL, stop */
-	beqlr;
+1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
+	cmpli	0, %r31, 0		/* If NULL, stop */
+	beqlr
 
 	ld	%r30, 0(%r31)		/* Load SLBV */
 	ld	%r31, 8(%r31)		/* Load SLBE */
 	or	%r31, %r31, %r29	/*  Set SLBE slot */
-	slbmte	%r30, %r31;		/* Install SLB entry */
+	slbmte	%r30, %r31		/* Install SLB entry */
 
-	addi	%r28, %r28, 8;		/* Advance pointer */
-	addi	%r29, %r29, 1;
-	cmpli	0, %r29, 64;		/* Repeat if we are not at the end */
-	blt instuserslb;
-	blr;
+	addi	%r28, %r28, 8		/* Advance pointer */
+	addi	%r29, %r29, 1
+	b	1b			/* Repeat */
 
 /*
  * Kernel SRs are loaded directly from the PCPU fields
  */
 restore_kernsrs:
-	GET_CPUINFO(%r28);
-	addi	%r28,%r28,PC_KERNSLB;
+	GET_CPUINFO(%r28)
+	addi	%r28,%r28,PC_KERNSLB
 	li	%r29, 0			/* Set the counter to zero */
 
 	slbia
 	slbmfee	%r31,%r29		
 	clrrdi	%r31,%r31,28
 	slbie	%r31
-instkernslb:
-	ld	%r31, 8(%r28);		/* Load SLBE */
+1:	cmpli	0, %r29, USER_SLB_SLOT	/* Skip the user slot */
+	beq-	2f
 
-	cmpli	0, %r31, 0;		/* If SLBE is not valid, stop */
-	beqlr;
+	ld	%r31, 8(%r28)		/* Load SLBE */
+	cmpli	0, %r31, 0		/* If SLBE is not valid, stop */
+	beqlr
 	ld	%r30, 0(%r28)		/* Load SLBV  */
-	slbmte	%r30, %r31;		/* Install SLB entry */
+	slbmte	%r30, %r31		/* Install SLB entry */
 
-	addi	%r28, %r28, 16;		/* Advance pointer */
-	addi	%r29, %r29, 1;
-	cmpli	0, %r29, USER_SLB_SLOT;	/* Repeat if we are not at the end */
-	blt instkernslb;
-	blr;
+2:	addi	%r28, %r28, 16		/* Advance pointer */
+	addi	%r29, %r29, 1
+	cmpli	0, %r29, 64		/* Repeat if we are not at the end */
+	blt	1b 
+	blr
 
 /*
  * FRAME_SETUP assumes:

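In C terms (illustrative only; userslb and kernslb stand in for the
PC_USERSLB and PC_KERNSLB per-CPU fields, and slbmte() for the instruction),
the rewritten loops behave roughly as follows. restore_usersrs now walks a
NULL-terminated array of slb pointers, so it needs no slot-count bound at
all:

	for (i = 0; (slb = userslb[i]) != NULL; i++)
		slbmte(slb->slbv, slb->slbe | i);

restore_kernsrs indexes the kernel cache directly, skips the reserved user
slot, and returns at the first empty (zero) SLBE, so the remaining hardcoded
64 is only a worst-case bound, reached when every slot is populated:

	for (i = 0; i < 64; i++) {
		if (i == USER_SLB_SLOT)
			continue;
		if (kernslb[i].slbe == 0)
			return;
		slbmte(kernslb[i].slbv, kernslb[i].slbe);
	}
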
Modified: projects/pseries/powerpc/include/slb.h
==============================================================================
--- projects/pseries/powerpc/include/slb.h	Mon May 30 18:57:01 2011	(r222505)
+++ projects/pseries/powerpc/include/slb.h	Mon May 30 18:57:31 2011	(r222506)
@@ -65,7 +65,7 @@
 /*
  * User segment for copyin/out
  */
-#define USER_SLB_SLOT 63
+#define USER_SLB_SLOT 0
 #define USER_SLB_SLBE (((USER_ADDR >> ADDR_SR_SHFT) << SLBE_ESID_SHIFT) | \
 			SLBE_VALID | USER_SLB_SLOT)
 
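Moving USER_SLB_SLOT from 63 to 0 is what makes the variable slot count safe:
slot 63 simply does not exist on a 32-entry POWER7 SLB, while slot 0 exists
on every implementation, so the reserved copyin/out segment stays valid
regardless of n_slbs. A hypothetical compile-time guard (not in this commit)
would capture the invariant:

CTASSERT(USER_SLB_SLOT < 32);	/* must exist on the smallest supported SLB */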


