Skip site navigation (1)Skip section navigation (2)
Date:      Thu, 29 Jul 2010 15:03:34 -0700
From:      Sean Bruno <seanbru@yahoo-inc.com>
To:        "freebsd-stable@freebsd.org" <freebsd-stable@freebsd.org>
Subject:   Re: Unable to install 8.x on a PowerEdge R810
Message-ID:  <1280441014.2530.16.camel@localhost.localdomain>
In-Reply-To: <1279938675.2442.15.camel@localhost.localdomain>
References:  <20100722213836.GH15227@martini.nu> <1279836216.2456.14.camel@localhost.localdomain> <20100723003611.GA66678@martini.nu> <7573B69C-3C37-449A-A27F-5B0B2ED84757@mac.com> <1279938675.2442.15.camel@localhost.localdomain>

next in thread | previous in thread | raw e-mail | index | archive | help

--=-HH/4kVuH97/ENX7tPWsX
Content-Type: text/plain; charset="UTF-8"
Content-Transfer-Encoding: 7bit

> Kind of a large patch, but in order to make an omlette, you need to
> break a few servers.
> 
> This is a diff against -CURRENT, not stable-8 as I didn't get a chance
> to test it.  It is directly based off of changes that peter@ made to the
> Yahoo FreeBSD 7 tree.
> 
> I have compile and boot tested this on my local machines, but I don't
> have 64 CPU machines to test upon.
> 
> Sean

Here is a patch version that applies to stable-8 at the moment.

I changed 2 more cases where the data types were wrong, one in a printf
and a case where a negative truth value was being used.

Sean


--=-HH/4kVuH97/ENX7tPWsX
Content-Disposition: attachment; filename="cpumask_8.diff"
Content-Type: text/x-patch; name="cpumask_8.diff"; charset="UTF-8"
Content-Transfer-Encoding: 7bit

Index: sys/kern/subr_smp.c
===================================================================
--- sys/kern/subr_smp.c	(revision 210622)
+++ sys/kern/subr_smp.c	(working copy)
@@ -181,7 +181,7 @@
 	id = td->td_oncpu;
 	if (id == NOCPU)
 		return;
-	ipi_selected(1 << id, IPI_AST);
+	ipi_selected(cputomask(id), IPI_AST);
 }
 
 /*
@@ -318,7 +318,7 @@
 	CTR1(KTR_SMP, "restart_cpus(%x)", map);
 
 	/* signal other cpus to restart */
-	atomic_store_rel_int(&started_cpus, map);
+	atomic_store_rel_long(&started_cpus, map);
 
 	/* wait for each to clear its bit */
 	while ((stopped_cpus & map) != 0)
@@ -399,7 +399,7 @@
 		if (((1 << i) & map) != 0 && !CPU_ABSENT(i))
 			ncpus++;
 	if (ncpus == 0)
-		panic("ncpus is 0 with map=0x%x", map);
+		panic("ncpus is 0 with map=0x%lx", map);
 
 	/* obtain rendezvous lock */
 	mtx_lock_spin(&smp_ipi_mtx);
@@ -415,10 +415,10 @@
 	atomic_store_rel_int(&smp_rv_waiters[0], 0);
 
 	/* signal other processors, which will enter the IPI with interrupts off */
-	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);
+	ipi_selected(map & ~cputomask(curcpu), IPI_RENDEZVOUS);
 
 	/* Check if the current CPU is in the map */
-	if ((map & (1 << curcpu)) != 0)
+	if ((map & cputomask(curcpu)) != 0)
 		smp_rendezvous_action();
 
 	if (teardown_func == smp_no_rendevous_barrier)
@@ -490,7 +490,7 @@
 		panic("Built bad topology at %p.  CPU count %d != %d",
 		    top, top->cg_count, mp_ncpus);
 	if (top->cg_mask != all_cpus)
-		panic("Built bad topology at %p.  CPU mask 0x%X != 0x%X",
+		panic("Built bad topology at %p.  CPU mask 0x%lX != 0x%lX",
 		    top, top->cg_mask, all_cpus);
 	return (top);
 }
@@ -531,7 +531,7 @@
 	parent->cg_children++;
 	for (; parent != NULL; parent = parent->cg_parent) {
 		if ((parent->cg_mask & child->cg_mask) != 0)
-			panic("Duplicate children in %p.  mask 0x%X child 0x%X",
+			panic("Duplicate children in %p.  mask 0x%lX child 0x%lX",
 			    parent, parent->cg_mask, child->cg_mask);
 		parent->cg_mask |= child->cg_mask;
 		parent->cg_count += child->cg_count;
Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c	(revision 210622)
+++ sys/kern/sched_ule.c	(working copy)
@@ -851,7 +851,7 @@
 		 * IPI the target cpu to force it to reschedule with the new
 		 * workload.
 		 */
-		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
+		ipi_cpu(TDQ_ID(low), IPI_PREEMPT);
 	}
 	tdq_unlock_pair(high, low);
 	return (moved);
@@ -974,7 +974,7 @@
 			return;
 	}
 	tdq->tdq_ipipending = 1;
-	ipi_selected(1 << cpu, IPI_PREEMPT);
+	ipi_cpu(cpu, IPI_PREEMPT);
 }
 
 /*
@@ -2413,7 +2413,7 @@
 	cpu = ts->ts_cpu;
 	ts->ts_cpu = sched_pickcpu(td, 0);
 	if (cpu != PCPU_GET(cpuid))
-		ipi_selected(1 << cpu, IPI_PREEMPT);
+		ipi_cpu(cpu, IPI_PREEMPT);
 #endif
 }
 
@@ -2644,11 +2644,11 @@
 
 	sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
 	    "", indent, cg->cg_level);
-	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%x\">", indent, "",
+	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%lx\">", indent, "",
 	    cg->cg_count, cg->cg_mask);
 	first = TRUE;
 	for (i = 0; i < MAXCPU; i++) {
-		if ((cg->cg_mask & (1 << i)) != 0) {
+		if ((cg->cg_mask & cputomask(i)) != 0) {
 			if (!first)
 				sbuf_printf(sb, ", ");
 			else
Index: sys/kern/kern_ktr.c
===================================================================
--- sys/kern/kern_ktr.c	(revision 210622)
+++ sys/kern/kern_ktr.c	(working copy)
@@ -207,7 +207,7 @@
 	if ((ktr_mask & mask) == 0)
 		return;
 	cpu = KTR_CPU;
-	if (((1 << cpu) & ktr_cpumask) == 0)
+	if ((cputomask(cpu) & ktr_cpumask) == 0)
 		return;
 #if defined(KTR_VERBOSE) || defined(KTR_ALQ)
 	td = curthread;
Index: sys/kern/kern_pmc.c
===================================================================
--- sys/kern/kern_pmc.c	(revision 210622)
+++ sys/kern/kern_pmc.c	(working copy)
@@ -34,6 +34,7 @@
 #include "opt_hwpmc_hooks.h"
 
 #include <sys/types.h>
+#include <sys/systm.h>
 #include <sys/pmc.h>
 #include <sys/pmckern.h>
 #include <sys/smp.h>
@@ -110,7 +111,7 @@
 {
 #ifdef	SMP
 	return (pmc_cpu_is_present(cpu) &&
-	    (hlt_cpus_mask & (1 << cpu)) == 0);
+	    (hlt_cpus_mask & cputomask(cpu)) == 0);
 #else
 	return (1);
 #endif
@@ -137,7 +138,7 @@
 pmc_cpu_is_primary(int cpu)
 {
 #ifdef	SMP
-	return ((logical_cpus_mask & (1 << cpu)) == 0);
+	return ((logical_cpus_mask & cputomask(cpu)) == 0);
 #else
 	return (1);
 #endif
Index: sys/kern/subr_pcpu.c
===================================================================
--- sys/kern/subr_pcpu.c	(revision 210622)
+++ sys/kern/subr_pcpu.c	(working copy)
@@ -88,7 +88,7 @@
 	KASSERT(cpuid >= 0 && cpuid < MAXCPU,
 	    ("pcpu_init: invalid cpuid %d", cpuid));
 	pcpu->pc_cpuid = cpuid;
-	pcpu->pc_cpumask = 1 << cpuid;
+	pcpu->pc_cpumask = cputomask(cpuid);
 	cpuid_to_pcpu[cpuid] = pcpu;
 	SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
 	cpu_pcpu_init(pcpu, cpuid, size);
Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c	(revision 210622)
+++ sys/kern/sched_4bsd.c	(working copy)
@@ -1086,7 +1086,7 @@
 	me = PCPU_GET(cpumask);
 
 	/* Don't bother if we should be doing it ourself. */
-	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
+	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == cputomask(cpunum)))
 		return (0);
 
 	dontuse = me | stopped_cpus | hlt_cpus_mask;
@@ -1108,7 +1108,7 @@
 		/* If they are both on, compare and use loop if different. */
 		if (forward_wakeup_use_loop) {
 			if (map != map3) {
-				printf("map (%02X) != map3 (%02X)\n", map,
+				printf("map (%02lX) != map3 (%02lX)\n", map,
 				    map3);
 				map = map3;
 			}
@@ -1120,7 +1120,7 @@
 	/* If we only allow a specific CPU, then mask off all the others. */
 	if (cpunum != NOCPU) {
 		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
-		map &= (1 << cpunum);
+		map &= cputomask(cpunum);
 	} else {
 		/* Try choose an idle die. */
 		if (forward_wakeup_use_htt) {
Index: sys/dev/hwpmc/hwpmc_mod.c
===================================================================
--- sys/dev/hwpmc/hwpmc_mod.c	(revision 210622)
+++ sys/dev/hwpmc/hwpmc_mod.c	(working copy)
@@ -1991,7 +1991,7 @@
 		 * had already processed the interrupt).  We don't
 		 * lose the interrupt sample.
 		 */
-		atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
+		atomic_clear_long(&pmc_cpumask, cputomask(PCPU_GET(cpuid)));
 		pmc_process_samples(PCPU_GET(cpuid));
 		break;
 
@@ -4086,7 +4086,7 @@
 
  done:
 	/* mark CPU as needing processing */
-	atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+	atomic_set_rel_long(&pmc_cpumask, cputomask(cpu));
 
 	return (error);
 }
@@ -4196,7 +4196,7 @@
 			break;
 		if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
 			/* Need a rescan at a later time. */
-			atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+			atomic_set_rel_long(&pmc_cpumask, cputomask(cpu));
 			break;
 		}
 
@@ -4785,7 +4785,7 @@
 	PMCDBG(MOD,INI,0, "%s", "cleanup");
 
 	/* switch off sampling */
-	atomic_store_rel_int(&pmc_cpumask, 0);
+	atomic_store_rel_long(&pmc_cpumask, 0);
 	pmc_intr = NULL;
 
 	sx_xlock(&pmc_sx);
Index: sys/geom/eli/g_eli.c
===================================================================
--- sys/geom/eli/g_eli.c	(revision 210622)
+++ sys/geom/eli/g_eli.c	(working copy)
@@ -499,7 +499,7 @@
 g_eli_cpu_is_disabled(int cpu)
 {
 #ifdef SMP
-	return ((hlt_cpus_mask & (1 << cpu)) != 0);
+	return ((hlt_cpus_mask & cputomask(cpu)) != 0);
 #else
 	return (0);
 #endif
Index: sys/i386/include/smp.h
===================================================================
--- sys/i386/include/smp.h	(revision 210622)
+++ sys/i386/include/smp.h	(working copy)
@@ -62,6 +62,7 @@
 void	init_secondary(void);
 int	ipi_nmi_handler(void);
 void	ipi_selected(cpumask_t cpus, u_int ipi);
+#define ipi_cpu(_c, _i) ipi_selected(cputomask(_c), _i)
 void	ipi_all_but_self(u_int ipi);
 #ifndef XEN
 void 	ipi_bitmap_handler(struct trapframe frame);
Index: sys/i386/include/_types.h
===================================================================
--- sys/i386/include/_types.h	(revision 210622)
+++ sys/i386/include/_types.h	(working copy)
@@ -74,7 +74,7 @@
  * Standard type definitions.
  */
 typedef	unsigned long	__clock_t;		/* clock()... */
-typedef	unsigned int	__cpumask_t;
+typedef	unsigned long	__cpumask_t;
 typedef	__int32_t	__critical_t;
 typedef	long double	__double_t;
 typedef	long double	__float_t;
Index: sys/i386/i386/vm_machdep.c
===================================================================
--- sys/i386/i386/vm_machdep.c	(revision 210622)
+++ sys/i386/i386/vm_machdep.c	(working copy)
@@ -609,7 +609,7 @@
 
 			/* Restart CPU #0. */
 			/* XXX: restart_cpus(1 << 0); */
-			atomic_store_rel_int(&started_cpus, (1 << 0));
+			atomic_store_rel_long(&started_cpus, cputomask(0));
 
 			cnt = 0;
 			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
Index: sys/i386/i386/mp_machdep.c
===================================================================
--- sys/i386/i386/mp_machdep.c	(revision 210622)
+++ sys/i386/i386/mp_machdep.c	(working copy)
@@ -1306,7 +1306,7 @@
 	 * Set the mask of receiving CPUs for this purpose.
 	 */
 	if (ipi == IPI_STOP_HARD)
-		atomic_set_int(&ipi_nmi_pending, cpus);
+		atomic_set_long(&ipi_nmi_pending, cpus);
 
 	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
 	while ((cpu = ffs(cpus)) != 0) {
@@ -1369,7 +1369,7 @@
 	if ((ipi_nmi_pending & cpumask) == 0)
 		return (1);
 
-	atomic_clear_int(&ipi_nmi_pending, cpumask);
+	atomic_clear_long(&ipi_nmi_pending, cpumask);
 	cpustop_handler();
 	return (0);
 }
@@ -1387,14 +1387,14 @@
 	savectx(&stoppcbs[cpu]);
 
 	/* Indicate that we are stopped */
-	atomic_set_int(&stopped_cpus, cpumask);
+	atomic_set_long(&stopped_cpus, cpumask);
 
 	/* Wait for restart */
 	while (!(started_cpus & cpumask))
 	    ia32_pause();
 
-	atomic_clear_int(&started_cpus, cpumask);
-	atomic_clear_int(&stopped_cpus, cpumask);
+	atomic_clear_long(&started_cpus, cpumask);
+	atomic_clear_long(&stopped_cpus, cpumask);
 
 	if (cpu == 0 && cpustop_restartfunc != NULL) {
 		cpustop_restartfunc();
Index: sys/cddl/dev/dtrace/amd64/dtrace_subr.c
===================================================================
--- sys/cddl/dev/dtrace/amd64/dtrace_subr.c	(revision 210622)
+++ sys/cddl/dev/dtrace/amd64/dtrace_subr.c	(working copy)
@@ -120,14 +120,14 @@
 	if (cpu == DTRACE_CPUALL)
 		cpus = all_cpus;
 	else
-		cpus = (cpumask_t) (1 << cpu);
+		cpus = cputomask(cpu);
 
 	/* If the current CPU is in the set, call the function directly: */
-	if ((cpus & (1 << curcpu)) != 0) {
+	if ((cpus & cputomask(curcpu)) != 0) {
 		(*func)(arg);
 
 		/* Mask the current CPU from the set */
-		cpus &= ~(1 << curcpu);
+		cpus &= ~cputomask(curcpu);
 	}
 
 	/* If there are any CPUs in the set, cross-call to those CPUs */
Index: sys/amd64/include/smp.h
===================================================================
--- sys/amd64/include/smp.h	(revision 210622)
+++ sys/amd64/include/smp.h	(working copy)
@@ -54,6 +54,7 @@
 void	init_secondary(void);
 int	ipi_nmi_handler(void);
 void	ipi_selected(cpumask_t cpus, u_int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_all_but_self(u_int ipi);
 void 	ipi_bitmap_handler(struct trapframe frame);
 u_int	mp_bootaddress(u_int);
Index: sys/amd64/include/param.h
===================================================================
--- sys/amd64/include/param.h	(revision 210622)
+++ sys/amd64/include/param.h	(working copy)
@@ -71,7 +71,7 @@
 #endif
 
 #if defined(SMP) || defined(KLD_MODULE)
-#define MAXCPU		32
+#define MAXCPU		64
 #else
 #define MAXCPU		1
 #endif
Index: sys/amd64/include/_types.h
===================================================================
--- sys/amd64/include/_types.h	(revision 210622)
+++ sys/amd64/include/_types.h	(working copy)
@@ -61,7 +61,7 @@
  * Standard type definitions.
  */
 typedef	__int32_t	__clock_t;		/* clock()... */
-typedef	unsigned int	__cpumask_t;
+typedef	unsigned long	__cpumask_t;
 typedef	__int64_t	__critical_t;
 typedef	double		__double_t;
 typedef	float		__float_t;
Index: sys/amd64/amd64/vm_machdep.c
===================================================================
--- sys/amd64/amd64/vm_machdep.c	(revision 210622)
+++ sys/amd64/amd64/vm_machdep.c	(working copy)
@@ -536,7 +536,7 @@
 			printf("cpu_reset: Restarting BSP\n");
 
 			/* Restart CPU #0. */
-			atomic_store_rel_int(&started_cpus, 1 << 0);
+			atomic_store_rel_long(&started_cpus, cputomask(0));
 
 			cnt = 0;
 			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
Index: sys/amd64/amd64/mptable.c
===================================================================
--- sys/amd64/amd64/mptable.c	(revision 210622)
+++ sys/amd64/amd64/mptable.c	(working copy)
@@ -888,13 +888,13 @@
 	 * already in the table, then kill the fixup.
 	 */
 	for (id = 0; id <= MAX_LAPIC_ID; id++) {
-		if ((id_mask & 1 << id) == 0)
+		if ((id_mask & (1ul << id)) == 0)
 			continue;
 		/* First, make sure we are on a logical_cpus boundary. */
 		if (id % logical_cpus != 0)
 			return;
 		for (i = id + 1; i < id + logical_cpus; i++)
-			if ((id_mask & 1 << i) != 0)
+			if ((id_mask & (1ul << i)) != 0)
 				return;
 	}
 
@@ -911,7 +911,7 @@
 				    i, id);
 			lapic_create(i, 0);
 		}
-		id_mask &= ~(1 << id);
+		id_mask &= ~(1ul << id);
 	}
 }
 #endif /* MPTABLE_FORCE_HTT */
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c	(revision 210622)
+++ sys/amd64/amd64/pmap.c	(working copy)
@@ -548,7 +548,7 @@
 	PMAP_LOCK_INIT(kernel_pmap);
 	kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
 	kernel_pmap->pm_root = NULL;
-	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
+	kernel_pmap->pm_active = (cpumask_t)-1;	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
 
 	/*
@@ -860,8 +860,8 @@
 void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
-	u_int cpumask;
-	u_int other_cpus;
+	cpumask_t cpumask;
+	cpumask_t other_cpus;
 
 	sched_pin();
 	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
@@ -881,8 +881,8 @@
 void
 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	u_int cpumask;
-	u_int other_cpus;
+	cpumask_t cpumask;
+	cpumask_t other_cpus;
 	vm_offset_t addr;
 
 	sched_pin();
@@ -906,8 +906,8 @@
 void
 pmap_invalidate_all(pmap_t pmap)
 {
-	u_int cpumask;
-	u_int other_cpus;
+	cpumask_t cpumask;
+	cpumask_t other_cpus;
 
 	sched_pin();
 	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c	(revision 210622)
+++ sys/amd64/amd64/mp_machdep.c	(working copy)
@@ -118,7 +118,7 @@
  * Local data and functions.
  */
 
-static u_int logical_cpus;
+static cpumask_t logical_cpus;
 static volatile cpumask_t ipi_nmi_pending;
 
 /* used to hold the AP's until we are ready to release them */
@@ -880,7 +880,7 @@
 			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
 		}
 
-		all_cpus |= (1 << cpu);		/* record AP in CPU map */
+		all_cpus |= cputomask(cpu);		/* record AP in CPU map */
 	}
 
 	/* build our map of 'other' CPUs */
@@ -1002,27 +1002,16 @@
 static void
 smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
 {
-	int ncpu, othercpus;
+	int cpu, ncpu, othercpus;
 
 	othercpus = mp_ncpus - 1;
-	if (mask == (u_int)-1) {
-		ncpu = othercpus;
-		if (ncpu < 1)
+	if (mask == (cpumask_t)-1) {
+		if (othercpus < 1)
 			return;
 	} else {
 		mask &= ~PCPU_GET(cpumask);
 		if (mask == 0)
 			return;
-		ncpu = bitcount32(mask);
-		if (ncpu > othercpus) {
-			/* XXX this should be a panic offence */
-			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
-			    ncpu, othercpus);
-			ncpu = othercpus;
-		}
-		/* XXX should be a panic, implied by mask == 0 above */
-		if (ncpu < 1)
-			return;
 	}
 	if (!(read_rflags() & PSL_I))
 		panic("%s: interrupts disabled", __func__);
@@ -1030,10 +1019,18 @@
 	smp_tlb_addr1 = addr1;
 	smp_tlb_addr2 = addr2;
 	atomic_store_rel_int(&smp_tlb_wait, 0);
-	if (mask == (u_int)-1)
+	if (mask == (cpumask_t)-1) {
+		ncpu = othercpus;
 		ipi_all_but_self(vector);
-	else
-		ipi_selected(mask, vector);
+	} else {
+		ncpu = 0;
+		while ((cpu = ffsl(mask)) != 0) {
+			cpu--;
+			mask &= ~cputomask(cpu);
+			lapic_ipi_vectored(vector, cpu_apic_ids[cpu]);
+			ncpu++;
+		}
+	}
 	while (smp_tlb_wait < ncpu)
 		ia32_pause();
 	mtx_unlock_spin(&smp_ipi_mtx);
@@ -1145,12 +1142,12 @@
 	 * Set the mask of receiving CPUs for this purpose.
 	 */
 	if (ipi == IPI_STOP_HARD)
-		atomic_set_int(&ipi_nmi_pending, cpus);
+		atomic_set_long(&ipi_nmi_pending, cpus);
 
 	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
-	while ((cpu = ffs(cpus)) != 0) {
+	while ((cpu = ffsl(cpus)) != 0) {
 		cpu--;
-		cpus &= ~(1 << cpu);
+		cpus &= ~(cputomask(cpu));
 
 		KASSERT(cpu_apic_ids[cpu] != -1,
 		    ("IPI to non-existent CPU %d", cpu));
@@ -1171,6 +1168,41 @@
 }
 
 /*
+ * send an IPI to a specific cpu.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+        u_int bitmap = 0;
+        u_int old_pending;
+        u_int new_pending;
+
+        if (IPI_IS_BITMAPED(ipi)) {
+                bitmap = 1 << ipi;
+                ipi = IPI_BITMAP_VECTOR;
+        }
+
+#ifdef STOP_NMI
+        if (ipi == IPI_STOP && stop_cpus_with_nmi) {
+                ipi_nmi_selected(cputomask(cpu));
+                return;
+        }
+#endif  
+        CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+
+        KASSERT(cpu_apic_ids[cpu] != -1,
+            ("IPI to non-existent CPU %d", cpu));
+
+        if (bitmap) {
+                do {
+                        old_pending = cpu_ipi_pending[cpu];
+                        new_pending = old_pending | bitmap;
+                } while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending));
+        }
+        lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
+}
+
+/*
  * send an IPI to all CPUs EXCEPT myself
  */
 void
@@ -1188,7 +1220,7 @@
 	 * Set the mask of receiving CPUs for this purpose.
 	 */
 	if (ipi == IPI_STOP_HARD)
-		atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
+		atomic_set_long(&ipi_nmi_pending, PCPU_GET(other_cpus));
 
 	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -1209,7 +1241,7 @@
 	if ((ipi_nmi_pending & cpumask) == 0)
 		return (1);
 
-	atomic_clear_int(&ipi_nmi_pending, cpumask);
+	atomic_clear_long(&ipi_nmi_pending, cpumask);
 	cpustop_handler();
 	return (0);
 }
@@ -1222,19 +1254,19 @@
 cpustop_handler(void)
 {
 	int cpu = PCPU_GET(cpuid);
-	int cpumask = PCPU_GET(cpumask);
+	cpumask_t cpumask = PCPU_GET(cpumask);
 
 	savectx(&stoppcbs[cpu]);
 
 	/* Indicate that we are stopped */
-	atomic_set_int(&stopped_cpus, cpumask);
+	atomic_set_long(&stopped_cpus, cpumask);
 
 	/* Wait for restart */
 	while (!(started_cpus & cpumask))
 	    ia32_pause();
 
-	atomic_clear_int(&started_cpus, cpumask);
-	atomic_clear_int(&stopped_cpus, cpumask);
+	atomic_clear_long(&started_cpus, cpumask);
+	atomic_clear_long(&stopped_cpus, cpumask);
 
 	if (cpu == 0 && cpustop_restartfunc != NULL) {
 		cpustop_restartfunc();
@@ -1260,7 +1292,7 @@
 	if (savectx2(&stopxpcbs[cpu])) {
 		fpugetregs(curthread, stopfpu);
 		wbinvd();
-		atomic_set_int(&stopped_cpus, cpumask);
+		atomic_set_long(&stopped_cpus, cpumask);
 	} else
 		fpusetregs(curthread, stopfpu);
 
@@ -1268,8 +1300,8 @@
 	while (!(started_cpus & cpumask))
 		ia32_pause();
 
-	atomic_clear_int(&started_cpus, cpumask);
-	atomic_clear_int(&stopped_cpus, cpumask);
+	atomic_clear_long(&started_cpus, cpumask);
+	atomic_clear_long(&stopped_cpus, cpumask);
 
 	/* Restore CR3 and enable interrupts */
 	load_cr3(cr3);
@@ -1301,7 +1333,7 @@
 	int error;
 
 	mask = hlt_cpus_mask;
-	error = sysctl_handle_int(oidp, &mask, 0, req);
+	error = sysctl_handle_long(oidp, &mask, 0, req);
 	if (error || !req->newptr)
 		return (error);
 
@@ -1315,11 +1347,11 @@
 		mask |= hyperthreading_cpus_mask;
 
 	if ((mask & all_cpus) == all_cpus)
-		mask &= ~(1<<0);
+		mask &= ~cputomask(0);
 	hlt_cpus_mask = mask;
 	return (error);
 }
-SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
+SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_LONG|CTLFLAG_RW,
     0, 0, sysctl_hlt_cpus, "IU",
     "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");
 
@@ -1342,7 +1374,7 @@
 		hlt_cpus_mask |= hyperthreading_cpus_mask;
 
 	if ((hlt_cpus_mask & all_cpus) == all_cpus)
-		hlt_cpus_mask &= ~(1<<0);
+		hlt_cpus_mask &= ~cputomask(0);
 
 	hlt_logical_cpus = disable;
 	return (error);
@@ -1380,7 +1412,7 @@
 		hlt_logical_cpus = 0;
 
 	if ((hlt_cpus_mask & all_cpus) == all_cpus)
-		hlt_cpus_mask &= ~(1<<0);
+		hlt_cpus_mask &= ~cputomask(0);
 
 	hyperthreading_allowed = allowed;
 	return (error);
@@ -1426,9 +1458,9 @@
 int
 mp_grab_cpu_hlt(void)
 {
-	u_int mask = PCPU_GET(cpumask);
+	cpumask_t mask = PCPU_GET(cpumask);
 #ifdef MP_WATCHDOG
-	u_int cpuid = PCPU_GET(cpuid);
+	cpumask_t cpuid = PCPU_GET(cpuid);
 #endif
 	int retval;
 
Index: sys/amd64/amd64/intr_machdep.c
===================================================================
--- sys/amd64/amd64/intr_machdep.c	(revision 210622)
+++ sys/amd64/amd64/intr_machdep.c	(working copy)
@@ -444,7 +444,7 @@
  */
 
 /* The BSP is always a valid target. */
-static cpumask_t intr_cpus = (1 << 0);
+static cpumask_t intr_cpus = cputomask(0);
 static int current_cpu;
 
 /*
@@ -466,7 +466,7 @@
 		current_cpu++;
 		if (current_cpu > mp_maxid)
 			current_cpu = 0;
-	} while (!(intr_cpus & (1 << current_cpu)));
+	} while (!(intr_cpus & cputomask(current_cpu)));
 	mtx_unlock_spin(&icu_lock);
 	return (apic_id);
 }
@@ -497,7 +497,7 @@
 		printf("INTR: Adding local APIC %d as a target\n",
 		    cpu_apic_ids[cpu]);
 
-	intr_cpus |= (1 << cpu);
+	intr_cpus |= cputomask(cpu);
 }
 
 /*
Index: sys/sys/smp.h
===================================================================
--- sys/sys/smp.h	(revision 210622)
+++ sys/sys/smp.h	(working copy)
@@ -89,7 +89,8 @@
  * time, thus permitting us to configure sparse maps of cpuid-dependent
  * (per-CPU) structures.
  */
-#define	CPU_ABSENT(x_cpu)	((all_cpus & (1 << (x_cpu))) == 0)
+#include <sys/systm.h>
+#define	CPU_ABSENT(x_cpu)	((all_cpus & (cputomask(x_cpu))) == 0)
 
 #ifdef SMP
 /*
Index: sys/sys/gmon.h
===================================================================
--- sys/sys/gmon.h	(revision 210622)
+++ sys/sys/gmon.h	(working copy)
@@ -197,6 +197,7 @@
 #define	GPROF_FROMS	2	/* struct: from location hash bucket */
 #define	GPROF_TOS	3	/* struct: destination/count structure */
 #define	GPROF_GMONPARAM	4	/* struct: profiling parameters (see above) */
+#define GPROF_FREEBUF   5       /* int: free flat profiling buffer */
 
 #ifdef _KERNEL
 
Index: sys/sys/systm.h
===================================================================
--- sys/sys/systm.h	(revision 210622)
+++ sys/sys/systm.h	(working copy)
@@ -415,4 +415,6 @@
 	return (x);
 }
 
+#define cputomask(_cpu) ((__cpumask_t)1 << (_cpu))
+
 #endif /* !_SYS_SYSTM_H_ */

--=-HH/4kVuH97/ENX7tPWsX--




Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?1280441014.2530.16.camel>