Date:      Wed, 8 Oct 2008 05:30:30 +0000 (UTC)
From:      Peter Wemm <peter@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r183688 - in user/peter/long_cpumask/sys: amd64/amd64 amd64/include kern
Message-ID:  <200810080530.m985UUS2022692@svn.freebsd.org>

Author: peter
Date: Wed Oct  8 05:30:30 2008
New Revision: 183688
URL: http://svn.freebsd.org/changeset/base/183688

Log:
  Expand cpumask_t from 'int' to 'long' on amd64.  This compiles and boots,
  and might even work on machines with >32 cores.  I'll work on the type
  change for other platforms.
  
  Summary of changes:
  * printf formats
  * clean up various garbage; use cpumask_t instead of 'u_int', etc.
  * atomic macro names (xx_int -> xx_long)
  * bitmask operations ("1 << cpu"  ->  "1ul << cpu"; see the sketch below)
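
For context on the "1 << cpu" -> "1ul << cpu" rewrites throughout the diff:
in C the literal 1 has type int, so the shift is performed in 32-bit
arithmetic no matter how wide the destination is, and a shift count of 32 or
more is undefined behavior.  A minimal standalone sketch of the pitfall
(illustrative only, not part of the commit):

	#include <assert.h>

	int
	main(void)
	{
		unsigned long mask = 0;	/* 64-bit cpumask_t on amd64 */
		int cpu = 40;

		/*
		 * Wrong: in "1 << cpu" the shift is done on an int; for
		 * cpu >= 32 it is undefined behavior (on amd64 the count
		 * typically wraps, so bit 8 would be set here instead).
		 *
		 * Right: "1ul << cpu" performs a 64-bit shift.
		 */
		mask |= 1ul << cpu;
		assert(mask == 0x10000000000ul);	/* bit 40 is set */
		return (0);
	}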

Modified:
  user/peter/long_cpumask/sys/amd64/amd64/cpu_switch.S
  user/peter/long_cpumask/sys/amd64/amd64/intr_machdep.c
  user/peter/long_cpumask/sys/amd64/amd64/local_apic.c
  user/peter/long_cpumask/sys/amd64/amd64/mp_machdep.c
  user/peter/long_cpumask/sys/amd64/amd64/mptable.c
  user/peter/long_cpumask/sys/amd64/amd64/pmap.c
  user/peter/long_cpumask/sys/amd64/amd64/vm_machdep.c
  user/peter/long_cpumask/sys/amd64/include/_types.h
  user/peter/long_cpumask/sys/amd64/include/pmap.h
  user/peter/long_cpumask/sys/amd64/include/smp.h
  user/peter/long_cpumask/sys/kern/kern_ktr.c
  user/peter/long_cpumask/sys/kern/kern_pmc.c
  user/peter/long_cpumask/sys/kern/sched_4bsd.c
  user/peter/long_cpumask/sys/kern/sched_ule.c
  user/peter/long_cpumask/sys/kern/subr_pcpu.c
  user/peter/long_cpumask/sys/kern/subr_smp.c

Modified: user/peter/long_cpumask/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- user/peter/long_cpumask/sys/amd64/amd64/cpu_switch.S	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/amd64/cpu_switch.S	Wed Oct  8 05:30:30 2008	(r183688)
@@ -80,7 +80,7 @@ ENTRY(cpu_throw)
 	/* release bit from old pm_active */
 	movq	TD_PROC(%rdi), %rdx		/* oldtd->td_proc */
 	movq	P_VMSPACE(%rdx), %rdx		/* proc->p_vmspace */
-	LK btrl	%eax, VM_PMAP+PM_ACTIVE(%rdx)	/* clear old */
+	LK btrq	%rax, VM_PMAP+PM_ACTIVE(%rdx)	/* clear old */
 	movq	TD_PCB(%rsi),%r8		/* newtd->td_proc */
 	movq	PCB_CR3(%r8),%rdx
 	movq	%rdx,%cr3			/* new address space */
@@ -165,13 +165,13 @@ swinact:
 	/* Release bit from old pmap->pm_active */
 	movq	TD_PROC(%rdi), %rcx		/* oldproc */
 	movq	P_VMSPACE(%rcx), %rcx
-	LK btrl	%eax, VM_PMAP+PM_ACTIVE(%rcx)	/* clear old */
+	LK btrq	%rax, VM_PMAP+PM_ACTIVE(%rcx)	/* clear old */
 	SETLK	%rdx, TD_LOCK(%rdi)		/* Release the old thread */
 swact:
 	/* Set bit in new pmap->pm_active */
 	movq	TD_PROC(%rsi),%rdx		/* newproc */
 	movq	P_VMSPACE(%rdx), %rdx
-	LK btsl	%eax, VM_PMAP+PM_ACTIVE(%rdx)	/* set new */
+	LK btsq	%rax, VM_PMAP+PM_ACTIVE(%rdx)	/* set new */
 
 sw1:
 #if defined(SCHED_ULE) && defined(SMP)

Modified: user/peter/long_cpumask/sys/amd64/amd64/intr_machdep.c
==============================================================================
--- user/peter/long_cpumask/sys/amd64/amd64/intr_machdep.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/amd64/intr_machdep.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -435,7 +435,7 @@ DB_SHOW_COMMAND(irqs, db_show_irqs)
  */
 
 /* The BSP is always a valid target. */
-static cpumask_t intr_cpus = (1 << 0);
+static cpumask_t intr_cpus = (1ul << 0);
 static int current_cpu;
 
 static void
@@ -450,7 +450,7 @@ intr_assign_next_cpu(struct intsrc *isrc
 		current_cpu++;
 		if (current_cpu > mp_maxid)
 			current_cpu = 0;
-	} while (!(intr_cpus & (1 << current_cpu)));
+	} while (!(intr_cpus & (1ul << current_cpu)));
 }
 
 /* Attempt to bind the specified IRQ to the specified CPU. */
@@ -479,7 +479,7 @@ intr_add_cpu(u_int cpu)
 		printf("INTR: Adding local APIC %d as a target\n",
 		    cpu_apic_ids[cpu]);
 
-	intr_cpus |= (1 << cpu);
+	intr_cpus |= (1ul << cpu);
 }
 
 /*

Modified: user/peter/long_cpumask/sys/amd64/amd64/local_apic.c
==============================================================================
--- user/peter/long_cpumask/sys/amd64/amd64/local_apic.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/amd64/local_apic.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -672,7 +672,7 @@ lapic_handle_timer(struct trapframe *fra
 	 * and unlike other schedulers it actually schedules threads to
 	 * those CPUs.
 	 */
-	if ((hlt_cpus_mask & (1 << PCPU_GET(cpuid))) != 0)
+	if ((hlt_cpus_mask & (1ul << PCPU_GET(cpuid))) != 0)
 		return;
 #endif
 

Modified: user/peter/long_cpumask/sys/amd64/amd64/mp_machdep.c
==============================================================================
--- user/peter/long_cpumask/sys/amd64/amd64/mp_machdep.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/amd64/mp_machdep.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -112,7 +112,7 @@ extern inthand_t IDTVEC(fast_syscall), I
 #ifdef STOP_NMI
 volatile cpumask_t ipi_nmi_pending;
 
-static void	ipi_nmi_selected(u_int32_t cpus);
+static void	ipi_nmi_selected(cpumask_t cpus);
 #endif 
 
 /*
@@ -733,7 +733,7 @@ start_all_aps(void)
 			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
 		}
 
-		all_cpus |= (1 << cpu);		/* record AP in CPU map */
+		all_cpus |= (1ul << cpu);	/* record AP in CPU map */
 	}
 
 	/* build our map of 'other' CPUs */
@@ -853,12 +853,12 @@ smp_tlb_shootdown(u_int vector, vm_offse
 }
 
 static void
-smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
+smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
 {
 	int ncpu, othercpus;
 
 	othercpus = mp_ncpus - 1;
-	if (mask == (u_int)-1) {
+	if (mask == (cpumask_t)-1) {
 		ncpu = othercpus;
 		if (ncpu < 1)
 			return;
@@ -883,7 +883,7 @@ smp_targeted_tlb_shootdown(u_int mask, u
 	smp_tlb_addr1 = addr1;
 	smp_tlb_addr2 = addr2;
 	atomic_store_rel_int(&smp_tlb_wait, 0);
-	if (mask == (u_int)-1)
+	if (mask == (cpumask_t)-1)
 		ipi_all_but_self(vector);
 	else
 		ipi_selected(mask, vector);
@@ -927,7 +927,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_o
 }
 
 void
-smp_masked_invltlb(u_int mask)
+smp_masked_invltlb(cpumask_t mask)
 {
 
 	if (smp_started) {
@@ -936,7 +936,7 @@ smp_masked_invltlb(u_int mask)
 }
 
 void
-smp_masked_invlpg(u_int mask, vm_offset_t addr)
+smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
 {
 
 	if (smp_started) {
@@ -945,7 +945,7 @@ smp_masked_invlpg(u_int mask, vm_offset_
 }
 
 void
-smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
+smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
 {
 
 	if (smp_started) {
@@ -961,7 +961,7 @@ ipi_bitmap_handler(struct trapframe fram
 
 	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
 
-	if (ipi_bitmap & (1 << IPI_PREEMPT))
+	if (ipi_bitmap & (1ul << IPI_PREEMPT))
 		sched_preempt(curthread);
 
 	/* Nothing to do for AST */
@@ -971,7 +971,7 @@ ipi_bitmap_handler(struct trapframe fram
  * send an IPI to a set of cpus.
  */
 void
-ipi_selected(u_int32_t cpus, u_int ipi)
+ipi_selected(cpumask_t cpus, u_int ipi)
 {
 	int cpu;
 	u_int bitmap = 0;
@@ -990,9 +990,9 @@ ipi_selected(u_int32_t cpus, u_int ipi)
 	}
 #endif
 	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
-	while ((cpu = ffs(cpus)) != 0) {
+	while ((cpu = ffsl(cpus)) != 0) {
 		cpu--;
-		cpus &= ~(1 << cpu);
+		cpus &= ~(1ul << cpu);
 
 		KASSERT(cpu_apic_ids[cpu] != -1,
 		    ("IPI to non-existent CPU %d", cpu));
@@ -1035,7 +1035,7 @@ ipi_all_but_self(u_int ipi)
 #define	BEFORE_SPIN	1000000
 
 void
-ipi_nmi_selected(u_int32_t cpus)
+ipi_nmi_selected(cpumask_t cpus)
 {
 	int cpu;
 	register_t icrlo;
@@ -1045,11 +1045,11 @@ ipi_nmi_selected(u_int32_t cpus)
 	
 	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
 
-	atomic_set_int(&ipi_nmi_pending, cpus);
+	atomic_set_long(&ipi_nmi_pending, cpus);
 
-	while ((cpu = ffs(cpus)) != 0) {
+	while ((cpu = ffsl(cpus)) != 0) {
 		cpu--;
-		cpus &= ~(1 << cpu);
+		cpus &= ~(1ul << cpu);
 
 		KASSERT(cpu_apic_ids[cpu] != -1,
 		    ("IPI NMI to non-existent CPU %d", cpu));
@@ -1065,12 +1065,12 @@ ipi_nmi_selected(u_int32_t cpus)
 int
 ipi_nmi_handler(void)
 {
-	int cpumask = PCPU_GET(cpumask);
+	cpumask_t cpumask = PCPU_GET(cpumask);
 
 	if (!(ipi_nmi_pending & cpumask))
 		return 1;
 
-	atomic_clear_int(&ipi_nmi_pending, cpumask);
+	atomic_clear_long(&ipi_nmi_pending, cpumask);
 	cpustop_handler();
 	return 0;
 }
@@ -1085,19 +1085,19 @@ void
 cpustop_handler(void)
 {
 	int cpu = PCPU_GET(cpuid);
-	int cpumask = PCPU_GET(cpumask);
+	cpumask_t cpumask = PCPU_GET(cpumask);
 
 	savectx(&stoppcbs[cpu]);
 
 	/* Indicate that we are stopped */
-	atomic_set_int(&stopped_cpus, cpumask);
+	atomic_set_long(&stopped_cpus, cpumask);
 
 	/* Wait for restart */
 	while (!(started_cpus & cpumask))
 	    ia32_pause();
 
-	atomic_clear_int(&started_cpus, cpumask);
-	atomic_clear_int(&stopped_cpus, cpumask);
+	atomic_clear_long(&started_cpus, cpumask);
+	atomic_clear_long(&stopped_cpus, cpumask);
 
 	if (cpu == 0 && cpustop_restartfunc != NULL) {
 		cpustop_restartfunc();
@@ -1245,7 +1245,7 @@ SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_AN
 int
 mp_grab_cpu_hlt(void)
 {
-	u_int mask = PCPU_GET(cpumask);
+	cpumask_t mask = PCPU_GET(cpumask);
 #ifdef MP_WATCHDOG
 	u_int cpuid = PCPU_GET(cpuid);
 #endif
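
A note on the ffs() -> ffsl() changes in ipi_selected() and
ipi_nmi_selected() above: ffs(3) only examines an int, so with a 64-bit mask
CPUs 32-63 would never be found.  The iteration pattern, as a standalone
userland sketch (illustrative only):

	#include <stdio.h>
	#include <strings.h>	/* ffsl() */

	static void
	visit_cpus(unsigned long cpus)
	{
		int cpu;

		/*
		 * ffsl() returns the 1-based index of the lowest set bit,
		 * or 0 when the mask is empty.
		 */
		while ((cpu = ffsl(cpus)) != 0) {
			cpu--;			/* 0-based CPU id */
			cpus &= ~(1ul << cpu);	/* clear it so the loop terminates */
			printf("CPU %d\n", cpu);
		}
	}

	int
	main(void)
	{
		/* ffs() would miss CPU 33; ffsl() visits 1, then 33. */
		visit_cpus((1ul << 1) | (1ul << 33));
		return (0);
	}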

Modified: user/peter/long_cpumask/sys/amd64/amd64/mptable.c
==============================================================================
--- user/peter/long_cpumask/sys/amd64/amd64/mptable.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/amd64/mptable.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -888,13 +888,13 @@ mptable_hyperthread_fixup(u_long id_mask
 	 * already in the table, then kill the fixup.
 	 */
 	for (id = 0; id <= MAX_LAPIC_ID; id++) {
-		if ((id_mask & 1 << id) == 0)
+		if ((id_mask & 1ul << id) == 0)
 			continue;
 		/* First, make sure we are on a logical_cpus boundary. */
 		if (id % logical_cpus != 0)
 			return;
 		for (i = id + 1; i < id + logical_cpus; i++)
-			if ((id_mask & 1 << i) != 0)
+			if ((id_mask & 1ul << i) != 0)
 				return;
 	}
 
@@ -911,7 +911,7 @@ mptable_hyperthread_fixup(u_long id_mask
 				    i, id);
 			lapic_create(i, 0);
 		}
-		id_mask &= ~(1 << id);
+		id_mask &= ~(1ul << id);
 	}
 }
 #endif /* MPTABLE_FORCE_HTT */

Modified: user/peter/long_cpumask/sys/amd64/amd64/pmap.c
==============================================================================
--- user/peter/long_cpumask/sys/amd64/amd64/pmap.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/amd64/pmap.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -544,7 +544,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	PMAP_LOCK_INIT(kernel_pmap);
 	kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys);
 	kernel_pmap->pm_root = NULL;
-	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
+	kernel_pmap->pm_active = (cpumask_t)-1;	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
 
 	/*
@@ -858,8 +858,7 @@ pmap_cache_bits(int mode, boolean_t is_p
 void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
-	u_int cpumask;
-	u_int other_cpus;
+	cpumask_t cpumask, other_cpus;
 
 	sched_pin();
 	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
@@ -879,8 +878,7 @@ pmap_invalidate_page(pmap_t pmap, vm_off
 void
 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	u_int cpumask;
-	u_int other_cpus;
+	cpumask_t cpumask, other_cpus;
 	vm_offset_t addr;
 
 	sched_pin();
@@ -904,8 +902,7 @@ pmap_invalidate_range(pmap_t pmap, vm_of
 void
 pmap_invalidate_all(pmap_t pmap)
 {
-	u_int cpumask;
-	u_int other_cpus;
+	cpumask_t cpumask, other_cpus;
 
 	sched_pin();
 	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
@@ -4739,8 +4736,8 @@ pmap_activate(struct thread *td)
 	oldpmap = PCPU_GET(curpmap);
 #ifdef SMP
 if (oldpmap)	/* XXX FIXME */
-	atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
-	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
+	atomic_clear_long(&oldpmap->pm_active, PCPU_GET(cpumask));
+	atomic_set_long(&pmap->pm_active, PCPU_GET(cpumask));
 #else
 if (oldpmap)	/* XXX FIXME */
 	oldpmap->pm_active &= ~PCPU_GET(cpumask);

Modified: user/peter/long_cpumask/sys/amd64/amd64/vm_machdep.c
==============================================================================
--- user/peter/long_cpumask/sys/amd64/amd64/vm_machdep.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/amd64/vm_machdep.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -419,7 +419,8 @@ void
 cpu_reset()
 {
 #ifdef SMP
-	u_int cnt, map;
+	u_int cnt;
+	cpumask_t map;
 
 	if (smp_active) {
 		map = PCPU_GET(other_cpus) & ~stopped_cpus;
@@ -435,7 +436,7 @@ cpu_reset()
 			printf("cpu_reset: Restarting BSP\n");
 
 			/* Restart CPU #0. */
-			atomic_store_rel_int(&started_cpus, 1 << 0);
+			atomic_store_rel_long(&started_cpus, 1 << 0);
 
 			cnt = 0;
 			while (cpu_reset_proxy_active == 0 && cnt < 10000000)

Modified: user/peter/long_cpumask/sys/amd64/include/_types.h
==============================================================================
--- user/peter/long_cpumask/sys/amd64/include/_types.h	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/include/_types.h	Wed Oct  8 05:30:30 2008	(r183688)
@@ -61,7 +61,7 @@ typedef	unsigned long		__uint64_t;
  * Standard type definitions.
  */
 typedef	__int32_t	__clock_t;		/* clock()... */
-typedef	unsigned int	__cpumask_t;
+typedef	unsigned long	__cpumask_t;
 typedef	__int64_t	__critical_t;
 typedef	double		__double_t;
 typedef	float		__float_t;

Modified: user/peter/long_cpumask/sys/amd64/include/pmap.h
==============================================================================
--- user/peter/long_cpumask/sys/amd64/include/pmap.h	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/include/pmap.h	Wed Oct  8 05:30:30 2008	(r183688)
@@ -247,8 +247,7 @@ struct pmap {
 	struct mtx		pm_mtx;
 	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
 	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
-	u_int			pm_active;	/* active on cpus */
-	/* spare u_int here due to padding */
+	cpumask_t		pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	vm_page_t		pm_root;	/* spare page table pages */
 };

Modified: user/peter/long_cpumask/sys/amd64/include/smp.h
==============================================================================
--- user/peter/long_cpumask/sys/amd64/include/smp.h	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/amd64/include/smp.h	Wed Oct  8 05:30:30 2008	(r183688)
@@ -54,19 +54,19 @@ inthand_t
 void	cpu_add(u_int apic_id, char boot_cpu);
 void	cpustop_handler(void);
 void	init_secondary(void);
-void	ipi_selected(u_int cpus, u_int ipi);
+void	ipi_selected(cpumask_t cpus, u_int ipi);
 void	ipi_all_but_self(u_int ipi);
 void 	ipi_bitmap_handler(struct trapframe frame);
 u_int	mp_bootaddress(u_int);
 int	mp_grab_cpu_hlt(void);
 void	smp_cache_flush(void);
 void	smp_invlpg(vm_offset_t addr);
-void	smp_masked_invlpg(u_int mask, vm_offset_t addr);
+void	smp_masked_invlpg(cpumask_t mask, vm_offset_t addr);
 void	smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
-void	smp_masked_invlpg_range(u_int mask, vm_offset_t startva,
+void	smp_masked_invlpg_range(cpumask_t mask, vm_offset_t startva,
 	    vm_offset_t endva);
 void	smp_invltlb(void);
-void	smp_masked_invltlb(u_int mask);
+void	smp_masked_invltlb(cpumask_t mask);
 
 #ifdef STOP_NMI
 int	ipi_nmi_handler(void);

Modified: user/peter/long_cpumask/sys/kern/kern_ktr.c
==============================================================================
--- user/peter/long_cpumask/sys/kern/kern_ktr.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/kern/kern_ktr.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -207,7 +207,7 @@ ktr_tracepoint(u_int mask, const char *f
 	if ((ktr_mask & mask) == 0)
 		return;
 	cpu = KTR_CPU;
-	if (((1 << cpu) & ktr_cpumask) == 0)
+	if (((1ul << cpu) & ktr_cpumask) == 0)
 		return;
 #if defined(KTR_VERBOSE) || defined(KTR_ALQ)
 	td = curthread;

Modified: user/peter/long_cpumask/sys/kern/kern_pmc.c
==============================================================================
--- user/peter/long_cpumask/sys/kern/kern_pmc.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/kern/kern_pmc.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -110,7 +110,7 @@ pmc_cpu_is_active(int cpu)
 {
 #ifdef	SMP
 	return (pmc_cpu_is_present(cpu) &&
-	    (hlt_cpus_mask & (1 << cpu)) == 0);
+	    (hlt_cpus_mask & (1ul << cpu)) == 0);
 #else
 	return (1);
 #endif
@@ -137,7 +137,7 @@ int
 pmc_cpu_is_primary(int cpu)
 {
 #ifdef	SMP
-	return ((logical_cpus_mask & (1 << cpu)) == 0);
+	return ((logical_cpus_mask & (1ul << cpu)) == 0);
 #else
 	return (1);
 #endif

Modified: user/peter/long_cpumask/sys/kern/sched_4bsd.c
==============================================================================
--- user/peter/long_cpumask/sys/kern/sched_4bsd.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/kern/sched_4bsd.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -1067,7 +1067,7 @@ forward_wakeup(int cpunum)
 	me = PCPU_GET(cpumask);
 
 	/* Don't bother if we should be doing it ourself. */
-	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
+	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1ul << cpunum)))
 		return (0);
 
 	dontuse = me | stopped_cpus | hlt_cpus_mask;
@@ -1101,7 +1101,7 @@ forward_wakeup(int cpunum)
 	/* If we only allow a specific CPU, then mask off all the others. */
 	if (cpunum != NOCPU) {
 		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
-		map &= (1 << cpunum);
+		map &= (1ul << cpunum);
 	} else {
 		/* Try choose an idle die. */
 		if (forward_wakeup_use_htt) {
@@ -1628,7 +1628,7 @@ sched_affinity(struct thread *td)
 
 		td->td_flags |= TDF_NEEDRESCHED;
 		if (td != curthread)
-			ipi_selected(1 << cpu, IPI_AST);
+			ipi_selected(1ul << cpu, IPI_AST);
 		break;
 	default:
 		break;

Modified: user/peter/long_cpumask/sys/kern/sched_ule.c
==============================================================================
--- user/peter/long_cpumask/sys/kern/sched_ule.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/kern/sched_ule.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -540,7 +540,7 @@ struct cpu_search {
 
 #define	CPUMASK_FOREACH(cpu, mask)				\
 	for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++)	\
-		if ((mask) & 1 << (cpu))
+		if ((mask) & 1ul << (cpu))
 
 static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
     struct cpu_search *high, const int match);
@@ -562,14 +562,14 @@ cpu_compare(int cpu, struct cpu_search *
 
 	tdq = TDQ_CPU(cpu);
 	if (match & CPU_SEARCH_LOWEST)
-		if (low->cs_mask & (1 << cpu) &&
+		if (low->cs_mask & (1ul << cpu) &&
 		    tdq->tdq_load < low->cs_load &&
 		    tdq->tdq_lowpri > low->cs_limit) {
 			low->cs_cpu = cpu;
 			low->cs_load = tdq->tdq_load;
 		}
 	if (match & CPU_SEARCH_HIGHEST)
-		if (high->cs_mask & (1 << cpu) &&
+		if (high->cs_mask & (1ul << cpu) &&
 		    tdq->tdq_load >= high->cs_limit && 
 		    tdq->tdq_load > high->cs_load &&
 		    tdq->tdq_transferable) {
@@ -739,7 +739,7 @@ sched_balance_group(struct cpu_group *cg
 	int low;
 	int i;
 
-	mask = -1;
+	mask = (cpumask_t)-1;
 	for (;;) {
 		sched_both(cg, mask, &low, &high);
 		if (low == high || low == -1 || high == -1)
@@ -751,9 +751,9 @@ sched_balance_group(struct cpu_group *cg
 		 * to kick out of the set and try again.
 	 	 */
 		if (TDQ_CPU(high)->tdq_transferable == 0)
-			mask &= ~(1 << high);
+			mask &= ~(1ul << high);
 		else
-			mask &= ~(1 << low);
+			mask &= ~(1ul << low);
 	}
 
 	for (i = 0; i < cg->cg_children; i++)
@@ -839,7 +839,7 @@ sched_balance_pair(struct tdq *high, str
 		 * IPI the target cpu to force it to reschedule with the new
 		 * workload.
 		 */
-		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
+		ipi_selected(1ul << TDQ_ID(low), IPI_PREEMPT);
 	}
 	tdq_unlock_pair(high, low);
 	return (moved);
@@ -894,7 +894,7 @@ tdq_idled(struct tdq *tdq)
 
 	if (smp_started == 0 || steal_idle == 0)
 		return (1);
-	mask = -1;
+	mask = (cpumask_t)-1;
 	mask &= ~PCPU_GET(cpumask);
 	/* We don't want to be preempted while we're iterating. */
 	spinlock_enter();
@@ -909,7 +909,7 @@ tdq_idled(struct tdq *tdq)
 			continue;
 		}
 		steal = TDQ_CPU(cpu);
-		mask &= ~(1 << cpu);
+		mask &= ~(1ul << cpu);
 		tdq_lock_pair(tdq, steal);
 		if (steal->tdq_load < thresh || steal->tdq_transferable == 0) {
 			tdq_unlock_pair(tdq, steal);
@@ -969,7 +969,7 @@ tdq_notify(struct tdq *tdq, struct threa
 			return;
 	}
 	tdq->tdq_ipipending = 1;
-	ipi_selected(1 << cpu, IPI_PREEMPT);
+	ipi_selected(1ul << cpu, IPI_PREEMPT);
 }
 
 /*
@@ -2404,7 +2404,7 @@ sched_affinity(struct thread *td)
 	cpu = ts->ts_cpu;
 	ts->ts_cpu = sched_pickcpu(td, 0);
 	if (cpu != PCPU_GET(cpuid))
-		ipi_selected(1 << cpu, IPI_PREEMPT);
+		ipi_selected(1ul << cpu, IPI_PREEMPT);
 #endif
 }
 

Modified: user/peter/long_cpumask/sys/kern/subr_pcpu.c
==============================================================================
--- user/peter/long_cpumask/sys/kern/subr_pcpu.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/kern/subr_pcpu.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -70,7 +70,7 @@ pcpu_init(struct pcpu *pcpu, int cpuid, 
 	KASSERT(cpuid >= 0 && cpuid < MAXCPU,
 	    ("pcpu_init: invalid cpuid %d", cpuid));
 	pcpu->pc_cpuid = cpuid;
-	pcpu->pc_cpumask = 1 << cpuid;
+	pcpu->pc_cpumask = 1ul << cpuid;
 	cpuid_to_pcpu[cpuid] = pcpu;
 	SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
 	cpu_pcpu_init(pcpu, cpuid, size);

Modified: user/peter/long_cpumask/sys/kern/subr_smp.c
==============================================================================
--- user/peter/long_cpumask/sys/kern/subr_smp.c	Wed Oct  8 05:23:50 2008	(r183687)
+++ user/peter/long_cpumask/sys/kern/subr_smp.c	Wed Oct  8 05:30:30 2008	(r183688)
@@ -186,7 +186,7 @@ forward_signal(struct thread *td)
 	id = td->td_oncpu;
 	if (id == NOCPU)
 		return;
-	ipi_selected(1 << id, IPI_AST);
+	ipi_selected(1ul << id, IPI_AST);
 }
 
 void
@@ -285,7 +285,12 @@ restart_cpus(cpumask_t map)
 	CTR1(KTR_SMP, "restart_cpus(%x)", map);
 
 	/* signal other cpus to restart */
+#if defined(__amd64__)
+	/* cpumask_t is 64 bit on amd64. */
+	atomic_store_rel_long(&started_cpus, map);
+#else
 	atomic_store_rel_int(&started_cpus, map);
+#endif
 
 	/* wait for each to clear its bit */
 	while ((stopped_cpus & map) != 0)
@@ -363,7 +368,7 @@ smp_rendezvous_cpus(cpumask_t map,
 	}
 
 	for (i = 0; i < mp_maxid; i++)
-		if (((1 << i) & map) != 0 && !CPU_ABSENT(i))
+		if (((1ul << i) & map) != 0 && !CPU_ABSENT(i))
 			ncpus++;
 
 	/* obtain rendezvous lock */
@@ -380,10 +385,10 @@ smp_rendezvous_cpus(cpumask_t map,
 	atomic_store_rel_int(&smp_rv_waiters[0], 0);
 
 	/* signal other processors, which will enter the IPI with interrupts off */
-	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);
+	ipi_selected(map & ~(1ul << curcpu), IPI_RENDEZVOUS);
 
 	/* Check if the current CPU is in the map */
-	if ((map & (1 << curcpu)) != 0)
+	if ((map & (1ul << curcpu)) != 0)
 		smp_rendezvous_action();
 
 	if (teardown_func == smp_no_rendevous_barrier)
@@ -455,8 +460,8 @@ smp_topo(void)
 		panic("Built bad topology at %p.  CPU count %d != %d",
 		    top, top->cg_count, mp_ncpus);
 	if (top->cg_mask != all_cpus)
-		panic("Built bad topology at %p.  CPU mask 0x%X != 0x%X",
-		    top, top->cg_mask, all_cpus);
+		panic("Built bad topology at %p.  CPU mask 0x%lX != 0x%lX",
+		    top, (unsigned long)top->cg_mask, (unsigned long)all_cpus);
 	return (top);
 }
 
@@ -468,7 +473,7 @@ smp_topo_none(void)
 	top = &group[0];
 	top->cg_parent = NULL;
 	top->cg_child = NULL;
-	top->cg_mask = (1 << mp_ncpus) - 1;
+	top->cg_mask = (1ul << mp_ncpus) - 1;
 	top->cg_count = mp_ncpus;
 	top->cg_children = 0;
 	top->cg_level = CG_SHARE_NONE;
@@ -485,7 +490,7 @@ smp_topo_addleaf(struct cpu_group *paren
 	int i;
 
 	for (mask = 0, i = 0; i < count; i++, start++)
-		mask |= (1 << start);
+		mask |= (1ul << start);
 	child->cg_parent = parent;
 	child->cg_child = NULL;
 	child->cg_children = 0;
@@ -496,8 +501,9 @@ smp_topo_addleaf(struct cpu_group *paren
 	parent->cg_children++;
 	for (; parent != NULL; parent = parent->cg_parent) {
 		if ((parent->cg_mask & child->cg_mask) != 0)
-			panic("Duplicate children in %p.  mask 0x%X child 0x%X",
-			    parent, parent->cg_mask, child->cg_mask);
+			panic("Duplicate children in %p.  mask 0x%lX child 0x%lX",
+			    parent, (unsigned long)parent->cg_mask,
+			    (unsigned long)child->cg_mask);
 		parent->cg_mask |= child->cg_mask;
 		parent->cg_count += child->cg_count;
 	}
@@ -562,7 +568,7 @@ smp_topo_find(struct cpu_group *top, int
 	int children;
 	int i;
 
-	mask = (1 << cpu);
+	mask = (1ul << cpu);
 	cg = top;
 	for (;;) {
 		if ((cg->cg_mask & mask) == 0)


