Date:      Sat, 25 May 2019 11:27:56 +0000 (UTC)
From:      "Rodney W. Grimes" <rgrimes@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r348271 - in stable/11/sys/amd64: include vmm vmm/amd vmm/intel vmm/io
Message-ID:  <201905251127.x4PBRuYu082936@repo.freebsd.org>

Author: rgrimes
Date: Sat May 25 11:27:56 2019
New Revision: 348271
URL: https://svnweb.freebsd.org/changeset/base/348271

Log:
  MFC: r346714: Add accessor function for vm->maxcpus
  
  Replace most VM_MAXCPU constant uses with an accessor function to
  vm->maxcpus, which for now is initialized to and kept at the value of
  VM_MAXCPU.
  
  This is a rework of Fabian Freyer's (fabian.freyer_physik.tu-berlin.de)
  work from D10070, adjusted for the CPU topology changes that
  occurred in r332298.
  
  Approved by:		re (kib)

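For readers tracking the change, here is a minimal standalone C sketch of
the accessor pattern this commit introduces: call sites iterate up to a
per-VM runtime value obtained through an accessor instead of the
compile-time VM_MAXCPU bound. The struct layout and the for_each_vcpu()
helper below are simplified illustrations, not the real vmm(4) definitions.

	#include <stdint.h>

	#define	VM_MAXCPU	16	/* illustrative compile-time bound */

	struct vm {
		uint16_t maxcpus;	/* per-VM runtime vCPU limit */
		/* ... remaining fields elided ... */
	};

	/*
	 * Accessor mirroring vm_get_maxcpus(): callers depend on the
	 * per-VM value rather than on the VM_MAXCPU constant, so the
	 * limit can later become configurable per VM.
	 */
	static uint16_t
	vm_get_maxcpus(struct vm *vm)
	{

		return (vm->maxcpus);
	}

	/* Hypothetical helper showing the converted loop shape. */
	static void
	for_each_vcpu(struct vm *vm, void (*fn)(struct vm *, int))
	{
		uint16_t maxcpus;
		int i;

		maxcpus = vm_get_maxcpus(vm);
		for (i = 0; i < maxcpus; i++)
			fn(vm, i);
	}
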
Modified:
  stable/11/sys/amd64/include/vmm.h
  stable/11/sys/amd64/vmm/amd/svm.c
  stable/11/sys/amd64/vmm/intel/vmx.c
  stable/11/sys/amd64/vmm/io/vlapic.c
  stable/11/sys/amd64/vmm/vmm.c
  stable/11/sys/amd64/vmm/vmm_dev.c
  stable/11/sys/amd64/vmm/vmm_lapic.c
  stable/11/sys/amd64/vmm/vmm_stat.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/include/vmm.h
==============================================================================
--- stable/11/sys/amd64/include/vmm.h	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/include/vmm.h	Sat May 25 11:27:56 2019	(r348271)
@@ -184,6 +184,7 @@ int vm_create(const char *name, struct vm **retvm);
 void vm_destroy(struct vm *vm);
 int vm_reinit(struct vm *vm);
 const char *vm_name(struct vm *vm);
+uint16_t vm_get_maxcpus(struct vm *vm);
 void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
     uint16_t *threads, uint16_t *maxcpus);
 int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,

Modified: stable/11/sys/amd64/vmm/amd/svm.c
==============================================================================
--- stable/11/sys/amd64/vmm/amd/svm.c	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/vmm/amd/svm.c	Sat May 25 11:27:56 2019	(r348271)
@@ -524,6 +524,7 @@ svm_vminit(struct vm *vm, pmap_t pmap)
 	struct svm_vcpu *vcpu;
 	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
 	int i;
+	uint16_t maxcpus;
 
 	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
 	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
@@ -577,7 +578,8 @@ svm_vminit(struct vm *vm, pmap_t pmap)
 	iopm_pa = vtophys(svm_sc->iopm_bitmap);
 	msrpm_pa = vtophys(svm_sc->msr_bitmap);
 	pml4_pa = svm_sc->nptp;
-	for (i = 0; i < VM_MAXCPU; i++) {
+	maxcpus = vm_get_maxcpus(svm_sc->vm);
+	for (i = 0; i < maxcpus; i++) {
 		vcpu = svm_get_vcpu(svm_sc, i);
 		vcpu->nextrip = ~0;
 		vcpu->lastcpu = NOCPU;

Modified: stable/11/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- stable/11/sys/amd64/vmm/intel/vmx.c	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/vmm/intel/vmx.c	Sat May 25 11:27:56 2019	(r348271)
@@ -941,6 +941,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
 	struct vmx *vmx;
 	struct vmcs *vmcs;
 	uint32_t exc_bitmap;
+	uint16_t maxcpus;
 
 	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
 	if ((uintptr_t)vmx & PAGE_MASK) {
@@ -1002,7 +1003,8 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
 		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
 	}
 
-	for (i = 0; i < VM_MAXCPU; i++) {
+	maxcpus = vm_get_maxcpus(vm);
+	for (i = 0; i < maxcpus; i++) {
 		vmcs = &vmx->vmcs[i];
 		vmcs->identifier = vmx_revision();
 		error = vmclear(vmcs);
@@ -2994,11 +2996,13 @@ vmx_vmcleanup(void *arg)
 {
 	int i;
 	struct vmx *vmx = arg;
+	uint16_t maxcpus;
 
 	if (apic_access_virtualization(vmx, 0))
 		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	maxcpus = vm_get_maxcpus(vmx->vm);
+	for (i = 0; i < maxcpus; i++)
 		vpid_free(vmx->state[i].vpid);
 
 	free(vmx, M_VMX);

Modified: stable/11/sys/amd64/vmm/io/vlapic.c
==============================================================================
--- stable/11/sys/amd64/vmm/io/vlapic.c	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/vmm/io/vlapic.c	Sat May 25 11:27:56 2019	(r348271)
@@ -836,7 +836,7 @@ vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32
 		 */
 		CPU_ZERO(dmask);
 		vcpuid = vm_apicid2vcpuid(vm, dest);
-		if (vcpuid < VM_MAXCPU)
+		if (vcpuid < vm_get_maxcpus(vm))
 			CPU_SET(vcpuid, dmask);
 	} else {
 		/*
@@ -963,6 +963,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool
 	struct vlapic *vlapic2;
 	struct vm_exit *vmexit;
 	struct LAPIC *lapic;
+	uint16_t maxcpus;
 
 	lapic = vlapic->apic_page;
 	lapic->icr_lo &= ~APIC_DELSTAT_PEND;
@@ -1024,11 +1025,12 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool
 		return (0);	/* handled completely in the kernel */
 	}
 
+	maxcpus = vm_get_maxcpus(vlapic->vm);
 	if (mode == APIC_DELMODE_INIT) {
 		if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
 			return (0);
 
-		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+		if (vlapic->vcpuid == 0 && dest != 0 && dest < maxcpus) {
 			vlapic2 = vm_lapic(vlapic->vm, dest);
 
 			/* move from INIT to waiting-for-SIPI state */
@@ -1041,7 +1043,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool
 	}
 
 	if (mode == APIC_DELMODE_STARTUP) {
-		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+		if (vlapic->vcpuid == 0 && dest != 0 && dest < maxcpus) {
 			vlapic2 = vm_lapic(vlapic->vm, dest);
 
 			/*
@@ -1445,7 +1447,8 @@ void
 vlapic_init(struct vlapic *vlapic)
 {
 	KASSERT(vlapic->vm != NULL, ("vlapic_init: vm is not initialized"));
-	KASSERT(vlapic->vcpuid >= 0 && vlapic->vcpuid < VM_MAXCPU,
+	KASSERT(vlapic->vcpuid >= 0 &&
+	    vlapic->vcpuid < vm_get_maxcpus(vlapic->vm),
 	    ("vlapic_init: vcpuid is not initialized"));
 	KASSERT(vlapic->apic_page != NULL, ("vlapic_init: apic_page is not "
 	    "initialized"));

Modified: stable/11/sys/amd64/vmm/vmm.c
==============================================================================
--- stable/11/sys/amd64/vmm/vmm.c	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/vmm/vmm.c	Sat May 25 11:27:56 2019	(r348271)
@@ -273,7 +273,7 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
 {
 	struct vcpu *vcpu;
 
-	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
+	KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
 	    ("vcpu_init: invalid vcpu %d", vcpu_id));
 	  
 	vcpu = &vm->vcpu[vcpu_id];
@@ -312,7 +312,7 @@ vm_exitinfo(struct vm *vm, int cpuid)
 {
 	struct vcpu *vcpu;
 
-	if (cpuid < 0 || cpuid >= VM_MAXCPU)
+	if (cpuid < 0 || cpuid >= vm->maxcpus)
 		panic("vm_exitinfo: invalid cpuid %d", cpuid);
 
 	vcpu = &vm->vcpu[cpuid];
@@ -424,7 +424,7 @@ vm_init(struct vm *vm, bool create)
 	vm->suspend = 0;
 	CPU_ZERO(&vm->suspended_cpus);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < vm->maxcpus; i++)
 		vcpu_init(vm, i, create);
 }
 
@@ -462,7 +462,7 @@ vm_create(const char *name, struct vm **retvm)
 	vm->sockets = 1;
 	vm->cores = cores_per_package;	/* XXX backwards compatibility */
 	vm->threads = threads_per_core;	/* XXX backwards compatibility */
-	vm->maxcpus = 0;		/* XXX not implemented */
+	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */
 
 	vm_init(vm, true);
 
@@ -480,19 +480,25 @@ vm_get_topology(struct vm *vm, uint16_t *sockets, uint
 	*maxcpus = vm->maxcpus;
 }
 
+uint16_t
+vm_get_maxcpus(struct vm *vm)
+{
+	return (vm->maxcpus);
+}
+
 int
 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
     uint16_t threads, uint16_t maxcpus)
 {
 	if (maxcpus != 0)
 		return (EINVAL);	/* XXX remove when supported */
-	if ((sockets * cores * threads) > VM_MAXCPU)
+	if ((sockets * cores * threads) > vm->maxcpus)
 		return (EINVAL);
 	/* XXX need to check sockets * cores * threads == vCPU, how? */
 	vm->sockets = sockets;
 	vm->cores = cores;
 	vm->threads = threads;
-	vm->maxcpus = maxcpus;
+	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */
 	return(0);
 }
 
@@ -517,7 +523,7 @@ vm_cleanup(struct vm *vm, bool destroy)
 	vatpic_cleanup(vm->vatpic);
 	vioapic_cleanup(vm->vioapic);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < vm->maxcpus; i++)
 		vcpu_cleanup(vm, i, destroy);
 
 	VMCLEANUP(vm->cookie);
@@ -952,9 +958,9 @@ vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa,
 	 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
 	 */
 	int state;
-	KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
+	KASSERT(vcpuid >= -1 && vcpuid < vm->maxcpus, ("%s: invalid vcpuid %d",
 	    __func__, vcpuid));
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (vcpuid != -1 && vcpuid != i)
 			continue;
 		state = vcpu_get_state(vm, i, NULL);
@@ -1000,7 +1006,7 @@ int
 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (reg >= VM_REG_LAST)
@@ -1015,7 +1021,7 @@ vm_set_register(struct vm *vm, int vcpuid, int reg, ui
 	struct vcpu *vcpu;
 	int error;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (reg >= VM_REG_LAST)
@@ -1069,7 +1075,7 @@ vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
 		struct seg_desc *desc)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
@@ -1082,7 +1088,7 @@ int
 vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
 		struct seg_desc *desc)
 {
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
@@ -1254,7 +1260,7 @@ static void
 vm_handle_rendezvous(struct vm *vm, int vcpuid)
 {
 
-	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
 	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));
 
 	mtx_lock(&vm->rendezvous_mtx);
@@ -1530,7 +1536,7 @@ vm_handle_suspend(struct vm *vm, int vcpuid, bool *ret
 	/*
 	 * Wakeup the other sleeping vcpus and return to userspace.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &vm->suspended_cpus)) {
 			vcpu_notify_event(vm, i, false);
 		}
@@ -1572,7 +1578,7 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
 	/*
 	 * Notify all active vcpus that they are now suspended.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &vm->active_cpus))
 			vcpu_notify_event(vm, i, false);
 	}
@@ -1647,7 +1653,7 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
 
 	vcpuid = vmrun->cpuid;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
@@ -1748,7 +1754,7 @@ vm_restart_instruction(void *arg, int vcpuid)
 	int error;
 
 	vm = arg;
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1787,7 +1793,7 @@ vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t in
 	struct vcpu *vcpu;
 	int type, vector;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1928,7 +1934,8 @@ vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *
 	uint64_t info1, info2;
 	int valid;
 
-	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
+	KASSERT(vcpuid >= 0 &&
+	    vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid));
 
 	vcpu = &vm->vcpu[vcpuid];
 
@@ -1968,7 +1975,7 @@ vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *in
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1985,7 +1992,7 @@ vm_inject_exception(struct vm *vm, int vcpuid, int vec
 	uint64_t regval;
 	int error;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (vector < 0 || vector >= 32)
@@ -2076,7 +2083,7 @@ vm_inject_nmi(struct vm *vm, int vcpuid)
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2091,7 +2098,7 @@ vm_nmi_pending(struct vm *vm, int vcpuid)
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2104,7 +2111,7 @@ vm_nmi_clear(struct vm *vm, int vcpuid)
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2123,7 +2130,7 @@ vm_inject_extint(struct vm *vm, int vcpuid)
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2138,7 +2145,7 @@ vm_extint_pending(struct vm *vm, int vcpuid)
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2151,7 +2158,7 @@ vm_extint_clear(struct vm *vm, int vcpuid)
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2166,7 +2173,7 @@ vm_extint_clear(struct vm *vm, int vcpuid)
 int
 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
 {
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (type < 0 || type >= VM_CAP_MAX)
@@ -2178,7 +2185,7 @@ vm_get_capability(struct vm *vm, int vcpu, int type, i
 int
 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
 {
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (type < 0 || type >= VM_CAP_MAX)
@@ -2263,7 +2270,7 @@ vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_st
 	int error;
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2281,7 +2288,7 @@ vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu
 	struct vcpu *vcpu;
 	enum vcpu_state state;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2299,7 +2306,7 @@ int
 vm_activate_cpu(struct vm *vm, int vcpuid)
 {
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (CPU_ISSET(vcpuid, &vm->active_cpus))
@@ -2334,7 +2341,7 @@ vcpu_stats(struct vm *vm, int vcpuid)
 int
 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
 {
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	*state = vm->vcpu[vcpuid].x2apic_state;
@@ -2345,7 +2352,7 @@ vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2
 int
 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
 {
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (state >= X2APIC_STATE_LAST)
@@ -2432,7 +2439,7 @@ vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t 
 	 * Enforce that this function is called without any locks
 	 */
 	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
-	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
 	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
 
 restart:
@@ -2462,7 +2469,7 @@ restart:
 	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
 	 * vcpus so they handle the rendezvous as soon as possible.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &dest))
 			vcpu_notify_event(vm, i, false);
 	}

Modified: stable/11/sys/amd64/vmm/vmm_dev.c
==============================================================================
--- stable/11/sys/amd64/vmm/vmm_dev.c	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/vmm/vmm_dev.c	Sat May 25 11:27:56 2019	(r348271)
@@ -94,7 +94,7 @@ vcpu_lock_one(struct vmmdev_softc *sc, int vcpu)
 {
 	int error;
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm))
 		return (EINVAL);
 
 	error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
@@ -119,8 +119,10 @@ static int
 vcpu_lock_all(struct vmmdev_softc *sc)
 {
 	int error, vcpu;
+	uint16_t maxcpus;
 
-	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
+	maxcpus = vm_get_maxcpus(sc->vm);
+	for (vcpu = 0; vcpu < maxcpus; vcpu++) {
 		error = vcpu_lock_one(sc, vcpu);
 		if (error)
 			break;
@@ -138,8 +140,10 @@ static void
 vcpu_unlock_all(struct vmmdev_softc *sc)
 {
 	int vcpu;
+	uint16_t maxcpus;
 
-	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
+	maxcpus = vm_get_maxcpus(sc->vm);
+	for (vcpu = 0; vcpu < maxcpus; vcpu++)
 		vcpu_unlock_one(sc, vcpu);
 }
 
@@ -174,6 +178,7 @@ vmmdev_rw(struct cdev *cdev, struct uio *uio, int flag
 	vm_paddr_t gpa, maxaddr;
 	void *hpa, *cookie;
 	struct vmmdev_softc *sc;
+	uint16_t lastcpu;
 
 	sc = vmmdev_lookup2(cdev);
 	if (sc == NULL)
@@ -182,7 +187,8 @@ vmmdev_rw(struct cdev *cdev, struct uio *uio, int flag
 	/*
 	 * Get a read lock on the guest memory map by freezing any vcpu.
 	 */
-	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	lastcpu = vm_get_maxcpus(sc->vm) - 1;
+	error = vcpu_lock_one(sc, lastcpu);
 	if (error)
 		return (error);
 
@@ -201,7 +207,8 @@ vmmdev_rw(struct cdev *cdev, struct uio *uio, int flag
 		 * Since this device does not support lseek(2), dd(1) will
 		 * read(2) blocks of data to simulate the lseek(2).
 		 */
-		hpa = vm_gpa_hold(sc->vm, VM_MAXCPU - 1, gpa, c, prot, &cookie);
+		hpa = vm_gpa_hold(sc->vm, lastcpu, gpa, c,
+		    prot, &cookie);
 		if (hpa == NULL) {
 			if (uio->uio_rw == UIO_READ && gpa < maxaddr)
 				error = uiomove(__DECONST(void *, zero_region),
@@ -213,7 +220,7 @@ vmmdev_rw(struct cdev *cdev, struct uio *uio, int flag
 			vm_gpa_release(cookie);
 		}
 	}
-	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(sc, lastcpu);
 	return (error);
 }
 
@@ -377,7 +384,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t da
 		 * Lock a vcpu to make sure that the memory map cannot be
 		 * modified while it is being inspected.
 		 */
-		vcpu = VM_MAXCPU - 1;
+		vcpu = vm_get_maxcpus(sc->vm) - 1;
 		error = vcpu_lock_one(sc, vcpu);
 		if (error)
 			goto done;
@@ -678,6 +685,7 @@ vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *of
 	size_t len;
 	vm_ooffset_t segoff, first, last;
 	int error, found, segid;
+	uint16_t lastcpu;
 	bool sysmem;
 
 	first = *offset;
@@ -694,7 +702,8 @@ vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *of
 	/*
 	 * Get a read lock on the guest memory map by freezing any vcpu.
 	 */
-	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	lastcpu = vm_get_maxcpus(sc->vm) - 1;
+	error = vcpu_lock_one(sc, lastcpu);
 	if (error)
 		return (error);
 
@@ -723,7 +732,7 @@ vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *of
 			error = EINVAL;
 		}
 	}
-	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(sc, lastcpu);
 	return (error);
 }
 
@@ -910,6 +919,7 @@ devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *of
 	vm_ooffset_t first, last;
 	size_t seglen;
 	int error;
+	uint16_t lastcpu;
 	bool sysmem;
 
 	dsc = cdev->si_drv1;
@@ -923,7 +933,8 @@ devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *of
 	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
 		return (EINVAL);
 
-	error = vcpu_lock_one(dsc->sc, VM_MAXCPU - 1);
+	lastcpu = vm_get_maxcpus(dsc->sc->vm) - 1;
+	error = vcpu_lock_one(dsc->sc, lastcpu);
 	if (error)
 		return (error);
 
@@ -931,7 +942,7 @@ devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *of
 	KASSERT(error == 0 && !sysmem && *objp != NULL,
 	    ("%s: invalid devmem segment %d", __func__, dsc->segid));
 
-	vcpu_unlock_one(dsc->sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(dsc->sc, lastcpu);
 
 	if (seglen >= last) {
 		vm_object_reference(*objp);

Modified: stable/11/sys/amd64/vmm/vmm_lapic.c
==============================================================================
--- stable/11/sys/amd64/vmm/vmm_lapic.c	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/vmm/vmm_lapic.c	Sat May 25 11:27:56 2019	(r348271)
@@ -54,7 +54,7 @@ lapic_set_intr(struct vm *vm, int cpu, int vector, boo
 {
 	struct vlapic *vlapic;
 
-	if (cpu < 0 || cpu >= VM_MAXCPU)
+	if (cpu < 0 || cpu >= vm_get_maxcpus(vm))
 		return (EINVAL);
 
 	/*
@@ -77,7 +77,7 @@ lapic_set_local_intr(struct vm *vm, int cpu, int vecto
 	cpuset_t dmask;
 	int error;
 
-	if (cpu < -1 || cpu >= VM_MAXCPU)
+	if (cpu < -1 || cpu >= vm_get_maxcpus(vm))
 		return (EINVAL);
 
 	if (cpu == -1)

Modified: stable/11/sys/amd64/vmm/vmm_stat.c
==============================================================================
--- stable/11/sys/amd64/vmm/vmm_stat.c	Sat May 25 11:22:49 2019	(r348270)
+++ stable/11/sys/amd64/vmm/vmm_stat.c	Sat May 25 11:27:56 2019	(r348271)
@@ -86,7 +86,7 @@ vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats,
 	uint64_t *stats;
 	int i;
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm_get_maxcpus(vm))
 		return (EINVAL);
 
 	/* Let stats functions update their counters */


