Skip site navigation (1)Skip section navigation (2)
Date:      Sun, 17 Aug 2014 01:23:53 +0000 (UTC)
From:      Peter Grehan <grehan@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r270074 - in stable/10: lib/libvmmapi sys/amd64/include sys/amd64/vmm sys/amd64/vmm/intel usr.sbin/bhyve usr.sbin/bhyveload
Message-ID:  <201408170123.s7H1Nr4s080866@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: grehan
Date: Sun Aug 17 01:23:52 2014
New Revision: 270074
URL: http://svnweb.freebsd.org/changeset/base/270074

Log:
  MFC r267311, r267330, r267811, r267884
  
  Turn on interrupt window exiting unconditionally when an ExtINT is being
  injected into the guest.
  
  Add helper functions to populate VM exit information for rendezvous and
  astpending exits.
  
  Provide APIs to get the 'lowmem' and 'highmem' sizes directly.
  
  Expose the amount of resident and wired memory from the guest's vmspace.

Modified:
  stable/10/lib/libvmmapi/vmmapi.c
  stable/10/lib/libvmmapi/vmmapi.h
  stable/10/sys/amd64/include/vmm.h
  stable/10/sys/amd64/vmm/intel/vmx.c
  stable/10/sys/amd64/vmm/vmm.c
  stable/10/sys/amd64/vmm/vmm_stat.c
  stable/10/sys/amd64/vmm/vmm_stat.h
  stable/10/usr.sbin/bhyve/pci_emul.c
  stable/10/usr.sbin/bhyve/rtc.c
  stable/10/usr.sbin/bhyve/smbiostbl.c
  stable/10/usr.sbin/bhyveload/bhyveload.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/lib/libvmmapi/vmmapi.c
==============================================================================
--- stable/10/lib/libvmmapi/vmmapi.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/lib/libvmmapi/vmmapi.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -274,6 +274,20 @@ vm_map_gpa(struct vmctx *ctx, vm_paddr_t
 	return (NULL);
 }
 
+size_t
+vm_get_lowmem_size(struct vmctx *ctx)
+{
+
+	return (ctx->lowmem);
+}
+
+size_t
+vm_get_highmem_size(struct vmctx *ctx)
+{
+
+	return (ctx->highmem);
+}
+
 int
 vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
 	    uint64_t base, uint32_t limit, uint32_t access)

Modified: stable/10/lib/libvmmapi/vmmapi.h
==============================================================================
--- stable/10/lib/libvmmapi/vmmapi.h	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/lib/libvmmapi/vmmapi.h	Sun Aug 17 01:23:52 2014	(r270074)
@@ -60,6 +60,8 @@ int	vm_get_gpa_pmap(struct vmctx *, uint
 uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
 void	vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
 void	vm_set_memflags(struct vmctx *ctx, int flags);
+size_t	vm_get_lowmem_size(struct vmctx *ctx);
+size_t	vm_get_highmem_size(struct vmctx *ctx);
 int	vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
 		    uint64_t base, uint32_t limit, uint32_t access);
 int	vm_get_desc(struct vmctx *ctx, int vcpu, int reg,

Modified: stable/10/sys/amd64/include/vmm.h
==============================================================================
--- stable/10/sys/amd64/include/vmm.h	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/sys/amd64/include/vmm.h	Sun Aug 17 01:23:52 2014	(r270074)
@@ -146,6 +146,8 @@ cpuset_t vm_active_cpus(struct vm *vm);
 cpuset_t vm_suspended_cpus(struct vm *vm);
 struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
 void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
+void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
+void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
 
 /*
  * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.

Modified: stable/10/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- stable/10/sys/amd64/vmm/intel/vmx.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/sys/amd64/vmm/intel/vmx.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -1327,9 +1327,13 @@ vmx_inject_interrupts(struct vmx *vmx, i
 		 * have posted another one.  If that is the case, set
 		 * the Interrupt Window Exiting execution control so
 		 * we can inject that one too.
+		 *
+		 * Also, interrupt window exiting allows us to inject any
+		 * pending APIC vector that was preempted by the ExtINT
+		 * as soon as possible. This applies both for the software
+		 * emulated vlapic and the hardware assisted virtual APIC.
 		 */
-		if (vm_extint_pending(vmx->vm, vcpu))
-			vmx_set_int_window_exiting(vmx, vcpu);
+		vmx_set_int_window_exiting(vmx, vcpu);
 	}
 
 	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
@@ -2275,32 +2279,7 @@ vmx_exit_process(struct vmx *vmx, int vc
 	return (handled);
 }
 
-static __inline int
-vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
-{
-
-	vmexit->rip = vmcs_guest_rip();
-	vmexit->inst_length = 0;
-	vmexit->exitcode = VM_EXITCODE_BOGUS;
-	vmx_astpending_trace(vmx, vcpu, vmexit->rip);
-	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
-
-	return (HANDLED);
-}
-
-static __inline int
-vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
-{
-
-	vmexit->rip = vmcs_guest_rip();
-	vmexit->inst_length = 0;
-	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
-	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1);
-
-	return (UNHANDLED);
-}
-
-static __inline int
+static __inline void
 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
 {
 
@@ -2324,8 +2303,6 @@ vmx_exit_inst_error(struct vmxctx *vmxct
 	default:
 		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
 	}
-
-	return (UNHANDLED);
 }
 
 /*
@@ -2398,6 +2375,8 @@ vmx_run(void *arg, int vcpu, register_t 
 	vmcs_write(VMCS_GUEST_RIP, startrip);
 	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
 	do {
+		handled = UNHANDLED;
+
 		/*
 		 * Interrupts are disabled from this point on until the
 		 * guest starts executing. This is done for the following
@@ -2420,19 +2399,20 @@ vmx_run(void *arg, int vcpu, register_t 
 		if (vcpu_suspended(suspend_cookie)) {
 			enable_intr();
 			vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
-			handled = UNHANDLED;
 			break;
 		}
 
 		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
 			enable_intr();
-			handled = vmx_exit_rendezvous(vmx, vcpu, vmexit);
+			vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
 			break;
 		}
 
 		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
 			enable_intr();
-			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
+			vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
+			vmx_astpending_trace(vmx, vcpu, vmexit->rip);
+			handled = HANDLED;
 			break;
 		}
 
@@ -2452,7 +2432,7 @@ vmx_run(void *arg, int vcpu, register_t 
 			handled = vmx_exit_process(vmx, vcpu, vmexit);
 		} else {
 			enable_intr();
-			handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
+			vmx_exit_inst_error(vmxctx, rc, vmexit);
 		}
 		launched = 1;
 		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);

Modified: stable/10/sys/amd64/vmm/vmm.c
==============================================================================
--- stable/10/sys/amd64/vmm/vmm.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/sys/amd64/vmm/vmm.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -1331,6 +1331,32 @@ vm_exit_suspended(struct vm *vm, int vcp
 	vmexit->u.suspended.how = vm->suspend;
 }
 
+void
+vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
+{
+	struct vm_exit *vmexit;
+
+	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
+
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->rip = rip;
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
+	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
+}
+
+void
+vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
+{
+	struct vm_exit *vmexit;
+
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->rip = rip;
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_BOGUS;
+	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
+}
+
 int
 vm_run(struct vm *vm, struct vm_run *vmrun)
 {
@@ -1966,3 +1992,34 @@ vm_segment_name(int seg)
 	    ("%s: invalid segment encoding %d", __func__, seg));
 	return (seg_names[seg]);
 }
+
+
+/*
+ * Return the amount of in-use and wired memory for the VM. Since
+ * these are global stats, only return the values for vCPU 0
+ */
+VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
+VMM_STAT_DECLARE(VMM_MEM_WIRED);
+
+static void
+vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+	if (vcpu == 0) {
+		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
+	       	    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
+	}	
+}
+
+static void
+vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+	if (vcpu == 0) {
+		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
+	      	    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
+	}	
+}
+
+VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
+VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);

Modified: stable/10/sys/amd64/vmm/vmm_stat.c
==============================================================================
--- stable/10/sys/amd64/vmm/vmm_stat.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/sys/amd64/vmm/vmm_stat.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -83,12 +83,21 @@ vmm_stat_register(void *arg)
 int
 vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf)
 {
-	int i;
+	struct vmm_stat_type *vst;
 	uint64_t *stats;
+	int i;
 
 	if (vcpu < 0 || vcpu >= VM_MAXCPU)
 		return (EINVAL);
-		
+
+	/* Let stats functions update their counters */
+	for (i = 0; i < vst_num_types; i++) {
+		vst = vsttab[i];
+		if (vst->func != NULL)
+			(*vst->func)(vm, vcpu, vst);
+	}
+
+	/* Copy over the stats */
 	stats = vcpu_stats(vm, vcpu);
 	for (i = 0; i < vst_num_elems; i++)
 		buf[i] = stats[i];

Modified: stable/10/sys/amd64/vmm/vmm_stat.h
==============================================================================
--- stable/10/sys/amd64/vmm/vmm_stat.h	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/sys/amd64/vmm/vmm_stat.h	Sun Aug 17 01:23:52 2014	(r270074)
@@ -42,21 +42,29 @@ enum vmm_stat_scope {
 	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
 };
 
+struct vmm_stat_type;
+typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
+    struct vmm_stat_type *stat);
+
 struct vmm_stat_type {
 	int	index;			/* position in the stats buffer */
 	int	nelems;			/* standalone or array */
 	const char *desc;		/* description of statistic */
+	vmm_stat_func_t func;
 	enum vmm_stat_scope scope;
 };
 
 void	vmm_stat_register(void *arg);
 
-#define	VMM_STAT_DEFINE(type, nelems, desc, scope)			\
+#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
 	struct vmm_stat_type type[1] = {				\
-		{ -1, nelems, desc, scope }				\
+		{ -1, nelems, desc, func, scope }			\
 	};								\
 	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
 
+#define VMM_STAT_DEFINE(type, nelems, desc, scope) 			\
+	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)
+
 #define	VMM_STAT_DECLARE(type)						\
 	extern struct vmm_stat_type type[1]
 
@@ -67,6 +75,9 @@ void	vmm_stat_register(void *arg);
 #define	VMM_STAT_AMD(type, desc)	\
 	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)
 
+#define	VMM_STAT_FUNC(type, desc, func)	\
+	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)
+
 #define	VMM_STAT_ARRAY(type, nelems, desc)	\
 	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
 
@@ -93,9 +104,22 @@ vmm_stat_array_incr(struct vm *vm, int v
 		stats[vst->index + statidx] += x;
 #endif
 }
-		   
 
 static void __inline
+vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
+		   int statidx, uint64_t val)
+{
+#ifdef VMM_KEEP_STATS
+	uint64_t *stats;
+	
+	stats = vcpu_stats(vm, vcpu);
+
+	if (vst->index >= 0 && statidx < vst->nelems)
+		stats[vst->index + statidx] = val;
+#endif
+}
+		   
+static void __inline
 vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
 {
 
@@ -104,6 +128,15 @@ vmm_stat_incr(struct vm *vm, int vcpu, s
 #endif
 }
 
+static void __inline
+vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
+{
+
+#ifdef VMM_KEEP_STATS
+	vmm_stat_array_set(vm, vcpu, vst, 0, val);
+#endif
+}
+
 VMM_STAT_DECLARE(VCPU_MIGRATIONS);
 VMM_STAT_DECLARE(VMEXIT_COUNT);
 VMM_STAT_DECLARE(VMEXIT_EXTINT);

Modified: stable/10/usr.sbin/bhyve/pci_emul.c
==============================================================================
--- stable/10/usr.sbin/bhyve/pci_emul.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/usr.sbin/bhyve/pci_emul.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -1118,8 +1118,7 @@ init_pci(struct vmctx *ctx)
 	 * Accesses to memory addresses that are not allocated to system
 	 * memory or PCI devices return 0xff's.
 	 */
-	error = vm_get_memory_seg(ctx, 0, &lowmem, NULL);
-	assert(error == 0);
+	lowmem = vm_get_lowmem_size(ctx);
 
 	memset(&pci_mem_hole, 0, sizeof(struct mem_range));
 	pci_mem_hole.name = "PCI hole";

Modified: stable/10/usr.sbin/bhyve/rtc.c
==============================================================================
--- stable/10/usr.sbin/bhyve/rtc.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/usr.sbin/bhyve/rtc.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -343,19 +343,14 @@ rtc_init(struct vmctx *ctx)
 	 * 0x34/0x35 - 64KB chunks above 16MB, below 4GB
 	 * 0x5b/0x5c/0x5d - 64KB chunks above 4GB
 	 */
-	err = vm_get_memory_seg(ctx, 0, &lomem, NULL);
-	assert(err == 0);
-
-	lomem = (lomem - m_16MB) / m_64KB;
+	lomem = (vm_get_lowmem_size(ctx) - m_16MB) / m_64KB;
 	rtc_nvram[nvoff(RTC_LMEM_LSB)] = lomem;
 	rtc_nvram[nvoff(RTC_LMEM_MSB)] = lomem >> 8;
 
-	if (vm_get_memory_seg(ctx, m_4GB, &himem, NULL) == 0) {	  
-		himem /= m_64KB;
-		rtc_nvram[nvoff(RTC_HMEM_LSB)] = himem;
-		rtc_nvram[nvoff(RTC_HMEM_SB)]  = himem >> 8;
-		rtc_nvram[nvoff(RTC_HMEM_MSB)] = himem >> 16;
-	}
+	himem = vm_get_highmem_size(ctx) / m_64KB;
+	rtc_nvram[nvoff(RTC_HMEM_LSB)] = himem;
+	rtc_nvram[nvoff(RTC_HMEM_SB)]  = himem >> 8;
+	rtc_nvram[nvoff(RTC_HMEM_MSB)] = himem >> 16;
 }
 
 INOUT_PORT(rtc, IO_RTC, IOPORT_F_INOUT, rtc_addr_handler);

Modified: stable/10/usr.sbin/bhyve/smbiostbl.c
==============================================================================
--- stable/10/usr.sbin/bhyve/smbiostbl.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/usr.sbin/bhyve/smbiostbl.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -779,13 +779,8 @@ smbios_build(struct vmctx *ctx)
 	int				i;
 	int				err;
 
-	err = vm_get_memory_seg(ctx, 0, &guest_lomem, NULL);
-	if (err != 0)
-		return (err);
-
-	err = vm_get_memory_seg(ctx, 4*GB, &guest_himem, NULL);
-	if (err != 0)
-		return (err);
+	guest_lomem = vm_get_lowmem_size(ctx);
+	guest_himem = vm_get_highmem_size(ctx);
 
 	startaddr = paddr_guest2host(ctx, SMBIOS_BASE, SMBIOS_MAX_LENGTH);
 	if (startaddr == NULL) {

Modified: stable/10/usr.sbin/bhyveload/bhyveload.c
==============================================================================
--- stable/10/usr.sbin/bhyveload/bhyveload.c	Sun Aug 17 01:16:40 2014	(r270073)
+++ stable/10/usr.sbin/bhyveload/bhyveload.c	Sun Aug 17 01:23:52 2014	(r270074)
@@ -505,8 +505,8 @@ static void
 cb_getmem(void *arg, uint64_t *ret_lowmem, uint64_t *ret_highmem)
 {
 
-	vm_get_memory_seg(ctx, 0, ret_lowmem, NULL);
-	vm_get_memory_seg(ctx, 4 * GB, ret_highmem, NULL);
+	*ret_lowmem = vm_get_lowmem_size(ctx);
+	*ret_highmem = vm_get_highmem_size(ctx);
 }
 
 struct env {



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201408170123.s7H1Nr4s080866>