Date:      Wed, 18 Sep 2013 17:18:19 +0000 (UTC)
From:      Neel Natu <neel@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r255669 - in projects/bhyve_npt_pmap/sys/amd64: amd64 vmm vmm/intel
Message-ID:  <201309181718.r8IHIKOn032549@svn.freebsd.org>

Author: neel
Date: Wed Sep 18 17:18:19 2013
New Revision: 255669
URL: http://svnweb.freebsd.org/changeset/base/255669

Log:
  Make multi-line KASSERTs style(9) compliant by using 4 spaces to indent the
  continuation lines.
  
  Pointed out by:	alc@
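
(For illustration only: the style(9) rule applied throughout this change is
that a wrapped statement's continuation lines are indented by four extra
spaces, rather than being pushed out with tabs to line up under the opening
parenthesis.  The stand-alone sketch below shows one of the KASSERTs touched
in pmap.c in both forms; the KASSERT stand-in macro and the sample values of
entry and x86_pat_bits are placeholders so the fragment compiles outside the
kernel.)

#include <stdio.h>
#include <stdlib.h>

/*
 * Stand-in for the kernel's KASSERT() from <sys/systm.h>, present only so
 * this fragment builds in userland; "msg" is a parenthesized printf-style
 * argument list, just as with the real macro.
 */
#define	KASSERT(exp, msg)	do {		\
	if (!(exp)) {				\
		printf msg;			\
		printf("\n");			\
		abort();			\
	}					\
} while (0)

int
main(void)
{
	/* Hypothetical values; the real masks live in amd64/pmap.c. */
	unsigned long entry = 0x18;
	unsigned long x86_pat_bits = 0x88;

	/*
	 * Before this commit the continuation line was pushed out with
	 * tabs, roughly under the opening parenthesis:
	 *
	 *	KASSERT((entry & x86_pat_bits) != x86_pat_bits,
	 *		("Invalid PAT bits in entry %#lx", entry));
	 *
	 * style(9) asks for four spaces of continuation indent instead:
	 */
	KASSERT((entry & x86_pat_bits) != x86_pat_bits,
	    ("Invalid PAT bits in entry %#lx", entry));

	return (0);
}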

Modified:
  projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c

Modified: projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c	Wed Sep 18 16:39:01 2013	(r255668)
+++ projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c	Wed Sep 18 17:18:19 2013	(r255669)
@@ -1064,7 +1064,7 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t en
 	case PT_X86:
 		/* Verify that both PAT bits are not set at the same time */
 		KASSERT((entry & x86_pat_bits) != x86_pat_bits,
-			("Invalid PAT bits in entry %#lx", entry));
+		    ("Invalid PAT bits in entry %#lx", entry));
 
 		/* Swap the PAT bits if one of them is set */
 		if ((entry & x86_pat_bits) != 0)
@@ -1278,7 +1278,7 @@ pmap_invalidate_ept(pmap_t pmap)
 {
 
 	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
-		("pmap_invalidate_ept: absurd pm_active"));
+	    ("pmap_invalidate_ept: absurd pm_active"));
 
 	/*
 	 * The TLB mappings associated with a vcpu context are not
@@ -2738,7 +2738,8 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 					KASSERT((tpte & PG_RO) == 0,
-					  ("readonly modified PTE %#lx", tpte));
+					    ("readonly modified PTE %#lx",
+					    tpte));
 					vm_page_dirty(m);
 				}
 				if ((tpte & PG_A) != 0)
@@ -3450,7 +3451,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t 
 		    va < eva; va += PAGE_SIZE, m++) {
 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 				KASSERT((oldpde & PG_RO) == 0,
-					("readonly modified PDE %#lx", oldpde));
+				    ("readonly modified PDE %#lx", oldpde));
 				vm_page_dirty(m);
 			}
 			if (oldpde & PG_A)
@@ -3500,7 +3501,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t 
 		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 			KASSERT((oldpte & PG_RO) == 0,
-				("readonly modified PTE %#lx", oldpte));
+			    ("readonly modified PTE %#lx", oldpte));
 			vm_page_dirty(m);
 		}
 		if (oldpte & PG_A)
@@ -3750,7 +3751,7 @@ small_mappings:
 		 */
 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 			KASSERT((tpte & PG_RO) == 0,
-				("readonly modified PTE %#lx", tpte));
+			    ("readonly modified PTE %#lx", tpte));
 			vm_page_dirty(m);
 		}
 		pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
@@ -3792,7 +3793,7 @@ retry:
 		    va < eva; va += PAGE_SIZE, m++)
 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 				KASSERT((oldpde & PG_RO) == 0,
-					("readonly modified PDE %#lx", oldpde));
+				    ("readonly modified PDE %#lx", oldpde));
 				vm_page_dirty(m);
 			}
 	}
@@ -3931,8 +3932,8 @@ retry:
 				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
 				    (PG_MANAGED | PG_M | PG_RW)) {
 					KASSERT((pbits & PG_RO) == 0,
-						("readonly modified PTE %#lx",
-						pbits));
+					    ("readonly modified PTE %#lx",
+					    pbits));
 					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 					vm_page_dirty(m);
 				}
@@ -4027,8 +4028,8 @@ setpte:
 		}
 		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 			KASSERT(!pmap_emulate_ad_bits(pmap),
-				("invalid RW/M bits for dirty bit "
-				 "emulation %#lx", oldpte));
+			    ("invalid RW/M bits for dirty bit emulation %#lx",
+			    oldpte));
 			/*
 			 * When PG_M is already clear, PG_RW can be cleared
 			 * without a TLB invalidation.
@@ -4298,8 +4299,8 @@ validate:
 				if ((origpte & (PG_M | PG_RW)) == (PG_M |
 				    PG_RW)) {
 					KASSERT((origpte & PG_RO) == 0,
-						("readonly modified PTE %#lx",
-						origpte));
+					    ("readonly modified PTE %#lx",
+					    origpte));
 					vm_page_dirty(om);
 				}
 				if ((origpte & PG_A) != 0)
@@ -5269,8 +5270,8 @@ pmap_remove_pages(pmap_t pmap)
 				 */
 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 					KASSERT((tpte & PG_RO) == 0,
-						("readonly modified PTE %#lx",
-						tpte));
+					    ("readonly modified PTE %#lx",
+					    tpte));
 					if (superpage) {
 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 							vm_page_dirty(mt);
@@ -5558,7 +5559,7 @@ retry:
 				goto retry;
 			if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 				KASSERT((oldpte & PG_RO) == 0,
-					("readonly modified PTE %#lx", oldpte));
+				    ("readonly modified PTE %#lx", oldpte));
 				vm_page_dirty(m);
 			}
 			pmap_invalidate_page(pmap, pv->pv_va);
@@ -5986,8 +5987,8 @@ small_mappings:
 				    ("modified readonly pte %#lx", oldpte));
 			} else {
 				KASSERT((oldpte & (PG_M | PG_RW)) == 0,
-					("invalid RW/M bits for dirty bit "
-					 "emulation %#lx", oldpte));
+				    ("invalid RW/M bits for dirty bit "
+				    "emulation %#lx", oldpte));
 			}
 		}
 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
@@ -6678,8 +6679,8 @@ pmap_emulate_dirty(pmap_t pmap, vm_offse
 		pte = pmap_pde_to_pte(pde, va);
 		if ((*pte & (PG_V | PG_RO)) == PG_V) {
 			KASSERT((*pte & PG_A) != 0,
-				("pmap_emulate_dirty: accessed and valid bits "
-				 "mismatch %#lx", *pte));
+			    ("pmap_emulate_dirty: accessed and valid bits "
+			    "mismatch %#lx", *pte));
 			atomic_set_long(pte, PG_M | PG_RW);
 			rv = 0;		/* success */
 

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c	Wed Sep 18 16:39:01 2013	(r255668)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c	Wed Sep 18 17:18:19 2013	(r255669)
@@ -1543,9 +1543,9 @@ vmx_run(void *arg, int vcpu, register_t 
 	vmexit = vm_exitinfo(vmx->vm, vcpu);
 
 	KASSERT(vmxctx->pmap == pmap,
-		("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
+	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
 	KASSERT(vmxctx->eptp == vmx->eptp,
-		("eptp %p different than ctx eptp %#lx", eptp, vmxctx->eptp));
+	    ("eptp %p different than ctx eptp %#lx", eptp, vmxctx->eptp));
 
 	/*
 	 * XXX Can we avoid doing this every time we do a vm run?

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c	Wed Sep 18 16:39:01 2013	(r255668)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c	Wed Sep 18 17:18:19 2013	(r255669)
@@ -462,8 +462,8 @@ vm_gpa_unwire(struct vm *vm)
 				   seg->gpa, seg->gpa + seg->len,
 				   VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
-			"%#lx/%ld could not be unwired: %d",
-			vm_name(vm), seg->gpa, seg->len, rv));
+		    "%#lx/%ld could not be unwired: %d",
+		    vm_name(vm), seg->gpa, seg->len, rv));
 
 		seg->wired = FALSE;
 	}
@@ -514,16 +514,15 @@ vm_iommu_modify(struct vm *vm, boolean_t
 
 	for (i = 0; i < vm->num_mem_segs; i++) {
 		seg = &vm->mem_segs[i];
-		KASSERT(seg->wired,
-			("vm(%s) memory segment %#lx/%ld not wired",
-			vm_name(vm), seg->gpa, seg->len));
-		
+		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
+		    vm_name(vm), seg->gpa, seg->len));
+
 		gpa = seg->gpa;
 		while (gpa < seg->gpa + seg->len) {
 			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
 					 &cookie);
 			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
-				vm_name(vm), gpa));
+			    vm_name(vm), gpa));
 
 			vm_gpa_release(cookie);
 
@@ -584,7 +583,7 @@ vm_assign_pptdev(struct vm *vm, int bus,
 	 */
 	if (ppt_num_devices(vm) == 0) {
 		KASSERT(vm->iommu == NULL,
-			("vm_assign_pptdev: iommu must be NULL"));
+		    ("vm_assign_pptdev: iommu must be NULL"));
 		maxaddr = vmm_mem_maxaddr();
 		vm->iommu = iommu_create_domain(maxaddr);
 
@@ -906,10 +905,9 @@ vm_handle_paging(struct vm *vm, int vcpu
 	vme = &vcpu->exitinfo;
 
 	ftype = vme->u.paging.fault_type;
-	KASSERT(ftype == VM_PROT_WRITE ||
-		ftype == VM_PROT_EXECUTE ||
-		ftype == VM_PROT_READ,
-		("vm_handle_paging: invalid fault_type %d", ftype));
+	KASSERT(ftype == VM_PROT_READ ||
+	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
+	    ("vm_handle_paging: invalid fault_type %d", ftype));
 
 	/*
 	 * If the mapping exists then the write fault may be intentional
@@ -1007,7 +1005,7 @@ restart:
 	critical_enter();
 
 	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
-		("vm_run: absurd pm_active"));
+	    ("vm_run: absurd pm_active"));
 
 	tscval = rdtsc();
 


