Date:      Thu, 24 Jul 2014 16:33:30 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r269073 - stable/10/sys/kern
Message-ID:  <201407241633.s6OGXUiS034217@svn.freebsd.org>

Author: kib
Date: Thu Jul 24 16:33:29 2014
New Revision: 269073
URL: http://svnweb.freebsd.org/changeset/base/269073

Log:
  MFC r268466:
  Calculate the number of resident pages by walking the chain of objects
  backing the region.  Add a knob to disable the residency calculation
  entirely.
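
  As an illustration (not part of this commit), the new knob appears as
  the sysctl kern.proc_vmmap_skip_resident_count and can be toggled from
  userland, e.g. with sysctlbyname(3).  A minimal sketch:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
        int old, skip;
        size_t oldlen = sizeof(old);

        /* Read the current value of the knob. */
        if (sysctlbyname("kern.proc_vmmap_skip_resident_count",
            &old, &oldlen, NULL, 0) == -1)
            err(1, "sysctlbyname(read)");
        printf("current value: %d\n", old);

        /* Skip the residency calculation from now on (needs root). */
        skip = 1;
        if (sysctlbyname("kern.proc_vmmap_skip_resident_count",
            NULL, NULL, &skip, sizeof(skip)) == -1)
            err(1, "sysctlbyname(write)");
        return (0);
    }

  The same effect is available from the command line with
  "sysctl kern.proc_vmmap_skip_resident_count=1".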
  
  MFC r268490:
  Unconditionally initialize addr to handle the case of the map
  timestamp changing while the map is unlocked.
  
  MFC r268711:
  Change the calculation of the kinfo_vmentry field kve_private_resident
  to reflect its name.
  
  MFC r268712:
  Follow-up to r268466.
  - Move the code that calculates the resident count into a separate
    function.  This reduces the indentation level and makes the effect
    of the vmmap_skip_res_cnt tunable clearer.
  - Optimize the calculation of the resident page count for a map
    entry: skip directly to the next lowest available index and page
    across the whole shadow chain.
  - Restore the use of pmap_mincore(9), but only to verify that the
    current mapping is indeed a superpage.
  - Note the issue with invalid pages.
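
  For reference, the per-entry counters recalculated above (kve_resident,
  kve_private_resident, and the KVME_FLAG_SUPER flag) can be inspected from
  userland.  A minimal sketch, assuming libutil's kinfo_getvmmap(3) wrapper
  around kern.proc.vmmap (link with -lutil):

    #include <sys/types.h>
    #include <sys/user.h>
    #include <libutil.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(int argc, char **argv)
    {
        struct kinfo_vmentry *kve;
        int cnt, i;

        if (argc != 2) {
            fprintf(stderr, "usage: %s pid\n", argv[0]);
            exit(1);
        }
        /* Fetch kern.proc.vmmap entries for the given pid. */
        kve = kinfo_getvmmap((pid_t)atoi(argv[1]), &cnt);
        if (kve == NULL)
            exit(1);
        for (i = 0; i < cnt; i++) {
            printf("%#jx-%#jx resident %d private %d%s\n",
                (uintmax_t)kve[i].kve_start,
                (uintmax_t)kve[i].kve_end,
                kve[i].kve_resident, kve[i].kve_private_resident,
                (kve[i].kve_flags & KVME_FLAG_SUPER) != 0 ?
                " [super]" : "");
        }
        free(kve);
        return (0);
    }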

Modified:
  stable/10/sys/kern/kern_proc.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/kern/kern_proc.c
==============================================================================
--- stable/10/sys/kern/kern_proc.c	Thu Jul 24 16:29:44 2014	(r269072)
+++ stable/10/sys/kern/kern_proc.c	Thu Jul 24 16:33:29 2014	(r269073)
@@ -142,6 +142,10 @@ uma_zone_t proc_zone;
 int kstack_pages = KSTACK_PAGES;
 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
     "Kernel stack size in pages");
+static int vmmap_skip_res_cnt = 0;
+SYSCTL_INT(_kern, OID_AUTO, proc_vmmap_skip_resident_count, CTLFLAG_RW,
+    &vmmap_skip_res_cnt, 0,
+    "Skip calculation of the pages resident count in kern.proc.vmmap");
 
 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
 #ifdef COMPAT_FREEBSD32
@@ -2130,6 +2134,66 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_A
 CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
 #endif
 
+static void
+kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry,
+    struct kinfo_vmentry *kve)
+{
+	vm_object_t obj, tobj;
+	vm_page_t m, m_adv;
+	vm_offset_t addr;
+	vm_paddr_t locked_pa;
+	vm_pindex_t pi, pi_adv, pindex;
+
+	locked_pa = 0;
+	obj = entry->object.vm_object;
+	addr = entry->start;
+	m_adv = NULL;
+	pi = OFF_TO_IDX(entry->offset + addr - entry->start);
+	for (; addr < entry->end; addr += IDX_TO_OFF(pi_adv), pi += pi_adv) {
+		if (m_adv != NULL) {
+			m = m_adv;
+		} else {
+			pi_adv = OFF_TO_IDX(entry->end - addr);
+			pindex = pi;
+			for (tobj = obj;; tobj = tobj->backing_object) {
+				m = vm_page_find_least(tobj, pindex);
+				if (m != NULL) {
+					if (m->pindex == pindex)
+						break;
+					if (pi_adv > m->pindex - pindex) {
+						pi_adv = m->pindex - pindex;
+						m_adv = m;
+					}
+				}
+				if (tobj->backing_object == NULL)
+					goto next;
+				pindex += OFF_TO_IDX(tobj->
+				    backing_object_offset);
+			}
+		}
+		m_adv = NULL;
+		if (m->psind != 0 && addr + pagesizes[1] <= entry->end &&
+		    (addr & (pagesizes[1] - 1)) == 0 &&
+		    (pmap_mincore(map->pmap, addr, &locked_pa) &
+		    MINCORE_SUPER) != 0) {
+			kve->kve_flags |= KVME_FLAG_SUPER;
+			pi_adv = OFF_TO_IDX(pagesizes[1]);
+		} else {
+			/*
+			 * We do not test the found page on validity.
+			 * Either the page is busy and being paged in,
+			 * or it was invalidated.  The first case
+			 * should be counted as resident, the second
+			 * is not so clear; we do account both.
+			 */
+			pi_adv = 1;
+		}
+		kve->kve_resident += pi_adv;
+next:;
+	}
+	PA_UNLOCK_COND(locked_pa);
+}
+
 /*
  * Must be called with the process locked and will return unlocked.
  */
@@ -2137,15 +2201,17 @@ int
 kern_proc_vmmap_out(struct proc *p, struct sbuf *sb)
 {
 	vm_map_entry_t entry, tmp_entry;
-	unsigned int last_timestamp;
+	struct vattr va;
+	vm_map_t map;
+	vm_object_t obj, tobj, lobj;
 	char *fullpath, *freepath;
 	struct kinfo_vmentry *kve;
-	struct vattr va;
 	struct ucred *cred;
-	int error;
 	struct vnode *vp;
 	struct vmspace *vm;
-	vm_map_t map;
+	vm_offset_t addr;
+	unsigned int last_timestamp;
+	int error;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 
@@ -2163,44 +2229,30 @@ kern_proc_vmmap_out(struct proc *p, stru
 	vm_map_lock_read(map);
 	for (entry = map->header.next; entry != &map->header;
 	    entry = entry->next) {
-		vm_object_t obj, tobj, lobj;
-		vm_offset_t addr;
-		vm_paddr_t locked_pa;
-		int mincoreinfo;
-
 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 			continue;
 
+		addr = entry->end;
 		bzero(kve, sizeof(*kve));
-
-		kve->kve_private_resident = 0;
 		obj = entry->object.vm_object;
 		if (obj != NULL) {
-			VM_OBJECT_RLOCK(obj);
-			if (obj->shadow_count == 1)
+			for (tobj = obj; tobj != NULL;
+			    tobj = tobj->backing_object) {
+				VM_OBJECT_RLOCK(tobj);
+				lobj = tobj;
+			}
+			if (obj->backing_object == NULL)
 				kve->kve_private_resident =
 				    obj->resident_page_count;
-		}
-		kve->kve_resident = 0;
-		addr = entry->start;
-		while (addr < entry->end) {
-			locked_pa = 0;
-			mincoreinfo = pmap_mincore(map->pmap, addr, &locked_pa);
-			if (locked_pa != 0)
-				vm_page_unlock(PHYS_TO_VM_PAGE(locked_pa));
-			if (mincoreinfo & MINCORE_INCORE)
-				kve->kve_resident++;
-			if (mincoreinfo & MINCORE_SUPER)
-				kve->kve_flags |= KVME_FLAG_SUPER;
-			addr += PAGE_SIZE;
-		}
-
-		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
-			if (tobj != obj)
-				VM_OBJECT_RLOCK(tobj);
-			if (lobj != obj)
-				VM_OBJECT_RUNLOCK(lobj);
-			lobj = tobj;
+			if (!vmmap_skip_res_cnt)
+				kern_proc_vmmap_resident(map, entry, kve);
+			for (tobj = obj; tobj != NULL;
+			    tobj = tobj->backing_object) {
+				if (tobj != obj && tobj != lobj)
+					VM_OBJECT_RUNLOCK(tobj);
+			}
+		} else {
+			lobj = NULL;
 		}
 
 		kve->kve_start = entry->start;
@@ -2230,7 +2282,7 @@ kern_proc_vmmap_out(struct proc *p, stru
 
 		freepath = NULL;
 		fullpath = "";
-		if (lobj) {
+		if (lobj != NULL) {
 			vp = NULL;
 			switch (lobj->type) {
 			case OBJT_DEFAULT:


