Date:      Thu, 26 Nov 2009 05:16:07 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r199819 - in head/sys: kern vm
Message-ID:  <200911260516.nAQ5G7L8071420@svn.freebsd.org>

Author: alc
Date: Thu Nov 26 05:16:07 2009
New Revision: 199819
URL: http://svn.freebsd.org/changeset/base/199819

Log:
  Replace VM_PROT_OVERRIDE_WRITE with VM_PROT_COPY.  VM_PROT_OVERRIDE_WRITE
  represented a write access that is allowed to override write protection.
  Until now, VM_PROT_OVERRIDE_WRITE has been used to write breakpoints into
  text pages.  Text pages are not just write protected; they are also
  copy-on-write.  VM_PROT_OVERRIDE_WRITE overrides the write protection on
  the text page and triggers the replication of the page so that the
  breakpoint will be written to a private copy.  However, here is where
  things become confused.  It is the debugger, not the process being
  debugged, that requires write access to the copied page.  Nonetheless, the
  copied page is mapped into the process with write access enabled.  In
  other words, once the debugger sets a breakpoint within a text page, the
  program can write to its private copy of that text page, whereas before
  the breakpoint was set such a write would have raised SIGSEGV.
  VM_PROT_COPY addresses this problem.  The combination of VM_PROT_READ and
  VM_PROT_COPY forces the replication of a copy-on-write page even though
  the access is only for read.  Moreover, the replicated page is mapped into
  the process with read access only, not write access.
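  
  As an illustration (a minimal sketch, not part of the commit; the function
  name, include set, and surrounding locking are assumptions), a
  debugger-style fault on a page of the traced process's map would now
  request VM_PROT_READ | VM_PROT_COPY when the debugger intends to modify
  the page, so the copy-on-write page is replicated while the target's
  mapping stays read-only:
  
      #include <sys/param.h>
      #include <vm/vm.h>
      #include <vm/vm_extern.h>
      #include <vm/vm_map.h>
  
      /*
       * Sketch: fault in one page of the target's map on behalf of a
       * debugger.  "writing" is nonzero when the debugger will modify the
       * page (e.g. to plant a breakpoint).  Returns a KERN_* status.
       */
      static int
      debugger_fault_page(vm_map_t map, vm_offset_t pageno, int writing)
      {
              vm_prot_t reqprot;
  
              /*
               * VM_PROT_COPY forces replication of a copy-on-write page
               * even though only read access is requested, so the traced
               * process never gains write access to its text page.
               */
              reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
              return (vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL));
      }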
  
  Reviewed by:	kib
  MFC after:	4 weeks

Modified:
  head/sys/kern/sys_process.c
  head/sys/vm/vm.h
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_map.c

Modified: head/sys/kern/sys_process.c
==============================================================================
--- head/sys/kern/sys_process.c	Thu Nov 26 03:26:59 2009	(r199818)
+++ head/sys/kern/sys_process.c	Thu Nov 26 05:16:07 2009	(r199819)
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_kern.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
+#include <vm/vm_pager.h>
 #include <vm/vm_param.h>
 
 #ifdef COMPAT_IA32
@@ -213,10 +214,10 @@ int
 proc_rwmem(struct proc *p, struct uio *uio)
 {
 	vm_map_t map;
-	vm_object_t backing_object, object = NULL;
-	vm_offset_t pageno = 0;		/* page number */
+	vm_object_t backing_object, object;
+	vm_offset_t pageno;		/* page number */
 	vm_prot_t reqprot;
-	int error, fault_flags, writing;
+	int error, writing;
 
 	/*
 	 * Assert that someone has locked this vmspace.  (Should be
@@ -232,9 +233,7 @@ proc_rwmem(struct proc *p, struct uio *u
 	map = &p->p_vmspace->vm_map;
 
 	writing = uio->uio_rw == UIO_WRITE;
-	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
-	    VM_PROT_READ;
-	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL; 
+	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
 
 	/*
 	 * Only map in one page at a time.  We don't have to, but it
@@ -269,7 +268,7 @@ proc_rwmem(struct proc *p, struct uio *u
 		/*
 		 * Fault the page on behalf of the process
 		 */
-		error = vm_fault(map, pageno, reqprot, fault_flags);
+		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
 		if (error) {
 			if (error == KERN_RESOURCE_SHORTAGE)
 				error = ENOMEM;
@@ -279,8 +278,8 @@ proc_rwmem(struct proc *p, struct uio *u
 		}
 
 		/*
-		 * Now we need to get the page.  out_entry, wired,
-		 * and single_use aren't used.  One would think the vm code
+		 * Now we need to get the page.  out_entry and wired
+		 * aren't used.  One would think the vm code
 		 * would be a *bit* nicer...  We use tmap because
 		 * vm_map_lookup() can change the map argument.
 		 */
@@ -303,6 +302,10 @@ proc_rwmem(struct proc *p, struct uio *u
 			VM_OBJECT_UNLOCK(object);
 			object = backing_object;
 		}
+		if (writing && m != NULL) {
+			vm_page_dirty(m);
+			vm_pager_page_unswapped(m);
+		}
 		VM_OBJECT_UNLOCK(object);
 		if (m == NULL) {
 			vm_map_lookup_done(tmap, out_entry);

Modified: head/sys/vm/vm.h
==============================================================================
--- head/sys/vm/vm.h	Thu Nov 26 03:26:59 2009	(r199818)
+++ head/sys/vm/vm.h	Thu Nov 26 05:16:07 2009	(r199819)
@@ -76,7 +76,7 @@ typedef u_char vm_prot_t;	/* protection 
 #define	VM_PROT_READ		((vm_prot_t) 0x01)
 #define	VM_PROT_WRITE		((vm_prot_t) 0x02)
 #define	VM_PROT_EXECUTE		((vm_prot_t) 0x04)
-#define	VM_PROT_OVERRIDE_WRITE	((vm_prot_t) 0x08)	/* copy-on-write */
+#define	VM_PROT_COPY		((vm_prot_t) 0x08)	/* copy-on-read */
 
 #define	VM_PROT_ALL		(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
 #define VM_PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Thu Nov 26 03:26:59 2009	(r199818)
+++ head/sys/vm/vm_fault.c	Thu Nov 26 05:16:07 2009	(r199819)
@@ -702,7 +702,7 @@ vnode_locked:
 		/*
 		 * We only really need to copy if we want to write it.
 		 */
-		if (fault_type & VM_PROT_WRITE) {
+		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
 			/*
 			 * This allows pages to be virtually copied from a 
 			 * backing_object into the first_object, where the 

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c	Thu Nov 26 03:26:59 2009	(r199818)
+++ head/sys/vm/vm_map.c	Thu Nov 26 05:16:07 2009	(r199819)
@@ -3554,14 +3554,8 @@ RetryLookup:;
 
 	/*
 	 * Check whether this task is allowed to have this page.
-	 * Note the special case for MAP_ENTRY_COW
-	 * pages with an override.  This is to implement a forced
-	 * COW for debuggers.
-	 */
-	if (fault_type & VM_PROT_OVERRIDE_WRITE)
-		prot = entry->max_protection;
-	else
-		prot = entry->protection;
+	 */
+	prot = entry->protection;
 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
 		vm_map_unlock_read(map);
@@ -3569,8 +3563,7 @@ RetryLookup:;
 	}
 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
 	    (entry->eflags & MAP_ENTRY_COW) &&
-	    (fault_type & VM_PROT_WRITE) &&
-	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
+	    (fault_type & VM_PROT_WRITE)) {
 		vm_map_unlock_read(map);
 		return (KERN_PROTECTION_FAILURE);
 	}
@@ -3581,7 +3574,7 @@ RetryLookup:;
 	 */
 	*wired = (entry->wired_count != 0);
 	if (*wired)
-		prot = fault_type = entry->protection;
+		fault_type = entry->protection;
 	size = entry->end - entry->start;
 	/*
 	 * If the entry was copy-on-write, we either ...
@@ -3594,7 +3587,8 @@ RetryLookup:;
 		 * If we don't need to write the page, we just demote the
 		 * permissions allowed.
 		 */
-		if (fault_type & VM_PROT_WRITE) {
+		if ((fault_type & VM_PROT_WRITE) != 0 ||
+		    (fault_typea & VM_PROT_COPY) != 0) {
 			/*
 			 * Make a new object, and place it in the object
 			 * chain.  Note that no new references have appeared
@@ -3717,21 +3711,14 @@ vm_map_lookup_locked(vm_map_t *var_map,	
 
 	/*
 	 * Check whether this task is allowed to have this page.
-	 * Note the special case for MAP_ENTRY_COW
-	 * pages with an override.  This is to implement a forced
-	 * COW for debuggers.
-	 */
-	if (fault_type & VM_PROT_OVERRIDE_WRITE)
-		prot = entry->max_protection;
-	else
-		prot = entry->protection;
+	 */
+	prot = entry->protection;
 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
 	if ((fault_type & prot) != fault_type)
 		return (KERN_PROTECTION_FAILURE);
 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
 	    (entry->eflags & MAP_ENTRY_COW) &&
-	    (fault_type & VM_PROT_WRITE) &&
-	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0)
+	    (fault_type & VM_PROT_WRITE))
 		return (KERN_PROTECTION_FAILURE);
 
 	/*
@@ -3740,7 +3727,7 @@ vm_map_lookup_locked(vm_map_t *var_map,	
 	 */
 	*wired = (entry->wired_count != 0);
 	if (*wired)
-		prot = fault_type = entry->protection;
+		fault_type = entry->protection;
 
 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 		/*


