Date:      Tue, 15 Oct 2019 22:32:45 +0200
From:      Oliver Pinter <oliver.pinter@hardenedbsd.org>
To:        Jeff Roberson <jeff@freebsd.org>
Cc:        "src-committers@freebsd.org" <src-committers@freebsd.org>,  "svn-src-all@freebsd.org" <svn-src-all@freebsd.org>,  "svn-src-head@freebsd.org" <svn-src-head@freebsd.org>
Subject:   Re: svn commit: r353539 - in head/sys: amd64/sgx cddl/contrib/opensolaris/uts/common/fs/zfs compat/linuxkpi/common/src dev/drm2/ttm dev/md dev/netmap dev/xen/gntdev dev/xen/privcmd fs/nfsclient fs/smbf...
Message-ID:  <CAPQ4ffuu6ZjNqWW9fvo=0n49vgvTHeAiyFX06JtEE+tZUSrGfg@mail.gmail.com>
In-Reply-To: <201910150345.x9F3jgYw028767@repo.freebsd.org>
References:  <201910150345.x9F3jgYw028767@repo.freebsd.org>

On Tuesday, October 15, 2019, Jeff Roberson <jeff@freebsd.org> wrote:

> Author: jeff
> Date: Tue Oct 15 03:45:41 2019
> New Revision: 353539
> URL: https://svnweb.freebsd.org/changeset/base/353539
>
> Log:
>   (4/6) Protect page valid with the busy lock.
>
>   Atomics are used for page busy and valid state when the shared busy is
>   held.  The details of the locking protocol and valid and dirty
>   synchronization are in the updated vm_page.h comments.
>
>   Reviewed by:    kib, markj
>   Tested by:      pho
>   Sponsored by:   Netflix, Intel
>   Differential Revision:        https://reviews.freebsd.org/D21594
>
> Modified:
>   head/sys/amd64/sgx/sgx.c
>   head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
>   head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
>   head/sys/compat/linuxkpi/common/src/linux_compat.c
>   head/sys/dev/drm2/ttm/ttm_bo_vm.c
>   head/sys/dev/drm2/ttm/ttm_tt.c
>   head/sys/dev/md/md.c
>   head/sys/dev/netmap/netmap_freebsd.c
>   head/sys/dev/xen/gntdev/gntdev.c
>   head/sys/dev/xen/privcmd/privcmd.c
>   head/sys/fs/nfsclient/nfs_clbio.c
>   head/sys/fs/smbfs/smbfs_io.c
>   head/sys/fs/tmpfs/tmpfs_subr.c
>   head/sys/kern/kern_exec.c
>   head/sys/kern/uipc_shm.c
>   head/sys/kern/vfs_bio.c
>   head/sys/kern/vfs_cluster.c
>   head/sys/vm/device_pager.c
>   head/sys/vm/phys_pager.c
>   head/sys/vm/sg_pager.c
>   head/sys/vm/swap_pager.c
>   head/sys/vm/vm_fault.c
>   head/sys/vm/vm_map.c
>   head/sys/vm/vm_mmap.c
>   head/sys/vm/vm_object.c
>   head/sys/vm/vm_page.c
>   head/sys/vm/vm_page.h
>   head/sys/vm/vm_pageout.c
>   head/sys/vm/vm_swapout.c
>   head/sys/vm/vnode_pager.c
>
> Modified: head/sys/amd64/sgx/sgx.c
> ==============================================================================
> --- head/sys/amd64/sgx/sgx.c    Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/amd64/sgx/sgx.c    Tue Oct 15 03:45:41 2019        (r353539)
> @@ -220,8 +220,8 @@ sgx_va_slot_init_by_index(struct sgx_softc *sc, vm_obj
>
>                 page = PHYS_TO_VM_PAGE(epc->phys);
>
> -               vm_page_insert(page, object, idx);
>                 page->valid = VM_PAGE_BITS_ALL;


Shouldn't this be vm_page_valid(page)?
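
That is, to match the conversions made elsewhere in this commit, I would
have expected something like the following (just a sketch; I have not
checked whether vm_page_valid()'s busy assertions hold for these EPC
pages):

	page = PHYS_TO_VM_PAGE(epc->phys);
	/* Set all of the valid bits through the new accessor instead
	 * of storing to page->valid directly. */
	vm_page_valid(page);
	vm_page_insert(page, object, idx);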


> +               vm_page_insert(page, object, idx);
>         }
>
>         return (0);
> @@ -610,8 +610,8 @@ sgx_insert_epc_page_by_index(vm_page_t page, vm_object
>
>         VM_OBJECT_ASSERT_WLOCKED(object);
>
> -       vm_page_insert(page, object, pidx);
>         page->valid = VM_PAGE_BITS_ALL;


And here too?
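
The dmu_read_pages() hunks further down show what I understand to be the
intended pattern under the new rules — keep the page at least
shared-busied across the fill and the valid transition, roughly
(condensed from the diff below; pindex stands in for the actual index
computation):

	m = vm_page_grab(vmobj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT |
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (m != NULL && vm_page_none_valid(m)) {
		/* ... fill and map the page ... */
		vm_page_valid(m);	/* valid changes only while busied */
		vm_page_sunbusy(m);
	}

which is why the two direct page->valid stores above stand out.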


> +       vm_page_insert(page, object, pidx);
>  }
>
>  static void
>
> Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
> ==============================================================================
> --- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c   Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c   Tue Oct 15 03:45:41 2019        (r353539)
> @@ -1731,11 +1731,13 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
>         db = dbp[0];
>         for (i = 0; i < *rbehind; i++) {
>                 m = vm_page_grab(vmobj, ma[0]->pindex - 1 - i,
> -                   VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY);
> +                   VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT |
> +                   VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
>                 if (m == NULL)
>                         break;
> -               if (m->valid != 0) {
> +               if (!vm_page_none_valid(m)) {
>                         ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
> +                       vm_page_sunbusy(m);
>                         break;
>                 }
>                 ASSERT(m->dirty == 0);
> @@ -1746,13 +1748,14 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
>                 va = zfs_map_page(m, &sf);
>                 bcopy((char *)db->db_data + bufoff, va, PAGESIZE);
>                 zfs_unmap_page(sf);
> -               m->valid = VM_PAGE_BITS_ALL;
> +               vm_page_valid(m);
>                 vm_page_lock(m);
>                 if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
>                         vm_page_activate(m);
>                 else
>                         vm_page_deactivate(m);
>                 vm_page_unlock(m);
> +               vm_page_sunbusy(m);
>         }
>         *rbehind = i;
>
> @@ -1763,7 +1766,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
>                         m = ma[mi];
>                         if (m != bogus_page) {
>                                 vm_page_assert_xbusied(m);
> -                               ASSERT(m->valid == 0);
> +                               ASSERT(vm_page_none_valid(m));
>                                 ASSERT(m->dirty == 0);
>                                 ASSERT(!pmap_page_is_mapped(m));
>                                 va = zfs_map_page(m, &sf);
> @@ -1791,7 +1794,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
>                 if (pgoff == PAGESIZE) {
>                         if (m != bogus_page) {
>                                 zfs_unmap_page(sf);
> -                               m->valid = VM_PAGE_BITS_ALL;
> +                               vm_page_valid(m);
>                         }
>                         ASSERT(mi < count);
>                         mi++;
> @@ -1840,16 +1843,18 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
>                 ASSERT(m != bogus_page);
>                 bzero(va + pgoff, PAGESIZE - pgoff);
>                 zfs_unmap_page(sf);
> -               m->valid = VM_PAGE_BITS_ALL;
> +               vm_page_valid(m);
>         }
>
>         for (i = 0; i < *rahead; i++) {
>                 m = vm_page_grab(vmobj, ma[count - 1]->pindex + 1 + i,
> -                   VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY);
> +                   VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT |
> +                   VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
>                 if (m == NULL)
>                         break;
> -               if (m->valid != 0) {
> +               if (!vm_page_none_valid(m)) {
>                         ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
> +                       vm_page_sunbusy(m);
>                         break;
>                 }
>                 ASSERT(m->dirty == 0);
> @@ -1866,13 +1871,14 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
>                         bzero(va + tocpy, PAGESIZE - tocpy);
>                 }
>                 zfs_unmap_page(sf);
> -               m->valid = VM_PAGE_BITS_ALL;
> +               vm_page_valid(m);
>                 vm_page_lock(m);
>                 if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
>                         vm_page_activate(m);
>                 else
>                         vm_page_deactivate(m);
>                 vm_page_unlock(m);
> +               vm_page_sunbusy(m);
>         }
>         *rahead = i;
>         zfs_vmobject_wunlock(vmobj);
>
> Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
> ==============================================================================
> --- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c     Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c     Tue Oct 15 03:45:41 2019        (r353539)
> @@ -534,7 +534,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
>
>                 pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY |
>                     VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
> -               if (pp->valid == 0) {
> +               if (vm_page_none_valid(pp)) {
>                         zfs_vmobject_wunlock(obj);
>                         va = zfs_map_page(pp, &sf);
>                         error = dmu_read(os, zp->z_id, start, bytes, va,
> @@ -543,17 +543,16 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
>                                 bzero(va + bytes, PAGESIZE - bytes);
>                         zfs_unmap_page(sf);
>                         zfs_vmobject_wlock(obj);
> -                       vm_page_sunbusy(pp);
> -                       if (error) {
> -                               if (!vm_page_busied(pp) && !vm_page_wired(pp) &&
> -                                   pp->valid == 0)
> -                                       vm_page_free(pp);
> -                       } else {
> -                               pp->valid = VM_PAGE_BITS_ALL;
> +                       if (error == 0) {
> +                               vm_page_valid(pp);
>                                 vm_page_lock(pp);
>                                 vm_page_activate(pp);
>                                 vm_page_unlock(pp);
>                         }
> +                       vm_page_sunbusy(pp);
> +                       if (error != 0 && !vm_page_wired(pp) == 0 &&
> +                           pp->valid == 0 && vm_page_tryxbusy(pp))
> +                               vm_page_free(pp);
>                 } else {
>                         ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
>                         vm_page_sunbusy(pp);
>
> Modified: head/sys/compat/linuxkpi/common/src/linux_compat.c
> ==============================================================================
> --- head/sys/compat/linuxkpi/common/src/linux_compat.c  Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/compat/linuxkpi/common/src/linux_compat.c  Tue Oct 15 03:45:41 2019        (r353539)
> @@ -514,7 +514,7 @@ linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_
>                         vm_page_free(*mres);
>                         *mres = page;
>                 }
> -               page->valid = VM_PAGE_BITS_ALL;
> +               vm_page_valid(page);
>                 return (VM_PAGER_OK);
>         }
>         return (VM_PAGER_FAIL);
>
> Modified: head/sys/dev/drm2/ttm/ttm_bo_vm.c
> ==============================================================================
> --- head/sys/dev/drm2/ttm/ttm_bo_vm.c   Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/dev/drm2/ttm/ttm_bo_vm.c   Tue Oct 15 03:45:41 2019        (r353539)
> @@ -252,7 +252,7 @@ reserve:
>                     ("inconsistent insert bo %p m %p m1 %p offset %jx",
>                     bo, m, m1, (uintmax_t)offset));
>         }
> -       m->valid = VM_PAGE_BITS_ALL;
> +       vm_page_valid(m);
>         if (*mres != NULL) {
>                 KASSERT(*mres != m, ("losing %p %p", *mres, m));
>                 vm_page_free(*mres);
>
> Modified: head/sys/dev/drm2/ttm/ttm_tt.c
> ==============================================================================
> --- head/sys/dev/drm2/ttm/ttm_tt.c      Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/dev/drm2/ttm/ttm_tt.c      Tue Oct 15 03:45:41 2019        (r353539)
> @@ -344,7 +344,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t per
>                         continue;
>                 to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
>                 pmap_copy_page(from_page, to_page);
> -               to_page->valid = VM_PAGE_BITS_ALL;
> +               vm_page_valid(to_page);
>                 vm_page_dirty(to_page);
>                 vm_page_xunbusy(to_page);
>         }
>
> Modified: head/sys/dev/md/md.c
> ==============================================================================
> --- head/sys/dev/md/md.c        Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/dev/md/md.c        Tue Oct 15 03:45:41 2019        (r353539)
> @@ -1074,7 +1074,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
>                 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
>                 m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
>                 if (bp->bio_cmd == BIO_READ) {
> -                       if (m->valid == VM_PAGE_BITS_ALL)
> +                       if (vm_page_all_valid(m))
>                                 rv = VM_PAGER_OK;
>                         else
>                                 rv = vm_pager_get_pages(sc->object, &m, 1,
> @@ -1090,7 +1090,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
>                                  * can be recreated if thrown out.
>                                  */
>                                 pmap_zero_page(m);
> -                               m->valid = VM_PAGE_BITS_ALL;
> +                               vm_page_valid(m);
>                         }
>                         if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
>                                 pmap_copy_pages(&m, offs, bp->bio_ma,
> @@ -1104,7 +1104,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
>                                 cpu_flush_dcache(p, len);
>                         }
>                 } else if (bp->bio_cmd == BIO_WRITE) {
> -                       if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
> +                       if (len == PAGE_SIZE || vm_page_all_valid(m))
>                                 rv = VM_PAGER_OK;
>                         else
>                                 rv = vm_pager_get_pages(sc->object, &m, 1,
> @@ -1125,13 +1125,13 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
>                                 physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
>                         }
>
> -                       m->valid = VM_PAGE_BITS_ALL;
> +                       vm_page_valid(m);
>                         if (m->dirty != VM_PAGE_BITS_ALL) {
>                                 vm_page_dirty(m);
>                                 vm_pager_page_unswapped(m);
>                         }
>                 } else if (bp->bio_cmd == BIO_DELETE) {
> -                       if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
> +                       if (len == PAGE_SIZE || vm_page_all_valid(m))
>                                 rv = VM_PAGER_OK;
>                         else
>                                 rv = vm_pager_get_pages(sc->object, &m, 1,
>
> Modified: head/sys/dev/netmap/netmap_freebsd.c
> ==============================================================================
> --- head/sys/dev/netmap/netmap_freebsd.c        Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/dev/netmap/netmap_freebsd.c        Tue Oct 15 03:45:41 2019        (r353539)
> @@ -1056,7 +1056,7 @@ netmap_dev_pager_fault(vm_object_t object, vm_ooffset_
>                 *mres = page;
>                 vm_page_insert(page, object, pidx);
>         }
> -       page->valid = VM_PAGE_BITS_ALL;
> +       vm_page_valid(page);
>         return (VM_PAGER_OK);
>  }
>
>
> Modified: head/sys/dev/xen/gntdev/gntdev.c
> ==============================================================================
> --- head/sys/dev/xen/gntdev/gntdev.c    Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/dev/xen/gntdev/gntdev.c    Tue Oct 15 03:45:41 2019        (r353539)
> @@ -836,8 +836,8 @@ gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t
>         }
>
>         vm_page_busy_acquire(page, 0);
> +       vm_page_valid(page);
>         vm_page_insert(page, object, pidx);
> -       page->valid = VM_PAGE_BITS_ALL;
>         *mres = page;
>         return (VM_PAGER_OK);
>  }
>
> Modified: head/sys/dev/xen/privcmd/privcmd.c
> ==============================================================================
> --- head/sys/dev/xen/privcmd/privcmd.c  Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/dev/xen/privcmd/privcmd.c  Tue Oct 15 03:45:41 2019        (r353539)
> @@ -179,8 +179,8 @@ privcmd_pg_fault(vm_object_t object, vm_ooffset_t offs
>         }
>
>         vm_page_busy_acquire(page, 0);
> +       vm_page_valid(page);
>         vm_page_insert(page, object, pidx);
> -       page->valid = VM_PAGE_BITS_ALL;
>         *mres = page;
>         return (VM_PAGER_OK);
>  }
>
> Modified: head/sys/fs/nfsclient/nfs_clbio.c
> ==============================================================================
> --- head/sys/fs/nfsclient/nfs_clbio.c   Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/fs/nfsclient/nfs_clbio.c   Tue Oct 15 03:45:41 2019        (r353539)
> @@ -174,7 +174,7 @@ ncl_getpages(struct vop_getpages_args *ap)
>          * XXXGL: is that true for NFS, where short read can occur???
>          */
>         VM_OBJECT_WLOCK(object);
> -       if (pages[npages - 1]->valid != 0 && --npages == 0)
> +       if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
>                 goto out;
>         VM_OBJECT_WUNLOCK(object);
>
> @@ -227,14 +227,14 @@ ncl_getpages(struct vop_getpages_args *ap)
>                         /*
>                          * Read operation filled an entire page
>                          */
> -                       m->valid = VM_PAGE_BITS_ALL;
> +                       vm_page_valid(m);
>                         KASSERT(m->dirty == 0,
>                             ("nfs_getpages: page %p is dirty", m));
>                 } else if (size > toff) {
>                         /*
>                          * Read operation filled a partial page.
>                          */
> -                       m->valid = 0;
> +                       vm_page_invalid(m);
>                         vm_page_set_valid_range(m, 0, size - toff);
>                         KASSERT(m->dirty == 0,
>                             ("nfs_getpages: page %p is dirty", m));
>
> Modified: head/sys/fs/smbfs/smbfs_io.c
> ==============================================================================
> --- head/sys/fs/smbfs/smbfs_io.c        Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/fs/smbfs/smbfs_io.c        Tue Oct 15 03:45:41 2019        (r353539)
> @@ -457,7 +457,7 @@ smbfs_getpages(ap)
>          * XXXGL: is that true for SMB filesystem?
>          */
>         VM_OBJECT_WLOCK(object);
> -       if (pages[npages - 1]->valid != 0 && --npages == 0)
> +       if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
>                 goto out;
>         VM_OBJECT_WUNLOCK(object);
>
> @@ -505,14 +505,14 @@ smbfs_getpages(ap)
>                         /*
>                          * Read operation filled an entire page
>                          */
> -                       m->valid = VM_PAGE_BITS_ALL;
> +                       vm_page_valid(m);
>                         KASSERT(m->dirty == 0,
>                             ("smbfs_getpages: page %p is dirty", m));
>                 } else if (size > toff) {
>                         /*
>                          * Read operation filled a partial page.
>                          */
> -                       m->valid = 0;
> +                       vm_page_invalid(m);
>                         vm_page_set_valid_range(m, 0, size - toff);
>                         KASSERT(m->dirty == 0,
>                             ("smbfs_getpages: page %p is dirty", m));
>
> Modified: head/sys/fs/tmpfs/tmpfs_subr.c
> ==============================================================================
> --- head/sys/fs/tmpfs/tmpfs_subr.c      Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/fs/tmpfs/tmpfs_subr.c      Tue Oct 15 03:45:41 2019        (r353539)
> @@ -1408,7 +1408,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, bool
>  retry:
>                         m = vm_page_grab(uobj, idx, VM_ALLOC_NOCREAT);
>                         if (m != NULL) {
> -                               MPASS(m->valid == VM_PAGE_BITS_ALL);
> +                               MPASS(vm_page_all_valid(m));
>                         } else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
>                                 m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
>                                     VM_ALLOC_WAITFAIL);
>
> Modified: head/sys/kern/kern_exec.c
> ==============================================================================
> --- head/sys/kern/kern_exec.c   Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/kern/kern_exec.c   Tue Oct 15 03:45:41 2019        (r353539)
> @@ -979,11 +979,15 @@ exec_map_first_page(struct image_params *imgp)
>  retry:
>         ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
>             VM_ALLOC_WIRED);
> -       if (ma[0]->valid != VM_PAGE_BITS_ALL) {
> +       if (!vm_page_all_valid(ma[0])) {
>                 if (vm_page_busy_acquire(ma[0], VM_ALLOC_WAITFAIL) == 0) {
>                         vm_page_unwire_noq(ma[0]);
>                         goto retry;
>                 }
> +               if (vm_page_all_valid(ma[0])) {
> +                       vm_page_xunbusy(ma[0]);
> +                       goto out;
> +               }
>                 if (!vm_pager_has_page(object, 0, NULL, &after)) {
>                         if (vm_page_unwire_noq(ma[0]))
>                                 vm_page_free(ma[0]);
> @@ -1029,6 +1033,8 @@ retry:
>                 for (i = 1; i < initial_pagein; i++)
>                         vm_page_readahead_finish(ma[i]);
>         }
> +
> +out:
>         VM_OBJECT_WUNLOCK(object);
>
>         imgp->firstpage = sf_buf_alloc(ma[0], 0);
>
> Modified: head/sys/kern/uipc_shm.c
> ==============================================================================
> --- head/sys/kern/uipc_shm.c    Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/kern/uipc_shm.c    Tue Oct 15 03:45:41 2019        (r353539)
> @@ -459,7 +459,7 @@ shm_dotruncate_locked(struct shmfd *shmfd, off_t lengt
>  retry:
>                         m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
>                         if (m != NULL) {
> -                               MPASS(m->valid == VM_PAGE_BITS_ALL);
> +                               MPASS(vm_page_all_valid(m));
>                         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
>                                 m = vm_page_alloc(object, idx,
>                                     VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
> @@ -485,7 +485,7 @@ retry:
>                         }
>                         if (m != NULL) {
>                                 pmap_zero_page_area(m, base, PAGE_SIZE - base);
> -                               KASSERT(m->valid == VM_PAGE_BITS_ALL,
> +                               KASSERT(vm_page_all_valid(m),
>                                     ("shm_dotruncate: page %p is invalid", m));
>                                 vm_page_dirty(m);
>                                 vm_page_xunbusy(m);
>
> Modified: head/sys/kern/vfs_bio.c
> ==============================================================================
> --- head/sys/kern/vfs_bio.c     Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/kern/vfs_bio.c     Tue Oct 15 03:45:41 2019        (r353539)
> @@ -956,6 +956,12 @@ vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff,
>  {
>
>         VM_OBJECT_ASSERT_LOCKED(m->object);
> +
> +       /*
> +        * This function and its results are protected by higher level
> +        * synchronization requiring vnode and buf locks to page in and
> +        * validate pages.
> +        */
>         if (bp->b_flags & B_CACHE) {
>                 int base = (foff + off) & PAGE_MASK;
>                 if (vm_page_is_valid(m, base, size) == 0)
> @@ -4640,7 +4646,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
>                 if (clear_modify) {
>                         pmap_remove_write(m);
>                         vfs_page_set_validclean(bp, foff, m);
> -               } else if (m->valid == VM_PAGE_BITS_ALL &&
> +               } else if (vm_page_all_valid(m) &&
>                     (bp->b_flags & B_CACHE) == 0) {
>                         bp->b_pages[i] = bogus_page;
>                         bogus = true;
> @@ -4681,6 +4687,14 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
>         n = PAGE_SIZE - (base & PAGE_MASK);
>
>         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
> +
> +       /*
> +        * Busy may not be strictly necessary here because the pages are
> +        * unlikely to be fully valid and the vnode lock will synchronize
> +        * their access via getpages.  It is grabbed for consistency with
> +        * other page validation.
> +        */
> +       vfs_busy_pages_acquire(bp);
>         for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
>                 m = bp->b_pages[i];
>                 if (n > size)
> @@ -4690,6 +4704,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
>                 size -= n;
>                 n = PAGE_SIZE;
>         }
> +       vfs_busy_pages_release(bp);
>         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
>  }
>
> @@ -4717,6 +4732,7 @@ vfs_bio_clrbuf(struct buf *bp)
>         bp->b_flags &= ~B_INVAL;
>         bp->b_ioflags &= ~BIO_ERROR;
>         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
> +       vfs_busy_pages_acquire(bp);
>         if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
>             (bp->b_offset & PAGE_MASK) == 0) {
>                 if (bp->b_pages[0] == bogus_page)
> @@ -4758,6 +4774,7 @@ vfs_bio_clrbuf(struct buf *bp)
>                 bp->b_pages[i]->valid |= mask;
>         }
>  unlock:
> +       vfs_busy_pages_release(bp);
>         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
>         bp->b_resid = 0;
>  }
> @@ -5189,7 +5206,7 @@ again:
>                  * the end of the function catches the race in a
>                  * reliable way (protected by the object lock).
>                  */
> -               if (m->valid == VM_PAGE_BITS_ALL)
> +               if (vm_page_all_valid(m))
>                         continue;
>
>                 poff = IDX_TO_OFF(m->pindex);
> @@ -5219,7 +5236,7 @@ again:
>                                  * cache pressure.
>                                  */
>                                 if (buf_pager_relbuf ||
> -                                   m->valid != VM_PAGE_BITS_ALL)
> +                                   !vm_page_all_valid(m))
>                                         bp->b_flags |= B_RELBUF;
>
>                                 bp->b_flags &= ~B_NOCACHE;
> @@ -5229,12 +5246,12 @@ again:
>                         }
>                 }
>                 KASSERT(1 /* racy, enable for debugging */ ||
> -                   m->valid == VM_PAGE_BITS_ALL || i == count - 1,
> +                   vm_page_all_valid(m) || i == count - 1,
>                     ("buf %d %p invalid", i, m));
>                 if (i == count - 1 && lpart) {
>                         VM_OBJECT_WLOCK(object);
> -                       if (m->valid != 0 &&
> -                           m->valid != VM_PAGE_BITS_ALL)
> +                       if (!vm_page_none_valid(m) &&
> +                           !vm_page_all_valid(m))
>                                 vm_page_zero_invalid(m, TRUE);
>                         VM_OBJECT_WUNLOCK(object);
>                 }
> @@ -5261,7 +5278,7 @@ end_pages:
>                  * invalidated or removed, so we must restart for
>                  * safety as well.
>                  */
> -               if (ma[i]->valid != VM_PAGE_BITS_ALL)
> +               if (!vm_page_all_valid(ma[i]))
>                         redo = true;
>         }
>         if (redo && error == 0)
>
> Modified: head/sys/kern/vfs_cluster.c
> ==============================================================================
> --- head/sys/kern/vfs_cluster.c Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/kern/vfs_cluster.c Tue Oct 15 03:45:41 2019        (r353539)
> @@ -465,11 +465,13 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, da
>                                 if (toff + tinc > PAGE_SIZE)
>                                         tinc = PAGE_SIZE - toff;
>                                 VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
> -                               if ((tbp->b_pages[j]->valid &
> -                                   vm_page_bits(toff, tinc)) != 0)
> -                                       break;
>                                 if (vm_page_trysbusy(tbp->b_pages[j]) == 0)
>                                         break;
> +                               if ((tbp->b_pages[j]->valid &
> +                                   vm_page_bits(toff, tinc)) != 0) {
> +                                       vm_page_sunbusy(tbp->b_pages[j]);
> +                                       break;
> +                               }
>                                 vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
>                                 off += tinc;
>                                 tsize -= tinc;
> @@ -524,7 +526,7 @@ clean_sbusy:
>                                 bp->b_pages[bp->b_npages] = m;
>                                 bp->b_npages++;
>                         }
> -                       if (m->valid == VM_PAGE_BITS_ALL)
> +                       if (vm_page_all_valid(m))
>                                 tbp->b_pages[j] = bogus_page;
>                 }
>                 VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
> @@ -548,7 +550,7 @@ clean_sbusy:
>         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
>         for (j = 0; j < bp->b_npages; j++) {
>                 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
> -               if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
> +               if (vm_page_all_valid(bp->b_pages[j]))
>                         bp->b_pages[j] = bogus_page;
>         }
>         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
>
> Modified: head/sys/vm/device_pager.c
> ==============================================================================
> --- head/sys/vm/device_pager.c  Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/device_pager.c  Tue Oct 15 03:45:41 2019        (r353539)
> @@ -395,7 +395,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t o
>                 vm_page_free(*mres);
>                 *mres = page;
>         }
> -       page->valid = VM_PAGE_BITS_ALL;
> +       vm_page_valid(page);
>         return (VM_PAGER_OK);
>  }
>
>
> Modified: head/sys/vm/phys_pager.c
> ==============================================================================
> --- head/sys/vm/phys_pager.c    Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/phys_pager.c    Tue Oct 15 03:45:41 2019        (r353539)
> @@ -145,12 +145,12 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m,
>
>         VM_OBJECT_ASSERT_WLOCKED(object);
>         for (i = 0; i < count; i++) {
> -               if (m[i]->valid == 0) {
> +               if (vm_page_none_valid(m[i])) {
>                         if ((m[i]->flags & PG_ZERO) == 0)
>                                 pmap_zero_page(m[i]);
> -                       m[i]->valid = VM_PAGE_BITS_ALL;
> +                       vm_page_valid(m[i]);
>                 }
> -               KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
> +               KASSERT(vm_page_all_valid(m[i]),
>                     ("phys_pager_getpages: partially valid page %p", m[i]));
>                 KASSERT(m[i]->dirty == 0,
>                     ("phys_pager_getpages: dirty page %p", m[i]));
> @@ -209,10 +209,8 @@ phys_pager_populate(vm_object_t object, vm_pindex_t pi
>                 ahead = MIN(end - i, PHYSALLOC);
>                 m = vm_page_grab(object, i,
>                     VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead));
> -               if (m->valid != VM_PAGE_BITS_ALL) {
> +               if (!vm_page_all_valid(m))
>                         vm_page_zero_invalid(m, TRUE);
> -                       m->valid = VM_PAGE_BITS_ALL;
> -               }
>                 KASSERT(m->dirty == 0,
>                     ("phys_pager_populate: dirty page %p", m));
>         }
>
> Modified: head/sys/vm/sg_pager.c
> ==============================================================================
> --- head/sys/vm/sg_pager.c      Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/sg_pager.c      Tue Oct 15 03:45:41 2019        (r353539)
> @@ -198,7 +198,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, in
>         vm_page_free(m[0]);
>         vm_page_unlock(m[0]);
>         m[0] = page;
> -       page->valid = VM_PAGE_BITS_ALL;
> +       vm_page_valid(page);
>
>         if (rbehind)
>                 *rbehind = 0;
>
> Modified: head/sys/vm/swap_pager.c
> ==============================================================================
> --- head/sys/vm/swap_pager.c    Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/swap_pager.c    Tue Oct 15 03:45:41 2019        (r353539)
> @@ -1554,7 +1554,7 @@ swp_pager_async_iodone(struct buf *bp)
>                                  * be overridden by the original caller of
>                                  * getpages so don't play cute tricks here.
>                                  */
> -                               m->valid = 0;
> +                               vm_page_invalid(m);
>                         } else {
>                                 /*
>                                  * If a write error occurs, reactivate page
> @@ -1582,7 +1582,7 @@ swp_pager_async_iodone(struct buf *bp)
>                         KASSERT(m->dirty == 0,
>                             ("swp_pager_async_iodone: page %p is dirty", m));
>
> -                       m->valid = VM_PAGE_BITS_ALL;
> +                       vm_page_valid(m);
>                         if (i < bp->b_pgbefore ||
>                             i >= bp->b_npages - bp->b_pgafter)
>                                 vm_page_readahead_finish(m);
>
> Modified: head/sys/vm/vm_fault.c
> ==============================================================================
> --- head/sys/vm/vm_fault.c      Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/vm_fault.c      Tue Oct 15 03:45:41 2019        (r353539)
> @@ -211,6 +211,7 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_p
>                 return;
>
>         VM_OBJECT_ASSERT_LOCKED(m->object);
> +       VM_PAGE_OBJECT_BUSY_ASSERT(m);
>
>         need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
>             (fault_flags & VM_FAULT_WIRE) == 0) ||
> @@ -285,7 +286,7 @@ vm_fault_soft_fast(struct faultstate *fs, vm_offset_t
>         m = vm_page_lookup(fs->first_object, fs->first_pindex);
>         /* A busy page can be mapped for read|execute access. */
>         if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
> -           vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL) {
> +           vm_page_busied(m)) || !vm_page_all_valid(m)) {
>                 rv = KERN_FAILURE;
>                 goto out;
>         }
> @@ -368,7 +369,7 @@ vm_fault_populate_check_page(vm_page_t m)
>          * valid, and exclusively busied.
>          */
>         MPASS(m != NULL);
> -       MPASS(m->valid == VM_PAGE_BITS_ALL);
> +       MPASS(vm_page_all_valid(m));
>         MPASS(vm_page_xbusied(m));
>  }
>
> @@ -830,7 +831,7 @@ RetryFault_oom:
>                          * (readable), jump to readrest, else break-out ( we
>                          * found the page ).
>                          */
> -                       if (fs.m->valid != VM_PAGE_BITS_ALL)
> +                       if (!vm_page_all_valid(fs.m))
>                                 goto readrest;
>                         break; /* break to PAGE HAS BEEN FOUND */
>                 }
> @@ -1154,7 +1155,7 @@ readrest:
>                                 VM_CNT_INC(v_ozfod);
>                         }
>                         VM_CNT_INC(v_zfod);
> -                       fs.m->valid = VM_PAGE_BITS_ALL;
> +                       vm_page_valid(fs.m);
>                         /* Don't try to prefault neighboring pages. */
>                         faultcount = 1;
>                         break;  /* break to PAGE HAS BEEN FOUND */
> @@ -1245,7 +1246,7 @@ readrest:
>                                  * Oh, well, lets copy it.
>                                  */
>                                 pmap_copy_page(fs.m, fs.first_m);
> -                               fs.first_m->valid = VM_PAGE_BITS_ALL;
> +                               vm_page_valid(fs.first_m);
>                                 if (wired && (fault_flags &
>                                     VM_FAULT_WIRE) == 0) {
>                                         vm_page_wire(fs.first_m);
> @@ -1364,7 +1365,7 @@ readrest:
>          * Page must be completely valid or it is not fit to
>          * map into user space.  vm_pager_get_pages() ensures this.
>          */
> -       KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
> +       KASSERT(vm_page_all_valid(fs.m),
>             ("vm_fault: page %p partially invalid", fs.m));
>         VM_OBJECT_WUNLOCK(fs.object);
>
> @@ -1480,7 +1481,7 @@ vm_fault_dontneed(const struct faultstate *fs, vm_offs
>                             entry->start);
>                         while ((m = m_next) != NULL && m->pindex < pend) {
>                                 m_next = TAILQ_NEXT(m, listq);
> -                               if (m->valid != VM_PAGE_BITS_ALL ||
> +                               if (!vm_page_all_valid(m) ||
>                                     vm_page_busied(m))
>                                         continue;
>
> @@ -1577,7 +1578,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offs
>                                 VM_OBJECT_RUNLOCK(lobject);
>                         break;
>                 }
> -               if (m->valid == VM_PAGE_BITS_ALL &&
> +               if (vm_page_all_valid(m) &&
>                     (m->flags & PG_FICTITIOUS) == 0)
>                         pmap_enter_quick(pmap, addr, m, entry->protection);
>                 if (!obj_locked || lobject != entry->object.vm_object)
> @@ -1852,7 +1853,7 @@ again:
>                  * all copies of the wired map entry have similar
>                  * backing pages.
>                  */
> -               if (dst_m->valid == VM_PAGE_BITS_ALL) {
> +               if (vm_page_all_valid(dst_m)) {
>                         pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
>                             access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
>                 }
>
> Modified: head/sys/vm/vm_map.c
> ============================================================
> ==================
> --- head/sys/vm/vm_map.c        Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/vm_map.c        Tue Oct 15 03:45:41 2019        (r353539)
> @@ -2358,7 +2358,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_p
>                         psize = tmpidx;
>                         break;
>                 }
> -               if (p->valid == VM_PAGE_BITS_ALL) {
> +               if (vm_page_all_valid(p)) {
>                         if (p_start == NULL) {
>                                 start = addr + ptoa(tmpidx);
>                                 p_start = p;
>
> Modified: head/sys/vm/vm_mmap.c
> ==============================================================================
> --- head/sys/vm/vm_mmap.c       Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/vm_mmap.c       Tue Oct 15 03:45:41 2019        (r353539)
> @@ -893,7 +893,7 @@ RestartScan:
>                                         }
>                                 } else
>                                         vm_page_unlock(m);
> -                               KASSERT(m->valid == VM_PAGE_BITS_ALL,
> +                               KASSERT(vm_page_all_valid(m),
>                                     ("mincore: page %p is mapped but invalid",
>                                     m));
>                         } else if (mincoreinfo == 0) {
> @@ -915,7 +915,7 @@ RestartScan:
>                                         pindex = OFF_TO_IDX(current->offset +
>                                             (addr - current->start));
>                                         m = vm_page_lookup(object, pindex);
> -                                       if (m != NULL && m->valid == 0)
> +                                       if (m != NULL && vm_page_none_valid(m))
>                                                 m = NULL;
>                                         if (m != NULL)
>                                                 mincoreinfo = MINCORE_INCORE;
>
> Modified: head/sys/vm/vm_object.c
> ==============================================================================
> --- head/sys/vm/vm_object.c     Tue Oct 15 03:41:36 2019        (r353538)
> +++ head/sys/vm/vm_object.c     Tue Oct 15 03:45:41 2019        (r353539)
> @@ -841,7 +841,7 @@ rescan:
>                 if (pi >= tend)
>                         break;
>                 np = TAILQ_NEXT(p, listq);
> -               if (p->valid == 0)
> +               if (vm_page_none_valid(p))
>                         continue;
>                 if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
>                         if (object->generation != curgeneration) {
> @@ -1161,10 +1161,10 @@ next_page:
>                 }
>
>                 /*
> -                * If the page is not in a normal state, skip it.
> +                * If the page is not in a normal state, skip it.  The page
> +                * can not be invalidated while the object lock is held.
>                  */
> -               if (tm->valid != VM_PAGE_BITS_ALL ||
> -                   vm_page_wired(tm))
> +               if (!vm_page_all_valid(tm) || vm_page_wired(tm))
>                         goto next_pindex;
>                 KASSERT((tm->flags & PG_FICTITIOUS) == 0,
>                     ("vm_object_madvise: page %p is fictitious", tm));
> @@ -1488,7 +1488,11 @@ vm_object_scan_all_shadowed(vm_object_t object)
>                  * object and we might as well give up now.
>                  */
>                 pp = vm_page_lookup(object, new_pindex);
> -               if ((pp == NULL || pp->valid == 0) &&
> +               /*
> +                * The valid check here is stable due to object lock being
> +                * required to clear valid and initiate paging.
> +                */
> +               if ((pp == NULL || vm_page_none_valid(pp)) &&
>                     !vm_pager_has_page(object, new_pindex, NULL, NULL))
>                         return (false);
>         }
> @@ -1567,7 +1571,7 @@ vm_object_collapse_scan(vm_object_t object, int op)
>                         continue;
>                 }
>
> -               KASSERT(pp == NULL || pp->valid != 0,
> +               KASSERT(pp == NULL || !vm_page_none_valid(pp),
>                     ("unbusy invalid page %p", pp));
>
>                 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
> @@ -1894,7 +1898,7 @@ wired:
>                             object->ref_count != 0)
>                                 pmap_remove_all(p);
>                         if ((options & OBJPR_CLEANONLY) == 0) {
> -                               p->valid = 0;
> +                               vm_page_invalid(p);
>                                 vm_page_undirty(p);
>                         }
>                         vm_page_xunbusy(p);
> @@ -1902,7 +1906,8 @@ wired:
>                 }
>                 KASSERT((p->flags & PG_FICTITIOUS) == 0,
>                     ("vm_object_page_remove: page %p is fictitious", p));
> -               if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
> +               if ((options & OBJPR_CLEANONLY) != 0 &&
> +                   !vm_page_none_valid(p)) {
>                         if ((options & OBJPR_NOTMAPPED) == 0 &&
>
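
For reference, the mechanical conversions in this commit map the old
direct manipulation of m->valid onto the new accessors roughly as
follows (my summary from reading the diff, not from the commit message):

	m->valid = VM_PAGE_BITS_ALL;	->  vm_page_valid(m);
	m->valid = 0;			->  vm_page_invalid(m);
	m->valid == VM_PAGE_BITS_ALL	->  vm_page_all_valid(m)
	m->valid != 0			->  !vm_page_none_valid(m)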


