Replace OBJ_MIGHTBEDIRTY with a system using atomics. Remove the TMPFS_DIRTY
flag and use the same system.

This enables further fault locking improvements by allowing more faults to
proceed with a shared lock.

Reviewed by:	kib
Tested by:	pho
Differential Revision:	https://reviews.freebsd.org/D22116
Jeff Roberson authored and committed on Oct 29, 2019
1 parent 51df532 · commit 67d0e29
Showing 11 changed files with 70 additions and 91 deletions.
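
In outline: the OBJ_MIGHTBEDIRTY and OBJ_TMPFS_DIRTY flags become a pair of
counters. object->generation is bumped atomically (so a shared object lock
suffices) whenever the object gains writeable, possibly-dirty pages, and
object->cleangeneration records the generation observed by the last full
clean; "might be dirty" is then simply generation != cleangeneration. The
header change defining vm_object_mightbedirty() (presumably in
sys/vm/vm_object.h) is among the two files not rendered on this page, so the
program below is a standalone model of the scheme under that assumption,
using only the field names visible in the hunks; it is not the kernel code.

/* model.c -- standalone C11 model of the generation scheme. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint generation;		/* bumped when pages may be dirtied */
	unsigned cleangeneration;	/* generation at the last full clean */
};

/* Replacement for the OBJ_MIGHTBEDIRTY flag test: a lock-free read. */
static bool
mightbedirty(struct obj *o)
{

	return (atomic_load(&o->generation) != o->cleangeneration);
}

/* Fault side: the atomic add is what makes a shared object lock enough. */
static void
set_writeable_dirty(struct obj *o)
{

	atomic_fetch_add(&o->generation, 1);
}

/* Clean side: snapshot, flush, and publish only if no writer raced us. */
static void
page_clean(struct obj *o)
{
	unsigned curgeneration;

	curgeneration = atomic_load(&o->generation);
	/* ... flush all dirty pages in the object here ... */
	if (atomic_load(&o->generation) == curgeneration)
		o->cleangeneration = curgeneration;
}

int
main(void)
{
	struct obj o = { .cleangeneration = 1 };

	atomic_init(&o.generation, 1);
	set_writeable_dirty(&o);
	printf("mightbedirty: %d\n", mightbedirty(&o));	/* 1 */
	page_clean(&o);
	printf("mightbedirty: %d\n", mightbedirty(&o));	/* 0 */
	return (0);
}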
2 changes: 1 addition & 1 deletion sys/fs/nfsclient/nfs_clvnops.c
@@ -715,7 +715,7 @@ nfs_open(struct vop_open_args *ap)
 	 */
 	if (vp->v_writecount <= -1) {
 		if ((obj = vp->v_object) != NULL &&
-		    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
+		    vm_object_mightbedirty(obj)) {
 			VM_OBJECT_WLOCK(obj);
 			vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
 			VM_OBJECT_WUNLOCK(obj);
6 changes: 2 additions & 4 deletions sys/fs/nfsserver/nfs_nfsdport.c
@@ -1498,8 +1498,7 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
 	/*
 	 * Give up and do the whole thing
 	 */
-	if (vp->v_object &&
-	    (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
+	if (vp->v_object && vm_object_mightbedirty(vp->v_object)) {
 		VM_OBJECT_WLOCK(vp->v_object);
 		vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
 		VM_OBJECT_WUNLOCK(vp->v_object);
@@ -1529,8 +1528,7 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
 	}
 	lblkno = off / iosize;

-	if (vp->v_object &&
-	    (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
+	if (vp->v_object && vm_object_mightbedirty(vp->v_object)) {
 		VM_OBJECT_WLOCK(vp->v_object);
 		vm_object_page_clean(vp->v_object, off, off + cnt,
 		    OBJPC_SYNC);
6 changes: 3 additions & 3 deletions sys/fs/tmpfs/tmpfs_subr.c
@@ -1477,10 +1477,10 @@ tmpfs_check_mtime(struct vnode *vp)
 	KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
 	    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
 	/* unlocked read */
-	if ((obj->flags & OBJ_TMPFS_DIRTY) != 0) {
+	if (obj->generation != obj->cleangeneration) {
 		VM_OBJECT_WLOCK(obj);
-		if ((obj->flags & OBJ_TMPFS_DIRTY) != 0) {
-			obj->flags &= ~OBJ_TMPFS_DIRTY;
+		if (obj->generation != obj->cleangeneration) {
+			obj->cleangeneration = obj->generation;
 			node = VP_TO_TMPFS_NODE(vp);
 			node->tn_status |= TMPFS_NODE_MODIFIED |
 			    TMPFS_NODE_CHANGED;
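
The tmpfs_check_mtime() hunk above keeps the pre-existing double-checked
shape, now applied to the counters: an unlocked read as the cheap fast path,
then a re-check under the object write lock before acting on the result. A
sketch of the idiom, reusing struct obj from the model above plus a pthread
mutex standing in for the VM object write lock (again not the kernel code):

#include <pthread.h>

static pthread_mutex_t obj_wlock = PTHREAD_MUTEX_INITIALIZER;

/* Double-checked dirty test, modeled on the tmpfs_check_mtime() hunk. */
static void
check_mtime(struct obj *o)
{

	/*
	 * Unlocked read: a stale answer costs at most a skipped or a
	 * spurious lock acquisition; the re-check under the lock is
	 * what actually decides.
	 */
	if (atomic_load(&o->generation) == o->cleangeneration)
		return;
	pthread_mutex_lock(&obj_wlock);
	if (atomic_load(&o->generation) != o->cleangeneration) {
		o->cleangeneration = atomic_load(&o->generation);
		/* ... mark the node TMPFS_NODE_MODIFIED | CHANGED ... */
	}
	pthread_mutex_unlock(&obj_wlock);
}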
2 changes: 1 addition & 1 deletion sys/fs/tmpfs/tmpfs_vfsops.c
@@ -172,7 +172,7 @@ tmpfs_update_mtime(struct mount *mp, bool lazy)
 		 * For non-lazy case, we must flush all pending
 		 * metadata changes now.
 		 */
-		if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
+		if (!lazy || obj->generation != obj->cleangeneration) {
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
 			    curthread) != 0)
 				continue;
2 changes: 1 addition & 1 deletion sys/fs/tmpfs/tmpfs_vnops.c
@@ -1323,7 +1323,7 @@ tmpfs_need_inactive(struct vop_need_inactive_args *ap)
 		goto need;
 	if (vp->v_type == VREG) {
 		obj = vp->v_object;
-		if ((obj->flags & OBJ_TMPFS_DIRTY) != 0)
+		if (obj->generation != obj->cleangeneration)
 			goto need;
 	}
 	return (0);
6 changes: 3 additions & 3 deletions sys/kern/vfs_subr.c
@@ -3346,7 +3346,7 @@ vinactive(struct vnode *vp, struct thread *td)
 	 * pending I/O and dirty pages in the object.
 	 */
 	if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
-	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
+	    vm_object_mightbedirty(obj)) {
 		VM_OBJECT_WLOCK(obj);
 		vm_object_page_clean(obj, 0, 0, 0);
 		VM_OBJECT_WUNLOCK(obj);
@@ -4406,7 +4406,7 @@ vfs_msync(struct mount *mp, int flags)

 	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
 		obj = vp->v_object;
-		if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
+		if (obj != NULL && vm_object_mightbedirty(obj) &&
 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
 			if (!vget(vp,
 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
@@ -4696,7 +4696,7 @@ vn_need_pageq_flush(struct vnode *vp)
 	MPASS(mtx_owned(VI_MTX(vp)));
 	need = 0;
 	if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
-	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0)
+	    vm_object_mightbedirty(obj))
 		need = 1;
 	return (need);
 }
4 changes: 2 additions & 2 deletions sys/ufs/ffs/ffs_rawread.c
@@ -109,7 +109,7 @@ ffs_rawread_sync(struct vnode *vp)
 	if (bo->bo_numoutput > 0 ||
 	    bo->bo_dirty.bv_cnt > 0 ||
 	    ((obj = vp->v_object) != NULL &&
-	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0)) {
+	    vm_object_mightbedirty(obj))) {
 		VI_UNLOCK(vp);
 		BO_UNLOCK(bo);

@@ -140,7 +140,7 @@ ffs_rawread_sync(struct vnode *vp)
 	}
 	/* Attempt to msync mmap() regions to clean dirty mmap */
 	if ((obj = vp->v_object) != NULL &&
-	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
+	    vm_object_mightbedirty(obj)) {
 		VI_UNLOCK(vp);
 		VM_OBJECT_WLOCK(obj);
 		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
40 changes: 14 additions & 26 deletions sys/vm/vm_fault.c
@@ -210,7 +210,7 @@ unlock_and_deallocate(struct faultstate *fs)

 static void
 vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
-    vm_prot_t fault_type, int fault_flags, bool set_wd)
+    vm_prot_t fault_type, int fault_flags, bool excl)
 {
 	bool need_dirty;

@@ -226,11 +226,11 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
 	    (fault_flags & VM_FAULT_WIRE) == 0) ||
 	    (fault_flags & VM_FAULT_DIRTY) != 0;

-	if (set_wd)
-		vm_object_set_writeable_dirty(m->object);
-	else
+	vm_object_set_writeable_dirty(m->object);
+
+	if (!excl)
 		/*
-		 * If two callers of vm_fault_dirty() with set_wd ==
+		 * If two callers of vm_fault_dirty() with excl ==
 		 * FALSE, one for the map entry with MAP_ENTRY_NOSYNC
 		 * flag set, other with flag clear, race, it is
 		 * possible for the no-NOSYNC thread to see m->dirty
@@ -267,7 +267,7 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
 	 */
 	if (need_dirty)
 		vm_page_dirty(m);
-	if (!set_wd)
+	if (!excl)
 		vm_page_unlock(m);
 	else if (need_dirty)
 		vm_pager_page_unswapped(m);
@@ -758,29 +758,17 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
 	/*
 	 * Try to avoid lock contention on the top-level object through
 	 * special-case handling of some types of page faults, specifically,
-	 * those that are both (1) mapping an existing page from the top-
-	 * level object and (2) not having to mark that object as containing
-	 * dirty pages.  Under these conditions, a read lock on the top-level
-	 * object suffices, allowing multiple page faults of a similar type to
-	 * run in parallel on the same top-level object.
+	 * those that are mapping an existing page from the top-level object.
+	 * Under this condition, a read lock on the object suffices, allowing
+	 * multiple page faults of a similar type to run in parallel.
 	 */
 	if (fs.vp == NULL /* avoid locked vnode leak */ &&
-	    (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 &&
-	    /* avoid calling vm_object_set_writeable_dirty() */
-	    ((prot & VM_PROT_WRITE) == 0 ||
-	    (fs.first_object->type != OBJT_VNODE &&
-	    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
-	    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
+	    (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
 		VM_OBJECT_RLOCK(fs.first_object);
-		if ((prot & VM_PROT_WRITE) == 0 ||
-		    (fs.first_object->type != OBJT_VNODE &&
-		    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
-		    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0) {
-			rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
-			    fault_flags, wired, m_hold);
-			if (rv == KERN_SUCCESS)
-				return (rv);
-		}
+		rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
+		    fault_flags, wired, m_hold);
+		if (rv == KERN_SUCCESS)
+			return (rv);
 		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
 			VM_OBJECT_RUNLOCK(fs.first_object);
 			VM_OBJECT_WLOCK(fs.first_object);
75 changes: 30 additions & 45 deletions sys/vm/vm_object.c
@@ -112,10 +112,10 @@ SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
     "Use old (insecure) msync behavior");

 static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
-		    int pagerflags, int flags, boolean_t *clearobjflags,
+		    int pagerflags, int flags, boolean_t *allclean,
 		    boolean_t *eio);
 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
-		    boolean_t *clearobjflags);
+		    boolean_t *allclean);
 static void vm_object_qcollapse(vm_object_t object);
 static void vm_object_vndeallocate(vm_object_t object);

@@ -282,6 +282,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
 	object->size = size;
 	object->domain.dr_policy = NULL;
 	object->generation = 1;
+	object->cleangeneration = 1;
 	refcount_init(&object->ref_count, 1);
 	object->memattr = VM_MEMATTR_DEFAULT;
 	object->cred = NULL;
@@ -769,7 +770,7 @@ vm_object_terminate(vm_object_t object)
  * page should be flushed, and FALSE otherwise.
  */
 static boolean_t
-vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
+vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
 {

 	vm_page_assert_busied(p);
@@ -780,7 +781,7 @@ vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
 	 * cleared in this case so we do not have to set them.
 	 */
 	if ((flags & OBJPC_NOSYNC) != 0 && (p->aflags & PGA_NOSYNC) != 0) {
-		*clearobjflags = FALSE;
+		*allclean = FALSE;
 		return (FALSE);
 	} else {
 		pmap_remove_write(p);
@@ -813,16 +814,11 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 	vm_page_t np, p;
 	vm_pindex_t pi, tend, tstart;
 	int curgeneration, n, pagerflags;
-	boolean_t clearobjflags, eio, res;
+	boolean_t eio, res, allclean;

 	VM_OBJECT_ASSERT_WLOCKED(object);

-	/*
-	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
-	 * objects.  The check below prevents the function from
-	 * operating on non-vnode objects.
-	 */
-	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
+	if (object->type != OBJT_VNODE || !vm_object_mightbedirty(object) ||
 	    object->resident_page_count == 0)
 		return (TRUE);

@@ -832,7 +828,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,

 	tstart = OFF_TO_IDX(start);
 	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
-	clearobjflags = tstart == 0 && tend >= object->size;
+	allclean = tstart == 0 && tend >= object->size;
 	res = TRUE;

 rescan:
@@ -846,32 +842,26 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 		if (vm_page_none_valid(p))
 			continue;
 		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
-			if (object->generation != curgeneration) {
-				if ((flags & OBJPC_SYNC) != 0)
-					goto rescan;
-				else
-					clearobjflags = FALSE;
-			}
+			if (object->generation != curgeneration &&
+			    (flags & OBJPC_SYNC) != 0)
+				goto rescan;
 			np = vm_page_find_least(object, pi);
 			continue;
 		}
-		if (!vm_object_page_remove_write(p, flags, &clearobjflags)) {
+		if (!vm_object_page_remove_write(p, flags, &allclean)) {
 			vm_page_xunbusy(p);
 			continue;
 		}

 		n = vm_object_page_collect_flush(object, p, pagerflags,
-		    flags, &clearobjflags, &eio);
+		    flags, &allclean, &eio);
 		if (eio) {
 			res = FALSE;
-			clearobjflags = FALSE;
-		}
-		if (object->generation != curgeneration) {
-			if ((flags & OBJPC_SYNC) != 0)
-				goto rescan;
-			else
-				clearobjflags = FALSE;
+			allclean = FALSE;
 		}
+		if (object->generation != curgeneration &&
+		    (flags & OBJPC_SYNC) != 0)
+			goto rescan;

 		/*
 		 * If the VOP_PUTPAGES() did a truncated write, so
@@ -887,22 +877,22 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 		 */
 		if (n == 0) {
 			n = 1;
-			clearobjflags = FALSE;
+			allclean = FALSE;
 		}
 		np = vm_page_find_least(object, pi + n);
 	}
 #if 0
 	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
 #endif

-	if (clearobjflags)
-		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
+	if (allclean)
+		object->cleangeneration = curgeneration;
 	return (res);
 }

 static int
 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
-    int flags, boolean_t *clearobjflags, boolean_t *eio)
+    int flags, boolean_t *allclean, boolean_t *eio)
 {
 	vm_page_t ma[vm_pageout_page_count], p_first, tp;
 	int count, i, mreq, runlen;
@@ -918,7 +908,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
 		tp = vm_page_next(tp);
 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
 			break;
-		if (!vm_object_page_remove_write(tp, flags, clearobjflags)) {
+		if (!vm_object_page_remove_write(tp, flags, allclean)) {
 			vm_page_xunbusy(tp);
 			break;
 		}
@@ -928,7 +918,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
 		tp = vm_page_prev(p_first);
 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
 			break;
-		if (!vm_object_page_remove_write(tp, flags, clearobjflags)) {
+		if (!vm_object_page_remove_write(tp, flags, allclean)) {
 			vm_page_xunbusy(tp);
 			break;
 		}
@@ -993,7 +983,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 	 * I/O.
 	 */
 	if (object->type == OBJT_VNODE &&
-	    (object->flags & OBJ_MIGHTBEDIRTY) != 0 &&
+	    vm_object_mightbedirty(object) != 0 &&
 	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
 		VM_OBJECT_WUNLOCK(object);
 		(void) vn_start_write(vp, &mp, V_WAIT);
@@ -2130,18 +2120,13 @@ void
 vm_object_set_writeable_dirty(vm_object_t object)
 {

-	VM_OBJECT_ASSERT_WLOCKED(object);
-	if (object->type != OBJT_VNODE) {
-		if ((object->flags & OBJ_TMPFS_NODE) != 0) {
-			KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs"));
-			vm_object_set_flag(object, OBJ_TMPFS_DIRTY);
-		}
-		return;
-	}
-	object->generation++;
-	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
+	VM_OBJECT_ASSERT_LOCKED(object);
+
+	/* Only set for vnodes & tmpfs */
+	if (object->type != OBJT_VNODE &&
+	    (object->flags & OBJ_TMPFS_NODE) == 0)
 		return;
-	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
+	atomic_add_int(&object->generation, 1);
 }
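
Why the bump at the bottom of vm_object.c is an atomic add:
vm_object_set_writeable_dirty() now asserts only VM_OBJECT_ASSERT_LOCKED, so
multiple faulting threads holding the object lock shared can reach it
concurrently, and the clean-side snapshot/compare in vm_object_page_clean()
relies on every dirtying event advancing the counter. A plain increment is a
racy read-modify-write in that setting. A standalone demonstration of the
hazard (not kernel code; the plain counter's race is undefined behavior in
standard C and is shown only for contrast; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint generation;		/* the commit's approach */
static unsigned plain_generation;	/* racy plain counter, for contrast */

static void *
bump(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 1000000; i++) {
		atomic_fetch_add(&generation, 1);	/* never loses a bump */
		plain_generation++;			/* may lose bumps */
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, bump, NULL);
	pthread_create(&t2, NULL, bump, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("atomic %u, plain %u of 2000000\n",
	    atomic_load(&generation), plain_generation);
	return (0);
}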

/*
Expand Down