Mirror of https://github.com/freebsd/freebsd-src.git
- Remove GIANT_REQUIRED where Giant is no longer required.
- Use VFS_LOCK_GIANT() rather than directly acquiring Giant in places
  where Giant is only held because the VFS requires it.

Sponsored By:	Isilon Systems, Inc.
commit ae51ff1127
parent e9f3e3f8ca
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=140723
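All of the hunks below apply the same idiom: instead of unconditionally taking the Giant mutex around a vnode operation, the caller asks VFS_LOCK_GIANT() to take Giant only if the vnode's filesystem still needs it, remembers the answer in a local vfslocked, and hands that back to VFS_UNLOCK_GIANT() afterwards. The following minimal sketch illustrates the calling convention as it existed in this era of the tree; example_release_vnode() is a hypothetical helper written for illustration only, not code from this commit.

#include <sys/param.h>
#include <sys/mount.h>		/* VFS_LOCK_GIANT() / VFS_UNLOCK_GIANT() */
#include <sys/vnode.h>

/*
 * Hypothetical example of the conditional-Giant idiom used throughout
 * this change.  VFS_LOCK_GIANT() acquires Giant only when the vnode's
 * filesystem has not been marked MPSAFE and returns whether it did so;
 * VFS_UNLOCK_GIANT() releases Giant only in that case.
 */
static void
example_release_vnode(struct vnode *vp)
{
	int vfslocked;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);	/* maybe take Giant */
	vput(vp);					/* vnode op that may still need Giant */
	VFS_UNLOCK_GIANT(vfslocked);			/* drop Giant iff we took it */
}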
@@ -97,6 +97,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vnode_pager.h>
 #include <vm/vm_extern.h>
 
+#include <sys/mount.h>	/* XXX Temporary for VFS_LOCK_GIANT() */
+
 #define PFBAK 4
 #define PFFOR 4
 #define PAGEORDER_SIZE (PFBAK+PFFOR)
@@ -165,10 +167,12 @@ unlock_and_deallocate(struct faultstate *fs)
 	vm_object_deallocate(fs->first_object);
 	unlock_map(fs);
 	if (fs->vp != NULL) {
-		mtx_lock(&Giant);
+		int vfslocked;
+
+		vfslocked = VFS_LOCK_GIANT(fs->vp->v_mount);
 		vput(fs->vp);
-		mtx_unlock(&Giant);
 		fs->vp = NULL;
+		VFS_UNLOCK_GIANT(vfslocked);
 	}
 	if (!fs->map->system_map)
 		VM_UNLOCK_GIANT();
@@ -1067,11 +1067,14 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	struct vattr va;
 	void *handle;
 	vm_object_t obj;
+	struct mount *mp;
 	int error, flags, type;
+	int vfslocked;
 
-	mtx_lock(&Giant);
+	mp = vp->v_mount;
+	vfslocked = VFS_LOCK_GIANT(mp);
 	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
-		mtx_unlock(&Giant);
+		VFS_UNLOCK_GIANT(vfslocked);
 		return (error);
 	}
 	flags = *flagsp;
@@ -1157,7 +1160,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	*flagsp = flags;
 done:
 	vput(vp);
-	mtx_unlock(&Giant);
+	VFS_UNLOCK_GIANT(vfslocked);
 	return (error);
 }
 
@@ -393,7 +393,7 @@ vm_object_vndeallocate(vm_object_t object)
 {
 	struct vnode *vp = (struct vnode *) object->handle;
 
-	GIANT_REQUIRED;
+	VFS_ASSERT_GIANT(vp->v_mount);
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	KASSERT(object->type == OBJT_VNODE,
 	    ("vm_object_vndeallocate: not a vnode object"));
@@ -434,18 +434,22 @@ vm_object_deallocate(vm_object_t object)
 	vm_object_t temp;
 
 	while (object != NULL) {
+		int vfslocked;
 		/*
 		 * In general, the object should be locked when working with
 		 * its type. In this case, in order to maintain proper lock
 		 * ordering, an exception is possible because a vnode-backed
 		 * object never changes its type.
 		 */
-		if (object->type == OBJT_VNODE)
-			mtx_lock(&Giant);
+		vfslocked = 0;
+		if (object->type == OBJT_VNODE) {
+			struct vnode *vp = (struct vnode *) object->handle;
+			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+		}
 		VM_OBJECT_LOCK(object);
 		if (object->type == OBJT_VNODE) {
 			vm_object_vndeallocate(object);
-			mtx_unlock(&Giant);
+			VFS_UNLOCK_GIANT(vfslocked);
 			return;
 		}
 
@@ -671,7 +675,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 	int pagerflags;
 	int curgeneration;
 
-	GIANT_REQUIRED;
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	if (object->type != OBJT_VNODE ||
 	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
@@ -1000,9 +1003,10 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 	 */
 	if (object->type == OBJT_VNODE &&
 	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
+		int vfslocked;
 		vp = object->handle;
 		VM_OBJECT_UNLOCK(object);
-		mtx_lock(&Giant);
+		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
 		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
 		flags |= invalidate ? OBJPC_INVAL : 0;
@@ -1013,7 +1017,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 		    flags);
 		VM_OBJECT_UNLOCK(object);
 		VOP_UNLOCK(vp, 0, curthread);
-		mtx_unlock(&Giant);
+		VFS_UNLOCK_GIANT(vfslocked);
 		VM_OBJECT_LOCK(object);
 	}
 	if ((object->type == OBJT_VNODE ||
@@ -215,6 +215,7 @@ vnode_pager_haspage(object, pindex, before, after)
 	int poff;
 	int bsize;
 	int pagesperblock, blocksperpage;
+	int vfslocked;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	/*
@@ -248,9 +249,9 @@ vnode_pager_haspage(object, pindex, before, after)
 		reqblock = pindex * blocksperpage;
 	}
 	VM_OBJECT_UNLOCK(object);
-	mtx_lock(&Giant);
+	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
-	mtx_unlock(&Giant);
+	VFS_UNLOCK_GIANT(vfslocked);
 	VM_OBJECT_LOCK(object);
 	if (err)
 		return TRUE;
@@ -397,7 +398,6 @@ vnode_pager_addr(vp, address, run)
 	daddr_t vblock;
 	int voffset;
 
-	GIANT_REQUIRED;
 	if (address < 0)
 		return -1;
 
@@ -441,8 +441,6 @@ vnode_pager_input_smlfs(object, m)
 	vm_offset_t bsize;
 	int error = 0;
 
-	GIANT_REQUIRED;
-
 	vp = object->handle;
 	if (vp->v_mount == NULL)
 		return VM_PAGER_BAD;
@@ -619,14 +617,15 @@ vnode_pager_getpages(object, m, count, reqpage)
 	int rtval;
 	struct vnode *vp;
 	int bytes = count * PAGE_SIZE;
+	int vfslocked;
 
 	vp = object->handle;
 	VM_OBJECT_UNLOCK(object);
-	mtx_lock(&Giant);
+	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: FS getpages not implemented\n"));
-	mtx_unlock(&Giant);
+	VFS_UNLOCK_GIANT(vfslocked);
 	VM_OBJECT_LOCK(object);
 	return rtval;
 }
@@ -653,7 +652,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 	int count;
 	int error = 0;
 
-	GIANT_REQUIRED;
 	object = vp->v_object;
 	count = bytecount / PAGE_SIZE;
 
@@ -946,7 +944,6 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
 	struct mount *mp;
 	int bytes = count * PAGE_SIZE;
 
-	GIANT_REQUIRED;
 	/*
 	 * Force synchronous operation if we are extremely low on memory
 	 * to prevent a low-memory deadlock. VOP operations often need to
@@ -1006,7 +1003,6 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
 	int error;
 	int ioflags;
 
-	GIANT_REQUIRED;
 	object = vp->v_object;
 	count = bytecount / PAGE_SIZE;
 