Mirror of https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git, synced 2024-11-22 03:04:34 +01:00
vfs_subr: optimize inval_buf_range
Use a pctrie_lookup to avoid walking over low out-of-range buf list entries, and an early break to avoid the high out-of-range entries. Avoid writing almost identical loops for the dirty and clean lists. Because pctries are built for unsigned keys, and these are signed values, handle the wraparound problem the same way that bnoreuselist() does.

Reviewed by:	kib
Tested by:	pho
Differential Revision:	https://reviews.freebsd.org/D46963
This commit is contained in:
parent 7763b194d8
commit 2c8caa4b39
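The wraparound handling mentioned in the commit message is easy to miss in the diff below. Here is a small standalone sketch, not kernel code: a sorted uint64_t array and a lookup_ge() helper stand in for the buf pctrie and BUF_PCTRIE_LOOKUP_GE(), and the block numbers are made up. It shows why a greater-or-equal lookup on unsigned keys has to be re-checked against the signed [startlbn, endlbn) range, the same way bnoreuselist() does.

/*
 * Standalone model of the range check (not kernel code).  The pctrie
 * orders entries by the unsigned value of the signed daddr_t lblkno,
 * so negative block numbers sort after all non-negative ones, and a
 * hit from the unsigned lookup must be re-checked against the signed
 * range.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Keys in the order an unsigned-keyed trie would store them. */
static const uint64_t keys[] = {
	0, 3, 7, 42, (uint64_t)-2, (uint64_t)-1	/* -2 and -1 sort last */
};
#define	NKEYS	(sizeof(keys) / sizeof(keys[0]))

/* First key >= start in unsigned order, mimicking a _LOOKUP_GE call. */
static bool
lookup_ge(uint64_t start, uint64_t *out)
{
	for (size_t i = 0; i < NKEYS; i++) {
		if (keys[i] >= start) {
			*out = keys[i];
			return (true);
		}
	}
	return (false);
}

int
main(void)
{
	int64_t startlbn = 5, endlbn = 50;	/* invalidate [5, 50) */
	uint64_t key;

	for (uint64_t next = (uint64_t)startlbn; lookup_ge(next, &key);
	    next = key + 1) {
		int64_t lblkno = (int64_t)key;

		/*
		 * The unsigned lookup can return a negative lblkno (a
		 * huge unsigned key); any out-of-range hit means there
		 * are no more buffers in [startlbn, endlbn).
		 */
		if (lblkno >= endlbn || lblkno < startlbn)
			break;
		printf("would invalidate lblkno %jd\n", (intmax_t)lblkno);
	}
	return (0);
}

With the sample keys this prints only 7 and 42, then stops when the lookup wraps around to the negative block numbers.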
@@ -2616,17 +2616,25 @@ static int
 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
     daddr_t startlbn, daddr_t endlbn)
 {
+	struct bufv *bv;
 	struct buf *bp, *nbp;
-	bool anyfreed;
+	uint8_t anyfreed;
+	bool clean;
 
 	ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked");
 	ASSERT_BO_LOCKED(bo);
 
+	anyfreed = 1;
+	clean = true;
 	do {
-		anyfreed = false;
-		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
-			if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
-				continue;
+		bv = clean ? &bo->bo_clean : &bo->bo_dirty;
+		bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, startlbn);
+		if (bp == NULL || bp->b_lblkno >= endlbn ||
+		    bp->b_lblkno < startlbn)
+			continue;
+		TAILQ_FOREACH_FROM_SAFE(bp, &bv->bv_hd, b_bobufs, nbp) {
+			if (bp->b_lblkno >= endlbn)
+				break;
 			if (BUF_LOCK(bp,
 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 			    BO_LOCKPTR(bo)) == ENOLCK) {
@@ -2638,39 +2646,17 @@ v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
 			bp->b_flags |= B_INVAL | B_RELBUF;
 			bp->b_flags &= ~B_ASYNC;
 			brelse(bp);
-			anyfreed = true;
+			anyfreed = 2;
 
 			BO_LOCK(bo);
 			if (nbp != NULL &&
-			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
+			    (((nbp->b_xflags &
+			    (clean ? BX_VNCLEAN : BX_VNDIRTY)) == 0) ||
 			    nbp->b_vp != vp ||
-			    (nbp->b_flags & B_DELWRI) != 0))
+			    (nbp->b_flags & B_DELWRI) == (clean? B_DELWRI: 0)))
 				return (EAGAIN);
 		}
-
-		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
-			if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
-				continue;
-			if (BUF_LOCK(bp,
-			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
-			    BO_LOCKPTR(bo)) == ENOLCK) {
-				BO_LOCK(bo);
-				return (EAGAIN);
-			}
-			bremfree(bp);
-			bp->b_flags |= B_INVAL | B_RELBUF;
-			bp->b_flags &= ~B_ASYNC;
-			brelse(bp);
-			anyfreed = true;
-
-			BO_LOCK(bo);
-			if (nbp != NULL &&
-			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
-			    (nbp->b_vp != vp) ||
-			    (nbp->b_flags & B_DELWRI) == 0))
-				return (EAGAIN);
-		}
-	} while (anyfreed);
+	} while (clean = !clean, anyfreed-- > 0);
 	return (0);
 }
 
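The new exit condition, "} while (clean = !clean, anyfreed-- > 0);", packs the old two-loop retry logic into one line. Below is a standalone illustration, not kernel code: a stub freed flag replaces the real buffer work, and the pass count is invented. It shows how bumping the uint8_t counter to 2 on every pass that releases a buffer forces at least one more trip over each of the two lists, so the loop only exits after a full free-less sweep of both, matching the behavior of the old bool-driven pair of loops.

/*
 * Standalone illustration of the merged loop's termination (not
 * kernel code).  anyfreed starts at 1 so each list gets one pass,
 * and is reset to 2 whenever a pass frees something, which buys one
 * more pass over each list before the condition can become false.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t anyfreed;
	bool clean;
	int pass = 0;

	anyfreed = 1;
	clean = true;
	do {
		/* Pretend only the first (clean-list) pass frees a buffer. */
		bool freed = (pass == 0);

		if (freed)
			anyfreed = 2;
		printf("pass %d: %s list, freed=%d\n",
		    pass, clean ? "clean" : "dirty", (int)freed);
		pass++;
	} while (clean = !clean, anyfreed-- > 0);
	return (0);
}

Running this prints a clean pass that frees, then one dirty pass and one clean pass that free nothing, after which the loop exits.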