Mirror of https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git
Synced 2025-01-11 17:04:19 +01:00
- If the caller specifies readbehind and readahead that together with
  count don't fit into a buf, trim readbehind and readahead evenly. If
  rbehind was limited by the previous BMAP, round its trim up to the
  block size.
- Add a KASSERT to check that b_blkno has the proper offset from the
  original blkno returned by BMAP. [1]
- Add a KASSERT to check that the pages in a buf are consecutive.

Reviewed by:	kib
Submitted by:	kib [1]
parent b22fe30db1
commit e48b82bd83
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=308778
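
The first bullet of the commit message above describes proportional trimming of the read-behind/read-ahead window. Below is a minimal userspace sketch of that arithmetic, assuming nitems(bp->b_pages) is 32 (in the kernel it is btoc(MAXPHYS), i.e. 32 with 128k MAXPHYS and 4k pages); BUF_PAGES, trim_even() and the numbers in main() are hypothetical stand-ins, not kernel code.

/*
 * Minimal sketch of the even-trim arithmetic from this change.
 * BUF_PAGES stands in for nitems(bp->b_pages).
 */
#include <assert.h>

#define	BUF_PAGES	32
#define	roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))  /* as in sys/param.h */

static void
trim_even(int *rbehind, int *rahead, int count, int before,
    int pagesperblock)
{
	int trim, sum;

	if (*rbehind + *rahead + count <= BUF_PAGES)
		return;
	/*
	 * One page of slack: the two floored shares subtracted below
	 * may together remove one page less than 'trim'.
	 */
	trim = *rbehind + *rahead + count - BUF_PAGES + 1;
	sum = *rbehind + *rahead;
	if (*rbehind == before) {
		/* rbehind was limited by BMAP: trim whole blocks. */
		*rbehind -= roundup(trim * *rbehind / sum, pagesperblock);
		if (*rbehind < 0)
			*rbehind = 0;
	} else
		*rbehind -= trim * *rbehind / sum;
	*rahead -= trim * *rahead / sum;
}

int
main(void)
{
	int rbehind = 20, rahead = 16;

	trim_even(&rbehind, &rahead, 8, 0, 4);	/* not BMAP-limited */
	assert(rbehind == 13 && rahead == 11);	/* 13 + 11 + 8 == 32 */
	return (0);
}

Reading the code, the "+ 1" exists because each proportional share is floored: together the two subtractions remove at least trim - 1 pages, which is exactly the excess, so the KASSERT that follows the trim always holds.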
@@ -743,6 +743,9 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 	struct bufobj *bo;
 	struct buf *bp;
 	off_t foff;
+#ifdef INVARIANTS
+	off_t blkno0;
+#endif
 	int bsize, pagesperblock, *freecnt;
 	int error, before, after, rbehind, rahead, poff, i;
 	int bytecount, secmask;
@@ -843,6 +846,9 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 		return (VM_PAGER_OK);
 	}
 
+#ifdef INVARIANTS
+	blkno0 = bp->b_blkno;
+#endif
 	bp->b_blkno += (foff % bsize) / DEV_BSIZE;
 
 	/* Recalculate blocks available after/before to pages. */
@@ -864,7 +870,25 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 	rbehind = min(rbehind, m[0]->pindex);
 	rahead = min(rahead, after);
 	rahead = min(rahead, object->size - m[count - 1]->pindex);
-	KASSERT(rbehind + rahead + count <= sizeof(bp->b_pages),
+	/*
+	 * Check that total amount of pages fit into buf.  Trim rbehind and
+	 * rahead evenly if not.
+	 */
+	if (rbehind + rahead + count > nitems(bp->b_pages)) {
+		int trim, sum;
+
+		trim = rbehind + rahead + count - nitems(bp->b_pages) + 1;
+		sum = rbehind + rahead;
+		if (rbehind == before) {
+			/* Roundup rbehind trim to block size. */
+			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
+			if (rbehind < 0)
+				rbehind = 0;
+		} else
+			rbehind -= trim * rbehind / sum;
+		rahead -= trim * rahead / sum;
+	}
+	KASSERT(rbehind + rahead + count <= nitems(bp->b_pages),
 	    ("%s: behind %d ahead %d count %d", __func__,
 	    rbehind, rahead, count));
 
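For the BMAP-limited branch in the hunk above (rbehind == before), here is a hypothetical worked example, again assuming a 32-entry b_pages array; all values are made up for illustration.

#include <assert.h>

#define	roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))  /* as in sys/param.h */

int
main(void)
{
	int before = 16, rbehind = 16, rahead = 24, count = 8;
	int pagesperblock = 8, trim, sum;

	trim = rbehind + rahead + count - 32 + 1;	/* 17 */
	sum = rbehind + rahead;				/* 40 */
	assert(rbehind == before);
	/* 17 * 16 / 40 == 6, rounded up to one whole block of 8 pages. */
	rbehind -= roundup(trim * rbehind / sum, pagesperblock);
	rahead -= trim * rahead / sum;			/* 24 - 10 */
	assert(rbehind == 8 && rahead == 14);
	assert(rbehind + rahead + count <= 32);
	return (0);
}

Rounding the read-behind trim up to a block boundary keeps the front of the buf aligned with the filesystem block that BMAP resolved, rather than starting mid-block.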
@@ -947,8 +971,14 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 	if (a_rahead)
 		*a_rahead = bp->b_pgafter;
 
+#ifdef INVARIANTS
 	KASSERT(bp->b_npages <= nitems(bp->b_pages),
 	    ("%s: buf %p overflowed", __func__, bp));
+	for (int j = 1; j < bp->b_npages; j++)
+		KASSERT(bp->b_pages[j]->pindex - 1 ==
+		    bp->b_pages[j - 1]->pindex,
+		    ("%s: pages array not consecutive, bp %p", __func__, bp));
+#endif
 
 	/*
 	 * Recalculate first offset and bytecount with regards to read behind.
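The loop added above asserts that every page in the buf directly follows its predecessor in the object. A minimal userspace analogue, where vm_pindex_t and the arrays are stand-ins rather than kernel code:

#include <assert.h>
#include <stdint.h>

typedef uint64_t vm_pindex_t;

static int
pages_consecutive(const vm_pindex_t *pindex, int npages)
{
	int j;

	/* Same shape as the new loop: page j must directly follow j - 1. */
	for (j = 1; j < npages; j++)
		if (pindex[j] - 1 != pindex[j - 1])
			return (0);
	return (1);
}

int
main(void)
{
	vm_pindex_t ok[] = { 8, 9, 10, 11 };
	vm_pindex_t gap[] = { 8, 9, 11, 12 };	/* hole where page 10 should be */

	assert(pages_consecutive(ok, 4));
	assert(!pages_consecutive(gap, 4));
	return (0);
}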
@@ -987,6 +1017,13 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 	bp->b_vp = vp;
 	bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
 	bp->b_iooffset = dbtob(bp->b_blkno);
+	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
+	    (blkno0 - bp->b_blkno) * DEV_BSIZE +
+	    IDX_TO_OFF(m[0]->pindex) % bsize,
+	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
+	    "blkno0 %ju b_blkno %ju", bsize,
+	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
+	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));
 
 	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
 	PCPU_INC(cnt.v_vnodein);
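The KASSERT added above ties together how far the buf's first page sits behind the first requested page and how far b_blkno was wound back from the blkno that BMAP returned. A userspace sketch with made-up numbers (bsize, blkno0, the pindex values, and the read-behind rewind are all hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SHIFT	12
#define	DEV_BSIZE	512
#define	IDX_TO_OFF(i)	((int64_t)(i) << PAGE_SHIFT)

int
main(void)
{
	int bsize = 32768;		/* filesystem block size */
	int64_t m0_pindex = 10;		/* first requested page */
	int64_t pg0_pindex = 8;		/* first page in buf (2 read-behind) */
	int64_t blkno0 = 1000;		/* b_blkno as returned by BMAP */
	int64_t foff = IDX_TO_OFF(m0_pindex);
	/* b_blkno after the foff adjustment and the read-behind rewind: */
	int64_t b_blkno = blkno0 + (foff % bsize) / DEV_BSIZE -
	    IDX_TO_OFF(m0_pindex - pg0_pindex) / DEV_BSIZE;

	/* The identity the new KASSERT enforces: */
	assert(IDX_TO_OFF(m0_pindex - pg0_pindex) ==
	    (blkno0 - b_blkno) * DEV_BSIZE + IDX_TO_OFF(m0_pindex) % bsize);
	printf("offset identity holds\n");
	return (0);
}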