#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/ext2_dinode.h>
#include <fs/ext2fs/ext2_extern.h>
#include <fs/ext2fs/ext2_mount.h>
int
ext2_bmap(struct vop_bmap_args *ap)
{
daddr_t blkno;
int error;
if (ap->a_bop != NULL)
*ap->a_bop = &VTOI(ap->a_vp)->i_devvp->v_bufobj;
if (ap->a_bnp == NULL)
return (0);
if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS)
error = ext4_bmapext(ap->a_vp, ap->a_bn, &blkno,
ap->a_runp, ap->a_runb);
else
error = ext2_bmaparray(ap->a_vp, ap->a_bn, &blkno,
ap->a_runp, ap->a_runb);
*ap->a_bnp = blkno;
return (error);
}
/*
 * Convert the logical block number bn of vp into a physical (device) block
 * number for an ext4 extent-mapped inode, storing the result in *bnp
 * (-1 denotes a hole).  If runp/runb are non-NULL, they receive the number
 * of contiguous blocks that follow/precede bn within the extent, clamped
 * to the mount's maximum I/O size.
 */
int
ext4_bmapext(struct vnode *vp, int32_t bn, int64_t *bnp, int *runp, int *runb)
{
        struct inode *ip;
        struct m_ext2fs *fs;
        struct mount *mp;
        struct ext2mount *ump;
        struct ext4_extent_header *ehp;
        struct ext4_extent *ep;
        struct ext4_extent_path *path = NULL;
        daddr_t lbn;
        int error, depth, maxrun = 0, bsize;

        ip = VTOI(vp);
        fs = ip->i_e2fs;
        mp = vp->v_mount;
        ump = VFSTOEXT2(mp);
        lbn = bn;
        /* The root of the extent tree is stored inline in the inode data. */
        ehp = (struct ext4_extent_header *)ip->i_data;
        depth = le16toh(ehp->eh_depth);
        bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);

        /* Default result: a hole. */
        *bnp = -1;
        if (runp != NULL) {
                /* Longest run reportable within one maximal I/O. */
                maxrun = mp->mnt_iosize_max / bsize - 1;
                *runp = 0;
        }
        if (runb != NULL)
                *runb = 0;

        error = ext4_ext_find_extent(ip, lbn, &path);
        if (error)
                return (error);
        /* Leaf extent nearest to lbn, or NULL if none exists. */
        ep = path[depth].ep_ext;
        if(ep) {
                if (lbn < le32toh(ep->e_blk)) {
                        /* lbn is in a hole before the extent: report how far
                         * the hole extends forward (up to the extent start). */
                        if (runp != NULL) {
                                *runp = min(maxrun, le32toh(ep->e_blk) - lbn - 1);
                        }
                } else if (le32toh(ep->e_blk) <= lbn &&
                    lbn < le32toh(ep->e_blk) + le16toh(ep->e_len)) {
                        /*
                         * lbn falls inside the extent: the physical block is
                         * the extent start (48-bit, split into lo/hi on disk)
                         * plus the offset of lbn into the extent.
                         */
                        *bnp = fsbtodb(fs, lbn - le32toh(ep->e_blk) +
                            (le32toh(ep->e_start_lo) |
                            (daddr_t)le16toh(ep->e_start_hi) << 32));
                        /* Blocks remaining after lbn within this extent. */
                        if (runp != NULL) {
                                *runp = min(maxrun,
                                    le16toh(ep->e_len) -
                                    (lbn - le32toh(ep->e_blk)) - 1);
                        }
                        /* Blocks preceding lbn within this extent. */
                        if (runb != NULL)
                                *runb = min(maxrun, lbn - le32toh(ep->e_blk));
                } else {
                        /*
                         * lbn lies past the end of the extent (still a hole).
                         * NOTE(review): the expression below computes
                         * e_blk + lbn - e_len; the expected distance back to
                         * the end of the extent would be lbn - (e_blk + e_len).
                         * This matches the historical code — confirm intent
                         * before changing.
                         */
                        if (runb != NULL)
                                *runb = min(maxrun, le32toh(ep->e_blk) + lbn -
                                    le16toh(ep->e_len));
                }
        }

        ext4_ext_path_free(path);

        return (error);
}
/*
 * Read the indirect block with logical block number lbn and physical
 * (filesystem) block number daddr into the buffer cache, returning the
 * locked buffer in *bpp.  If the block is already cached (B_CACHE set),
 * no I/O is issued.  On I/O failure the buffer is released and the error
 * is returned; *bpp is left untouched in that case.
 */
static int
readindir(struct vnode *vp, e2fs_lbn_t lbn, e2fs_daddr_t daddr, struct buf **bpp)
{
        struct buf *bp;
        struct mount *mp;
        struct ext2mount *ump;
        int error;

        mp = vp->v_mount;
        ump = VFSTOEXT2(mp);

        bp = getblk(vp, lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
        if ((bp->b_flags & B_CACHE) == 0) {
                /* An uncached indirect block must have an assigned address. */
                KASSERT(daddr != 0,
                    ("readindir: indirect block not in cache"));

                /* Set up and issue the read, then wait for it to complete. */
                bp->b_blkno = blkptrtodb(ump, daddr);
                bp->b_iocmd = BIO_READ;
                bp->b_flags &= ~B_INVAL;
                bp->b_ioflags &= ~BIO_ERROR;
                vfs_busy_pages(bp, 0);
                bp->b_iooffset = dbtob(bp->b_blkno);
                bstrategy(bp);
#ifdef RACCT
                /* Charge the read to the current process's resource limits. */
                if (racct_enable) {
                        PROC_LOCK(curproc);
                        racct_add_buf(curproc, bp, 0);
                        PROC_UNLOCK(curproc);
                }
#endif
                curthread->td_ru.ru_inblock++;
                error = bufwait(bp);
                if (error != 0) {
                        brelse(bp);
                        return (error);
                }
        }
        *bpp = bp;
        return (0);
}
/*
 * Convert the logical block number bn of vp to its physical block number
 * by walking the direct/indirect block chain, storing the result in *bnp
 * (-1 denotes a hole).  If runp/runb are non-NULL, they receive the number
 * of physically contiguous blocks following/preceding bn, clamped to the
 * mount's maximum I/O size.
 *
 * Indirect blocks live on the vnode itself under negative logical block
 * numbers (see ext2_getlbns for the addressing scheme).
 */
int
ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb)
{
        struct inode *ip;
        struct buf *bp;
        struct ext2mount *ump;
        struct mount *mp;
        struct indir a[EXT2_NIADDR + 1], *ap;
        daddr_t daddr;
        e2fs_lbn_t metalbn;
        int error, num, maxrun = 0, bsize;
        int *nump;

        ap = NULL;
        ip = VTOI(vp);
        mp = vp->v_mount;
        ump = VFSTOEXT2(mp);

        bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);

        if (runp) {
                /* Longest run reportable within one maximal I/O. */
                maxrun = mp->mnt_iosize_max / bsize - 1;
                *runp = 0;
        }
        if (runb)
                *runb = 0;

        /* Compute the chain of indirect blocks leading to bn. */
        ap = a;
        nump = &num;
        error = ext2_getlbns(vp, bn, ap, nump);
        if (error)
                return (error);

        num = *nump;
        if (num == 0) {
                /* bn is a direct block: translate it straight from i_db[]. */
                *bnp = blkptrtodb(ump, ip->i_db[bn]);
                if (*bnp == 0) {
                        *bnp = -1;      /* Unallocated: a hole. */
                } else if (runp) {
                        /*
                         * Scan forward, then backward, over the direct
                         * blocks counting physically sequential neighbors.
                         */
                        daddr_t bnb = bn;

                        for (++bn; bn < EXT2_NDADDR && *runp < maxrun &&
                            is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
                            ++bn, ++*runp);
                        bn = bnb;
                        if (runb && (bn > 0)) {
                                for (--bn; (bn >= 0) && (*runb < maxrun) &&
                                    is_sequential(ump, ip->i_db[bn],
                                    ip->i_db[bn + 1]);
                                    --bn, ++*runb);
                        }
                }
                return (0);
        }

        /* Get the disk address of the first indirect block in the chain. */
        daddr = ip->i_ib[ap->in_off];

        for (bp = NULL, ++ap; --num; ++ap) {
                /*
                 * Exit the loop if there is no disk address assigned yet and
                 * the indirect block isn't in the cache, or if we were
                 * looking for an indirect block and we've found it.
                 */
                metalbn = ap->in_lbn;
                if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
                        break;
                /*
                 * We have the block either cached or addressed: fetch it and
                 * pick up the pointer to the next level from it.
                 */
                if (bp)
                        bqrelse(bp);
                error = readindir(vp, metalbn, daddr, &bp);
                if (error != 0)
                        return (error);

                daddr = le32toh(((e2fs_daddr_t *)bp->b_data)[ap->in_off]);
                if (num == 1 && daddr && runp) {
                        /*
                         * At the last level of indirection: count sequential
                         * data blocks forward and backward from the target's
                         * slot in this indirect block.
                         */
                        for (bn = ap->in_off + 1;
                            bn < MNINDIR(ump) && *runp < maxrun &&
                            is_sequential(ump,
                            ((e2fs_daddr_t *)bp->b_data)[bn - 1],
                            ((e2fs_daddr_t *)bp->b_data)[bn]);
                            ++bn, ++*runp);
                        bn = ap->in_off;
                        if (runb && bn) {
                                for (--bn; bn >= 0 && *runb < maxrun &&
                                    is_sequential(ump,
                                    ((e2fs_daddr_t *)bp->b_data)[bn],
                                    ((e2fs_daddr_t *)bp->b_data)[bn + 1]);
                                    --bn, ++*runb);
                        }
                }
        }
        if (bp)
                bqrelse(bp);

        *bnp = blkptrtodb(ump, daddr);
        if (*bnp == 0) {
                *bnp = -1;      /* Unallocated: a hole. */
        }
        return (0);
}
/*
 * Number of data blocks addressable through 'level' levels of indirection:
 * MNINDIR(ump) raised to the power 'level' (1 for level 0).
 */
static e2fs_lbn_t
lbn_count(struct ext2mount *ump, int level)
{
        e2fs_lbn_t count = 1;

        while (level-- > 0)
                count *= MNINDIR(ump);
        return (count);
}
/*
 * Advance *offp to the start of the next data block at or after *offp,
 * skipping over holes (the SEEK_DATA lseek operation).  Returns EINVAL for
 * non-regular files and ENXIO when *offp is out of range or no data exists
 * past it; on success *offp is rounded up to the found block's offset.
 */
int
ext2_bmap_seekdata(struct vnode *vp, off_t *offp)
{
        struct buf *bp;
        struct indir a[EXT2_NIADDR + 1], *ap;
        struct inode *ip;
        struct mount *mp;
        struct ext2mount *ump;
        e2fs_daddr_t bn, daddr, nextbn;
        uint64_t bsize;
        off_t numblks;
        int error, num, num1, off;

        bp = NULL;
        error = 0;
        ip = VTOI(vp);
        mp = vp->v_mount;
        ump = VFSTOEXT2(mp);

        if (vp->v_type != VREG)
                return (EINVAL);
        if (*offp < 0 || *offp >= ip->i_size)
                return (ENXIO);

        bsize = mp->mnt_stat.f_iosize;
        /* Walk the file block by block (or hole-span by hole-span). */
        for (bn = *offp / bsize, numblks = howmany(ip->i_size, bsize);
            bn < numblks; bn = nextbn) {
                if (bn < EXT2_NDADDR) {
                        /* Direct blocks: an assigned pointer means data. */
                        daddr = ip->i_db[bn];
                        if (daddr != 0)
                                break;
                        nextbn = bn + 1;
                        continue;
                }

                /* Compute the indirect chain leading to bn. */
                ap = a;
                error = ext2_getlbns(vp, bn, ap, &num);
                if (error != 0)
                        break;
                /* An indirect bn always needs at least root + one level. */
                MPASS(num >= 2);
                daddr = ip->i_ib[ap->in_off];
                ap++, num--;
                /*
                 * Precompute the logical block where the region mapped by
                 * the remaining (shallower) levels of this chain begins.
                 */
                for (nextbn = EXT2_NDADDR, num1 = num - 1; num1 > 0; num1--)
                        nextbn += lbn_count(ump, num1);
                if (daddr == 0) {
                        /* Whole subtree unallocated: skip it entirely. */
                        nextbn += lbn_count(ump, num);
                        continue;
                }

                /*
                 * Descend the allocated indirect blocks, at each level
                 * scanning forward from the target slot for the first
                 * non-zero (allocated) pointer.
                 */
                for (; daddr != 0 && num > 0; ap++, num--) {
                        if (bp != NULL)
                                bqrelse(bp);
                        error = readindir(vp, ap->in_lbn, daddr, &bp);
                        if (error != 0)
                                return (error);

                        /*
                         * Scan the indirect block until we find a non-zero
                         * pointer.
                         */
                        off = ap->in_off;
                        do {
                                daddr = le32toh(((e2fs_daddr_t *)bp->b_data)[off]);
                        } while (daddr == 0 && ++off < MNINDIR(ump));
                        nextbn += off * lbn_count(ump, num - 1);

                        /*
                         * We need to recompute the LBNs of indirect
                         * blocks, so restart with the updated block offset.
                         */
                        if (off != ap->in_off)
                                break;
                }
                /*
                 * If num == 0, we descended to the data block and it is
                 * allocated: found the next data offset.
                 */
                if (num == 0) {
                        bn = nextbn;
                        break;
                }
        }
        if (bp != NULL)
                bqrelse(bp);
        if (bn >= numblks)
                error = ENXIO;
        if (error == 0 && *offp < bn * bsize)
                *offp = bn * bsize;
        return (error);
}
/*
 * Create an array of logical block numbers (and offsets within them) to be
 * traversed to reach data block bn.  Indirect blocks are addressed by
 * negative logical block numbers: an indirect block's lbn is the negative
 * of the first data block it maps (double/triple indirect blocks use one
 * less than the first indirect/double-indirect block they point to).
 * On return *nump holds the number of levels of indirection (0 for a
 * direct block).  Returns EFBIG if bn is beyond the maximum addressable
 * block.
 */
int
ext2_getlbns(struct vnode *vp, daddr_t bn, struct indir *ap, int *nump)
{
        long blockcnt;
        e2fs_lbn_t metalbn, realbn;
        struct ext2mount *ump;
        int i, numlevels, off;
        int64_t qblockcnt;

        ump = VFSTOEXT2(vp->v_mount);
        if (nump)
                *nump = 0;
        numlevels = 0;
        realbn = bn;
        /* A negative bn names a metadata (indirect) block; use |bn|. */
        if ((long)bn < 0)
                bn = -(long)bn;

        /* The first EXT2_NDADDR blocks are direct: no indirection needed. */
        if (bn < EXT2_NDADDR)
                return (0);

        /*
         * Determine the number of levels of indirection.  After this loop is
         * done, blockcnt indicates the number of data blocks possible at the
         * previous level of indirection, and EXT2_NIADDR - i is the number
         * of levels of indirection needed to locate the requested block.
         */
        for (blockcnt = 1, i = EXT2_NIADDR, bn -= EXT2_NDADDR; ;
            i--, bn -= blockcnt) {
                if (i == 0)
                        return (EFBIG);
                /*
                 * Use 64-bit math here to avoid overflow of the per-level
                 * block count on 32-bit longs with large block sizes.
                 */
                qblockcnt = (int64_t)blockcnt * MNINDIR(ump);
                if (bn < qblockcnt)
                        break;
                blockcnt = qblockcnt;
        }

        /* Calculate the address of the first meta-block. */
        if (realbn >= 0)
                metalbn = -(realbn - bn + EXT2_NIADDR - i);
        else
                metalbn = -(-realbn - bn + EXT2_NIADDR - i);

        /*
         * At each iteration, off is the offset into the bap array which is
         * an array of disk addresses at the current level of indirection;
         * metalbn is the logical block number of the indirect block accessed
         * at that level.  The first entry selects which of the inode's
         * indirect pointers (i_ib[]) starts the chain.
         */
        ap->in_lbn = metalbn;
        ap->in_off = off = EXT2_NIADDR - i;
        ap++;
        for (++numlevels; i <= EXT2_NIADDR; i++) {
                /* If searching for a meta-data block, quit when found. */
                if (metalbn == realbn)
                        break;

                off = (bn / blockcnt) % MNINDIR(ump);

                ++numlevels;
                ap->in_lbn = metalbn;
                ap->in_off = off;
                ++ap;

                /* Step down one level: lbn of the next indirect block. */
                metalbn -= -1 + off * blockcnt;
                blockcnt /= MNINDIR(ump);
        }
        if (nump)
                *nump = numlevels;
        return (0);
}
}