* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
/*
- * fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
+ * fs/xfs/linux/xfs_iomap.c (Linux Read Write stuff)
*
*/
#include <xfs.h>
#include <linux/pagemap.h>
-#include <linux/capability.h>
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
#define XFS_STRAT_WRITE_IMAPS 2
-
-STATIC int xfs_iomap_read(xfs_iocore_t *, loff_t, size_t, int, page_buf_bmap_t *,
- int *);
-STATIC int xfs_iomap_write(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
- int *, int);
-STATIC int xfs_iomap_write_delay(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
- int *, int, int);
-STATIC int xfs_iomap_write_direct(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
- int *, int, int);
-STATIC int _xfs_imap_to_bmap(xfs_iocore_t *, xfs_off_t, xfs_bmbt_irec_t *,
- page_buf_bmap_t *, int, int);
-
-
-int
-xfs_strategy(
- xfs_inode_t *ip,
- xfs_off_t offset,
- ssize_t count,
- int flags,
- page_buf_bmap_t *pbmapp,
- int *npbmaps)
-{
- xfs_iocore_t *io;
- xfs_mount_t *mp;
- int error;
- xfs_fileoff_t offset_fsb;
- xfs_fileoff_t end_fsb;
- xfs_fileoff_t map_start_fsb;
- xfs_fileoff_t last_block;
- xfs_fsblock_t first_block;
- xfs_bmap_free_t free_list;
- xfs_filblks_t count_fsb;
- int committed, i, loops, nimaps;
- int is_xfs;
- xfs_bmbt_irec_t imap[XFS_MAX_RW_NBMAPS];
- xfs_trans_t *tp;
-
- mp = ip->i_mount;
- io = &ip->i_iocore;
- is_xfs = IO_IS_XFS(io);
- ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
- ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
- ((io->io_flags & XFS_IOCORE_RT) != 0));
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return XFS_ERROR(EIO);
-
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- nimaps = min(XFS_MAX_RW_NBMAPS, *npbmaps);
- end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
- first_block = NULLFSBLOCK;
-
- XFS_ILOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
- error = XFS_BMAPI(mp, NULL, io, offset_fsb,
- (xfs_filblks_t)(end_fsb - offset_fsb),
- XFS_BMAPI_ENTIRE, &first_block, 0, imap,
- &nimaps, NULL);
- XFS_IUNLOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
- if (error) {
- return XFS_ERROR(error);
- }
-
- if (nimaps && !ISNULLSTARTBLOCK(imap[0].br_startblock)) {
- *npbmaps = _xfs_imap_to_bmap(&ip->i_iocore, offset, imap,
- pbmapp, nimaps, *npbmaps);
- return 0;
- }
-
- /*
- * Make sure that the dquots are there.
- */
-
- if (XFS_IS_QUOTA_ON(mp)) {
- if (XFS_NOT_DQATTACHED(mp, ip)) {
- if ((error = xfs_qm_dqattach(ip, 0))) {
- return XFS_ERROR(error);
- }
- }
- }
- XFS_STATS_ADD(xfsstats.xs_xstrat_bytes,
- XFS_FSB_TO_B(mp, imap[0].br_blockcount));
-
- offset_fsb = imap[0].br_startoff;
- count_fsb = imap[0].br_blockcount;
- map_start_fsb = offset_fsb;
- while (count_fsb != 0) {
- /*
- * Set up a transaction with which to allocate the
- * backing store for the file. Do allocations in a
- * loop until we get some space in the range we are
- * interested in. The other space that might be allocated
- * is in the delayed allocation extent on which we sit
- * but before our buffer starts.
- */
- nimaps = 0;
- loops = 0;
- while (nimaps == 0) {
- if (is_xfs) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
- error = xfs_trans_reserve(tp, 0,
- XFS_WRITE_LOG_RES(mp),
- 0, XFS_TRANS_PERM_LOG_RES,
- XFS_WRITE_LOG_COUNT);
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto error0;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip,
- XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
- } else {
- tp = NULL;
- XFS_ILOCK(mp, io, XFS_ILOCK_EXCL |
- XFS_EXTSIZE_WR);
- }
-
-
- /*
- * Allocate the backing store for the file.
- */
- XFS_BMAP_INIT(&(free_list),
- &(first_block));
- nimaps = XFS_STRAT_WRITE_IMAPS;
-
- /*
- * Ensure we don't go beyond eof - it is possible
- * the extents changed since we did the read call,
- * we dropped the ilock in the interim.
- */
-
- end_fsb = XFS_B_TO_FSB(mp, XFS_SIZE(mp, io));
- xfs_bmap_last_offset(NULL, ip, &last_block,
- XFS_DATA_FORK);
- last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
- if ((map_start_fsb + count_fsb) > last_block) {
- count_fsb = last_block - map_start_fsb;
- if (count_fsb == 0) {
- if (is_xfs) {
- xfs_bmap_cancel(&free_list);
- xfs_trans_cancel(tp,
- (XFS_TRANS_RELEASE_LOG_RES |
- XFS_TRANS_ABORT));
- }
- XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
- XFS_EXTSIZE_WR);
- return XFS_ERROR(EAGAIN);
- }
- }
-
- error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
- XFS_BMAPI_WRITE, &first_block, 1,
- imap, &nimaps, &free_list);
- if (error) {
- xfs_bmap_cancel(&free_list);
- xfs_trans_cancel(tp,
- (XFS_TRANS_RELEASE_LOG_RES |
- XFS_TRANS_ABORT));
- XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
- XFS_EXTSIZE_WR);
-
- goto error0;
- }
-
- if (is_xfs) {
- error = xfs_bmap_finish(&(tp), &(free_list),
- first_block, &committed);
- if (error) {
- xfs_bmap_cancel(&free_list);
- xfs_trans_cancel(tp,
- (XFS_TRANS_RELEASE_LOG_RES |
- XFS_TRANS_ABORT));
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- goto error0;
- }
-
- error = xfs_trans_commit(tp,
- XFS_TRANS_RELEASE_LOG_RES,
- NULL);
- if (error) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- goto error0;
- }
- }
-
- if (nimaps == 0) {
- XFS_IUNLOCK(mp, io,
- XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
- } /* else hold 'till we maybe loop again below */
- }
-
- /*
- * See if we were able to allocate an extent that
- * covers at least part of the user's requested size.
- */
-
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- for (i = 0; i < nimaps; i++) {
- int maps;
-
- if ((offset_fsb >= imap[i].br_startoff) &&
- (offset_fsb <
- (imap[i].br_startoff + imap[i].br_blockcount))) {
-
- XFS_IUNLOCK(mp, io,
- XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
- maps = min(nimaps, *npbmaps);
- *npbmaps = _xfs_imap_to_bmap(io, offset,
- &imap[i], pbmapp,
- maps, *npbmaps);
- XFS_STATS_INC(xfsstats.xs_xstrat_quick);
- return 0;
- }
- count_fsb -= imap[i].br_blockcount; /* for next bmapi,
- if needed. */
- }
-
- /*
- * We didn't get an extent the caller can write into so
- * loop around and try starting after the last imap we got back.
- */
-
- nimaps--; /* Index of last entry */
- ASSERT(nimaps >= 0);
- ASSERT(offset_fsb >=
- imap[nimaps].br_startoff + imap[nimaps].br_blockcount);
- ASSERT(count_fsb);
- offset_fsb =
- imap[nimaps].br_startoff + imap[nimaps].br_blockcount;
- map_start_fsb = offset_fsb;
- XFS_STATS_INC(xfsstats.xs_xstrat_split);
- XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
- }
-
- ASSERT(0); /* Should never get here */
-
- error0:
- if (error) {
- ASSERT(count_fsb != 0);
- ASSERT(is_xfs || XFS_FORCED_SHUTDOWN(mp));
- }
-
- return XFS_ERROR(error);
-}
-
-
-/*
- * xfs_bmap() is the same as the irix xfs_bmap from xfs_rw.c
- * execpt for slight changes to the params
- */
-int
-xfs_bmap(bhv_desc_t *bdp,
- xfs_off_t offset,
- ssize_t count,
- int flags,
- page_buf_bmap_t *pbmapp,
- int *npbmaps)
-{
- xfs_inode_t *ip;
- int error;
- int lockmode;
- int fsynced = 0;
- vnode_t *vp;
-
- ip = XFS_BHVTOI(bdp);
- ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
- ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
- ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
-
- if (XFS_FORCED_SHUTDOWN(ip->i_iocore.io_mount))
- return XFS_ERROR(EIO);
-
- if (flags & PBF_READ) {
- lockmode = xfs_ilock_map_shared(ip);
- error = xfs_iomap_read(&ip->i_iocore, offset, count,
- XFS_BMAPI_ENTIRE, pbmapp, npbmaps);
- xfs_iunlock_map_shared(ip, lockmode);
- } else if (flags & PBF_FILE_ALLOCATE) {
- error = xfs_strategy(ip, offset, count, flags,
- pbmapp, npbmaps);
- } else { /* PBF_WRITE */
- ASSERT(flags & PBF_WRITE);
- vp = BHV_TO_VNODE(bdp);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- /*
- * Make sure that the dquots are there. This doesn't hold
- * the ilock across a disk read.
- */
-
- if (XFS_IS_QUOTA_ON(ip->i_mount)) {
- if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
- if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return XFS_ERROR(error);
- }
- }
- }
-retry:
- error = xfs_iomap_write(&ip->i_iocore, offset, count,
- pbmapp, npbmaps, flags);
- /* xfs_iomap_write unlocks/locks/unlocks */
-
- if (error == ENOSPC) {
- switch (fsynced) {
- case 0:
- if (ip->i_delayed_blks) {
- filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
- fsynced = 1;
- } else {
- fsynced = 2;
- flags |= PBF_SYNC;
- }
- error = 0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- goto retry;
- case 1:
- fsynced = 2;
- if (!(flags & PBF_SYNC)) {
- flags |= PBF_SYNC;
- error = 0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- goto retry;
- }
- case 2:
- sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
- xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
- XFS_LOG_FORCE|XFS_LOG_SYNC);
-
- error = 0;
-/**
- delay(HZ);
-**/
- fsynced++;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- goto retry;
- }
- }
- }
-
- return XFS_ERROR(error);
-}
-
+#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
STATIC int
_xfs_imap_to_bmap(
{
xfs_mount_t *mp;
xfs_fsize_t nisize;
- int im, pbm;
+ int pbm;
xfs_fsblock_t start_block;
mp = io->io_mount;
if (io->io_new_size > nisize)
nisize = io->io_new_size;
- for (im=pbm=0; im < imaps && pbm < pbmaps; im++,pbmapp++,imap++,pbm++) {
+ for (pbm = 0; imaps && pbm < pbmaps; imaps--, pbmapp++, imap++, pbm++) {
pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
mp->m_rtdev_targp : mp->m_ddev_targp;
pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
pbmapp->pbm_flags = PBMF_DELAY;
} else {
pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block);
- if (ISUNWRITTEN(imap)) {
+ if (ISUNWRITTEN(imap))
pbmapp->pbm_flags |= PBMF_UNWRITTEN;
- }
}
if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) {
return pbm; /* Return the number filled */
}
-STATIC int
-xfs_iomap_read(
- xfs_iocore_t *io,
- loff_t offset,
- size_t count,
+int
+xfs_iomap(xfs_iocore_t *io,
+ xfs_off_t offset,
+ ssize_t count,
int flags,
page_buf_bmap_t *pbmapp,
int *npbmaps)
{
- xfs_fileoff_t offset_fsb;
- xfs_fileoff_t end_fsb;
- int nimaps;
+ xfs_mount_t *mp = io->io_mount;
+ xfs_fileoff_t offset_fsb, end_fsb;
int error;
- xfs_mount_t *mp;
- xfs_bmbt_irec_t imap[XFS_MAX_RW_NBMAPS];
+ int lockmode = 0;
+ xfs_bmbt_irec_t imap;
+ int nimaps = 1;
+ int bmap_flags = 0;
- ASSERT(ismrlocked(io->io_lock, MR_UPDATE | MR_ACCESS) != 0);
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ switch (flags & (PBF_READ|PBF_WRITE|PBF_FILE_ALLOCATE)) {
+ case PBF_READ:
+ lockmode = XFS_LCK_MAP_SHARED(mp, io);
+ bmap_flags = XFS_BMAPI_ENTIRE;
+ break;
+ case PBF_WRITE:
+ lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
+ bmap_flags = 0;
+ XFS_ILOCK(mp, io, lockmode);
+ break;
+ case PBF_FILE_ALLOCATE:
+ lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
+ bmap_flags = XFS_BMAPI_ENTIRE;
+ XFS_ILOCK(mp, io, lockmode);
+ break;
+ default:
+ ASSERT(flags & (PBF_READ|PBF_WRITE|PBF_FILE_ALLOCATE));
+ }
- mp = io->io_mount;
offset_fsb = XFS_B_TO_FSBT(mp, offset);
- nimaps = sizeof(imap) / sizeof(imap[0]);
- nimaps = min(nimaps, *npbmaps); /* Don't ask for more than caller has */
end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+
error = XFS_BMAPI(mp, NULL, io, offset_fsb,
- (xfs_filblks_t)(end_fsb - offset_fsb),
- flags, NULL, 0, imap,
- &nimaps, NULL);
- if (error) {
- return XFS_ERROR(error);
+			(xfs_filblks_t)(end_fsb - offset_fsb),
+ bmap_flags, NULL, 0, &imap,
+ &nimaps, NULL);
+
+ if (error)
+ goto out;
+
+ switch (flags & (PBF_WRITE|PBF_FILE_ALLOCATE)) {
+ case PBF_WRITE:
+ /* If we found an extent, return it */
+ if (nimaps && (imap.br_startblock != HOLESTARTBLOCK))
+ break;
+
+ if (flags & PBF_DIRECT) {
+ error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset,
+ count, flags, &imap, &nimaps, nimaps);
+ } else {
+ error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
+ flags, &imap, &nimaps);
+ }
+ break;
+ case PBF_FILE_ALLOCATE:
+ /* If we found an extent, return it */
+ XFS_IUNLOCK(mp, io, lockmode);
+ lockmode = 0;
+
+ if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock))
+ break;
+
+ error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
+ break;
}
- if(nimaps) {
- *npbmaps = _xfs_imap_to_bmap(io, offset, imap, pbmapp, nimaps,
- *npbmaps);
- } else
+ if (nimaps) {
+ *npbmaps = _xfs_imap_to_bmap(io, offset, &imap,
+ pbmapp, nimaps, *npbmaps);
+ } else {
*npbmaps = 0;
+ }
+
+out:
+ if (lockmode)
+ XFS_IUNLOCK(mp, io, lockmode);
return XFS_ERROR(error);
}
-/*
- * xfs_iomap_write: return pagebuf_bmap_t's telling higher layers
- * where to write.
- * There are 2 main cases:
- * 1 the extents already exist
- * 2 must allocate.
- * There are 3 cases when we allocate:
- * delay allocation (doesn't really allocate or use transactions)
- * direct allocation (no previous delay allocation)
- * convert delay to real allocations
- */
+static int
+xfs_flush_space(
+ xfs_inode_t *ip,
+ int *fsynced,
+ int *ioflags)
+{
+ vnode_t *vp = XFS_ITOV(ip);
+
+ switch (*fsynced) {
+ case 0:
+ if (ip->i_delayed_blks) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ *fsynced = 1;
+ } else {
+ *ioflags |= PBF_SYNC;
+ *fsynced = 2;
+ }
+ return 0;
+ case 1:
+ *fsynced = 2;
+ *ioflags |= PBF_SYNC;
+ return 0;
+ case 2:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
+ xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
+ XFS_LOG_FORCE|XFS_LOG_SYNC);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ *fsynced = 3;
+ return 0;
+ }
+ return 1;
+}
-STATIC int
-xfs_iomap_write(
- xfs_iocore_t *io,
+int
+xfs_iomap_write_direct(
+ xfs_inode_t *ip,
loff_t offset,
size_t count,
- page_buf_bmap_t *pbmapp,
- int *npbmaps,
- int ioflag)
+ int ioflag,
+ xfs_bmbt_irec_t *ret_imap,
+ int *nmaps,
+ int found)
{
- int maps;
- int error = 0;
- int found;
- int flags = 0;
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_iocore_t *io = &ip->i_iocore;
+ xfs_fileoff_t offset_fsb;
+ xfs_fileoff_t last_fsb;
+ xfs_filblks_t count_fsb;
+ xfs_fsize_t isize;
+ xfs_fsblock_t firstfsb;
+ int nimaps, maps;
+ int error;
+ int bmapi_flag;
+ int rt;
+ xfs_trans_t *tp;
+ xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
+ xfs_bmap_free_t free_list;
+ int aeof;
+ xfs_filblks_t datablocks;
+ int committed;
+ int numrtextents;
+ uint resblks;
- maps = *npbmaps;
- if (!maps)
- goto out;
+ /*
+ * Make sure that the dquots are there. This doesn't hold
+ * the ilock across a disk read.
+ */
+
+ if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
+ if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
+ return XFS_ERROR(error);
+ }
+ }
+ maps = min(XFS_WRITE_IMAPS, *nmaps);
+ nimaps = maps;
+
+ isize = ip->i_d.di_size;
+ aeof = (offset + count) > isize;
+
+ if (io->io_new_size > isize)
+ isize = io->io_new_size;
+
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+ count_fsb = last_fsb - offset_fsb;
+ if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
+ xfs_fileoff_t map_last_fsb;
+
+ map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
+
+ if (map_last_fsb < last_fsb) {
+ last_fsb = map_last_fsb;
+ count_fsb = last_fsb - offset_fsb;
+ }
+ ASSERT(count_fsb > 0);
+ }
+
+ /*
+ * determine if reserving space on
+ * the data or realtime partition.
+ */
+ if ((rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
+ int sbrtextsize, iprtextsize;
+
+ sbrtextsize = mp->m_sb.sb_rextsize;
+ iprtextsize =
+ ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize;
+ numrtextents = (count_fsb + iprtextsize - 1);
+ do_div(numrtextents, sbrtextsize);
+ datablocks = 0;
+ } else {
+ datablocks = count_fsb;
+ numrtextents = 0;
+ }
+
+ /*
+ * allocate and setup the transaction
+ */
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
+
+ error = xfs_trans_reserve(tp, resblks,
+ XFS_WRITE_LOG_RES(mp), numrtextents,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_WRITE_LOG_COUNT);
/*
- * If we have extents that are allocated for this range,
- * return them.
+ * check for running out of space
*/
+ if (error)
+ /*
+ * Free the transaction structure.
+ */
+ xfs_trans_cancel(tp, 0);
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
- found = 0;
- error = xfs_iomap_read(io, offset, count, flags, pbmapp, npbmaps);
if (error)
- goto out;
+		goto error_out; /* Don't return from the failed reserve
+				 * branch above - we must reacquire the
+				 * ilock before returning */
+
+ if (XFS_IS_QUOTA_ON(mp)) {
+ if (xfs_trans_reserve_blkquota(tp, ip, resblks)) {
+ error = (EDQUOT);
+ goto error1;
+ }
+ }
+ nimaps = 1;
+
+ bmapi_flag = XFS_BMAPI_WRITE;
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, ip);
+
+ if (offset < ip->i_d.di_size || rt)
+ bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
- * If we found mappings and they can just have data written
- * without conversion,
- * let the caller write these and call us again.
- *
- * If we have a HOLE or UNWRITTEN, proceed down lower to
- * get the space or to convert to written.
+ * issue the bmapi() call to allocate the blocks
+ */
+ XFS_BMAP_INIT(&free_list, &firstfsb);
+ imapp = &imap[0];
+ error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+ bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list);
+ if (error) {
+ goto error0;
+ }
+
+ /*
+ * complete the transaction
*/
- if (*npbmaps) {
- if (!(pbmapp->pbm_flags & PBMF_HOLE)) {
- *npbmaps = 1; /* Only checked the first one. */
- /* We could check more, ... */
- goto out;
- }
+ error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+ if (error) {
+ goto error0;
}
- found = *npbmaps;
- *npbmaps = maps; /* Restore to original requested */
- if (ioflag & PBF_DIRECT) {
- error = xfs_iomap_write_direct(io, offset, count, pbmapp,
- npbmaps, ioflag, found);
- } else {
- error = xfs_iomap_write_delay(io, offset, count, pbmapp,
- npbmaps, ioflag, found);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ if (error) {
+ goto error_out;
}
-out:
- XFS_IUNLOCK(io->io_mount, io, XFS_ILOCK_EXCL);
+ /* copy any maps to caller's array and return any error. */
+ if (nimaps == 0) {
+ error = (ENOSPC);
+ goto error_out;
+ }
+
+ *ret_imap = imap[0];
+ *nmaps = 1;
+ return 0;
+
+ error0: /* Cancel bmap, unlock inode, and cancel trans */
+ xfs_bmap_cancel(&free_list);
+
+ error1: /* Just cancel transaction */
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ *nmaps = 0; /* nothing set-up here */
+
+error_out:
return XFS_ERROR(error);
}
-STATIC int
+int
xfs_iomap_write_delay(
- xfs_iocore_t *io,
+ xfs_inode_t *ip,
loff_t offset,
size_t count,
- page_buf_bmap_t *pbmapp,
- int *npbmaps,
int ioflag,
- int found)
+ xfs_bmbt_irec_t *ret_imap,
+ int *nmaps)
{
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_iocore_t *io = &ip->i_iocore;
xfs_fileoff_t offset_fsb;
- xfs_fileoff_t ioalign;
xfs_fileoff_t last_fsb;
- xfs_fileoff_t start_fsb;
- xfs_filblks_t count_fsb;
- xfs_off_t aligned_offset;
xfs_fsize_t isize;
xfs_fsblock_t firstblock;
int nimaps;
int error;
- int n;
- unsigned int iosize;
- xfs_mount_t *mp;
-#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
int aeof;
+ int fsynced = 0;
- ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
+ ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
- mp = io->io_mount;
+ /*
+ * Make sure that the dquots are there. This doesn't hold
+ * the ilock across a disk read.
+ */
+
+ if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
+ if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
+ return XFS_ERROR(error);
+ }
+ }
- isize = XFS_SIZE(mp, io);
+retry:
+ isize = ip->i_d.di_size;
if (io->io_new_size > isize) {
isize = io->io_new_size;
}
* then extend the allocation (and the buffer used for the write)
* out to the file system's write iosize. We clean up any extra
* space left over when the file is closed in xfs_inactive().
- * We can only do this if we are sure that we will create buffers
- * over all of the space we allocate beyond the end of the file.
- * Not doing so would allow us to create delalloc blocks with
- * no pages in memory covering them. So, we need to check that
- * there are not any real blocks in the area beyond the end of
- * the file which we are optimistically going to preallocate. If
- * there are then our buffers will stop when they encounter them
- * and we may accidentally create delalloc blocks beyond them
- * that we never cover with a buffer. All of this is because
- * we are not actually going to write the extra blocks preallocated
- * at this point.
*
* We don't bother with this for sync writes, because we need
* to minimize the amount we write for good performance.
*/
- if (!(ioflag & PBF_SYNC) && ((offset + count) > XFS_SIZE(mp, io))) {
- start_fsb = XFS_B_TO_FSBT(mp,
- ((xfs_ufsize_t)(offset + count - 1)));
- count_fsb = mp->m_writeio_blocks;
- while (count_fsb > 0) {
- nimaps = XFS_WRITE_IMAPS;
- error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
- 0, NULL, 0, imap, &nimaps,
- NULL);
- if (error) {
- return error;
- }
- for (n = 0; n < nimaps; n++) {
- if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
- (imap[n].br_startblock != DELAYSTARTBLOCK)) {
- goto write_map;
- }
- start_fsb += imap[n].br_blockcount;
- count_fsb -= imap[n].br_blockcount;
- ASSERT(count_fsb < 0xffff000);
- }
- }
+ if (!(ioflag & PBF_SYNC) && ((offset + count) > ip->i_d.di_size)) {
+ xfs_off_t aligned_offset;
+ unsigned int iosize;
+ xfs_fileoff_t ioalign;
+
iosize = mp->m_writeio_blocks;
aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
last_fsb = ioalign + iosize;
aeof = 1;
}
- write_map:
+
nimaps = XFS_WRITE_IMAPS;
firstblock = NULLFSBLOCK;
* roundup the allocation request to m_dalign boundary if file size
* is greater that 512K and we are allocating past the allocation eof
*/
- if (mp->m_dalign && (XFS_SIZE(mp, io) >= mp->m_dalign) && aeof) {
+ if (mp->m_dalign && (isize >= mp->m_dalign) && aeof) {
int eof;
xfs_fileoff_t new_last_fsb;
new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
- error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
+ error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
if (error) {
return error;
}
}
}
- error = XFS_BMAPI(mp, NULL, io, offset_fsb,
+ error = xfs_bmapi(NULL, ip, offset_fsb,
(xfs_filblks_t)(last_fsb - offset_fsb),
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
/*
* This can be EDQUOT, if nimaps == 0
*/
- if (error) {
+ if (error && (error != ENOSPC)) {
return XFS_ERROR(error);
}
/*
* If bmapi returned us nothing, and if we didn't get back EDQUOT,
* then we must have run out of space.
*/
+
if (nimaps == 0) {
- return XFS_ERROR(ENOSPC);
+ if (xfs_flush_space(ip, &fsynced, &ioflag))
+ return XFS_ERROR(ENOSPC);
+
+ error = 0;
+ goto retry;
}
- /*
- * Now map our desired I/O size and alignment over the
- * extents returned by xfs_bmapi().
- */
- *npbmaps = _xfs_imap_to_bmap(io, offset, imap, pbmapp,
- nimaps, *npbmaps);
+ *ret_imap = imap[0];
+ *nmaps = 1;
return 0;
}
-STATIC int
-xfs_iomap_write_direct(
- xfs_iocore_t *io,
- loff_t offset,
- size_t count,
- page_buf_bmap_t *pbmapp,
- int *npbmaps,
- int ioflag,
- int found)
+
+/*
+ * Pass in a delayed allocate extent, convert it to real extents;
+ * return to the caller the extent we create which maps on top of
+ * the originating callers request.
+ *
+ * Called without a lock on the inode.
+ */
+int
+xfs_iomap_write_allocate(
+ xfs_inode_t *ip,
+ xfs_bmbt_irec_t *map,
+ int *retmap)
{
- xfs_inode_t *ip = XFS_IO_INODE(io);
- xfs_mount_t *mp;
- xfs_fileoff_t offset_fsb;
- xfs_fileoff_t last_fsb;
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb, last_block;
+ xfs_fileoff_t end_fsb, map_start_fsb;
+ xfs_fsblock_t first_block;
+ xfs_bmap_free_t free_list;
xfs_filblks_t count_fsb;
- xfs_fsize_t isize;
- xfs_fsblock_t firstfsb;
- int nimaps, maps;
- int error;
+ xfs_bmbt_irec_t imap[XFS_STRAT_WRITE_IMAPS];
xfs_trans_t *tp;
+ int i, nimaps, committed;
+ int error = 0;
-#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
- xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
- xfs_bmap_free_t free_list;
- int aeof;
- int bmapi_flags;
- xfs_filblks_t datablocks;
- int rt;
- int committed;
- int numrtextents;
- uint resblks;
- int rtextsize;
+ *retmap = 0;
- maps = min(XFS_WRITE_IMAPS, *npbmaps);
- nimaps = maps;
+ /*
+ * Make sure that the dquots are there.
+ */
- mp = io->io_mount;
- isize = XFS_SIZE(mp, io);
- if (io->io_new_size > isize)
- isize = io->io_new_size;
+ if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
+ if ((error = xfs_qm_dqattach(ip, 0))) {
+ return XFS_ERROR(error);
+ }
+ }
- aeof = ((offset + count) > isize) ? 1 : 0;
+ offset_fsb = map->br_startoff;
+ count_fsb = map->br_blockcount;
+ map_start_fsb = offset_fsb;
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
- count_fsb = last_fsb - offset_fsb;
- if (found && (pbmapp->pbm_flags & PBMF_HOLE)) {
- xfs_fileoff_t map_last_fsb;
+ XFS_STATS_ADD(xfsstats.xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
- map_last_fsb = XFS_B_TO_FSB(mp,
- (pbmapp->pbm_bsize + pbmapp->pbm_offset));
+ while (count_fsb != 0) {
+ /*
+ * Set up a transaction with which to allocate the
+ * backing store for the file. Do allocations in a
+ * loop until we get some space in the range we are
+ * interested in. The other space that might be allocated
+ * is in the delayed allocation extent on which we sit
+ * but before our buffer starts.
+ */
- if (map_last_fsb < last_fsb) {
- last_fsb = map_last_fsb;
- count_fsb = last_fsb - offset_fsb;
- }
- ASSERT(count_fsb > 0);
- }
+ nimaps = 0;
+ while (nimaps == 0) {
+ tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+ error = xfs_trans_reserve(tp, 0, XFS_WRITE_LOG_RES(mp),
+ 0, XFS_TRANS_PERM_LOG_RES,
+ XFS_WRITE_LOG_COUNT);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return XFS_ERROR(error);
+ }
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, ip);
- /*
- * roundup the allocation request to m_dalign boundary if file size
- * is greater that 512K and we are allocating past the allocation eof
- */
- if (!found && mp->m_dalign && (isize >= 524288) && aeof) {
- int eof;
- xfs_fileoff_t new_last_fsb;
+ XFS_BMAP_INIT(&free_list, &first_block);
- new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
- printk("xfs_iomap_write_direct: about to XFS_BMAP_EOF %Ld\n",
- new_last_fsb);
- error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
- if (error)
- goto error_out;
- if (eof)
- last_fsb = new_last_fsb;
- }
+ nimaps = XFS_STRAT_WRITE_IMAPS;
+ /*
+ * Ensure we don't go beyond eof - it is possible
+ * the extents changed since we did the read call,
+ * we dropped the ilock in the interim.
+ */
- bmapi_flags = XFS_BMAPI_WRITE|XFS_BMAPI_DIRECT_IO|XFS_BMAPI_ENTIRE;
- bmapi_flags &= ~XFS_BMAPI_DIRECT_IO;
+ end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
+ xfs_bmap_last_offset(NULL, ip, &last_block,
+ XFS_DATA_FORK);
+ last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
+ if ((map_start_fsb + count_fsb) > last_block) {
+ count_fsb = last_block - map_start_fsb;
+ if (count_fsb == 0) {
+ error = EAGAIN;
+ goto trans_cancel;
+ }
+ }
- /*
- * determine if this is a realtime file
- */
- if ((rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) != 0) {
- rtextsize = mp->m_sb.sb_rextsize;
- } else
- rtextsize = 0;
+ /* Go get the actual blocks */
+ error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
+ XFS_BMAPI_WRITE, &first_block, 1,
+ imap, &nimaps, &free_list);
- error = 0;
+ if (error)
+ goto trans_cancel;
- /*
- * allocate file space for the bmapp entries passed in.
- */
+ error = xfs_bmap_finish(&tp, &free_list,
+ first_block, &committed);
- /*
- * determine if reserving space on
- * the data or realtime partition.
- */
- if (rt) {
- numrtextents = (count_fsb + rtextsize - 1);
- do_div(numrtextents, rtextsize);
- datablocks = 0;
- } else {
- datablocks = count_fsb;
- numrtextents = 0;
- }
+ if (error)
+ goto trans_cancel;
- /*
- * allocate and setup the transaction
- */
- tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
+ error = xfs_trans_commit(tp,
+ XFS_TRANS_RELEASE_LOG_RES, NULL);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (error)
+ goto error0;
- error = xfs_trans_reserve(tp,
- resblks,
- XFS_WRITE_LOG_RES(mp),
- numrtextents,
- XFS_TRANS_PERM_LOG_RES,
- XFS_WRITE_LOG_COUNT);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ }
- /*
- * check for running out of space
- */
- if (error) {
/*
- * Free the transaction structure.
+ * See if we were able to allocate an extent that
+ * covers at least part of the callers request
*/
- xfs_trans_cancel(tp, 0);
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
+ for (i = 0; i < nimaps; i++) {
+ if ((map->br_startoff >= imap[i].br_startoff) &&
+ (map->br_startoff < (imap[i].br_startoff +
+ imap[i].br_blockcount))) {
+ *map = imap[i];
+ *retmap = 1;
+ XFS_STATS_INC(xfsstats.xs_xstrat_quick);
+ return 0;
+ }
+ count_fsb -= imap[i].br_blockcount;
+ }
- if (error) {
- goto error_out; /* Don't return in above if .. trans ..,
- need lock to return */
+ /* So far we have not mapped the requested part of the
+ * file, just surrounding data, try again.
+ */
+ nimaps--;
+ offset_fsb = imap[nimaps].br_startoff +
+ imap[nimaps].br_blockcount;
+ map_start_fsb = offset_fsb;
}
- if (XFS_IS_QUOTA_ON(mp)) {
- if (xfs_trans_reserve_quota(tp,
- ip->i_udquot,
- ip->i_gdquot,
- resblks, 0, 0)) {
- error = (EDQUOT);
- goto error1;
- }
- nimaps = 1;
- } else {
- nimaps = 2;
- }
+trans_cancel:
+ xfs_bmap_cancel(&free_list);
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error0:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return XFS_ERROR(error);
+}
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+int
+xfs_iomap_write_unwritten(
+ xfs_inode_t *ip,
+ loff_t offset,
+ size_t count)
+{
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_trans_t *tp;
+ xfs_fileoff_t offset_fsb;
+ xfs_filblks_t count_fsb;
+ xfs_filblks_t numblks_fsb;
+ xfs_bmbt_irec_t imap;
+ int committed;
+ int error;
+ int nres;
+ int nimaps;
+ xfs_fsblock_t firstfsb;
+ xfs_bmap_free_t free_list;
- /*
- * issue the bmapi() call to allocate the blocks
- */
- XFS_BMAP_INIT(&free_list, &firstfsb);
- imapp = &imap[0];
- error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,
- bmapi_flags, &firstfsb, 1, imapp, &nimaps, &free_list);
- if (error) {
- goto error0;
- }
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ count_fsb = XFS_B_TO_FSB(mp, count);
- /*
- * complete the transaction
- */
+ do {
+ nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
- error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
- if (error) {
- goto error0;
- }
+ /*
+ * set up a transaction to convert the range of extents
+ * from unwritten to real. Do allocations in a loop until
+ * we have covered the range passed in.
+ */
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
- if (error) {
- goto error_out;
- }
+ tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+ error = xfs_trans_reserve(tp, nres,
+ XFS_WRITE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_WRITE_LOG_COUNT);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ goto error0;
+ }
- /* copy any maps to caller's array and return any error. */
- if (nimaps == 0) {
- error = (ENOSPC);
- goto error_out;
- }
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, ip);
- maps = min(nimaps, maps);
- *npbmaps = _xfs_imap_to_bmap(io, offset, &imap[0], pbmapp, maps,
- *npbmaps);
- if (*npbmaps) {
/*
- * this is new since xfs_iomap_read
- * didn't find it.
+ * Modify the unwritten extent state of the buffer.
*/
- if (*npbmaps != 1) {
- /* NEED MORE WORK FOR MULTIPLE BMAPS (which are new) */
- BUG();
- }
- }
- goto out;
+ XFS_BMAP_INIT(&free_list, &firstfsb);
+ nimaps = 1;
+ error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+ XFS_BMAPI_WRITE, &firstfsb,
+ 1, &imap, &nimaps, &free_list);
+ if (error)
+ goto error_on_bmapi_transaction;
- error0: /* Cancel bmap, unlock inode, and cancel trans */
- xfs_bmap_cancel(&free_list);
+ error = xfs_bmap_finish(&(tp), &(free_list),
+ firstfsb, &committed);
+ if (error)
+ goto error_on_bmapi_transaction;
- error1: /* Just cancel transaction */
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
- *npbmaps = 0; /* nothing set-up here */
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (error)
+ goto error0;
-error_out:
-out: /* Just return error and any tracing at end of routine */
+ if ((numblks_fsb = imap.br_blockcount) == 0) {
+ /*
+ * The numblks_fsb value should always get
+ * smaller, otherwise the loop is stuck.
+ */
+ ASSERT(imap.br_blockcount);
+ break;
+ }
+ offset_fsb += numblks_fsb;
+ count_fsb -= numblks_fsb;
+ } while (count_fsb > 0);
+
+ return 0;
+
+error_on_bmapi_transaction:
+ xfs_bmap_cancel(&free_list);
+ xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+error0:
return XFS_ERROR(error);
}
+