-/*
+/**
* aops.c - NTFS kernel address space operations and page cache handling.
* Part of the Linux-NTFS project.
*
#define page_buffers(page) (page)->buffers
#endif
-/*
+/**
+ * end_buffer_read_file_async - async i/o completion handler for file reads
+ *
* Async io completion handler for accessing files. Adapted from
* end_buffer_read_mst_async().
*/
return;
}
-/* NTFS version of block_read_full_page(). Adapted from ntfs_mst_readpage(). */
+/**
+ * ntfs_file_read_block - fill a page of an open file with data
+ *
+ * NTFS version of block_read_full_page(). Adapted from ntfs_mst_readpage().
+ */
static int ntfs_file_read_block(struct page *page)
{
VCN vcn;
ntfs_inode *ni;
ntfs_volume *vol;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- sector_t iblock, lblock;
+ sector_t iblock, lblock, zblock;
unsigned int blocksize, blocks, vcn_ofs;
int i, nr;
unsigned char blocksize_bits;
blocksize_bits = VFS_I(ni)->i_blkbits;
blocksize = 1 << blocksize_bits;
- create_empty_buffers(page, blocksize);
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize);
bh = head = page_buffers(page);
if (!bh)
return -ENOMEM;
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
+ zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
#ifdef DEBUG
if (unlikely(!ni->mft_no)) {
/* Loop through all the buffers in the page. */
nr = i = 0;
do {
- BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
+ if (unlikely(buffer_uptodate(bh)))
+ continue;
+ if (unlikely(buffer_mapped(bh))) {
+ arr[nr++] = bh;
+ continue;
+ }
bh->b_dev = VFS_I(ni)->i_dev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
bh->b_state |= (1UL << BH_Mapped);
- arr[nr++] = bh;
- continue;
+ /* Only read initialized data blocks. */
+ if (iblock < zblock) {
+ arr[nr++] = bh;
+ continue;
+ }
+ /* Fully non-initialized data block, zero it. */
+ goto handle_zblock;
}
/* It is a hole, need to zero it. */
if (lcn == LCN_HOLE)
handle_hole:
bh->b_blocknr = -1UL;
bh->b_state &= ~(1UL << BH_Mapped);
+handle_zblock:
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
bytes);
} else
memset(addr, 0, PAGE_CACHE_SIZE);
+ flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
return err;
}
-/*
+/**
+ * end_buffer_read_mftbmp_async - async i/o completion handler for mft bitmap reads
+ *
* Async io completion handler for accessing mft bitmap. Adapted from
* end_buffer_read_mst_async().
*/
return;
}
-/* Readpage for accessing mft bitmap. Adapted from ntfs_mst_readpage(). */
+/**
+ * ntfs_mftbmp_readpage - fill a page of the mft bitmap with data
+ *
+ * Readpage for accessing mft bitmap. Adapted from ntfs_mst_readpage().
+ */
static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
{
VCN vcn;
LCN lcn;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- sector_t iblock, lblock;
+ sector_t iblock, lblock, zblock;
unsigned int blocksize, blocks, vcn_ofs;
int nr, i;
unsigned char blocksize_bits;
blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits;
-
- create_empty_buffers(page, blocksize);
+
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize);
bh = head = page_buffers(page);
if (!bh)
return -ENOMEM;
-
+
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = (vol->mftbmp_allocated_size + blocksize - 1) >> blocksize_bits;
-
+ zblock = (vol->mftbmp_initialized_size + blocksize - 1) >>
+ blocksize_bits;
+
/* Loop through all the buffers in the page. */
nr = i = 0;
do {
- BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
+ if (unlikely(buffer_uptodate(bh)))
+ continue;
+ if (unlikely(buffer_mapped(bh))) {
+ arr[nr++] = bh;
+ continue;
+ }
bh->b_dev = vol->mft_ino->i_dev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
bh->b_state |= (1UL << BH_Mapped);
- arr[nr++] = bh;
- continue;
+ /* Only read initialized data blocks. */
+ if (iblock < zblock) {
+ arr[nr++] = bh;
+ continue;
+ }
+ /* Fully non-initialized data block, zero it. */
+ goto handle_zblock;
}
if (lcn != LCN_HOLE) {
/* Hard error, zero out region. */
*/
bh->b_blocknr = -1UL;
bh->b_state &= ~(1UL << BH_Mapped);
+handle_zblock:
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
* the page before finally marking it uptodate and unlocking it.
*
* Contains an adapted version of fs/buffer.c::block_read_full_page().
- *
- * TODO:/FIXME: The current implementation is simple but wasteful as we perform
- * actual i/o from disk for all data up to allocated size completely ignoring
- * the fact that initialized size, and data size for that matter, may well be
- * lower and hence there is no point in reading them in. We can just zero the
- * page range, which is what is currently done in our async i/o completion
- * handler anyway, once the read from disk completes. However, I am not sure how
- * to setup the buffer heads in that case, so for now we do the pointless i/o.
- * Any help with this would be appreciated...
*/
int ntfs_mst_readpage(struct file *dir, struct page *page)
{
ntfs_inode *ni;
ntfs_volume *vol;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- sector_t iblock, lblock;
+ sector_t iblock, lblock, zblock;
unsigned int blocksize, blocks, vcn_ofs;
int i, nr;
unsigned char blocksize_bits;
blocksize_bits = VFS_I(ni)->i_blkbits;
blocksize = 1 << blocksize_bits;
- create_empty_buffers(page, blocksize);
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize);
bh = head = page_buffers(page);
if (!bh)
return -ENOMEM;
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
+ zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
#ifdef DEBUG
if (unlikely(!ni->run_list.rl && !ni->mft_no))
/* Loop through all the buffers in the page. */
nr = i = 0;
do {
- BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
+ if (unlikely(buffer_uptodate(bh)))
+ continue;
+ if (unlikely(buffer_mapped(bh))) {
+ arr[nr++] = bh;
+ continue;
+ }
bh->b_dev = VFS_I(ni)->i_dev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
bh->b_state |= (1UL << BH_Mapped);
- arr[nr++] = bh;
- continue;
+ /* Only read initialized data blocks. */
+ if (iblock < zblock) {
+ arr[nr++] = bh;
+ continue;
+ }
+ /* Fully non-initialized data block, zero it. */
+ goto handle_zblock;
}
/* It is a hole, need to zero it. */
if (lcn == LCN_HOLE)
handle_hole:
bh->b_blocknr = -1UL;
bh->b_state &= ~(1UL << BH_Mapped);
+handle_zblock:
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
return nr;
}
-/* Address space operations for accessing normal file data. */
+/**
+ * ntfs_file_aops - address space operations for accessing normal file data
+ */
struct address_space_operations ntfs_file_aops = {
writepage: NULL, /* Write dirty page to disk. */
readpage: ntfs_file_readpage, /* Fill page with data. */
typedef int readpage_t(struct file *, struct page *);
-/* Address space operations for accessing mftbmp. */
+/**
+ * ntfs_mftbmp_aops - address space operations for accessing mftbmp
+ */
struct address_space_operations ntfs_mftbmp_aops = {
writepage: NULL, /* Write dirty page to disk. */
readpage: (readpage_t*)ntfs_mftbmp_readpage, /* Fill page with
commit_write: NULL, /* . */
};
-/*
+/**
+ * ntfs_dir_aops - address space operations for directory data
+ *
* Address space operations for accessing normal directory data (i.e. index
* allocation attribute). We can't just use the same operations as for files
* because 1) the attribute is different and even more importantly 2) the index
a->length = cpu_to_le32(0);
}
-/**
- * format_mft_record2 - initialize an empty mft record
- * @vfs_sb: vfs super block of volume
- * @inum: mft record number / inode number to format
- * @mft_rec: mapped, pinned and locked mft record (optional)
- *
- * Initialize an empty mft record. This is used when extending the MFT.
- *
- * If @mft_rec is NULL, we call map_mft_record() to obtain the record and we
- * unmap it again when finished.
- *
- * We return 0 on success or -errno on error.
- */
-#if 0
-// Can't do this as iget_map_mft_record no longer exists...
-int format_mft_record2(struct super_block *vfs_sb, const unsigned long inum,
- MFT_RECORD *mft_rec)
-{
- MFT_RECORD *m;
- ntfs_inode *ni;
-
- if (mft_rec)
- m = mft_rec;
- else {
- m = iget_map_mft_record(WRITE, vfs_sb, inum, &ni);
- if (IS_ERR(m))
- return PTR_ERR(m);
- }
- __format_mft_record(m, NTFS_SB(vfs_sb)->mft_record_size, inum);
- if (!mft_rec) {
- // TODO: dirty mft record
- unmap_mft_record(WRITE, ni);
- // TODO: Do stuff to get rid of the ntfs_inode
- }
- return 0;
-}
-#endif
-
/**
* format_mft_record - initialize an empty mft record
* @ni: ntfs inode of mft record
return m;
}
-/**
- * iget_map_mft_record - iget, map, pin, lock an mft record
- * @rw: map for read (rw = READ) or write (rw = WRITE)
- * @vfs_sb: vfs super block of mounted volume
- * @inum: inode number / MFT record number whose mft record to map
- * @vfs_ino: output parameter which we set to the inode on successful return
- *
- * Does the same as map_mft_record(), except that it starts out only with the
- * knowledge of the super block (@vfs_sb) and the mft record number which is of
- * course the same as the inode number (@inum).
- *
- * On success, *@vfs_ino will contain a pointer to the inode structure of the
- * mft record on return. On error return, *@vfs_ino is undefined.
- *
- * See map_mft_record() description for details and for a description of how
- * errors are returned and what error codes are defined.
- *
- * IMPROTANT: The caller is responsible for calling iput(@vfs_ino) when
- * finished with the inode, i.e. after unmap_mft_record() has been called. If
- * that is omitted you will get busy inodes upon umount...
- */
-#if 0
-// this is no longer possible. iget() cannot be called as we may be loading
-// an ntfs inode which will never have a corresponding vfs inode counter part.
-// this is not going to be pretty. )-:
-// we need our own hash for ntfs inodes now, ugh. )-:
-// not having vfs inodes associated with all ntfs inodes is a bad mistake I am
-// getting the impression. this will in the end turn out uglier than just
-// having iget_no_wait().
-// my only hope is that we can get away without this functionality in the driver
-// altogether. we are ok for extent inodes already because we only handle them
-// via map_extent_mft_record().
-// if we really need it, we could have a list or hash of "pure ntfs inodes"
-// to cope with this situation, so the lookup would be:
-// look for the inode and if not present look for pure ntfs inode and if not
-// present add a new pure ntfs inode. under this scheme extent inodes have to
-// also be added to the list/hash of pure inodes.
-MFT_RECORD *iget_map_mft_record(const int rw, struct super_block *vfs_sb,
- const unsigned long inum, struct inode **vfs_ino)
-{
- struct inode *inode;
- MFT_RECORD *mrec;
-
- /*
- * The corresponding iput() happens when clear_inode() is called on the
- * base mft record of this extent mft record.
- * When used on base mft records, caller has to perform the iput().
- */
- inode = iget(vfs_sb, inum);
- if (inode && !is_bad_inode(inode)) {
- mrec = map_mft_record(rw, inode);
- if (!IS_ERR(mrec)) {
- ntfs_debug("Success for i_ino 0x%lx.", inum);
- *vfs_ino = inode;
- return mrec;
- }
- } else
- mrec = ERR_PTR(-EIO);
- if (inode)
- iput(inode);
- ntfs_debug("Failed for i_ino 0x%lx.", inum);
- return mrec;
-}
-#endif
-
/**
* unmap_mft_record - release a mapped mft record
* @rw: unmap from read (@rw = READ) or write (@rw = WRITE)