ChangeLog
=========
+NTFS 1.1.20:
+ - Fixed two bugs in ntfs_readwrite_attr(). Thanks to Jan Kara for
+ spotting the out of bounds one.
+ - Check return value of set_blocksize() in ntfs_read_super() and make
+ use of get_hardsect_size() to determine the minimum block size.
+ - Fix return values of ntfs_vcn_to_lcn(). This should stop
+ people's start of partition from being overwritten at random.
+
NTFS 1.1.19:
- Fixed ntfs_getdir_unsorted(), ntfs_readdir() and ntfs_printcb() to
cope with arbitrary cluster sizes. Very important for Win2k+. Also,
.long SYMBOL_NAME(sys_getdents64) /* 220 */
.long SYMBOL_NAME(sys_fcntl64)
.long SYMBOL_NAME(sys_ni_syscall) /* reserved for TUX */
+ .long SYMBOL_NAME(sys_ni_syscall) /* Reserved for Security */
.rept NR_syscalls-(.-sys_call_table)/4
.long SYMBOL_NAME(sys_ni_syscall)
if (!ret) {
bdev->bd_openers++;
bdev->bd_inode->i_size = blkdev_size(rdev);
+ bdev->bd_inode->i_blkbits = blksize_bits(block_size(rdev));
} else if (!bdev->bd_openers)
bdev->bd_op = NULL;
}
static struct buffer_head **hash_table;
static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;
-#define BUF_CLEAN 0
-#define BUF_LOCKED 1 /* Buffers scheduled for write */
-#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
-#define NR_LIST 3
-
static struct buffer_head *lru_list[NR_LIST];
static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED;
static int nr_buffers_type[NR_LIST];
if (!atomic_read(&bh->b_count)) {
if (destroy_dirty_buffers || !buffer_dirty(bh)) {
remove_inode_queue(bh);
-#if 0
- __remove_from_queues(bh);
- put_last_free(bh);
-#endif
}
} else
printk("invalidate: busy buffer\n");
}
spin_unlock(&unused_list_lock);
}
-#if 0
- /*
- * (Pending further analysis ...)
- * Ordinary (non-async) requests can use a different memory priority
- * to free up pages. Any swapping thus generated will use async
- * buffer heads.
- */
- if(!async &&
- (bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
- memset(bh, 0, sizeof(*bh));
- init_waitqueue_head(&bh->b_wait);
- return bh;
- }
-#endif
return NULL;
}
* instead.
*/
if (!offset) {
- if (!try_to_free_buffers(page, 0)) {
- if (drop_pagecache)
- atomic_inc(&buffermem_pages);
+ if (!try_to_free_buffers(page, 0))
return 0;
- }
}
return 1;
return err;
}
+/*
+ * Attach a NULL-terminated chain of buffer_heads (as produced by
+ * create_buffers()) to a page, converting the chain into the circular
+ * b_this_page ring the buffer cache expects, and take a page-cache
+ * reference on behalf of the buffers.
+ *
+ * Caller must hold the page locked; the extra page_cache_get() is
+ * dropped when the buffers are later freed (try_to_free_buffers).
+ */
+static inline void link_dev_buffers(struct page * page, struct buffer_head *head)
+{
+	struct buffer_head *bh, *tail;
+
+	/* Walk to the end of the singly-linked chain, remembering the tail. */
+	bh = head;
+	do {
+		tail = bh;
+		bh = bh->b_this_page;
+	} while (bh);
+	/* Close the ring and publish it on the page. */
+	tail->b_this_page = head;
+	page->buffers = head;
+	page_cache_get(page);
+}
+
/*
* Create the page-cache page that contains the requested block
*/
static struct page * grow_dev_page(struct block_device *bdev, unsigned long index, int size)
{
struct page * page;
+ struct buffer_head *bh;
page = find_or_create_page(bdev->bd_inode->i_mapping, index, GFP_NOFS);
if (IS_ERR(page))
if (!PageLocked(page))
BUG();
- if (!page->buffers) {
- struct buffer_head *bh, *tail;
- struct buffer_head *head = create_buffers(page, size, 0);
- if (!head)
+ bh = page->buffers;
+ if (bh) {
+ if (bh->b_size == size)
+ return page;
+ if (!try_to_free_buffers(page, GFP_NOFS))
goto failed;
-
- bh = head;
- do {
- tail = bh;
- bh = bh->b_this_page;
- } while (bh);
- tail->b_this_page = head;
- page->buffers = head;
- page_cache_get(page);
- atomic_inc(&buffermem_pages);
}
+
+ bh = create_buffers(page, size, 0);
+ if (!bh)
+ goto failed;
+ link_dev_buffers(page, bh);
return page;
failed:
hash_page_buffers(page, dev, block, size);
UnlockPage(page);
page_cache_release(page);
+
+ /* We hashed up this page, so increment buffermem */
+ atomic_inc(&buffermem_pages);
return 1;
}
obj-y := fs.o sysctl.o support.o util.o inode.o dir.o super.o attr.o unistr.o
obj-m := $(O_TARGET)
# New version format started 3 February 2001.
-EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.19\" #-DDEBUG
+EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.20\" #-DDEBUG
include $(TOPDIR)/Rules.make
#include <linux/locks.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
+#include <linux/blkdev.h>
#include <asm/page.h>
#include <linux/nls.h>
#include <linux/ntfs_fs.h>
{
ntfs_volume *vol;
struct buffer_head *bh;
- int i, to_read;
+ int i, to_read, blocksize;
ntfs_debug(DEBUG_OTHER, "ntfs_read_super\n");
vol = NTFS_SB2VOL(sb);
init_ntfs_super_block(vol);
if (!parse_options(vol, (char*)options))
goto ntfs_read_super_vol;
- /* Assume a 512 bytes block device for now. */
- set_blocksize(sb->s_dev, 512);
+ blocksize = get_hardsect_size(sb->s_dev);
+ if (blocksize < 512)
+ blocksize = 512;
+ if (set_blocksize(sb->s_dev, blocksize) < 0) {
+ ntfs_error("Unable to set blocksize %d.\n", blocksize);
+ goto ntfs_read_super_vol;
+ }
/* Read the super block (boot block). */
- if (!(bh = bread(sb->s_dev, 0, 512))) {
+ if (!(bh = bread(sb->s_dev, 0, blocksize))) {
ntfs_error("Reading super block failed\n");
goto ntfs_read_super_unl;
}
ntfs_debug(DEBUG_OTHER, "Done reading boot block\n");
- /* Check for 'NTFS' magic number */
+ /* Check for valid 'NTFS' boot sector. */
if (!is_boot_sector_ntfs(bh->b_data)) {
ntfs_debug(DEBUG_OTHER, "Not a NTFS volume\n");
bforget(bh);
goto ntfs_read_super_unl;
}
ntfs_debug(DEBUG_OTHER, "$Mft at cluster 0x%lx\n", vol->mft_lcn);
- bforget(bh);
+ brelse(bh);
NTFS_SB(vol) = sb;
if (vol->cluster_size > PAGE_SIZE) {
ntfs_error("Partition cluster size is not supported yet (it "
ntfs_debug(DEBUG_OTHER, "Done to init volume\n");
/* Inform the kernel that a device block is a NTFS cluster. */
sb->s_blocksize = vol->cluster_size;
- for (i = sb->s_blocksize, sb->s_blocksize_bits = 0; i != 1; i >>= 1)
- sb->s_blocksize_bits++;
- set_blocksize(sb->s_dev, sb->s_blocksize);
+ sb->s_blocksize_bits = vol->cluster_size_bits;
+ if (blocksize != vol->cluster_size &&
+ set_blocksize(sb->s_dev, sb->s_blocksize) < 0) {
+ ntfs_error("Cluster size too small for device.\n");
+ goto ntfs_read_super_unl;
+ }
ntfs_debug(DEBUG_OTHER, "set_blocksize\n");
/* Allocate an MFT record (MFT record can be smaller than a cluster). */
i = vol->cluster_size;
* If write extends beyond _allocated_ size, extend attribute,
* updating attr->allocated and attr->size in the process. (AIA)
*/
- if (offset + l > attr->allocated) {
+ if ((!attr->resident && offset + l > attr->allocated) ||
+ (attr->resident && offset + l > attr->size)) {
error = ntfs_resize_attr(ino, attr, offset + l);
if (error)
return error;
- } else if (offset + l > attr->size)
- /* If amount of data has increased: update. */
- attr->size = offset + l;
- /* If amount of initialised data has increased: update. */
- if (offset + l > attr->initialized) {
- /* FIXME: Zero-out the section between the old
- * initialised length and the write start. (AIA) */
- attr->initialized = offset + l;
+ }
+ if (!attr->resident) {
+ /* Has amount of data increased? */
+ if (offset + l > attr->size)
+ attr->size = offset + l;
+ /* Has amount of initialised data increased? */
+ if (offset + l > attr->initialized) {
+ /* FIXME: Clear the section between the old
+ * initialised length and the write start.
+ * (AIA) */
+ attr->initialized = offset + l;
+ }
}
}
if (attr->resident) {
if (offset >= attr->initialized)
return ntfs_read_zero(dest, l);
if (offset + l > attr->initialized) {
- dest->size = chunk = offset + l - attr->initialized;
+ dest->size = chunk = attr->initialized - offset;
error = ntfs_readwrite_attr(ino, attr, offset, dest);
- if (error)
+ if (error || (dest->size != chunk && (error = -EIO, 1)))
return error;
+ dest->size += l - chunk;
return ntfs_read_zero(dest, l - chunk);
}
if (attr->flags & ATTR_IS_COMPRESSED)
return ntfs_readwrite_attr(ino, attr, offset, buf);
}
+/* -2 = error, -1 = hole, >= 0 means real disk cluster (lcn). */
int ntfs_vcn_to_lcn(ntfs_inode *ino, int vcn)
{
int rnum;
ntfs_attribute *data;
data = ntfs_find_attr(ino, ino->vol->at_data, 0);
- /* It's hard to give an error code. */
if (!data || data->resident || data->flags & (ATTR_IS_COMPRESSED |
ATTR_IS_ENCRYPTED))
- return -1;
+ return -2;
if (data->size <= (__s64)vcn << ino->vol->cluster_size_bits)
- return -1;
- /*
- * For Linux, block number 0 represents a hole. - No problem as we do
- * not support bmap in any form whatsoever. The FIBMAP sys call is
- * deprecated anyway and NTFS is not a block based file system so
- * allowing bmapping is complete and utter garbage IMO. Use mmap once
- * we implement it... (AIA)
- */
+ return -2;
if (data->initialized <= (__s64)vcn << ino->vol->cluster_size_bits)
- return 0;
+ return -1;
for (rnum = 0; rnum < data->d.r.len &&
- vcn >= data->d.r.runlist[rnum].len; rnum++)
+ vcn >= data->d.r.runlist[rnum].len; rnum++)
vcn -= data->d.r.runlist[rnum].len;
- /* We need to cope with sparse runs. (AIA) */
+ if (data->d.r.runlist[rnum].lcn >= 0)
+ return data->d.r.runlist[rnum].lcn + vcn;
return data->d.r.runlist[rnum].lcn + vcn;
}
#define __NR_madvise1 219 /* delete when C lib stub is removed */
#define __NR_getdents64 220
#define __NR_fcntl64 221
+#define __NR_security 223 /* syscall for security modules */
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
/* reiserfs_writepage needs this */
extern void set_buffer_async_io(struct buffer_head *bh) ;
+#define BUF_CLEAN 0
+#define BUF_LOCKED 1 /* Buffers scheduled for write */
+#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST 3
+
static inline void get_bh(struct buffer_head * bh)
{
atomic_inc(&(bh)->b_count);
if (sysctl_overcommit_memory)
return 1;
- free = atomic_read(&buffermem_pages);
- free += atomic_read(&page_cache_size);
+ /* The page cache contains buffer pages these days.. */
+ free = atomic_read(&page_cache_size);
free += nr_free_pages();
free += nr_swap_pages;