NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
#endif
struct pci_dev *
pci_find_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
{
- if (!from)
- from = pci_devices;
- else
- from = from->next;
- while (from && (from->vendor != vendor && vendor != PCI_ANY_ID || from->device != device && device != PCI_ANY_ID))
- from = from->next;
- return from;
+ struct pci_dev *next;
+
+ next = pci_devices;
+ if (from)
+ next = from->next;
+
+ while (next) {
+ struct pci_dev *dev = next;
+ next = next->next;
+ if (vendor != PCI_ANY_ID && dev->vendor != vendor)
+ continue;
+ if (device != PCI_ANY_ID && dev->device != device)
+ continue;
+
+ return dev;
+ }
+ return NULL;
}
spinlock_t usb_acm_lock = SPIN_LOCK_UNLOCKED;
-static int acm_irq(int state, void *__buffer, void *dev_id)
+static int acm_irq(int state, void *__buffer, int len, void *dev_id)
{
// unsigned char *data = __buffer;
struct acm_state *acm = &static_acm_state;
};
-static int usb_audio_irq(int state, void *buffer, void *dev_id)
+static int usb_audio_irq(int state, void *buffer, int len, void *dev_id)
{
struct usb_audio *aud = (struct usb_audio*) dev_id;
return 1;
}
}
-static int cpia_isoc_irq(int status, void *__buffer, void *dev_id)
+static int cpia_isoc_irq(int status, void *__buffer, int len, void *dev_id)
{
struct usb_cpia *cpia = dev_id;
struct usb_device *dev = cpia->dev;
* the low-level driver that it wants to be re-activated,
* or zero to say "I'm done".
*/
-static int hub_irq(int status, void *__buffer, void *dev_id)
+static int hub_irq(int status, void *__buffer, int len, void *dev_id)
{
struct usb_hub *hub = dev_id;
unsigned long flags;
}
static int
-usb_kbd_irq(int state, void *buffer, void *dev_id)
+usb_kbd_irq(int state, void *buffer, int len, void *dev_id)
{
struct usb_keyboard *kbd = (struct usb_keyboard*) dev_id;
unsigned long *down = (unsigned long*) buffer;
spinlock_t usb_mouse_lock = SPIN_LOCK_UNLOCKED;
-static int mouse_irq(int state, void *__buffer, void *dev_id)
+static int mouse_irq(int state, void *__buffer, int len, void *dev_id)
{
signed char *data = __buffer;
/* finding the mouse is easy when there's only one */
OHCI_DEBUG( for(i=0; i < data_len; i++ ) printk(" %02x", ((__u8 *) data)[i]);)
OHCI_DEBUG( printk(" ret_status: %x\n", status); })
- ret = handler(cc_to_status[status & 0xf], data, dev_id);
+ ret = handler(cc_to_status[status & 0xf], data, data_len, dev_id);
if(ret == 0) return 0; /* 0 .. do not requeue */
if(status > 0) return -1; /* error occured do not requeue ? */
ohci_trans_req(ohci, ep_addr, 0, NULL, data, 8, (__OHCI_BAG) handler, (__OHCI_BAG) dev_id); /* requeue int request */
*
* This function is called from the interrupt handler.
*/
-static int ohci_control_completed(int stats, void *buffer, void *dev_id)
+static int ohci_control_completed(int stats, void *buffer, int len, void *dev_id)
{
/* pass the TDs completion status back to control_msg */
if (dev_id) {
/* Check if TD should be re-queued */
if ((td->completed != NULL) &&
- (td->completed(cc, td->data, td->dev_id))) {
+ (td->completed(cc, td->data, -1 /* XXX */, td->dev_id))) {
/* Mark the TD as active again:
* Set the not accessed condition code
* Reset the Error count
#if 0
printk(" link = %p, element = %p\n", qh->link, qh->element);
#endif
- if(!qh->element) {
+ if(!(qh->element & ~0xF)) {
printk(" td 0 = NULL\n");
return;
}
tmp = td->first;
printk("uhci_td_result() failed with status %x\n", status);
- show_status(dev->uhci);
+ //show_status(dev->uhci);
do {
show_td(tmp);
if ((tmp->link & 1) || (tmp->link & 2))
/* notify removal */
- td->completed(USB_ST_REMOVED, NULL, td->dev_id);
+ td->completed(USB_ST_REMOVED, NULL, 0, td->dev_id);
/* this is DANGEROUS - not sure whether this is right */
*/
static DECLARE_WAIT_QUEUE_HEAD(control_wakeup);
-static int uhci_control_completed(int status, void *buffer, void *dev_id)
+static int uhci_control_completed(int status, void *buffer, int len, void *dev_id)
{
wake_up(&control_wakeup);
return 0; /* Don't re-instate */
// show_status(dev->uhci);
// show_queues(dev->uhci);
- schedule_timeout(HZ/10);
+ schedule_timeout(HZ*5);
// control should be empty here...
// show_status(dev->uhci);
* information, that's just ridiculously high. Most
* control messages have just a few bytes of data.
*/
-static int uhci_control_msg(struct usb_device *usb_dev, unsigned int pipe,
- devrequest *cmd, void *data, int len)
+static int uhci_control_msg(struct usb_device *usb_dev, unsigned int pipe, void *cmd, void *data, int len)
{
struct uhci_device *dev = usb_to_uhci(usb_dev);
struct uhci_td *first, *td, *prevtd;
}
/*
- * Build the final TD for control status
+ * Build the final TD for control status
*/
destination ^= (0xE1 ^ 0x69); /* OUT -> IN */
destination |= 1 << 19; /* End in Data1 */
- td->link = 1; /* Terminate */
- td->status = status | (1 << 24); /* IOC */
+ td->backptr = &prevtd->link;
+ td->status = (status /* & ~(3 << 27) */) | (1 << 24); /* no limit on final packet */
td->info = destination | (0x7ff << 21); /* 0 bytes of data */
td->buffer = 0;
td->first = first;
- td->backptr = &prevtd->link;
+ td->link = 1; /* Terminate */
+
/* Start it up.. */
ret = uhci_run_control(dev, first, td);
}
if (uhci_debug && ret) {
- __u8 *p = (__u8 *) cmd;
+ __u8 *p = cmd;
printk("Failed cmd - %02X %02X %02X %02X %02X %02X %02X %02X\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
*/
static DECLARE_WAIT_QUEUE_HEAD(bulk_wakeup);
-static int uhci_bulk_completed(int status, void *buffer, void *dev_id)
+static int uhci_bulk_completed(int status, void *buffer, int len, void *dev_id)
{
wake_up(&bulk_wakeup);
return 0; /* Don't re-instate */
// show_status(dev->uhci);
// show_queues(dev->uhci);
- schedule_timeout(HZ/10);
+ schedule_timeout(HZ*5);
// show_status(dev->uhci);
// show_queues(dev->uhci);
+ //show_queue(first->qh);
remove_wait_queue(&bulk_wakeup, &wait);
/* Clean up in case it failed.. */
{
struct list_head *head = &uhci->interrupt_list;
struct list_head *tmp;
+ int status;
spin_lock(&irqlist_lock);
tmp = head->next;
next = tmp->next;
- if (!(td->status & (1 << 23))) { /* No longer active? */
+ if (!((status = td->status) & (1 << 23)) || /* No longer active? */
+ ((td->qh->element & ~15) &&
+ !((status = uhci_link_to_td(td->qh->element)->status) & (1 <<23)) &&
+ (status & 0x760000) /* is in error state (Stall, db, babble, timeout, bitstuff) */)) {
/* remove from IRQ list */
__list_del(tmp->prev, next);
INIT_LIST_HEAD(tmp);
- if (td->completed(uhci_map_status((td->status & 0xff)>> 16, 0),
- bus_to_virt(td->buffer), td->dev_id)) {
+ if (td->completed(uhci_map_status(status, 0), bus_to_virt(td->buffer), -1, td->dev_id)) {
list_add(&td->irq_list, &uhci->interrupt_list);
if (!(td->status & (1 << 25))) {
/* If completed wants to not reactivate, then it's */
/* responsible for free'ing the TD's and QH's */
/* or another function (such as run_control) */
- }
+ }
tmp = next;
}
spin_unlock(&irqlist_lock);
{
struct uhci *uhci = (struct uhci *)__uhci;
struct uhci_device * root_hub =usb_to_uhci(uhci->bus->root_hub);
+
lock_kernel();
request_region(uhci->io_addr, 32, "usb-uhci");
* until we come up with a common meaning.
* void *buffer - This is a pointer to the data used in this
* USB transfer.
+ * int length - This is the number of bytes transferred in or out
+ * of the buffer by this transfer. (-1 = unknown/unsupported)
* void *dev_id - This is a user defined pointer set when the IRQ
* is requested that is passed back.
*/
-typedef int (*usb_device_irq)(int, void *, void *);
+typedef int (*usb_device_irq)(int, void *, int, void *);
struct usb_operations {
struct usb_device *(*allocate)(struct usb_device *);
__u8 ep_int; /* interrupt . */
__u8 subclass; /* as in overview */
__u8 protocol; /* .............. */
+ __u8 attention_done; /* force attention on first command */
int (*pop)(Scsi_Cmnd *); /* protocol specific do cmd */
+ int (*pop_reset)(struct us_data *); /* ................. device reset */
GUID(guid); /* unique dev id */
struct Scsi_Host *host; /* our dummy host data */
Scsi_Host_Template *htmplt; /* own host template */
/* we want to retry if the device reported NAK */
if (result == USB_ST_TIMEOUT) {
+ if (partial != this_xfer) {
+ return 0; /* I do not like this */
+ }
if (!maxtry--)
break;
this_xfer -= partial;
/* short data - assume end */
result = USB_ST_DATAUNDERRUN;
break;
+ } else if (result == USB_ST_STALL && us->protocol == US_PR_CB) {
+ if (!maxtry--)
+ break;
+ this_xfer -= partial;
+ buf += partial;
} else
break;
} while ( this_xfer );
}
-static int pop_CBI_irq(int state, void *buffer, void *dev_id)
+static int pop_CBI_irq(int state, void *buffer, int len, void *dev_id)
{
struct us_data *us = (struct us_data *)dev_id;
if (state != USB_ST_REMOVED) {
us->ip_data = *(__u16 *)buffer;
- us->ip_wanted = 0;
+ US_DEBUGP("Interrupt Status %x\n", us->ip_data);
}
- wake_up(&us->ip_waitq);
+ if (us->ip_wanted)
+ wake_up(&us->ip_waitq);
+ us->ip_wanted = 0;
/* we dont want another interrupt */
return 0;
}
+
+static int pop_CB_reset(struct us_data *us)
+{
+ unsigned char cmd[12];
+ devrequest dr;
+ int result;
+
+ dr.requesttype = USB_TYPE_CLASS | USB_RT_INTERFACE;
+ dr.request = US_CBI_ADSC;
+ dr.value = 0;
+ dr.index = us->pusb_dev->ifnum;
+ dr.length = 12;
+ memset(cmd, -1, sizeof(cmd));
+ cmd[0] = SEND_DIAGNOSTIC;
+ cmd[1] = 4;
+ us->pusb_dev->bus->op->control_msg(us->pusb_dev,
+ usb_sndctrlpipe(us->pusb_dev,0),
+ &dr, cmd, 12);
+
+ usb_clear_halt(us->pusb_dev, us->ep_in | 0x80);
+ usb_clear_halt(us->pusb_dev, us->ep_out);
+
+ /* long wait for reset */
+
+ schedule_timeout(HZ*5);
+ return 0;
+}
+
static int pop_CB_command(Scsi_Cmnd *srb)
{
struct us_data *us = (struct us_data *)srb->host_scribble;
devrequest dr;
unsigned char cmd[16];
int result;
- int retry = 1;
+ int retry = 5;
int done_start = 0;
while (retry--) {
result = us->pusb_dev->bus->op->control_msg(us->pusb_dev,
usb_sndctrlpipe(us->pusb_dev,0),
&dr, cmd, us->fixedlength);
- if (!done_start && us->subclass == US_SC_UFI && cmd[0] == TEST_UNIT_READY && result) {
+ if (!done_start && (us->subclass == US_SC_UFI /*|| us->subclass == US_SC_8070*/)
+ && cmd[0] == TEST_UNIT_READY && result) {
/* as per spec try a start command, wait and retry */
done_start++;
return result;
}
-/* Protocol command handlers */
+/*
+ * Control/Bulk status handler
+ */
-static int pop_CBI(Scsi_Cmnd *srb)
+static int pop_CB_status(Scsi_Cmnd *srb)
{
struct us_data *us = (struct us_data *)srb->host_scribble;
int result;
+ __u8 status[2];
+ devrequest dr;
+ int retry = 5;
- /* run the command */
-
- if ((result = pop_CB_command(srb))) {
- US_DEBUGP("CBI command %x\n", result);
- if (result == USB_ST_STALL || result == USB_ST_TIMEOUT)
- return (DID_OK << 16) | 2;
- return DID_ABORT << 16;
- }
-
- /* transfer the data */
-
- if (us_transfer_length(srb)) {
- result = us_transfer(srb, US_DIRECTION(srb->cmnd[0]));
- if (result && result != USB_ST_DATAUNDERRUN) {
- US_DEBUGP("CBI transfer %x\n", result);
+ switch (us->protocol) {
+ case US_PR_CB:
+ /* get from control */
+
+ while (retry--) {
+ dr.requesttype = 0x80 | USB_TYPE_STANDARD | USB_RT_DEVICE;
+ dr.request = USB_REQ_GET_STATUS;
+ dr.index = 0;
+ dr.value = 0;
+ dr.length = 2;
+ result = us->pusb_dev->bus->op->control_msg(us->pusb_dev,
+ usb_rcvctrlpipe(us->pusb_dev,0),
+ &dr, status, sizeof(status));
+ if (result != USB_ST_TIMEOUT)
+ break;
+ }
+ if (result) {
+ US_DEBUGP("Bad AP status request %d\n", result);
return DID_ABORT << 16;
}
- }
-
- /* get status */
+ US_DEBUGP("Got AP status %x %x\n", status[0], status[1]);
+ if (srb->cmnd[0] != REQUEST_SENSE && srb->cmnd[0] != INQUIRY &&
+ ( (status[0] & ~3) || status[1]))
+ return (DID_OK << 16) | 2;
+ else
+ return DID_OK << 16;
+ break;
- if (us->protocol == US_PR_CBI) {
+ case US_PR_CBI:
/* get from interrupt pipe */
/* add interrupt transfer, marked for removal */
return DID_ABORT << 16;
}
return (DID_OK << 16) + ((us->ip_data & 0x300) ? 2 : 0);
- } else {
- /* get from where? */
}
return DID_ERROR << 16;
}
+/* Protocol command handlers */
+
+static int pop_CBI(Scsi_Cmnd *srb)
+{
+ struct us_data *us = (struct us_data *)srb->host_scribble;
+ int result;
+
+ /* run the command */
+
+ if ((result = pop_CB_command(srb))) {
+ US_DEBUGP("CBI command %x\n", result);
+ if (result == USB_ST_STALL || result == USB_ST_TIMEOUT) {
+ return (DID_OK << 16) | 2;
+ }
+ return DID_ABORT << 16;
+ }
+
+ /* transfer the data */
+
+ if (us_transfer_length(srb)) {
+ result = us_transfer(srb, US_DIRECTION(srb->cmnd[0]));
+ if (result && result != USB_ST_DATAUNDERRUN) {
+ US_DEBUGP("CBI transfer %x\n", result);
+ return DID_ABORT << 16;
+ } else if (result == USB_ST_DATAUNDERRUN) {
+ return DID_OK << 16;
+ }
+ } else {
+ if (!result) {
+ return DID_OK << 16;
+ }
+ }
+
+ /* get status */
+
+ return pop_CB_status(srb);
+}
+
static int pop_Bulk_reset(struct us_data *us)
{
devrequest dr;
dr.requesttype = USB_TYPE_CLASS | USB_RT_INTERFACE;
dr.request = US_BULK_RESET;
- dr.value = US_BULK_RESET_SOFT;
+ dr.value = US_BULK_RESET_HARD;
dr.index = 0;
dr.length = 0;
- US_DEBUGP("Bulk soft reset\n");
result = us->pusb_dev->bus->op->control_msg(us->pusb_dev, usb_sndctrlpipe(us->pusb_dev,0), &dr, NULL, 0);
- if (result) {
- US_DEBUGP("Bulk soft reset failed %d\n", result);
- dr.value = US_BULK_RESET_HARD;
- result = us->pusb_dev->bus->op->control_msg(us->pusb_dev, usb_sndctrlpipe(us->pusb_dev,0), &dr, NULL, 0);
- if (result)
- US_DEBUGP("Bulk hard reset failed %d\n", result);
- }
+ if (result)
+ US_DEBUGP("Bulk hard reset failed %d\n", result);
usb_clear_halt(us->pusb_dev, us->ep_in | 0x80);
usb_clear_halt(us->pusb_dev, us->ep_out);
+
+ /* long wait for reset */
+
+ schedule_timeout(HZ*5);
+
return result;
}
/*
stall = 0;
do {
- //usb_settoggle(us->pusb_dev, us->ep_in, 0); /* AAARgh!! */
- US_DEBUGP("Toggle is %d\n", usb_gettoggle(us->pusb_dev, us->ep_in));
result = us->pusb_dev->bus->op->bulk_msg(us->pusb_dev,
usb_rcvbulkpipe(us->pusb_dev, us->ep_in), &bcs,
US_BULK_CS_WRAP_LEN, &partial);
struct us_data *us = (struct us_data *)srb->host->hostdata[0];
US_DEBUGP("Command wakeup\n");
+ if (us->srb) {
+ /* busy */
+ }
srb->host_scribble = (unsigned char *)us;
us->srb = srb;
srb->scsi_done = done;
return 0;
}
-static int us_device_reset( Scsi_Cmnd *srb )
+static int us_bus_reset( Scsi_Cmnd *srb )
{
- return 0;
+ struct us_data *us = (struct us_data *)srb->host->hostdata[0];
+
+ us->pop_reset(us);
+ return SUCCESS;
}
static int us_host_reset( Scsi_Cmnd *srb )
return 0;
}
-static int us_bus_reset( Scsi_Cmnd *srb )
-{
- return 0;
-}
#undef SPRINTF
#define SPRINTF(args...) { if (pos < (buffer + length)) pos += sprintf (pos, ## args); }
if (inout)
return length;
- if (!(vendor = usb_string(us->pusb_dev, us->pusb_dev->descriptor.iManufacturer)))
+ if (!us->pusb_dev || !(vendor = usb_string(us->pusb_dev, us->pusb_dev->descriptor.iManufacturer)))
vendor = "?";
- if (!(product = usb_string(us->pusb_dev, us->pusb_dev->descriptor.iProduct)))
+ if (!us->pusb_dev || !(product = usb_string(us->pusb_dev, us->pusb_dev->descriptor.iProduct)))
product = "?";
switch (us->protocol) {
us_queuecommand,
NULL, /* eh_strategy */
us_abort,
- us_device_reset,
+ us_bus_reset,
us_bus_reset,
us_host_reset,
NULL, /* abort */
TRUE /* emulated */
};
+static unsigned char sense_notready[] = {
+ 0x70, /* current error */
+ 0x00,
+ 0x02, /* not ready */
+ 0x00,
+ 0x00,
+ 10, /* additional length */
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x04, /* not ready */
+ 0x03, /* manual intervention */
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00
+};
+
static int usbscsi_control_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
exit_files(current);
//exit_fs(current);
- sprintf(current->comm, "usbscsi%d", us->host_no);
+ sprintf(current->comm, "usbscsi%d", us->host_number);
unlock_kernel();
switch (action) {
case US_ACT_COMMAND :
- if (!us->pusb_dev || us->srb->target || us->srb->lun) {
+ if (us->srb->target || us->srb->lun) {
/* bad device */
US_DEBUGP( "Bad device number (%d/%d) or dev %x\n", us->srb->target, us->srb->lun, (unsigned int)us->pusb_dev);
us->srb->result = DID_BAD_TARGET << 16;
+ } else if (!us->pusb_dev) {
+
+ /* our device has gone - pretend not ready */
+
+ if (us->srb->cmnd[0] == REQUEST_SENSE) {
+ memcpy(us->srb->request_buffer, sense_notready, sizeof(sense_notready));
+ us->srb->result = DID_OK << 16;
+ } else {
+ us->srb->result = (DID_OK << 16) | 2;
+ }
} else {
US_DEBUG(us_show_command(us->srb));
+
+ /* check for variable length - do properly if so */
+
if (us->filter && us->filter->command)
us->srb->result = us->filter->command(us->fdata, us->srb);
- else
+ else if (us->srb->cmnd[0] == START_STOP &&
+ us->pusb_dev->descriptor.idProduct == 0x0001 &&
+ us->pusb_dev->descriptor.idVendor == 0x04e6)
+ us->srb->result = DID_OK << 16;
+ else {
+ unsigned int savelen = us->srb->request_bufflen;
+ unsigned int saveallocation;
+
+ switch (us->srb->cmnd[0]) {
+ case REQUEST_SENSE:
+ if (us->srb->request_bufflen > 18)
+ us->srb->request_bufflen = 18;
+ else
+ break;
+ saveallocation = us->srb->cmnd[4];
+ us->srb->cmnd[4] = 18;
+ break;
+
+ case INQUIRY:
+ if (us->srb->request_bufflen > 36)
+ us->srb->request_bufflen = 36;
+ else
+ break;
+ saveallocation = us->srb->cmnd[4];
+ us->srb->cmnd[4] = 36;
+ break;
+
+ case MODE_SENSE:
+ if (us->srb->request_bufflen > 4)
+ us->srb->request_bufflen = 4;
+ else
+ break;
+ saveallocation = us->srb->cmnd[4];
+ us->srb->cmnd[4] = 4;
+ break;
+
+ case LOG_SENSE:
+ case MODE_SENSE_10:
+ if (us->srb->request_bufflen > 8)
+ us->srb->request_bufflen = 8;
+ else
+ break;
+ saveallocation = (us->srb->cmnd[7] << 8) | us->srb->cmnd[8];
+ us->srb->cmnd[7] = 0;
+ us->srb->cmnd[8] = 8;
+ break;
+
+ default:
+ break;
+ }
us->srb->result = us->pop(us->srb);
+
+ if (savelen != us->srb->request_bufflen &&
+ us->srb->result == (DID_OK << 16)) {
+ unsigned char *p = (unsigned char *)us->srb->request_buffer;
+ unsigned int length;
+
+ /* set correct length and retry */
+ switch (us->srb->cmnd[0]) {
+ case REQUEST_SENSE:
+ /* simply return 18 bytes */
+ p[7] = 10;
+ length = us->srb->request_bufflen;
+ break;
+
+ case INQUIRY:
+ length = p[4] + 5 > savelen ? savelen : p[4] + 5;
+ us->srb->cmnd[4] = length;
+ break;
+
+ case MODE_SENSE:
+ length = p[0] + 4 > savelen ? savelen : p[0] + 4;
+ us->srb->cmnd[4] = 4;
+ break;
+
+ case LOG_SENSE:
+ length = ((p[2] << 8) + p[3]) + 4 > savelen ? savelen : ((p[2] << 8) + p[3]) + 4;
+ us->srb->cmnd[7] = length >> 8;
+ us->srb->cmnd[8] = length;
+ break;
+
+ case MODE_SENSE_10:
+ length = ((p[0] << 8) + p[1]) + 8 > savelen ? savelen : ((p[0] << 8) + p[1]) + 8;
+ us->srb->cmnd[7] = length >> 8;
+ us->srb->cmnd[8] = length;
+ break;
+ }
+
+ US_DEBUGP("Old/New length = %d/%d\n", savelen, length);
+
+ if (us->srb->request_bufflen != length) {
+ us->srb->request_bufflen = length;
+ us->srb->result = us->pop(us->srb);
+ }
+ /* reset back to original values */
+
+ us->srb->request_bufflen = savelen;
+ switch (us->srb->cmnd[0]) {
+ case REQUEST_SENSE:
+ case INQUIRY:
+ case MODE_SENSE:
+ us->srb->cmnd[4] = saveallocation;
+ break;
+
+ case LOG_SENSE:
+ case MODE_SENSE_10:
+ us->srb->cmnd[7] = saveallocation >> 8;
+ us->srb->cmnd[8] = saveallocation;
+ break;
+ }
+ }
+ /* force attention on first command */
+ if (!us->attention_done) {
+ if (us->srb->cmnd[0] == REQUEST_SENSE) {
+ if (us->srb->result == (DID_OK << 16)) {
+ unsigned char *p = (unsigned char *)us->srb->request_buffer;
+
+ us->attention_done = 1;
+ if ((p[2] & 0x0f) != UNIT_ATTENTION) {
+ p[2] = UNIT_ATTENTION;
+ p[12] = 0x29; /* power on, reset or bus-reset */
+ p[13] = 0;
+ }
+ }
+ } else if (us->srb->cmnd[0] != INQUIRY &&
+ us->srb->result == (DID_OK << 16)) {
+ us->srb->result |= 2; /* force check condition */
+ }
+ }
+ }
}
us->srb->scsi_done(us->srb);
+ us->srb = NULL;
break;
case US_ACT_ABORT :
if (dev->descriptor.idVendor == 0x04e6 &&
dev->descriptor.idProduct == 0x0001) {
/* shuttle E-USB */
- protocol = US_PR_ZIP;
+ protocol = US_PR_CB;
subclass = US_SC_8070; /* an assumption */
} else if (dev->descriptor.bDeviceClass != 0 ||
dev->config->altsetting->interface->bInterfaceClass != 8 ||
usb_string(dev, dev->descriptor.iSerialNumber) ) {
make_guid(guid, dev->descriptor.idVendor, dev->descriptor.idProduct,
usb_string(dev, dev->descriptor.iSerialNumber));
- for (ss = us_list; ss; ss = ss->next) {
- if (GUID_EQUAL(guid, ss->guid)) {
- US_DEBUGP("Found existing GUID " GUID_FORMAT "\n", GUID_ARGS(guid));
- break;
- }
+ } else {
+ make_guid(guid, dev->descriptor.idVendor, dev->descriptor.idProduct,
+ "0");
+ }
+ for (ss = us_list; ss; ss = ss->next) {
+ if (!ss->pusb_dev && GUID_EQUAL(guid, ss->guid)) {
+ US_DEBUGP("Found existing GUID " GUID_FORMAT "\n", GUID_ARGS(guid));
+ flags = ss->flags;
+ break;
}
}
}
ss->subclass = interface->bInterfaceSubClass;
ss->protocol = interface->bInterfaceProtocol;
}
+ ss->attention_done = 0;
/* set the protocol op */
case US_PR_CB:
US_DEBUGPX("Control/Bulk\n");
ss->pop = pop_CBI;
+ ss->pop_reset = pop_CB_reset;
break;
case US_PR_CBI:
US_DEBUGPX("Control/Bulk/Interrupt\n");
ss->pop = pop_CBI;
+ ss->pop_reset = pop_CB_reset;
break;
default:
US_DEBUGPX("Bulk\n");
ss->pop = pop_Bulk;
+ ss->pop_reset = pop_Bulk_reset;
break;
}
/* exit if strange looking */
if (usb_set_configuration(dev, dev->config[0].bConfigurationValue) ||
+ usb_set_interface(dev, interface->bInterfaceNumber, 0) ||
!ss->ep_in || !ss->ep_out || (ss->protocol == US_PR_CBI && ss->ep_int == 0)) {
US_DEBUGP("Problems with device\n");
if (ss->host) {
/* make unique id if possible */
- if (dev->descriptor.iSerialNumber &&
- usb_string(dev, dev->descriptor.iSerialNumber) ) {
- make_guid(ss->guid, dev->descriptor.idVendor, dev->descriptor.idProduct,
- usb_string(dev, dev->descriptor.iSerialNumber));
- }
-
US_DEBUGP("New GUID " GUID_FORMAT "\n", GUID_ARGS(guid));
+ memcpy(ss->guid, guid, sizeof(guid));
/* set class specific stuff */
(struct us_data *)htmplt->proc_dir = ss;
- if (ss->protocol == US_PR_CBI)
+
+ if (dev->descriptor.idVendor == 0x04e6 &&
+ dev->descriptor.idProduct == 0x0001) {
+ devrequest dr;
+ __u8 qstat[2];
+
+ /* shuttle E-USB */
+ dr.requesttype = 0xC0;
+ dr.request = 1;
+ dr.index = 0;
+ dr.value = 0;
+ dr.length = 0;
+ ss->pusb_dev->bus->op->control_msg(ss->pusb_dev, usb_rcvctrlpipe(dev,0), &dr, qstat, 2);
+ US_DEBUGP("C0 status %x %x\n", qstat[0], qstat[1]);
+ init_waitqueue_head(&ss->ip_waitq);
+ ss->pusb_dev->bus->op->request_irq(ss->pusb_dev,
+ usb_rcvctrlpipe(ss->pusb_dev, ss->ep_int),
+ pop_CBI_irq, 0, (void *)ss);
+ interruptible_sleep_on_timeout(&ss->ip_waitq, HZ*5);
+
+ } else if (ss->protocol == US_PR_CBI)
init_waitqueue_head(&ss->ip_waitq);
+
/* start up our thread */
{
case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break;
case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break;
case WRITE_LONG_2: what = "WRITE_LONG_2"; break;
- default: what = "??"; break;
+ default: break;
}
printk(KERN_DEBUG USB_SCSI "Command %s (%d bytes)\n", what, srb->cmd_len);
printk(KERN_DEBUG USB_SCSI " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
NULL, /* truncate */
NULL, /* permissions */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
affs_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
affs_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
EIO_ERROR, /* rename */
EIO_ERROR, /* readlink */
bad_follow_link, /* follow_link */
+ EIO_ERROR, /* bmap */
EIO_ERROR, /* readpage */
EIO_ERROR, /* writepage */
- EIO_ERROR, /* bmap */
+ EIO_ERROR, /* flushpage */
EIO_ERROR, /* truncate */
EIO_ERROR, /* permission */
EIO_ERROR, /* smap */
- EIO_ERROR, /* update_page */
EIO_ERROR /* revalidate */
};
}
buffercount=0;
}
+ balance_dirty(dev);
if(write_error)
break;
}
* - RMK
*/
+#include <linux/sched.h>
+#include <linux/fs.h>
#include <linux/malloc.h>
#include <linux/locks.h>
#include <linux/errno.h>
/*
* if a new dirty buffer is created we need to balance bdflush.
+ *
+ * in the future we might want to make bdflush aware of different
+ * pressures on different devices - thus the (currently unused)
+ * 'dev' parameter.
*/
-static inline void balance_dirty (kdev_t dev)
+void balance_dirty(kdev_t dev)
{
- int too_many = (nr_buffers * bdf_prm.b_un.nfract/100);
-
- /* This buffer is dirty, maybe we need to start flushing.
- * If too high a percentage of the buffers are dirty...
- */
- if (nr_buffers_type[BUF_DIRTY] > too_many) {
- wakeup_bdflush(1);
+ int dirty = nr_buffers_type[BUF_DIRTY];
+ int ndirty = bdf_prm.b_un.ndirty;
+
+ if (dirty > ndirty) {
+ int wait = 0;
+ if (dirty > 2*ndirty)
+ wait = 1;
+ wakeup_bdflush(wait);
}
+}
- /* If this is a loop device, and
- * more than half of the buffers are dirty...
- * (Prevents no-free-buffers deadlock with loop device.)
- */
- if (MAJOR(dev) == LOOP_MAJOR &&
- nr_buffers_type[BUF_DIRTY]*2>nr_buffers)
- wakeup_bdflush(1);
+atomic_t too_many_dirty_buffers;
+
+static inline void __mark_dirty(struct buffer_head *bh, int flag)
+{
+ set_writetime(bh, flag);
+ refile_buffer(bh);
+ if (atomic_read(&too_many_dirty_buffers))
+ balance_dirty(bh->b_dev);
+}
+
+void __mark_buffer_dirty(struct buffer_head *bh, int flag)
+{
+ __mark_dirty(bh, flag);
+}
+
+void __atomic_mark_buffer_dirty(struct buffer_head *bh, int flag)
+{
+ lock_kernel();
+ __mark_dirty(bh, flag);
+ unlock_kernel();
}
/*
{
int dispose;
- if(buf->b_dev == B_FREE) {
+ if (buf->b_dev == B_FREE) {
printk("Attempt to refile free buffer\n");
return;
}
+
+ dispose = BUF_CLEAN;
+ if (buffer_locked(buf))
+ dispose = BUF_LOCKED;
if (buffer_dirty(buf))
dispose = BUF_DIRTY;
- else if (buffer_locked(buf))
- dispose = BUF_LOCKED;
- else
- dispose = BUF_CLEAN;
- if(dispose != buf->b_list) {
+
+ if (dispose != buf->b_list)
file_buffer(buf, dispose);
- if (dispose == BUF_DIRTY)
- balance_dirty(buf->b_dev);
- }
}
/*
* They show up in the buffer hash table and are registered in
* page->buffers.
*/
+ lock_kernel();
head = create_buffers(page_address(page), size, 1);
+ unlock_kernel();
if (page->buffers)
BUG();
if (!head)
* we have truncated the file and are going to free the
* blocks on-disk..
*/
-int generic_block_flushpage(struct inode *inode, struct page *page, unsigned long offset)
+int block_flushpage(struct inode *inode, struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
BUG();
if (!page->buffers)
return 0;
+ lock_kernel();
head = page->buffers;
bh = head;
/*
* subtle. We release buffer-heads only if this is
- * the 'final' flushpage. We invalidate the bmap
- * cached value in all cases.
+ * the 'final' flushpage. We have invalidated the bmap
+ * cached value unconditionally, so real IO is not
+ * possible anymore.
*/
if (!offset)
try_to_free_buffers(page);
+ unlock_kernel();
return 0;
}
-static inline void create_empty_buffers (struct page *page,
+static void create_empty_buffers (struct page *page,
struct inode *inode, unsigned long blocksize)
{
struct buffer_head *bh, *head, *tail;
+ lock_kernel();
head = create_buffers(page_address(page), blocksize, 1);
+ unlock_kernel();
if (page->buffers)
BUG();
get_page(page);
}
+/*
+ * block_write_full_page() is SMP-safe - currently it's still
+ * being called with the kernel lock held, but the code is ready.
+ */
int block_write_full_page (struct file *file, struct page *page, fs_getblock_t fs_get_block)
{
struct dentry *dentry = file->f_dentry;
bh->b_state = (1<<BH_Uptodate);
} else {
/*
- * block already exists, just mark it dirty:
+ * block already exists, just mark it uptodate and
+ * dirty:
*/
bh->b_end_io = end_buffer_io_sync;
set_bit(BH_Uptodate, &bh->b_state);
}
- mark_buffer_dirty(bh, 0);
+ atomic_mark_buffer_dirty(bh,0);
bh = bh->b_this_page;
block++;
return err;
}
-int block_write_one_page (struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf, fs_getblock_t fs_get_block)
+int block_write_partial_page (struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf, fs_getblock_t fs_get_block)
{
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
unsigned long block;
- int err, created;
+ int err, created, partial;
unsigned long blocksize, start_block, end_block;
unsigned long start_offset, start_bytes, end_bytes;
unsigned long bbits, phys, blocks, i, len;
char * target_buf;
target_buf = (char *)page_address(page) + offset;
- lock_kernel();
if (!PageLocked(page))
BUG();
i = 0;
bh = head;
+ partial = 0;
do {
if (!bh)
BUG();
if ((i < start_block) || (i > end_block)) {
+ if (!buffer_uptodate(bh))
+ partial = 1;
goto skip;
}
- unlock_kernel();
-
- err = -EFAULT;
- if (start_offset) {
- len = start_bytes;
- start_offset = 0;
- } else
- if (end_bytes && (i == end_block)) {
- len = end_bytes;
- end_bytes = 0;
- } else {
- /*
- * Overwritten block.
- */
- len = blocksize;
- }
- if (copy_from_user(target_buf, buf, len))
- goto out_nolock;
- target_buf += len;
- buf += len;
-
- /*
- * we dirty buffers only after copying the data into
- * the page - this way we can dirty the buffer even if
- * the bh is still doing IO.
- */
- lock_kernel();
if (!bh->b_blocknr) {
err = -EIO;
down(&inode->i_sem);
/*
* if partially written block which has contents on
* disk, then we have to read it first.
+ * We also rely on the fact that filesystem holes
+ * cannot be written.
*/
if (!created && (start_offset ||
(end_bytes && (i == end_block)))) {
bh->b_state = 0;
ll_rw_block(READ, 1, &bh);
+ lock_kernel();
wait_on_buffer(bh);
+ unlock_kernel();
err = -EIO;
if (!buffer_uptodate(bh))
goto out;
bh->b_end_io = end_buffer_io_sync;
set_bit(BH_Uptodate, &bh->b_state);
}
- mark_buffer_dirty(bh, 0);
+
+ err = -EFAULT;
+ if (start_offset) {
+ len = start_bytes;
+ start_offset = 0;
+ } else
+ if (end_bytes && (i == end_block)) {
+ len = end_bytes;
+ end_bytes = 0;
+ } else {
+ /*
+ * Overwritten block.
+ */
+ len = blocksize;
+ }
+ if (copy_from_user(target_buf, buf, len))
+ goto out;
+ target_buf += len;
+ buf += len;
+
+ /*
+ * we dirty buffers only after copying the data into
+ * the page - this way we can dirty the buffer even if
+ * the bh is still doing IO.
+ */
+ atomic_mark_buffer_dirty(bh,0);
skip:
i++;
block++;
bh = bh->b_this_page;
} while (bh != head);
- unlock_kernel();
- SetPageUptodate(page);
+ /*
+ * is this a partial write that happened to make all buffers
+ * uptodate then we can optimize away a bogus readpage() for
+ * the next read(). Here we 'discover' wether the page went
+ * uptodate as a result of this (potentially partial) write.
+ */
+ if (!partial)
+ SetPageUptodate(page);
return bytes;
out:
- unlock_kernel();
-out_nolock:
ClearPageUptodate(page);
return err;
}
* This function expects the page to be locked and may return
* before I/O is complete. You then have to check page->locked,
* page->uptodate, and maybe wait on page->wait.
+ *
+ * brw_page() is SMP-safe, although it's being called with the
+ * kernel lock held - but the code is ready.
*/
int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
{
struct buffer_head *head, *bh, *arr[MAX_BUF_PER_PAGE];
- int nr, fresh, block;
+ int nr, fresh /* temporary debugging flag */, block;
if (!PageLocked(page))
panic("brw_page: page not locked for I/O");
BUG();
}
set_bit(BH_Uptodate, &bh->b_state);
- mark_buffer_dirty(bh, 0);
+ atomic_mark_buffer_dirty(bh, 0);
arr[nr++] = bh;
}
bh = bh->b_this_page;
if ((rw == READ) && nr) {
if (Page_Uptodate(page))
BUG();
- unlock_kernel();
ll_rw_block(rw, nr, arr);
- lock_kernel();
} else {
if (!nr && rw == READ) {
SetPageUptodate(page);
page->owner = (int)current;
UnlockPage(page);
}
- if (nr && (rw == WRITE)) {
- unlock_kernel();
+ if (nr && (rw == WRITE))
ll_rw_block(rw, nr, arr);
- lock_kernel();
- }
}
return 0;
}
* mark_buffer_uptodate() functions propagate buffer state into the
* page struct once IO has completed.
*/
-int generic_readpage(struct file * file, struct page * page)
+int block_read_full_page(struct file * file, struct page * page)
{
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
- unsigned long block;
- int *p, nr[PAGE_SIZE/512];
- int i;
+ unsigned long iblock, phys_block;
+ struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ unsigned int blocksize, blocks;
+ int nr;
+ /*
+ * Read a full page: walk the page's buffers, bmap each one that is
+ * not yet uptodate, start read IO on the mapped ones and zero-fill
+ * holes in place. Expects the page locked (checked below).
+ */
- if (page->buffers) {
- printk("hm, no brw_page(%p) because IO already started.\n",
- page);
- goto out;
- }
+ if (!PageLocked(page))
+ PAGE_BUG(page);
+ blocksize = inode->i_sb->s_blocksize;
+ if (!page->buffers)
+ create_empty_buffers(page, inode, blocksize);
+ head = page->buffers;
- i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
- block = page->offset >> inode->i_sb->s_blocksize_bits;
- p = nr;
+ blocks = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
+ /* NOTE(review): 'blocks' appears unused below - confirm before removing */
+ iblock = page->offset >> inode->i_sb->s_blocksize_bits;
+ page->owner = -1;
+ head = page->buffers;
+ /* NOTE(review): 'head' was already set just above - redundant reassignment */
+ bh = head;
+ nr = 0;
do {
- *p = inode->i_op->bmap(inode, block);
- i--;
- block++;
- p++;
- } while (i > 0);
+ phys_block = bh->b_blocknr;
+ /*
+ * important, we have to retry buffers that already have
+ * their bnr cached but had an IO error!
+ */
+ if (!buffer_uptodate(bh)) {
+ phys_block = inode->i_op->bmap(inode, iblock);
+ /*
+ * this is safe to do because we hold the page lock:
+ */
+ if (phys_block) {
+ init_buffer(bh, inode->i_dev, phys_block,
+ end_buffer_io_async, NULL);
+ arr[nr] = bh;
+ nr++;
+ } else {
+ /*
+ * filesystem 'hole' represents zero-contents:
+ */
+ memset(bh->b_data, 0, blocksize);
+ set_bit(BH_Uptodate, &bh->b_state);
+ }
+ }
+ iblock++;
+ bh = bh->b_this_page;
+ } while (bh != head);
- /* IO start */
- brw_page(READ, page, inode->i_dev, nr, inode->i_sb->s_blocksize, 1);
-out:
+ ++current->maj_flt;
+ if (nr) {
+ if (Page_Uptodate(page))
+ BUG();
+ ll_rw_block(READ, nr, arr);
+ } else {
+ /*
+ * all buffers are uptodate - we can set the page
+ * uptodate as well.
+ */
+ SetPageUptodate(page);
+ page->owner = (int)current;
+ UnlockPage(page);
+ }
return 0;
}
tmp = tmp->b_this_page;
if (!buffer_busy(p))
continue;
-
- wakeup_bdflush(0);
return 0;
} while (tmp != bh);
run_task_queue(&tq_disk);
wake_up(&bdflush_done);
- /* If there are still a lot of dirty buffers around, skip the sleep
- and flush some more */
- if(ndirty == 0 || nr_buffers_type[BUF_DIRTY] <= nr_buffers * bdf_prm.b_un.nfract/100) {
+ /*
+ * If there are still a lot of dirty buffers around,
+ * skip the sleep and flush some more
+ */
+ if ((ndirty == 0) || (nr_buffers_type[BUF_DIRTY] <=
+ nr_buffers * bdf_prm.b_un.nfract/100)) {
+
+ atomic_set(&too_many_dirty_buffers, 0);
spin_lock_irq(¤t->sigmask_lock);
flush_signals(current);
spin_unlock_irq(¤t->sigmask_lock);
NULL, /* mknod */
NULL, /* rename */
NULL, /* readlink */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/*
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
ext2_rename, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
ext2_permission, /* permission */
- NULL /* smap */
+ NULL, /* smap */
+ NULL /* revalidate */
};
int ext2_check_dir_entry (const char * function, struct inode * dir,
static long ext2_write_one_page (struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf)
{
- return block_write_one_page(file, page, offset, bytes, buf, ext2_getblk_block);
+ /* generic helper renamed: partial-page writes go through block_write_partial_page() */
+ return block_write_partial_page(file, page, offset, bytes, buf, ext2_getblk_block);
}
/*
static ssize_t
ext2_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
- return generic_file_write(file, buf, count, ppos, ext2_write_one_page);
+ ssize_t retval = generic_file_write(file, buf, count, ppos, ext2_write_one_page);
+ if (retval > 0) {
+ struct inode *inode = file->f_dentry->d_inode;
+ /*
+ * a successful write strips the setuid/setgid bits and
+ * updates the ctime/mtime stamps; mark the inode dirty
+ * so the change reaches disk.
+ */
+ remove_suid(inode);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+ }
+ return retval;
}
/*
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
- generic_readpage, /* readpage */
- ext2_writepage, /* writepage */
ext2_bmap, /* bmap */
+ block_read_full_page, /* readpage */
+ ext2_writepage, /* writepage */
+ block_flushpage, /* flushpage */
ext2_truncate, /* truncate */
ext2_permission, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
- generic_block_flushpage,/* flushpage */
};
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/mm.h>
+#include <linux/smp_lock.h>
static int ext2_update_inode(struct inode * inode, int do_sync);
int ext2_bmap (struct inode * inode, int block)
{
- int i;
+ int i, ret;
int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
+ /*
+ * bmap is called without the big kernel lock held now, so take
+ * it here; every exit path funnels through 'out' to drop it.
+ */
+ ret = 0;
+ lock_kernel();
if (block < 0) {
ext2_warning (inode->i_sb, "ext2_bmap", "block < 0");
- return 0;
+ goto out;
}
if (block >= EXT2_NDIR_BLOCKS + addr_per_block +
(1 << (addr_per_block_bits * 2)) +
((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
ext2_warning (inode->i_sb, "ext2_bmap", "block > big");
- return 0;
+ goto out;
+ }
+ if (block < EXT2_NDIR_BLOCKS) {
+ ret = inode_bmap (inode, block);
+ goto out;
}
- if (block < EXT2_NDIR_BLOCKS)
- return inode_bmap (inode, block);
block -= EXT2_NDIR_BLOCKS;
if (block < addr_per_block) {
i = inode_bmap (inode, EXT2_IND_BLOCK);
if (!i)
- return 0;
- return block_bmap (bread (inode->i_dev, i,
+ goto out;
+ ret = block_bmap (bread (inode->i_dev, i,
inode->i_sb->s_blocksize), block);
+ goto out;
}
block -= addr_per_block;
if (block < (1 << (addr_per_block_bits * 2))) {
i = inode_bmap (inode, EXT2_DIND_BLOCK);
if (!i)
- return 0;
+ goto out;
i = block_bmap (bread (inode->i_dev, i,
inode->i_sb->s_blocksize),
block >> addr_per_block_bits);
if (!i)
- return 0;
- return block_bmap (bread (inode->i_dev, i,
+ goto out;
+ ret = block_bmap (bread (inode->i_dev, i,
inode->i_sb->s_blocksize),
- block & (addr_per_block - 1));
+ block & (addr_per_block - 1));
+ /*
+ * BUGFIX: without this goto the double-indirect result fell
+ * through into the triple-indirect lookup below, clobbering
+ * 'ret' (the pre-conversion code returned here directly).
+ */
+ goto out;
}
block -= (1 << (addr_per_block_bits * 2));
i = inode_bmap (inode, EXT2_TIND_BLOCK);
if (!i)
- return 0;
+ goto out;
i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
block >> (addr_per_block_bits * 2));
if (!i)
- return 0;
+ goto out;
i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
(block >> addr_per_block_bits) & (addr_per_block - 1));
if (!i)
- return 0;
- return block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
+ goto out;
+ ret = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
block & (addr_per_block - 1));
+out:
+ unlock_kernel();
+ return ret;
}
int ext2_bmap_create (struct inode * inode, int block)
unsigned long b;
unsigned long addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
- int phys_block;
+ int phys_block, ret;
+ lock_kernel();
+ ret = 0;
*err = -EIO;
if (block < 0) {
ext2_warning (inode->i_sb, "ext2_getblk", "block < 0");
- return 0;
+ goto abort;
}
if (block > EXT2_NDIR_BLOCKS + addr_per_block +
(1 << (addr_per_block_bits * 2)) +
((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
ext2_warning (inode->i_sb, "ext2_getblk", "block > big");
- return 0;
+ goto abort;
}
/*
* If this is a sequential block allocation, set the next_alloc_block
inode->i_sb->s_blocksize, b, err, 0, &phys_block, created);
out:
- if (!phys_block) {
- return 0;
- }
- if (*err) {
- return 0;
- }
- return phys_block;
+ if (!phys_block)
+ goto abort;
+ if (*err)
+ goto abort;
+ ret = phys_block;
+abort:
+ unlock_kernel();
+ return ret;
}
struct buffer_head * ext2_getblk (struct inode * inode, long block,
NULL, /* rename */
ext2_readlink, /* readlink */
ext2_follow_link, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
- NULL /* smap */
+ NULL, /* smap */
+ NULL /* revalidate */
};
static struct dentry * ext2_follow_link(struct dentry * dentry,
NULL, /* mknod */
NULL, /* rename */
NULL, /* readlink */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
hfs_file_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
cap_info_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidata */
};
hdr_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
&hpfs_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
- generic_readpage, /* readpage */
- NULL, /* writepage */
isofs_bmap, /* bmap */
+ block_read_full_page, /* readpage */
+ NULL, /* writepage */
+ NULL, /* flushpage */
NULL, /* truncate */
NULL /* permission */
};
#include <linux/init.h>
#include <linux/nls.h>
#include <linux/ctype.h>
+#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
return copy_to_user(buf, &tmp, bufsiz) ? -EFAULT : 0;
}
-int isofs_bmap(struct inode * inode,int block)
+static int do_isofs_bmap(struct inode * inode,int block)
{
off_t b_off, offset, size;
struct inode *ino;
return (b_off - offset + firstext) >> ISOFS_BUFFER_BITS(inode);
}
+/*
+ * bmap methods are invoked without the big kernel lock held after
+ * the inode_operations reordering, so wrap the real lookup
+ * (do_isofs_bmap) with lock/unlock_kernel here.
+ */
+int isofs_bmap(struct inode * inode,int block)
+{
+ int retval;
+
+ lock_kernel();
+ retval = do_isofs_bmap(inode, block);
+ unlock_kernel();
+ return retval;
+}
static void test_and_set_uid(uid_t *p, uid_t value)
{
minix_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
- generic_block_flushpage,/* flushpage */
+ block_flushpage, /* flushpage */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
nfs_rename, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
nfs_revalidate, /* revalidate */
};
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
nfs_readpage, /* readpage */
nfs_writepage, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
nfs_revalidate, /* revalidate */
- NULL, /* flushpage */
};
/* Hack for future NFS swap support */
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
+#include <linux/smp_lock.h>
#include <asm/segment.h>
#include <asm/system.h>
struct inode *inode = dentry->d_inode;
int error;
+ lock_kernel();
dprintk("NFS: nfs_readpage (%p %ld@%ld)\n",
page, PAGE_SIZE, page->offset);
get_page(page);
out_free:
free_page(page_address(page));
out:
+ unlock_kernel();
return error;
}
NULL, /* rename */
nfs_readlink, /* readlink */
nfs_follow_link, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/* Symlink caching in the page cache is even more simplistic
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
#define NFS_PARANOIA 1
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
u8 *buffer;
struct nfs_fattr fattr;
+ lock_kernel();
dprintk("NFS: nfs_writepage_sync(%s/%s %d@%ld)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
count, page->offset + offset);
inode->i_ino, fattr.fileid);
}
+ unlock_kernel();
return written? written : result;
}
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* mknod */
NULL, /* rename */
NULL, /* readlink */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
int do_pipe(int *fd)
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
static ssize_t arraylong_read(struct file * file, char * buf,
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/*
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- proc_permission /* permission */
+ proc_permission, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/*
&proc_file_operations, /* default proc file-ops */
NULL, /* create */
NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- NULL, /* readlink */
- NULL, /* follow_link */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* bmap */
- NULL, /* truncate */
- NULL /* permission */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* bmap */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* flushpage */
+ NULL, /* truncate */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/*
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
NULL, /* rename */
proc_readlink, /* readlink */
proc_follow_link, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- proc_permission /* permission */
+ proc_permission, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
static struct dentry * proc_follow_link(struct dentry *dentry,
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- proc_permission /* permission */
+ proc_permission, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
};
struct inode_operations proc_omirr_inode_operations = {
- &omirr_operations,
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- NULL, /* readlink */
- NULL, /* follow_link */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* bmap */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* smap */
+ &omirr_operations,
+ NULL, /* create */
+ NULL, /* lookup */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* bmap */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* flushpage */
+ NULL, /* truncate */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
NULL, /* rename */
devtree_readlink, /* readlink */
devtree_follow_link, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
- NULL /* smap */
+ NULL, /* smap */
+ NULL /* revalidate */
};
static struct dentry *devtree_follow_link(struct dentry *dentry,
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/*
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/*
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
/*
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
struct proc_dir_entry proc_openprom = {
NULL, /* rename */
proc_self_readlink, /* readlink */
proc_self_follow_link, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
static struct inode_operations proc_link_inode_operations = {
NULL, /* rename */
proc_readlink, /* readlink */
proc_follow_link, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
static struct proc_dir_entry proc_root_loadavg = {
* proc directories can do almost nothing..
*/
struct inode_operations proc_scsi_inode_operations = {
- &proc_scsi_operations, /* default scsi directory file-ops */
- NULL, /* create */
- proc_lookup, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- NULL, /* readlink */
- NULL, /* follow_link */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* bmap */
- NULL, /* truncate */
- NULL /* permission */
+&proc_scsi_operations, /* default scsi directory file-ops */
+ NULL, /* create */
+ proc_lookup, /* lookup */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* bmap */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* flushpage */
+ NULL, /* truncate */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
int get_not_present_info(char *buffer, char **start, off_t offset, int length)
* proc directories can do almost nothing..
*/
struct inode_operations proc_sysvipc_inode_operations = {
- &proc_sysvipc_operations, /* default net file-ops */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- NULL, /* readlink */
- NULL, /* follow_link */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* bmap */
- NULL, /* truncate */
- NULL /* permission */
+ &proc_sysvipc_operations, /* default net file-ops */
+ NULL, /* create */
+ NULL, /* lookup */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* bmap */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* flushpage */
+ NULL, /* truncate */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
smb_revalidate_inode, /* revalidate */
};
NULL, /* truncate */
smb_file_permission, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
smb_revalidate_inode, /* revalidate */
};
sysv_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
- generic_block_flushpage,/* flushpage */
+ block_flushpage, /* flushpage */
};
ufs_truncate, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
- generic_block_flushpage,/* flushpage */
+ block_flushpage, /* flushpage */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL, /* revalidate */
};
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
NULL /* revalidate */
};
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char *,int);
struct dentry * (*follow_link) (struct dentry *, struct dentry *, unsigned int);
+ /*
+ * the order of these functions within the VFS template has been
+ * changed because SMP locking has changed: from now on all bmap,
+ * readpage, writepage and flushpage functions are supposed to do
+ * whatever locking they need to get proper SMP operation - for
+ * now in most cases this means a lock/unlock_kernel at entry/exit.
+ * [The new order is also slightly more logical :)]
+ */
+ int (*bmap) (struct inode *,int);
int (*readpage) (struct file *, struct page *);
int (*writepage) (struct file *, struct page *);
- int (*bmap) (struct inode *,int);
+ int (*flushpage) (struct inode *, struct page *, unsigned long);
+
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int);
int (*smap) (struct inode *,int);
- int (*updatepage) (struct file *, struct page *, unsigned long, unsigned int);
int (*revalidate) (struct dentry *);
- int (*flushpage) (struct inode *, struct page *, unsigned long);
};
struct super_operations {
extern inline void mark_buffer_clean(struct buffer_head * bh)
{
- if (test_and_clear_bit(BH_Dirty, &bh->b_state)) {
- if (bh->b_list == BUF_DIRTY)
- refile_buffer(bh);
- }
+ /*
+ * Atomically clear the dirty bit; on a 1->0 transition let
+ * refile_buffer() choose the right list unconditionally (the
+ * old b_list == BUF_DIRTY pre-check has been dropped).
+ */
+ if (test_and_clear_bit(BH_Dirty, &bh->b_state))
+ refile_buffer(bh);
}
+extern void FASTCALL(__mark_buffer_dirty(struct buffer_head *bh, int flag));
+extern void FASTCALL(__atomic_mark_buffer_dirty(struct buffer_head *bh, int flag));
+
+#define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state)
+
extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
{
- if (!test_and_set_bit(BH_Dirty, &bh->b_state)) {
- set_writetime(bh, flag);
- if (bh->b_list != BUF_DIRTY)
- refile_buffer(bh);
- }
+ /*
+ * atomic_set_buffer_dirty is a CPU-atomic test-and-set; only a
+ * 0->1 dirty transition takes the out-of-line slow path.
+ */
+ if (!atomic_set_buffer_dirty(bh))
+ __mark_buffer_dirty(bh, flag);
+}
+
+/*
+ * SMP-safe version of the above - does synchronization with
+ * other users of buffer-cache data structures.
+ *
+ * since we test-set the dirty bit in a CPU-atomic way we also
+ * have optimized the common 'redirtying' case away completely.
+ */
+extern inline void atomic_mark_buffer_dirty(struct buffer_head * bh, int flag)
+{
+ /* 'flag' feeds the writetime handling, as in mark_buffer_dirty() */
+ if (!atomic_set_buffer_dirty(bh))
+ __atomic_mark_buffer_dirty(bh, flag);
}
+
+extern void balance_dirty(kdev_t);
extern int check_disk_change(kdev_t);
extern int invalidate_inodes(struct super_block *);
extern void invalidate_inode_pages(struct inode *);
typedef long (*writepage_t)(struct file *, struct page *, unsigned long, unsigned long, const char *);
typedef int (*fs_getblock_t)(struct inode *, long, int, int *, int *);
+/* Generic buffer handling for block filesystems.. */
+extern int block_read_full_page(struct file *, struct page *);
+extern int block_write_full_page (struct file *, struct page *, fs_getblock_t);
+extern int block_write_partial_page (struct file *, struct page *, unsigned long, unsigned long, const char *, fs_getblock_t);
+extern int block_flushpage(struct inode *, struct page *, unsigned long);
-extern int generic_readpage(struct file *, struct page *);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *, writepage_t);
-extern int generic_block_flushpage(struct inode *, struct page *, unsigned long);
-extern int block_write_one_page (struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf, fs_getblock_t fs_get_block);
-extern int block_write_full_page (struct file *file, struct page *page, fs_getblock_t fs_get_block);
extern struct super_block *get_super(kdev_t);
/*
* Make these inline later once they are working properly.
*/
+extern void __delete_from_swap_cache(struct page *page);
extern void delete_from_swap_cache(struct page *page);
extern void free_page_and_swap_cache(unsigned long addr);
EXPORT_SYMBOL(generic_file_read);
EXPORT_SYMBOL(generic_file_write);
EXPORT_SYMBOL(generic_file_mmap);
-EXPORT_SYMBOL(generic_readpage);
EXPORT_SYMBOL(file_lock_table);
EXPORT_SYMBOL(posix_lock_file);
EXPORT_SYMBOL(posix_test_lock);
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ NULL, /* flushpage */
NULL, /* truncate */
- proc_sys_permission
+ proc_sys_permission, /* permission */
+ NULL, /* smap */
+ NULL /* revalidate */
};
extern struct proc_dir_entry proc_sys_root;
spin_unlock(&pagecache_lock);
}
+extern atomic_t too_many_dirty_buffers;
+
int shrink_mmap(int priority, int gfp_mask)
{
static unsigned long clock = 0;
unsigned long limit = num_physpages;
struct page * page;
- int count, err;
+ int count, users;
count = limit >> priority;
if ((gfp_mask & __GFP_DMA) && !PageDMA(page))
continue;
- if (PageLocked(page))
+ /*
+ * Some common cases that we just short-circuit without
+ * getting the locks - we need to re-check this once we
+ * have the lock, but that's fine.
+ */
+ users = page_count(page);
+ if (!users)
continue;
+ if (!page->buffers) {
+ if (!page->inode)
+ continue;
+ if (users > 1)
+ continue;
+ }
+
+ /*
+ * ok, now the page looks interesting. Re-check things
+ * and keep the lock.
+ */
+ spin_lock(&pagecache_lock);
+ if (!page->inode && !page->buffers) {
+ spin_unlock(&pagecache_lock);
+ continue;
+ }
+ if (!page_count(page)) {
+// BUG();
+ spin_unlock(&pagecache_lock);
+ continue;
+ }
+ get_page(page);
+ if (TryLockPage(page)) {
+ spin_unlock(&pagecache_lock);
+ goto put_continue;
+ }
+
+ /*
+ * we keep pagecache_lock locked and unlock it in
+ * each branch, so that the page->inode case doesn't
+ * have to re-grab it. Here comes the 'real' logic
+ * to free memory:
+ */
/* Is it a buffer page? */
if (page->buffers) {
- if (TryLockPage(page))
- continue;
- err = try_to_free_buffers(page);
- UnlockPage(page);
-
- if (!err)
- continue;
- goto out;
+ kdev_t dev = page->buffers->b_dev;
+ spin_unlock(&pagecache_lock);
+ if (try_to_free_buffers(page))
+ goto made_progress;
+ if (!atomic_read(&too_many_dirty_buffers)) {
+ atomic_set(&too_many_dirty_buffers, 1);
+ balance_dirty(dev);
+ }
+ goto unlock_continue;
}
/* We can't free pages unless there's just one user */
- if (page_count(page) != 1)
- continue;
+ if (page_count(page) != 2)
+ goto spin_unlock_continue;
count--;
* were to be marked referenced..
*/
if (PageSwapCache(page)) {
- if (referenced && swap_count(page->offset) != 1)
- continue;
- delete_from_swap_cache(page);
- err = 1;
- goto out;
+ spin_unlock(&pagecache_lock);
+ if (referenced && swap_count(page->offset) != 2)
+ goto unlock_continue;
+ __delete_from_swap_cache(page);
+ page_cache_release(page);
+ goto made_progress;
}
- if (referenced)
- continue;
-
/* is it a page-cache page? */
- spin_lock(&pagecache_lock);
- if (page->inode) {
- if (pgcache_under_min())
- goto unlock_continue;
- if (TryLockPage(page))
- goto unlock_continue;
-
- if (page_count(page) == 1) {
- remove_page_from_inode_queue(page);
- remove_page_from_hash_queue(page);
- page->inode = NULL;
- }
+ if (!referenced && page->inode && !pgcache_under_min()) {
+ remove_page_from_inode_queue(page);
+ remove_page_from_hash_queue(page);
+ page->inode = NULL;
spin_unlock(&pagecache_lock);
- UnlockPage(page);
page_cache_release(page);
- err = 1;
- goto out;
-unlock_continue:
- spin_unlock(&pagecache_lock);
- continue;
+ goto made_progress;
}
+spin_unlock_continue:
spin_unlock(&pagecache_lock);
+unlock_continue:
+ UnlockPage(page);
+put_continue:
+ put_page(page);
} while (count > 0);
- err = 0;
-out:
- return err;
+ return 0;
+made_progress:
+ UnlockPage(page);
+ put_page(page);
+ return 1;
}
static inline struct page * __find_page_nolock(struct inode * inode, unsigned long offset, struct page *page)
* We do not have to check the return value here
* because it's a readahead.
*/
- lock_kernel();
inode->i_op->readpage(file, page);
- unlock_kernel();
page_cache = 0;
page_cache_release(page);
}
DECLARE_WAITQUEUE(wait, tsk);
add_wait_queue(&page->wait, &wait);
- tsk->state = TASK_UNINTERRUPTIBLE;
- run_task_queue(&tq_disk);
- if (PageLocked(page)) {
- do {
- tsk->state = TASK_UNINTERRUPTIBLE;
- run_task_queue(&tq_disk);
- schedule();
- } while (PageLocked(page));
- }
+ do {
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ run_task_queue(&tq_disk);
+ if (!PageLocked(page))
+ break;
+ schedule();
+ } while (PageLocked(page));
tsk->state = TASK_RUNNING;
remove_wait_queue(&page->wait, &wait);
}
goto page_ok;
}
-read_page:
+readpage:
/* ... and start the actual read. The read will unlock the page. */
- lock_kernel();
error = inode->i_op->readpage(filp, page);
- unlock_kernel();
if (!error) {
if (Page_Uptodate(page))
spin_unlock(&pagecache_lock);
page_cache = 0;
- goto read_page;
+ goto readpage;
}
*ppos = pos;
if (retval)
goto fput_out;
+ unlock_kernel();
retval = 0;
if (count) {
read_descriptor_t desc;
ppos = &in_file->f_pos;
if (offset) {
if (get_user(pos, offset))
- goto fput_out;
+ goto fput_out_lock;
ppos = &pos;
}
put_user(pos, offset);
}
-
+fput_out_lock:
+ lock_kernel();
fput_out:
fput(out_file);
fput_in:
offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
goto no_page_nolock;
+
unlock_kernel();
/*
goto failure;
}
- wait_on_page(page);
-
- if (!Page_Uptodate(page))
- PAGE_BUG(page);
+ if (!Page_Uptodate(page)) {
+ lock_page(page);
+ if (!Page_Uptodate(page))
+ goto page_not_uptodate;
+ UnlockPage(page);
+ }
success:
/*
*/
new_page = 0;
- lock_kernel();
+page_not_uptodate:
error = inode->i_op->readpage(file, page);
- unlock_kernel();
if (!error) {
wait_on_page(page);
if (!PageLocked(page))
PAGE_BUG(page);
ClearPageError(page);
- lock_kernel();
error = inode->i_op->readpage(file, page);
- unlock_kernel();
if (error)
goto failure;
wait_on_page(page);
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
+ NULL, /* bmap */
NULL, /* readpage */
NULL, /* writepage */
- NULL, /* bmap */
+ block_flushpage, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
- NULL, /* updatepage */
- NULL, /* revalidate */
- generic_block_flushpage, /* flushpage */
+ NULL /* revalidate */
};
struct inode swapper_inode = { i_op: &swapper_inode_operations };
remove_inode_page(page);
}
-
/*
* This must be called only on pages that have
* been verified to be in the swap cache.
*/
-void delete_from_swap_cache(struct page *page)
+void __delete_from_swap_cache(struct page *page)
{
long entry = page->offset;
- lock_page(page);
-
#ifdef SWAP_CACHE_INFO
swap_cache_del_total++;
#endif
page_address(page), page_count(page), entry);
#endif
remove_from_swap_cache (page);
+ swap_free (entry);
+}
+
+/*
+ * This must be called only on pages that have
+ * been verified to be in the swap cache.
+ */
+void delete_from_swap_cache(struct page *page)
+{
+ lock_page(page);
+
+ __delete_from_swap_cache(page);
+
UnlockPage(page);
page_cache_release(page);
- swap_free (entry);
}
/*
/*
* If we are the only user, then free up the swap cache.
*/
- if (PageSwapCache(page) && !is_page_shared(page)) {
+ if (PageSwapCache(page) && !is_page_shared(page))
delete_from_swap_cache(page);
- }
__free_page(page);
}