-BOOKS := wanbook.sgml z8530book.sgml mcabook.sgml videobook.sgml kernel-api.sgml
+BOOKS := wanbook.sgml z8530book.sgml mcabook.sgml videobook.sgml kernel-api.sgml parportbook.sgml
PS := $(patsubst %.sgml, %.ps, $(BOOKS))
PDF := $(patsubst %.sgml, %.pdf, $(BOOKS))
$(TOPDIR)/net/netsyms.c \
<kernel-api.tmpl >kernel-api.sgml
+parportbook.sgml: parportbook.tmpl
+ $(TOPDIR)/scripts/docgen $(TOPDIR)/drivers/parport/init.c \
+ <parportbook.tmpl >parportbook.sgml
+
DVI := $(patsubst %.sgml, %.dvi, $(BOOKS))
AUX := $(patsubst %.sgml, %.aux, $(BOOKS))
TEX := $(patsubst %.sgml, %.tex, $(BOOKS))
+++ /dev/null
---- drivers/usb/usb-storage.c.orig Thu Mar 30 20:31:08 2000
-+++ drivers/usb/usb-storage.c Sun Apr 2 15:20:21 2000
-@@ -1417,6 +1418,9 @@
- /* lock the device pointers */
- spin_lock_irqsave(&(us->dev_spinlock), flags);
-
-+ /* lock the device pointers */
-+ spin_lock_irqsave(&(us->dev_spinlock), flags);
-+
- /* our device has gone - pretend not ready */
- /* FIXME: we also need to handle INQUIRY here,
- * probably */
-@@ -1862,6 +1866,9 @@
- US_DEBUGP("-- device was not in use\n");
- return;
- }
-+
-+ /* lock access to the device data structure */
-+ spin_lock_irqsave(&(ss->dev_spinlock), flags);
-
- /* lock access to the device data structure */
- spin_lock_irqsave(&(ss->dev_spinlock), flags);
elevator_merge_requests(&q->elevator, req, next);
req->bhtail->b_reqnext = next->bh;
req->bhtail = next->bhtail;
- req->nr_sectors += next->nr_sectors;
+ req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
next->rq_status = RQ_INACTIVE;
list_del(&next->queue);
wake_up (&wait_for_request);
break;
req->bhtail->b_reqnext = bh;
req->bhtail = bh;
- req->nr_sectors += count;
+ req->nr_sectors = req->hard_nr_sectors += count;
drive_stat_acct(req, count, 0);
elevator_merge_after(elevator, req, latency);
req->bh = bh;
req->buffer = bh->b_data;
req->current_nr_sectors = count;
- req->sector = sector;
- req->nr_sectors += count;
+ req->sector = req->hard_sector = sector;
+ req->nr_sectors = req->hard_nr_sectors += count;
drive_stat_acct(req, count, 0);
elevator_merge_before(elevator, req, latency);
/* fill up the request-info, and add it to the queue */
req->cmd = rw;
req->errors = 0;
- req->sector = sector;
- req->nr_sectors = count;
+ req->hard_sector = req->sector = sector;
+ req->hard_nr_sectors = req->nr_sectors = count;
req->current_nr_sectors = count;
req->nr_segments = 1; /* Always 1 for a new request. */
req->nr_hw_segments = 1; /* Always 1 for a new request. */
int nsect;
req->errors = 0;
- if (!uptodate) {
+ if (!uptodate)
printk("end_request: I/O error, dev %s (%s), sector %lu\n",
kdevname(req->rq_dev), name, req->sector);
- if ((bh = req->bh) != NULL) {
- nsect = bh->b_size >> 9;
- req->nr_sectors--;
- req->nr_sectors &= ~(nsect - 1);
- req->sector += nsect;
- req->sector &= ~(nsect - 1);
- }
- }
if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
req->bh = bh->b_reqnext;
bh->b_reqnext = NULL;
bh->b_end_io(bh, uptodate);
if ((bh = req->bh) != NULL) {
+ req->hard_sector += nsect;
+ req->hard_nr_sectors -= nsect;
+ req->sector = req->hard_sector;
+ req->nr_sectors = req->hard_nr_sectors;
+
req->current_nr_sectors = bh->b_size >> 9;
if (req->nr_sectors < req->current_nr_sectors) {
req->nr_sectors = req->current_nr_sectors;
pcd_unit = unit;
}
pcd_sector = CURRENT->sector;
- pcd_count = CURRENT->nr_sectors;
+ pcd_count = CURRENT->current_nr_sectors;
pcd_buf = CURRENT->buffer;
pcd_busy = 1;
ps_set_intr(do_pcd_read,0,0,nice);
}
}
+static inline int pd_new_segment(request_queue_t *q, struct request *req, int max_segments)
+{
+ if (max_segments > cluster)
+ max_segments = cluster;
+
+ if (req->nr_segments < max_segments) {
+ req->nr_segments++;
+ q->elevator.nr_segments++;
+ return 1;
+ }
+ return 0;
+}
+
+static int pd_back_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
+ return 1;
+ return pd_new_segment(q, req, max_segments);
+}
+
+static int pd_front_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (bh->b_data + bh->b_size == req->bh->b_data)
+ return 1;
+ return pd_new_segment(q, req, max_segments);
+}
+
+static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
+ struct request *next, int max_segments)
+{
+ int total_segments = req->nr_segments + next->nr_segments;
+ int same_segment;
+
+ if (max_segments > cluster)
+ max_segments = cluster;
+
+ same_segment = 0;
+ if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
+ total_segments--;
+ same_segment = 1;
+ }
+
+ if (total_segments > max_segments)
+ return 0;
+
+ q->elevator.nr_segments -= same_segment;
+ req->nr_segments = total_segments;
+ return 1;
+}
+
int pd_init (void)
{ int i;
+ request_queue_t * q;
if (disable) return -1;
if (devfs_register_blkdev(MAJOR_NR,name,&pd_fops)) {
name,major);
return -1;
}
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ q = BLK_DEFAULT_QUEUE(MAJOR_NR);
+ blk_init_queue(q, DEVICE_REQUEST);
+ q->back_merge_fn = pd_back_merge_fn;
+ q->front_merge_fn = pd_front_merge_fn;
+ q->merge_requests_fn = pd_merge_requests_fn;
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
pd_gendisk.major = major;
static void do_pd_request (request_queue_t * q)
{ struct buffer_head * bh;
- struct request * req;
int unit;
if (pd_busy) return;
pd_dev = MINOR(CURRENT->rq_dev);
pd_unit = unit = DEVICE_NR(CURRENT->rq_dev);
pd_block = CURRENT->sector;
- pd_count = CURRENT->nr_sectors;
+ pd_run = CURRENT->nr_sectors;
+ pd_count = CURRENT->current_nr_sectors;
bh = CURRENT->bh;
- req = CURRENT;
- if (bh->b_reqnext)
- printk("%s: OUCH: b_reqnext != NULL\n",PD.name);
if ((pd_dev >= PD_DEVS) ||
((pd_block+pd_count) > pd_hd[pd_dev].nr_sects)) {
}
pd_cmd = CURRENT->cmd;
- pd_run = pd_count;
- while ((pd_run <= cluster) &&
- (req = blkdev_next_request(req)) &&
- (pd_block+pd_run == req->sector) &&
- (pd_cmd == req->cmd) &&
- (pd_dev == MINOR(req->rq_dev)))
- pd_run += req->nr_sectors;
-
pd_poffs = pd_hd[pd_dev].start_sect;
pd_block += pd_poffs;
pd_buf = CURRENT->buffer;
printk("%s: OUCH: request list changed unexpectedly\n",
PD.name);
- pd_count = CURRENT->nr_sectors;
+ pd_count = CURRENT->current_nr_sectors;
pd_buf = CURRENT->buffer;
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
}
}
+static inline int pf_new_segment(request_queue_t *q, struct request *req, int max_segments)
+{
+ if (max_segments > cluster)
+ max_segments = cluster;
+
+ if (req->nr_segments < max_segments) {
+ req->nr_segments++;
+ q->elevator.nr_segments++;
+ return 1;
+ }
+ return 0;
+}
+
+static int pf_back_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
+ return 1;
+ return pf_new_segment(q, req, max_segments);
+}
+
+static int pf_front_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (bh->b_data + bh->b_size == req->bh->b_data)
+ return 1;
+ return pf_new_segment(q, req, max_segments);
+}
+
+static int pf_merge_requests_fn(request_queue_t *q, struct request *req,
+ struct request *next, int max_segments)
+{
+ int total_segments = req->nr_segments + next->nr_segments;
+ int same_segment;
+
+ if (max_segments > cluster)
+ max_segments = cluster;
+
+ same_segment = 0;
+ if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
+ total_segments--;
+ same_segment = 1;
+ }
+
+ if (total_segments > max_segments)
+ return 0;
+
+ q->elevator.nr_segments -= same_segment;
+ req->nr_segments = total_segments;
+ return 1;
+}
+
int pf_init (void) /* preliminary initialisation */
{ int i;
+ request_queue_t * q;
if (disable) return -1;
major);
return -1;
}
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ q = BLK_DEFAULT_QUEUE(MAJOR_NR);
+ blk_init_queue(q, DEVICE_REQUEST);
+ q->back_merge_fn = pf_back_merge_fn;
+ q->front_merge_fn = pf_front_merge_fn;
+ q->merge_requests_fn = pf_merge_requests_fn;
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
static void do_pf_request (request_queue_t * q)
{ struct buffer_head * bh;
- struct request * req;
int unit;
if (pf_busy) return;
pf_unit = unit = DEVICE_NR(CURRENT->rq_dev);
pf_block = CURRENT->sector;
- pf_count = CURRENT->nr_sectors;
+ pf_run = CURRENT->nr_sectors;
+ pf_count = CURRENT->current_nr_sectors;
bh = CURRENT->bh;
- req = CURRENT;
- if (bh->b_reqnext)
- printk("%s: OUCH: b_reqnext != NULL\n",PF.name);
if ((pf_unit >= PF_UNITS) || (pf_block+pf_count > PF.capacity)) {
end_request(0);
}
pf_cmd = CURRENT->cmd;
- pf_run = pf_count;
- while ((pf_run <= cluster) &&
- (req = blkdev_next_request(req)) &&
- (pf_block+pf_run == req->sector) &&
- (pf_cmd == req->cmd) &&
- (pf_unit == DEVICE_NR(req->rq_dev)))
- pf_run += req->nr_sectors;
-
pf_buf = CURRENT->buffer;
pf_retries = 0;
printk("%s: OUCH: request list changed unexpectedly\n",
PF.name);
- pf_count = CURRENT->nr_sectors;
+ pf_count = CURRENT->current_nr_sectors;
pf_buf = CURRENT->buffer;
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
* 990605 Made changes to code to support Firmware 1.22a, added
* fairly useless proc entry.
* 990610 removed said useless proc code for the merge <alan>
+ * 000403 Removed last traces of proc code. <davej>
*/
#include <linux/module.h>
mode_debug = 0;
}
-static int pcwd_proc_get_info(char *buffer, char **start, off_t offset,
- int length, int inout)
-{
- int len;
- off_t begin = 0;
-
- revision = get_revision();
- len = sprintf(buffer, "Version = " WD_VER "\n");
-
- if (revision == PCWD_REVISION_A)
- len += sprintf(buffer + len, "Revision = A\n");
- else
- len += sprintf(buffer + len, "Revision = C\n");
-
- if (supports_temp) {
- unsigned short c = inb(current_readport);
-
- len += sprintf(buffer + len, "Temp = Yes\n"
- "Current temp = %d (Celsius)\n",
- c);
- } else
- len += sprintf(buffer + len, "Temp = No\n");
-
- *start = buffer + (offset);
- len -= offset;
-
- if (len > length)
- len = length;
-
- return len;
-}
-
static struct file_operations pcwd_fops = {
read: pcwd_read,
write: pcwd_write,
unsigned int minor = MINOR (inode->i_rdev);
struct pp_struct *pp = file->private_data;
- if (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT) {
+ if (pp->pdev && pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT) {
if (!(pp->flags & PP_CLAIMED)) {
parport_claim_or_block (pp->pdev);
pp->flags |= PP_CLAIMED;
(!drive->forced_geom) && drive->bios_sect && drive->bios_head)
drive->bios_cyl = (capacity / drive->bios_sect) / drive->bios_head;
-#if 0 /* done instead for entire identify block in arch/ide.h stuff */
- /* fix byte-ordering of buffer size field */
- id->buf_size = le16_to_cpu(id->buf_size);
-#endif
- printk (KERN_INFO "%s: %.40s, %ldMB w/%dkB Cache, CHS=%d/%d/%d",
- drive->name, id->model,
- capacity/2048L, id->buf_size/2,
- drive->bios_cyl, drive->bios_head, drive->bios_sect);
+ printk (KERN_INFO "%s: %ld sectors", drive->name, capacity);
+
+ /* Give size in megabytes (MB), not mebibytes (MiB). */
+ /* We compute the exact rounded value, avoiding overflow. */
+ printk (" (%ld MB)", (capacity - capacity/625 + 974)/1950);
+
+ /* Only print cache size when it was specified */
+ if (id->buf_size)
+ printk (" w/%dKiB Cache", id->buf_size/2);
+
+ printk(", CHS=%d/%d/%d",
+ drive->bios_cyl, drive->bios_head, drive->bios_sect);
#ifdef CONFIG_BLK_DEV_IDEDMA
if (drive->using_dma)
(void) HWIF(drive)->dmaproc(ide_dma_verbose, drive);
rq->cmd = IDE_DRIVE_CMD;
rq->sector = 0;
rq->nr_sectors = 0;
+ rq->nr_segments = 0;
rq->current_nr_sectors = 0;
rq->sem = NULL;
rq->bh = NULL;
* Aug 8, 1998 acme Initial version.
*/
-#ifdef MODULE
-#ifdef MODVERSIONS
-#include <linux/modversions.h>
-#endif
+#include <linux/init.h> /* __init */
#include <linux/module.h>
-#else
-#define EXPORT_SYMBOL(function)
-#endif
#include <linux/kernel.h> /* printk(), and other useful stuff */
#include <linux/stddef.h> /* offsetof(), etc. */
#include <linux/errno.h> /* return codes */
#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
#define MOD_VERSION 0
-#define MOD_RELEASE 5
+#define MOD_RELEASE 6
-#ifdef MODULE
MODULE_AUTHOR("Arnaldo Carvalho de Melo");
MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver");
-#endif
/* Function Prototypes */
/* Module entry points. These are called by the OS and must be public. */
* Return: 0 Ok
* < 0 error.
* Context: process */
-#ifdef MODULE
-int init_module(void)
+
+int __init cycx_drv_init(void)
{
printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
copyright);
return 0;
}
+
/* Module 'remove' entry point.
* o release all remaining system resources */
-void cleanup_module(void)
+void cycx_drv_cleanup(void)
{
}
-#endif
+
/* Kernel APIs */
/* Set up adapter.
* o detect adapter type
return crc;
}
+
+module_init(cycx_drv_init);
+module_exit(cycx_drv_cleanup);
+
/* End */
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
+* 2000/04/02 acme dprintk and cycx_debug
+* module_init/module_exit
* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count
* and cyclomx_close to cyclomx_mod_dec_use_count
* 2000/01/08 acme cleanup
#include <asm/uaccess.h> /* kernel <-> user copy */
#include <linux/init.h> /* __init (when not using as a module) */
+/* Debug */
+
+unsigned int cycx_debug = 0;
+
#ifdef MODULE
MODULE_AUTHOR("Arnaldo Carvalho de Melo");
MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "cyclomx debug level");
#endif
/* Defines & Macros */
#define DRV_VERSION 0 /* version number */
-#define DRV_RELEASE 6 /* release (minor version) number */
+#define DRV_RELEASE 7 /* release (minor version) number */
#define MAX_CARDS 1 /* max number of adapters */
#ifndef CONFIG_CYCLOMX_CARDS /* configurable option */
/* Function Prototypes */
-/* Module entry points */
-int init_module (void);
-void cleanup_module (void);
-
/* WAN link driver entry points */
static int setup (wan_device_t *wandev, wandev_conf_t *conf);
static int shutdown (wan_device_t *wandev);
* < 0 error.
* Context: process
*/
-#ifdef MODULE
-int init_module (void)
-#else
int __init cyclomx_init (void)
-#endif
{
int cnt, err = 0;
* o unregister all adapters from the WAN router
* o release all remaining system resources
*/
-#ifdef MODULE
-void cleanup_module (void)
+void cyclomx_cleanup (void)
{
int i = 0;
kfree(card_array);
}
-#endif
+
/* WAN Device Driver Entry Points */
/*
* Setup/configure WAN link driver.
spin_unlock_irqrestore(&card->lock, host_cpu_flags);
}
+module_init(cyclomx_init);
+module_exit(cyclomx_cleanup);
+
/* End */
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
+* 2000/04/02 acme dprintk, cycx_debug
+* fixed the bug introduced in get_dev_by_lcn and
+* get_dev_by_dte_addr by the anonymous hacker
+* that converted this driver to softnet
* 2000/01/08 acme cleanup
* 1999/10/27 acme use ARPHRD_HWX25 so that the X.25 stack know
* that we have a X.25 stack implemented in
u32 idle_tmout; /* sec, before disconnecting */
struct sk_buff *rx_skb; /* receive socket buffer */
cycx_t *card; /* -> owner */
- struct enet_statistics ifstats; /* interface statistics */
+ struct net_device_stats ifstats;/* interface statistics */
} x25_channel_t;
/* Function Prototypes */
static void x25_dump_config(TX25Config *conf);
static void x25_dump_stats(TX25Stats *stats);
static void x25_dump_devs(wan_device_t *wandev);
-#define dprintk(format, a...) printk(format, ##a)
#else
#define hex_dump(msg, p, len)
#define x25_dump_config(conf)
#define x25_dump_stats(stats)
#define x25_dump_devs(wandev)
-#define dprintk(format, a...)
#endif
/* Public Functions */
if (sizerem)
nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
- dprintk(KERN_INFO "connect_intr:lcn=%d, local=%s, remote=%s\n",
+ dprintk(1, KERN_INFO "connect_intr:lcn=%d, local=%s, remote=%s\n",
lcn, loc, rem);
if ((dev = get_dev_by_dte_addr(wandev, rem)) == NULL) {
cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
- dprintk(KERN_INFO "%s: connect_confirm_intr:lcn=%d, key=%d\n",
+ dprintk(1, KERN_INFO "%s: connect_confirm_intr:lcn=%d, key=%d\n",
card->devname, lcn, key);
if ((dev = get_dev_by_lcn(wandev, -key)) == NULL) {
u8 lcn;
cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- dprintk(KERN_INFO "%s: disconnect_confirm_intr:lcn=%d\n",
+ dprintk(1, KERN_INFO "%s: disconnect_confirm_intr:lcn=%d\n",
card->devname, lcn);
if ((dev = get_dev_by_lcn(wandev, lcn)) == NULL) {
/* Invalid channel, discard packet */
u8 lcn;
cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- dprintk(KERN_INFO "disconnect_intr:lcn=%d\n", lcn);
+ dprintk(1, KERN_INFO "disconnect_intr:lcn=%d\n", lcn);
if ((dev = get_dev_by_lcn(wandev, lcn)) != NULL) {
x25_channel_t *chan = dev->priv;
key = ffz(card->u.x.connection_keys);
set_bit(key, (void*)&card->u.x.connection_keys);
++key;
- dprintk(KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
+ dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
memset(d, 0, sizeof(d));
d[1] = key; /* user key */
d[2] = 0x10;
x25_channel_t *chan;
while (dev) {
+ chan = (x25_channel_t*)dev->priv;
+
if (chan->lcn == lcn)
break;
dev = chan->slave;
x25_channel_t *chan;
while (dev) {
+ chan = (x25_channel_t*)dev->priv;
+
if (!strcmp(chan->addr, dte))
break;
dev = chan->slave;
if (!chan->addr[0])
return -EINVAL; /* no destination address */
- dprintk(KERN_INFO "%s: placing X.25 call to %s...\n",
+ dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n",
card->devname, chan->addr);
if (x25_place_call(card, chan))
request_module ("parport_lowlevel");
}
+/**
+ * parport_register_driver - register a parallel port device driver
+ * @drv: structure describing the driver
+ *
+ * This can be called by a parallel port device driver in order to
+ * receive notifications about ports being found in the system, as
+ * well as ports no longer available.
+ *
+ * The @drv structure is allocated by the caller and must not be
+ * deallocated until after calling parport_unregister_driver().
+ *
+ * Returns 0 on success. Currently it always succeeds.
+ **/
int parport_register_driver (struct parport_driver *drv)
{
struct parport *port;
return 0;
}
+/**
+ * parport_unregister_driver - deregister a parallel port device driver
+ * @arg: structure describing the driver that was given to
+ * parport_register_driver()
+ *
+ * This should be called by a parallel port device driver that has
+ * registered itself using parport_register_driver() when it is about
+ * to be unloaded.
+ *
+ * When it returns, the driver's attach() routine will no longer be
+ * called, and for each port that attach() was called for, the
+ * detach() routine will have been called.
+ *
+ * If the caller's attach() function can block, it is their
+ * responsibility to make sure to wait for it to exit before
+ * unloading.
+ **/
void parport_unregister_driver (struct parport_driver *arg)
{
struct parport_driver *drv = driver_chain, *olddrv = NULL;
}
}
-/* Return a list of all the ports we know about. This function shouldn't
- * really be used -- use parport_register_driver instead. */
+/**
+ * parport_enumerate - return a list of the system's parallel ports
+ *
+ * This returns the head of the list of parallel ports in the system.
+ * The structure that is returned describes the first port in the
+ * list, and its 'next' member points to the next port, or %NULL if
+ * it's the last port.
+ *
+ * If there are no parallel ports in the system, parport_enumerate()
+ * will return %NULL.
+ **/
struct parport *parport_enumerate(void)
{
if (!portlist)
return portlist;
}
+/**
+ * parport_register_port - register a parallel port
+ * @base: base I/O address
+ * @irq: IRQ line
+ * @dma: DMA channel
+ * @ops: pointer to the port driver's port operations structure
+ *
+ * When a parallel port (lowlevel) driver finds a port that should be
+ * made available to parallel port device drivers, it should call
+ * parport_register_port(). The @base, @irq, and @dma parameters are
+ * for the convenience of port drivers, and for ports where they
+ * aren't meaningful needn't be set to anything special. They can be
+ * altered afterwards by adjusting the relevant members of the parport
+ * structure that is returned and represents the port. They should
+ * not be tampered with after calling parport_announce_port(), however.
+ *
+ * If there are parallel port device drivers in the system that have
+ * registered themselves using parport_register_driver(), they are not
+ * told about the port at this time; that is done by
+ * parport_announce_port().
+ *
+ * The @ops structure is allocated by the caller, and must not be
+ * deallocated before calling parport_unregister_port().
+ *
+ * If there is no memory to allocate a new parport structure, this
+ * function will return %NULL.
+ **/
struct parport *parport_register_port(unsigned long base, int irq, int dma,
struct parport_operations *ops)
{
return tmp;
}
+/**
+ * parport_announce_port - tell device drivers about a parallel port
+ * @port: parallel port to announce
+ *
+ * After a port driver has registered a parallel port with
+ * parport_register_port, and performed any necessary initialisation
+ * or adjustments, it should call parport_announce_port() in order to
+ * notify all device drivers that have called
+ * parport_register_driver(). Their attach() functions will be
+ * called, with @port as the parameter.
+ **/
void parport_announce_port (struct parport *port)
{
#ifdef CONFIG_PARPORT_1284
kfree(port);
}
+/**
+ * parport_unregister_port - deregister a parallel port
+ * @port: parallel port to deregister
+ *
+ * When a parallel port driver is forcibly unloaded, or a parallel
+ * port becomes inaccessible, the port driver must call this function
+ * in order to deal with device drivers that still want to use it.
+ *
+ * The parport structure associated with the port has its operations
+ * structure replaced with one containing 'null' operations that
+ * return errors or just don't do anything.
+ *
+ * Any drivers that have registered themselves using
+ * parport_register_driver() are notified that the port is no longer
+ * accessible by having their detach() routines called with @port as
+ * the parameter.
+ **/
void parport_unregister_port(struct parport *port)
{
struct parport *p;
free_port (port);
}
-struct pardevice *parport_register_device(struct parport *port, const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(int, void *, struct pt_regs *),
- int flags, void *handle)
+/**
+ * parport_register_device - register a device on a parallel port
+ * @port: port to which the device is attached
+ * @name: a name to refer to the device
+ * @pf: preemption callback
+ * @kf: kick callback (wake-up)
+ * @irq_func: interrupt handler
+ * @flags: registration flags
+ * @handle: data for callback functions
+ *
+ * This function, called by parallel port device drivers, declares
+ * that a device is connected to a port, and tells the system all it
+ * needs to know.
+ *
+ * The @name is allocated by the caller and must not be deallocated
+ * until the caller calls parport_unregister_device() for that device.
+ *
+ * The preemption callback function, @pf, is called when this device
+ * driver has claimed access to the port but another device driver
+ * wants to use it. It is given @handle as its parameter, and should
+ * return zero if it is willing for the system to release the port to
+ * another driver on its behalf. If it wants to keep control of the
+ * port it should return non-zero, and no action will be taken. It is
+ * good manners for the driver to try to release the port at the
+ * earliest opportunity after its preemption callback rejects a
+ * preemption attempt. Note that if a preemption callback is happy
+ * for preemption to go ahead, there is no need to release the port;
+ * it is done automatically. This function may not block, as it may
+ * be called from interrupt context. If the device driver does not
+ * support preemption, @pf can be %NULL.
+ *
+ * The wake-up ("kick") callback function, @kf, is called when the
+ * port is available to be claimed for exclusive access; that is,
+ * parport_claim() is guaranteed to succeed when called from inside
+ * the wake-up callback function. If the driver wants to claim the
+ * port it should do so; otherwise, it need not take any action. This
+ * function may not block, as it may be called from interrupt context.
+ * If the device driver does not want to be explicitly invited to
+ * claim the port in this way, @kf can be %NULL.
+ *
+ * The interrupt handler, @irq_func, is called when an interrupt
+ * arrives from the parallel port. Note that if a device driver wants
+ * to use interrupts it should use parport_enable_irq(), and can also
+ * check the irq member of the parport structure representing the
+ * port.
+ *
+ * The parallel port (lowlevel) driver is the one that has called
+ * request_irq() and whose interrupt handler is called first. This
+ * handler does whatever needs to be done to the hardware to
+ * acknowledge the interrupt (for PC-style ports there is nothing
+ * special to be done). It then tells the IEEE 1284 code about the
+ * interrupt, which may involve reacting to an IEEE 1284 event
+ * depending on the current IEEE 1284 phase. After this, it calls
+ * @irq_func. Needless to say, @irq_func will be called from
+ * interrupt context, and may not block.
+ *
+ * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and so
+ * should only be used when sharing the port with other device drivers
+ * is impossible and would lead to incorrect behaviour. Use it
+ * sparingly! Normally, @flags will be zero.
+ *
+ * This function returns a pointer to a structure that represents the
+ * device on the port, or %NULL if there is not enough memory to
+ * allocate space for that structure.
+ **/
+struct pardevice *
+parport_register_device(struct parport *port, const char *name,
+ int (*pf)(void *), void (*kf)(void *),
+ void (*irq_func)(int, void *, struct pt_regs *),
+ int flags, void *handle)
{
struct pardevice *tmp;
return NULL;
}
+/**
+ * parport_unregister_device - deregister a device on a parallel port
+ * @dev: pointer to structure representing device
+ *
+ * This undoes the effect of parport_register_device().
+ **/
void parport_unregister_device(struct pardevice *dev)
{
struct parport *port;
free_port (port);
}
+/**
+ * parport_claim - claim access to a parallel port device
+ * @dev: pointer to structure representing a device on the port
+ *
+ * This function will not block and so can be used from interrupt
+ * context. If parport_claim() succeeds in claiming access to the
+ * port it returns zero and the port is available to use. It may fail
+ * (returning non-zero) if the port is in use by another driver and
+ * that driver is not willing to relinquish control of the port.
+ **/
int parport_claim(struct pardevice *dev)
{
struct pardevice *oldcad;
return -EAGAIN;
}
+/**
+ * parport_claim_or_block - claim access to a parallel port device
+ * @dev: pointer to structure representing a device on the port
+ *
+ * This behaves like parport_claim(), but will block if necessary to
+ * wait for the port to be free. A return value of 1 indicates that
+ * it slept; 0 means that it succeeded without needing to sleep. A
+ * negative error code indicates failure.
+ **/
int parport_claim_or_block(struct pardevice *dev)
{
int r;
return r;
}
+/**
+ * parport_release - give up access to a parallel port device
+ * @dev: pointer to structure representing parallel port device
+ *
+ * This function cannot fail, but it should not be called without the
+ * port claimed. Similarly, if the port is already claimed you should
+ * not try claiming it again.
+ **/
void parport_release(struct pardevice *dev)
{
struct parport *port = dev->port->physport;
}
/* Mountpoints don't count */
- if (root->d_mounts != root ||
- root->d_covers != root) {
+ if (d_mountpoint(root)) {
DPRINTK(("is_tree_busy: mountpoint\n"));
count--;
}
count += (dentry->d_count - 1);
/* Mountpoints don't count */
- if (dentry->d_mounts != dentry ||
- dentry->d_covers != dentry) {
+ if (d_mountpoint(dentry)) {
DPRINTK(("is_tree_busy: mountpoint\n"));
adj++;
}
static struct dentry * autofs4_follow_link(struct dentry *dentry,
struct dentry *base,
- struct vfsmount *mnt,
+ struct vfsmount **mnt,
unsigned int flags)
{
struct autofs_info *ino = autofs4_dentry_ino(dentry);
}
inode->i_flags &= ~S_QUOTA;
put_it:
- if (dquot != NODQUOT)
+ if (dquot != NODQUOT) {
if (dqput_blocks(dquot)) {
if (dquot->dq_count != 1)
printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", dquot->dq_count);
list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */
return 1;
- }
- else
+ } else {
dqput(dquot); /* We have guaranteed we won't block */
+ }
+ }
return 0;
}
if (RPC_ASSASSINATED(task))
goto die;
-#if 0
- /* FIXME: rpc_restart_call() is broken! */
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
- nlm_rebind_host(req->a_host);
- rpc_restart_call(task);
- return;
+ goto retry_unlock;
}
-#endif
if (status != NLM_LCK_GRANTED
&& status != NLM_LCK_DENIED_GRACE_PERIOD) {
printk("lockd: unexpected unlock status: %d\n", status);
}
die:
- rpc_release_task(task);
nlm_release_host(req->a_host);
kfree(req);
+ return;
+ retry_unlock:
+ nlm_rebind_host(req->a_host);
+ rpc_restart_call(task);
}
/*
}
die:
-retry_cancel:
- rpc_release_task(task);
nlm_release_host(req->a_host);
kfree(req);
return;
-#if 0
- /* FIXME: rpc_restart_call() is broken */
retry_cancel:
nlm_rebind_host(req->a_host);
rpc_restart_call(task);
rpc_delay(task, 30 * HZ);
- return;
-#endif
}
/*
dprintk("lockd: %4d callback failed (errno = %d)\n",
task->tk_pid, -task->tk_status);
}
- rpc_release_task(task);
nlm_release_host(call->a_host);
kfree(call);
}
block->b_incall = 0;
nlm_release_host(call->a_host);
- rpc_release_task(task);
}
/*
task->tk_pid, -task->tk_status);
}
nlm_release_host(call->a_host);
- rpc_release_task(task);
kfree(call);
}
cache->task = NULL;
spin_unlock(&nfs_flushd_lock);
wake_up(&cache->request_wait);
- rpc_release_task(task);
}
UnlockPage(page);
page_cache_release(page);
- rpc_release_task(task);
kfree(req);
}
static void nfs_writedata_release(struct rpc_task *task)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
- rpc_release_task(task);
nfs_writedata_free(wdata);
}
/* Finalize the task. */
rpc_init_task(task, clnt, nfs_writeback_done, flags);
task->tk_calldata = data;
+ /* Release requests */
+ task->tk_release = nfs_writedata_release;
#ifdef CONFIG_NFS_V3
msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ? NFS3PROC_WRITE : NFSPROC_WRITE;
next:
nfs_unlock_request(req);
}
- nfs_writedata_release(task);
}
rpc_init_task(task, clnt, nfs_commit_done, flags);
task->tk_calldata = data;
+ /* Release requests */
+ task->tk_release = nfs_writedata_release;
msg.rpc_proc = NFS3PROC_COMMIT;
msg.rpc_argp = &data->args;
next:
nfs_unlock_request(req);
}
- nfs_writedata_release(task);
}
#endif
if (IS_ERR(name))
goto out;
- dentry = lookup_dentry(name, NULL, 0);
+ dentry = lookup_dentry(name, NULL, LOOKUP_FOLLOW);
putname(name);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
if (IS_ERR(name))
goto out;
- dentry = lookup_dentry(name, NULL, 0);
+ dentry = lookup_dentry(name, NULL, LOOKUP_FOLLOW);
putname(name);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
if (f->f_mode & FMODE_WRITE) {
error = get_write_access(inode);
if (error)
- goto cleanup_dentry;
+ goto cleanup_file;
}
f->f_dentry = dentry;
put_write_access(inode);
f->f_dentry = NULL;
f->f_vfsmnt = NULL;
+cleanup_file:
+ put_filp(f);
cleanup_dentry:
dput(dentry);
mntput(mnt);
- put_filp(f);
return ERR_PTR(error);
}
int errors;
unsigned long sector;
unsigned long nr_sectors;
+ unsigned long hard_sector, hard_nr_sectors;
unsigned int nr_segments;
unsigned int nr_hw_segments;
unsigned long current_nr_sectors;
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
+* 2000/04/02 acme dprintk and cycx_debug
* 1999/01/03 acme judicious use of data types
* 1999/01/02 acme #define X25_ACK_N3 0x4411
* 1998/12/28 acme cleanup: lot'o'things removed
#define X25_MBOX_OFFS 0x300 /* general mailbox block */
#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
+/* Debug */
+#define dprintk(level, format, a...) do { if (cycx_debug >= level) printk(format, ##a); } while (0)
+
+extern unsigned int cycx_debug;
+
/* Data Structures */
/* X.25 Command Block. */
typedef struct X25Cmd
extern struct dentry * lookup_one(const char *, struct dentry *);
extern struct dentry * __namei(const char *, unsigned int);
-#define namei(pathname) __namei(pathname, 1)
+#define namei(pathname) __namei(pathname, LOOKUP_FOLLOW)
#define lnamei(pathname) __namei(pathname, 0)
extern void iput(struct inode *);
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
-#define Page_Dirty(page) test_bit(PG_dirty, &(page)->flags)
+#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
#define LockPage(page) set_bit(PG_locked, &(page)->flags)
#define PageClearSwapCache(page) clear_bit(PG_swap_cache, &(page)->flags)
#define PageTestandClearSwapCache(page) test_and_clear_bit(PG_swap_cache, &(page)->flags)
+#define PageSwapEntry(page) test_bit(PG_swap_entry, &(page)->flags)
+#define SetPageSwapEntry(page) set_bit(PG_swap_entry, &(page)->flags)
+#define ClearPageSwapEntry(page) clear_bit(PG_swap_entry, &(page)->flags)
#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags)
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
void (*tk_exit)(struct rpc_task *);
+ void (*tk_release)(struct rpc_task *);
void * tk_calldata;
/*
/* is it a page-cache page? */
if (page->mapping) {
- if (!Page_Dirty(page) && !pgcache_under_min()) {
+ if (!PageDirty(page) && !pgcache_under_min()) {
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
page->mapping = NULL;
struct page *alias;
unsigned long flags;
- flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
+ flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
get_page(page);
page->index = offset;
pte = mk_pte(page, vma->vm_page_prot);
- set_bit(PG_swap_entry, &page->flags);
+ SetPageSwapEntry(page);
/*
* Freeze the "shared"ness of the page, ie page_count + swap_count.
BUG();
if (PageLocked(page))
BUG();
+ if (PageDecrAfter(page))
+ BUG();
zone = page->zone;
*/
void delete_from_swap_cache_nolock(struct page *page)
{
+ if (!PageLocked(page))
+ BUG();
+
if (block_flushpage(page, 0))
lru_cache_del(page);
}
UnlockPage(page);
}
-
- clear_bit(PG_swap_entry, &page->flags);
+
+ ClearPageSwapEntry(page);
__free_page(page);
}
unsigned long offset, type;
swp_entry_t entry;
- if (!test_bit(PG_swap_entry, &page->flags))
+ if (!PageSwapEntry(page))
goto new_swap_entry;
/* We have the old entry in the page offset still */
static void
rpc_default_callback(struct rpc_task *task)
{
- rpc_release_task(task);
}
/*
/* Set up the call info struct and execute the task */
if (task->tk_status == 0)
status = rpc_execute(task);
- else
+ else {
status = task->tk_status;
- rpc_release_task(task);
+ rpc_release_task(task);
+ }
rpc_clnt_sigunmask(clnt, &oldset);
void
rpc_restart_call(struct rpc_task *task)
{
- if (task->tk_flags & RPC_TASK_KILLED) {
- rpc_release_task(task);
+ if (RPC_ASSASSINATED(task))
return;
- }
+
task->tk_action = call_reserve;
rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}
return 0;
}
+ restarted:
while (1) {
/*
* Execute any pending callback.
}
}
+ if (task->tk_exit) {
+ task->tk_exit(task);
+ /* If tk_action is non-null, the user wants us to restart */
+ if (task->tk_action) {
+ if (!RPC_ASSASSINATED(task)) {
+ /* Release RPC slot and buffer memory */
+ if (task->tk_rqstp)
+ xprt_release(task);
+ if (task->tk_buffer) {
+ rpc_free(task->tk_buffer);
+ task->tk_buffer = NULL;
+ }
+ goto restarted;
+ }
+ printk(KERN_ERR "RPC: dead task tries to walk away.\n");
+ }
+ }
+
dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
status = task->tk_status;
- if (task->tk_exit)
- task->tk_exit(task);
+
+ /* Release all resources associated with the task */
+ rpc_release_task(task);
return status;
}
*
* This may be called recursively if e.g. an async NFS task updates
* the attributes and finds that dirty pages must be flushed.
+ * NOTE: Upon exit of this function the task is guaranteed to be
+ * released. In particular note that tk_release() will have
+ * been called, so your task memory may have been freed.
*/
int
rpc_execute(struct rpc_task *task)
{
+ int status = -EIO;
if (rpc_inhibit) {
printk(KERN_INFO "RPC: execution inhibited!\n");
- return -EIO;
+ goto out_release;
}
- task->tk_flags |= RPC_TASK_RUNNING;
+
+ status = -EWOULDBLOCK;
if (task->tk_active) {
printk(KERN_ERR "RPC: active task was run twice!\n");
- return -EWOULDBLOCK;
+ goto out_err;
}
+
task->tk_active = 1;
-
+ task->tk_flags |= RPC_TASK_RUNNING;
return __rpc_execute(task);
+ out_release:
+ rpc_release_task(task);
+ out_err:
+ return status;
}
/*
current->pid);
}
+static void
+rpc_default_free_task(struct rpc_task *task)
+{
+ dprintk("RPC: %4d freeing task\n", task->tk_pid);
+ rpc_free(task);
+}
+
/*
* Create a new task for the specified client. We have to
* clean up after an allocation failure, as the client may
rpc_init_task(task, clnt, callback, flags);
+ /* Replace tk_release */
+ task->tk_release = rpc_default_free_task;
+
dprintk("RPC: %4d allocated task\n", task->tk_pid);
task->tk_flags |= RPC_TASK_DYNAMIC;
out:
#ifdef RPC_DEBUG
task->tk_magic = 0;
#endif
-
- if (task->tk_flags & RPC_TASK_DYNAMIC) {
- dprintk("RPC: %4d freeing task\n", task->tk_pid);
- task->tk_flags &= ~RPC_TASK_DYNAMIC;
- rpc_free(task);
- }
+ if (task->tk_release)
+ task->tk_release(task);
}
/*
__rpc_wake_up(parent);
}
spin_unlock_bh(&rpc_queue_lock);
- rpc_release_task(child);
}
/*
EXPORT_SYMBOL(rpc_free);
EXPORT_SYMBOL(rpc_execute);
EXPORT_SYMBOL(rpc_init_task);
-EXPORT_SYMBOL(rpc_release_task);
EXPORT_SYMBOL(rpc_sleep_on);
EXPORT_SYMBOL(rpc_wake_up_next);
EXPORT_SYMBOL(rpc_wake_up_task);