extern void scsi_times_out(Scsi_Cmnd * SCpnt);
void scsi_build_commandblocks(Scsi_Device * SDpnt);
-/*
- * Function: scsi_initialize_queue()
- *
- * Purpose: Sets up the block queue for a device.
- *
- * Arguments: SDpnt - device for which we need a handler function.
- *
- * Returns: Nothing
- *
- * Lock status: No locking assumed or required.
- */
-void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
-{
- request_queue_t *q = SDpnt->request_queue;
-
- /*
- * tell block layer about assigned host_lock for this host
- */
- blk_init_queue(q, scsi_request_fn, SHpnt->host_lock);
-
- /* Hardware imposed limit. */
- blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
-
- /*
- * scsi_alloc_sgtable max
- */
- blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-
- if(!SHpnt->max_sectors)
- /* driver imposes no hard sector transfer limit.
- * start at machine infinity initially */
- SHpnt->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
-
- /* FIXME: we should also adjust this limit later on
- * after we know what the device capabilities are */
- blk_queue_max_sectors(q, SHpnt->max_sectors);
-
- if (!SHpnt->use_clustering)
- clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-
- blk_queue_prep_rq(q, scsi_prep_fn);
-}
-
#ifdef MODULE
MODULE_PARM(scsi_logging_level, "i");
MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
-
#include <linux/blk.h>
#include "scsi.h"
printk("\n");
}
-/**
- * scsi_initialize_merge_fn() - initialize merge function for a host
- * @sd: host descriptor
- */
-static void scsi_initialize_merge_fn(struct scsi_device *sd)
+u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
- request_queue_t *q = sd->request_queue;
- struct Scsi_Host *sh = sd->host;
- struct device *dev = scsi_get_device(sh);
- u64 bounce_limit;
-
- if (sh->highmem_io) {
- if (dev && dev->dma_mask && PCI_DMA_BUS_IS_PHYS) {
- bounce_limit = *dev->dma_mask;
- } else {
- /*
- * Platforms with virtual-DMA translation
- * hardware have no practical limit.
- */
- bounce_limit = BLK_BOUNCE_ANY;
- }
- } else if (sh->unchecked_isa_dma) {
- bounce_limit = BLK_BOUNCE_ISA;
- } else {
- bounce_limit = BLK_BOUNCE_HIGH;
+ if (shost->highmem_io) {
+ struct device *host_dev = scsi_get_device(shost);
+
+ if (PCI_DMA_BUS_IS_PHYS && host_dev && host_dev->dma_mask)
+ return *host_dev->dma_mask;
+
+ /*
+ * Platforms with virtual-DMA translation
+ * hardware have no practical limit.
+ */
+ return BLK_BOUNCE_ANY;
+ } else if (shost->unchecked_isa_dma)
+ return BLK_BOUNCE_ISA;
+
+ return BLK_BOUNCE_HIGH;
+}
+
+static request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
+{
+ request_queue_t *q;
+
+ q = kmalloc(sizeof(*q), GFP_ATOMIC);
+ if (!q)
+ return NULL;
+ memset(q, 0, sizeof(*q));
+
+ if (!shost->max_sectors) {
+ /*
+ * Driver imposes no hard sector transfer limit.
+ * start at machine infinity initially.
+ */
+ shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
}
- blk_queue_bounce_limit(q, bounce_limit);
+ blk_init_queue(q, scsi_request_fn, shost->host_lock);
+ blk_queue_prep_rq(q, scsi_prep_fn);
+
+ blk_queue_max_hw_segments(q, shost->sg_tablesize);
+ blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+ blk_queue_max_sectors(q, shost->max_sectors);
+ blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
+
+ if (!shost->use_clustering)
+ clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+
+ return q;
+}
+
+static void scsi_free_queue(request_queue_t *q)
+{
+ blk_cleanup_queue(q);
+ kfree(q);
}
/**
*/
sdev->borken = 1;
- if(!q || *q == NULL) {
- sdev->request_queue = kmalloc(sizeof(struct request_queue), GFP_ATOMIC);
- if(sdev->request_queue == NULL) {
+ if (!q || *q == NULL) {
+ sdev->request_queue = scsi_alloc_queue(shost);
+ if (!sdev->request_queue)
goto out_bail;
- }
- memset(sdev->request_queue, 0,
- sizeof(struct request_queue));
- scsi_initialize_queue(sdev, shost);
- scsi_initialize_merge_fn(sdev);
} else {
sdev->request_queue = *q;
*q = NULL;
}
+
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
scsi_build_commandblocks(sdev);
}
out_bail:
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
- if(q && sdev->request_queue) {
+ if (q && sdev->request_queue) {
*q = sdev->request_queue;
sdev->request_queue = NULL;
- } else if(sdev->request_queue) {
- blk_cleanup_queue(sdev->request_queue);
- kfree(sdev->request_queue);
- }
+ } else if (sdev->request_queue)
+ scsi_free_queue(sdev->request_queue);
+
scsi_release_commandblocks(sdev);
kfree(sdev);
return NULL;
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
- if(sdev->request_queue != NULL) {
- blk_cleanup_queue(sdev->request_queue);
- kfree(sdev->request_queue);
- }
+ if (sdev->request_queue)
+ scsi_free_queue(sdev->request_queue);
scsi_release_commandblocks(sdev);
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
- if (sdev->inquiry != NULL)
+ if (sdev->inquiry)
kfree(sdev->inquiry);
kfree(sdev);
}
scsi_scan_target(shost, &q, channel, order_id);
}
}
- if(q) {
- blk_cleanup_queue(q);
- kfree(q);
- }
+
+ if (q)
+ scsi_free_queue(q);
}
void scsi_forget_host(struct Scsi_Host *shost)
tpnt->nbr_partitions = 0;
tpnt->timeout = ST_TIMEOUT;
tpnt->long_timeout = ST_LONG_TIMEOUT;
-
tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
- bounce_limit = BLK_BOUNCE_HIGH; /* Borrowed from scsi_merge.c */
- if (SDp->host->highmem_io) {
- struct device *dev = scsi_get_device(SDp->host);
- if (!PCI_DMA_BUS_IS_PHYS)
- /* Platforms with virtual-DMA translation
- * hardware have no practical limit.
- */
- bounce_limit = BLK_BOUNCE_ANY;
- else if (dev && dev->dma_mask)
- bounce_limit = *dev->dma_mask;
- } else if (SDp->host->unchecked_isa_dma)
- bounce_limit = BLK_BOUNCE_ISA;
- bounce_limit >>= PAGE_SHIFT;
+
+ bounce_limit = scsi_calculate_bounce_limit(SDp->host) >> PAGE_SHIFT;
if (bounce_limit > ULONG_MAX)
bounce_limit = ULONG_MAX;
tpnt->max_pfn = bounce_limit;