/*
* drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
- * $Revision: 1.31 $
+ * $Revision: 1.33 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
unsigned int irq;
CIO_TRACE_EVENT (0, "redoval");
- for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) {
+ for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
int ret;
struct subchannel *sch;
/*
* drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
- * $Revision: 1.28 $
+ * $Revision: 1.29 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
gdev->cdev[i]->dev.driver_data = gdev;
del_drvdata = 1;
- *gdev = (struct ccwgroup_device) {
- .creator_id = creator_id,
- .count = argc,
- .dev = {
- .bus = &ccwgroup_bus_type,
- .parent = root,
- .release = ccwgroup_release,
- },
+ gdev->creator_id = creator_id;
+ gdev->count = argc;
+ gdev->dev = (struct device ) {
+ .bus = &ccwgroup_bus_type,
+ .parent = root,
+ .release = ccwgroup_release,
};
snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
/*
* drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
- * $Revision: 1.123 $
+ * $Revision: 1.128 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (!sch)
return 1;
+ local_bh_disable();
irq_enter ();
spin_lock(&sch->lock);
memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
sch->driver->irq(&sch->dev);
spin_unlock(&sch->lock);
irq_exit ();
+ __local_bh_enable();
return 1;
}
if (ccode)
return -ENODEV;
- sch->schib.pmcw.ena = 1;
- sch->schib.pmcw.isc = isc;
- sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
for (retry = 5, ret = 0; retry > 0; retry--) {
+ sch->schib.pmcw.ena = 1;
+ sch->schib.pmcw.isc = isc;
+ sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
ret = cio_modify(sch);
if (ret == -ENODEV)
break;
*/
return -EBUSY;
-
- sch->schib.pmcw.ena = 0;
for (retry = 5, ret = 0; retry > 0; retry--) {
+ sch->schib.pmcw.ena = 0;
ret = cio_modify(sch);
if (ret == -ENODEV)
break;
/*
- * linux/drivers/s390/cio/cmf.c ($Revision: 1.15 $)
+ * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $)
*
* Linux on zSeries Channel Measurement Facility support
*
time = get_clock() - cdev->private->cmb_start_time;
spin_unlock_irqrestore(cdev->ccwlock, flags);
- *data = (struct cmbdata) {
- /* we only know values before device_busy_time */
- .size = offsetof(struct cmbdata, device_busy_time),
-
- /* conver to nanoseconds */
- .elapsed_time = (time * 1000) >> 12,
-
- /* copy data to new structure */
- .ssch_rsch_count = cmb.ssch_rsch_count,
- .sample_count = cmb.sample_count,
-
- /* time fields are converted to nanoseconds while copying */
- .device_connect_time
- = time_to_nsec(cmb.device_connect_time),
- .function_pending_time
- = time_to_nsec(cmb.function_pending_time),
- .device_disconnect_time
- = time_to_nsec(cmb.device_disconnect_time),
- .control_unit_queuing_time
- = time_to_nsec(cmb.control_unit_queuing_time),
- .device_active_only_time
- = time_to_nsec(cmb.device_active_only_time),
- };
+ /* memset signature is (ptr, value, size); the swapped form cleared 0 bytes */
+ memset(data, 0, sizeof(struct cmbdata));
+
+ /* we only know values before device_busy_time */
+ data->size = offsetof(struct cmbdata, device_busy_time);
+
+ /* convert to nanoseconds */
+ data->elapsed_time = (time * 1000) >> 12;
+
+ /* copy data to new structure */
+ data->ssch_rsch_count = cmb.ssch_rsch_count;
+ data->sample_count = cmb.sample_count;
+
+ /* time fields are converted to nanoseconds while copying */
+ data->device_connect_time = time_to_nsec(cmb.device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb.function_pending_time);
+ data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+ data->control_unit_queuing_time
+ = time_to_nsec(cmb.control_unit_queuing_time);
+ data->device_active_only_time
+ = time_to_nsec(cmb.device_active_only_time);
return 0;
}
time = get_clock() - cdev->private->cmb_start_time;
spin_unlock_irqrestore(cdev->ccwlock, flags);
- *data = (struct cmbdata) {
- /* we only know values before device_busy_time */
- .size = offsetof(struct cmbdata, device_busy_time),
-
- /* conver to nanoseconds */
- .elapsed_time = (time * 1000) >> 12,
-
- /* copy data to new structure */
- .ssch_rsch_count = cmb.ssch_rsch_count,
- .sample_count = cmb.sample_count,
-
- /* time fields are converted to nanoseconds while copying */
- .device_connect_time
- = time_to_nsec(cmb.device_connect_time),
- .function_pending_time
- = time_to_nsec(cmb.function_pending_time),
- .device_disconnect_time
- = time_to_nsec(cmb.device_disconnect_time),
- .control_unit_queuing_time
- = time_to_nsec(cmb.control_unit_queuing_time),
- .device_active_only_time
- = time_to_nsec(cmb.device_active_only_time),
- .device_busy_time
- = time_to_nsec(cmb.device_busy_time),
- .initial_command_response_time
- = time_to_nsec(cmb.initial_command_response_time),
- };
+ /* memset signature is (ptr, value, size); the swapped form cleared 0 bytes */
+ memset (data, 0, sizeof(struct cmbdata));
+
+ /* we only know values before device_busy_time */
+ data->size = offsetof(struct cmbdata, device_busy_time);
+
+ /* convert to nanoseconds */
+ data->elapsed_time = (time * 1000) >> 12;
+
+ /* copy data to new structure */
+ data->ssch_rsch_count = cmb.ssch_rsch_count;
+ data->sample_count = cmb.sample_count;
+
+ /* time fields are converted to nanoseconds while copying */
+ data->device_connect_time = time_to_nsec(cmb.device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb.function_pending_time);
+ data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+ data->control_unit_queuing_time
+ = time_to_nsec(cmb.control_unit_queuing_time);
+ data->device_active_only_time
+ = time_to_nsec(cmb.device_active_only_time);
+ data->device_busy_time = time_to_nsec(cmb.device_busy_time);
+ data->initial_command_response_time
+ = time_to_nsec(cmb.initial_command_response_time);
return 0;
}
/*
* drivers/s390/cio/css.c
* driver for channel subsystem
- * $Revision: 1.80 $
+ * $Revision: 1.82 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
{
int irq, ret;
- for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) {
+ for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
ret = css_evaluate_subchannel(irq, 1);
/* No more memory. It doesn't make sense to continue. No
* panic because this can happen in midflight and just
new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
if (!new_slow_sch)
return -ENOMEM;
+ /* memset signature is (ptr, value, size); the swapped form cleared 0 bytes */
+ memset(new_slow_sch, 0, sizeof(struct slow_subchannel));
new_slow_sch->schid = schid;
spin_lock_irqsave(&slow_subchannel_lock, flags);
list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
/*
* drivers/s390/cio/device.c
* bus driver for ccw devices
- * $Revision: 1.120 $
+ * $Revision: 1.124 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
return cdev;
}
+extern int css_get_ssd_info(struct subchannel *sch);
+
void
ccw_device_do_unreg_rereg(void *data)
{
device_unregister(&other_sch->dev);
}
}
+ /* Update ssd info here. */
+ css_get_ssd_info(sch);
cdev->private->devno = sch->schib.pmcw.dev;
} else
need_rename = 0;
sch->dev.driver_data = 0;
kfree (cdev->private);
kfree (cdev);
- goto out;
+ put_device(&sch->dev);
+ return;
}
ret = subchannel_add_files(cdev->dev.parent);
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
int rc;
+ struct ccw_device_private *priv;
sch->dev.driver_data = cdev;
sch->driver = &io_subchannel_driver;
cdev->ccwlock = &sch->lock;
- *cdev->private = (struct ccw_device_private) {
- .devno = sch->schib.pmcw.dev,
- .irq = sch->irq,
- .state = DEV_STATE_NOT_OPER,
- .cmb_list = LIST_HEAD_INIT(cdev->private->cmb_list),
- };
- init_waitqueue_head(&cdev->private->wait_q);
- init_timer(&cdev->private->timer);
+ /* Init private data. */
+ priv = cdev->private;
+ priv->devno = sch->schib.pmcw.dev;
+ priv->irq = sch->irq;
+ priv->state = DEV_STATE_NOT_OPER;
+ INIT_LIST_HEAD(&priv->cmb_list);
+ init_waitqueue_head(&priv->wait_q);
+ init_timer(&priv->timer);
/* Set an initial name for the device. */
snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1;
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
+ /* We should also update ssd info, but this has to wait. */
ccw_device_start_id(cdev, 0);
spin_unlock_irqrestore(&sch->lock, flags);
}
sch = to_subchannel(cdev->dev.parent);
irb = (struct irb *) __LC_IRB;
- /* Retry sense id for cc=1. */
+ /* Retry sense id, if needed. */
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (irb->scsw.cc == 1) {
+ if ((irb->scsw.cc == 1) || !irb->scsw.actl) {
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);