- info on Mylex DAC960/DAC1100 PCI RAID Controller Driver for Linux
VGA-softcursor.txt
- how to change your VGA cursor from a blinking underscore.
-acpi.txt
- - info on ACPI Driver Interface
arm/
- directory with info about Linux on the ARM architecture.
atm.txt
- info on the PCI subsystem for device driver authors
pcwd-watchdog.txt
- info and sample code for using with the PC Watchdog reset card.
+pm.txt
+ - info on Linux power management support
powerpc/
- directory with info on using Linux with the PowerPC.
proc_usb_info.txt
+++ /dev/null
-ACPI Driver Interface
----------------------
-
-Overview:
-1) Register each instance of a device with "acpi_register"
-2) Call "acpi_access" before accessing the hardware.
- (this will ensure that the hardware is awake and ready)
-3) "acpi_transition" callback is called before entering D1-D3
- or after entering D0
-4) Call "acpi_dev_idle" when the device is not being used
- (not required by will improve idle detection)
-5) When unloaded, unregister the device with "acpi_unregister"
-
-/*
- * Description: Register a device with the ACPI subsystem
- *
- * Parameters:
- * info - static device information
- * type - device type
- * hid - PnP identifier (or 0 if unknown)
- * trans - device state transition callback
- * adr - bus number and address or unique id
- *
- * Returns: Registered ACPI device or NULL on error
- *
- * Details: The device type, bus number, and bus address should be
- * enough information to reconstruct the device tree and
- * identify device dependencies
- *
- * Examples:
- * struct acpi_dev_info info = {ACPI_SYS_DEV, ACPI_VGA_HID, vga_trans};
- * dev = acpi_register(&info, 0);
- *
- * struct pci_dev *pci_dev = pci_find_dev(...);
- * struct acpi_dev_info info = {ACPI_PCI_DEV, 0, trans};
- * dev = acpi_register(&info, ACPI_PCI_ADR(pci_dev));
- */
-struct acpi_dev *acpi_register(struct acpi_dev_info *info, unsigned long adr);
-
-/*
- * Description: Unregister a device with ACPI
- *
- * Parameters:
- * dev - ACPI device previously returned from acpi_register
- */
-void acpi_unregister(struct acpi_dev *dev);
-
-/*
- * Device idle/use detection
- *
- * In general, drivers for all devices should call "acpi_access"
- * before accessing the hardware (ie. before reading or modifying
- * a hardware register). Request or packet-driven drivers should
- * additionally call "acpi_idle" when a device is not being used.
- *
- * Examples:
- * 1) A keyboard driver would call acpi_access whenever a key is pressed
- * 2) A network driver would call acpi_access before submitting
- * a packet for transmit or receive and acpi_idle when its
- * transfer and receive queues are empty.
- * 3) A VGA driver would call acpi_access before it accesses any
- * of the video controller registers
- *
- * Ultimately, the ACPI policy manager uses the access and idle
- * information to decide when to transition devices between
- * device states.
- */
-
-/*
- * Description: Update device access time and wake up device, if necessary
- *
- * Parameters:
- * dev - ACPI device previously returned from acpi_register
- *
- * Details: If called from an interrupt handler acpi_access updates
- * access time but should never need to wake up the device
- * (if device is generating interrupts, it should be awake
- * already) This is important as we can not wake up
- * devices (run AML, etc.) from an interrupt handler.
- */
-void acpi_access(struct acpi_dev *dev);
-
-/*
- * Description: Identify device as currently being idle
- *
- * Parameters:
- * dev - ACPI device previously returned from acpi_register
- *
- * Details: A call to acpi_idle might signal to the policy manager
- * to put a device to sleep. If a new device request arrives
- * between the call to acpi_idle and the acpi_transition
- * callback, the driver should fail the acpi_transition request.
- */
-void acpi_dev_idle(struct acpi_dev *dev);
-
-/*
- * Transition function
- *
- * Parameters:
- * dev - ACPI device previously returned from acpi_register
- * state - the device state being entered
- *
- * Returns: 0 if the state transition is possible and context saved
- * EINVAL if the requested device state is not supported
- * EBUSY if the device is now busy and can not transition
- * ENOMEM if the device was unable to save context (out of memory)
- *
- * Details: The device state transition function will be called
- * before the device is transitioned into the D1-D3 states
- * or after the device is transitioned into the D0 state.
- * The device driver should save (D1-D3) or restore (D0)
- * device context when the transition function is called.
- *
- * For system devices, the ACPI subsystem will perform
- * the actual hardware state transition itself. For bus
- * devices, after the driver's acpi_transition function
- * is called, the bus driver's acpi_transition function
- * is called to perform the actual hardware state transition.
- *
- * Once a driver returns 0 (success) from a transition
- * to D1-3 request, it should not process any further
- * requests or access the device hardware until a
- * call to "acpi_access" is made.
- */
-typedef int (*acpi_transition)(struct acpi_dev *dev, acpi_dstate_t state);
--- /dev/null
+ Linux Power Management Support
+
+This document briefly describes how to use power management with your
+Linux system and how to add power management support to Linux drivers.
+
+APM or ACPI?
+------------
+If you have a relatively recent x86 mobile, desktop, or server system,
+odds are it supports either Advanced Power Management (APM) or
+Advanced Configuration and Power Interface (ACPI). ACPI is the newer
+of the two technologies and puts power management in the hands of the
+operating system, allowing for more intelligent power management than
+is possible with BIOS controlled APM.
+
+The best way to determine which, if either, your system supports is to
+build a kernel with both ACPI and APM enabled (as of 2.3.x ACPI is
+enabled by default). If a working ACPI implementation is found, the
+ACPI driver will override and disable APM, otherwise the APM driver
+will be used.
+
+No, sorry — you cannot have both ACPI and APM enabled and running at
+once. Some people with broken ACPI or broken APM implementations
+would like to use both to get a full set of working features, but you
+simply cannot mix and match the two. Only one power management
+interface can be in control of the machine at once. Think about it.
+
+User-space Daemons
+------------------
+Both APM and ACPI rely on user-space daemons, apmd and acpid
+respectively, to be completely functional. Obtain both of these
+daemons from your Linux distribution or from the Internet (see below)
+and be sure that they are started sometime in the system boot process.
+Go ahead and start both. If ACPI or APM is not available on your
+system the associated daemon will exit gracefully.
+
+ apmd: http://linuxcare.com.au/apm/
+ acpid: http://phobos.fs.tum.de/acpi/
+
+Driver Interface
+----------------
+If you are writing a new driver or maintaining an old driver, it
+should include power management support. Without power management
+support, a single driver may prevent a system with power management
+capabilities from ever being able to suspend (safely).
+
+Overview:
+1) Register each instance of a device with "pm_register"
+2) Call "pm_access" before accessing the hardware.
+ (this will ensure that the hardware is awake and ready)
+3) Your "pm_callback" is called before going into a
+ suspend state (ACPI D1-D3) or after resuming (ACPI D0)
+ from a suspend.
+4) Call "pm_dev_idle" when the device is not being used
+ (optional but will improve device idle detection)
+5) When unloaded, unregister the device with "pm_unregister"
+
+/*
+ * Description: Register a device with the power-management subsystem
+ *
+ * Parameters:
+ * type - device type (PCI device, system device, ...)
+ * id - instance number or unique identifier
+ * cback - request handler callback (suspend, resume, ...)
+ *
+ * Returns: Registered PM device or NULL on error
+ *
+ * Examples:
+ * dev = pm_register(PM_SYS_DEV, PM_SYS_VGA, vga_callback);
+ *
+ * struct pci_dev *pci_dev = pci_find_dev(...);
+ * dev = pm_register(PM_PCI_DEV, PM_PCI_ID(pci_dev), callback);
+ */
+struct pm_dev *pm_register(pm_dev_t type, unsigned long id, pm_callback cback);
+
+/*
+ * Description: Unregister a device with the power management subsystem
+ *
+ * Parameters:
+ * dev - PM device previously returned from pm_register
+ */
+void pm_unregister(struct pm_dev *dev);
+
+/*
+ * Description: Unregister all devices with a matching callback function
+ *
+ * Parameters:
+ * cback - previously registered request callback
+ *
+ * Notes: Provided for easier porting from old APM interface
+ */
+void pm_unregister_all(pm_callback cback);
+
+/*
+ * Device idle/use detection
+ *
+ * In general, drivers for all devices should call "pm_access"
+ * before accessing the hardware (ie. before reading or modifying
+ * a hardware register). Request or packet-driven drivers should
+ * additionally call "pm_dev_idle" when a device is not being used.
+ *
+ * Examples:
+ * 1) A keyboard driver would call pm_access whenever a key is pressed
+ * 2) A network driver would call pm_access before submitting
+ * a packet for transmit or receive and pm_dev_idle when its
+ * transfer and receive queues are empty.
+ * 3) A VGA driver would call pm_access before it accesses any
+ * of the video controller registers
+ *
+ * Ultimately, the PM policy manager uses the access and idle
+ * information to decide when to suspend individual devices
+ * or when to suspend the entire system
+ */
+
+/*
+ * Description: Update device access time and wake up device, if necessary
+ *
+ * Parameters:
+ * dev - PM device previously returned from pm_register
+ *
+ * Details: If called from an interrupt handler pm_access updates
+ * access time but should never need to wake up the device
+ * (if device is generating interrupts, it should be awake
+ * already). This is important because we cannot wake up
+ * devices from an interrupt handler.
+ */
+void pm_access(struct pm_dev *dev);
+
+/*
+ * Description: Identify device as currently being idle
+ *
+ * Parameters:
+ * dev - PM device previously returned from pm_register
+ *
+ * Details: A call to pm_dev_idle might signal to the policy manager
+ * to put a device to sleep. If a new device request arrives
+ * between the call to pm_dev_idle and the pm_callback
+ * callback, the driver should fail the pm_callback request.
+ */
+void pm_dev_idle(struct pm_dev *dev);
+
+/*
+ * Power management request callback
+ *
+ * Parameters:
+ * dev - PM device previously returned from pm_register
+ * rqst - request type
+ * data - data, if any, associated with the request
+ *
+ * Returns: 0 if the request is successful
+ * EINVAL if the request is not supported
+ * EBUSY if the device is now busy and can not handle the request
+ * ENOMEM if the device was unable to handle the request due to memory
+ *
+ * Details: The device request callback will be called before the
+ * device/system enters a suspend state (ACPI D1-D3) or
+ * after the device/system resumes from suspend (ACPI D0).
+ * For PM_SUSPEND, the ACPI D-state being entered is passed
+ * as the "data" argument to the callback. The device
+ * driver should save (PM_SUSPEND) or restore (PM_RESUME)
+ * device context when the request callback is called.
+ *
+ * Once a driver returns 0 (success) from a suspend
+ * request, it should not process any further requests or
+ * access the device hardware until a call to "pm_access" is made.
+ */
+typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data);
L: linux-smp@vger.rutgers.edu
S: Maintained
+SOFTWARE RAID (Multiple Disks) SUPPORT
+P: Ingo Molnar
+M: mingo@redhat.com
+S: Maintained
+
SONIC NETWORK DRIVER
P: Thomas Bogendoerfer
M: tsbogend@alpha.franken.de
* 1998-12-20 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
endif
ifeq ($(CONFIG_ACPI),y)
- O_OBJS += acpi.o
+O_OBJS += acpi.o
+else
+ ifeq ($(CONFIG_ACPI),m)
+ M_OBJS += acpi.o
+ endif
endif
ifeq ($(CONFIG_APM),y)
-OX_OBJS += apm.o
+O_OBJS += apm.o
else
ifeq ($(CONFIG_APM),m)
- MX_OBJS += apm.o
+ M_OBJS += apm.o
endif
endif
static int acpi_p_lvl3_tested = 0;
static int acpi_disabled = 0;
-int acpi_active = 0;
// bits 8-15 are SLP_TYPa, bits 0-7 are SLP_TYPb
static unsigned long acpi_slp_typ[] =
/*
* Init VIA ACPI device and create a fake FACP
*/
-static int __init acpi_init_via686a(struct pci_dev *dev)
+static int __init acpi_init_via(struct pci_dev *dev)
{
u32 base;
u8 tmp, irq;
{
CH_UNKNOWN = 0,
CH_INTEL_PIIX4,
+ CH_VIA_586,
CH_VIA_686A,
} acpi_chip_t;
{
{NULL,},
{acpi_init_piix4},
- {acpi_init_via686a},
+ {acpi_init_via},
};
const static struct pci_device_id acpi_pci_tbl[] =
{
{0x8086, 0x7113, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_INTEL_PIIX4},
+ {0x1106, 0x3040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_VIA_586},
{0x1106, 0x3057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_VIA_686A},
{0,}, /* terminate list */
};
/*
* Idle loop (uniprocessor only)
*/
-static void acpi_idle_handler(void)
+static void acpi_idle(void)
{
static int sleep_level = 1;
struct acpi_facp *facp = acpi_facp;
/*
* Enter soft-off (S5)
*/
-static void acpi_power_off_handler(void)
+static void acpi_power_off(void)
{
acpi_enter_sx(ACPI_S5);
}
+/*
+ * Claim an ACPI I/O port range, if one is present.
+ *
+ * start - first port of the range; 0 means "register block absent"
+ * size  - length of the range in ports; 0 likewise means absent
+ *
+ * Returns 0 on success (or when the range is absent), -EBUSY if the
+ * region is already claimed by another driver.
+ *
+ * NOTE(review): check_region()+request_region() is the classic
+ * non-atomic claim idiom of this kernel era; presumably acceptable
+ * here because this only runs during single-threaded init.
+ */
+static int acpi_claim(unsigned long start, unsigned long size)
+{
+ if (start && size) {
+ if (check_region(start, size))
+ return -EBUSY;
+ request_region(start, size, "acpi");
+ }
+ return 0;
+}
+
/*
* Claim ACPI I/O ports
*/
static int acpi_claim_ioports(struct acpi_facp *facp)
{
// we don't get a guarantee of contiguity for any of the ACPI registers
- if (facp->pm1a_evt)
- request_region(facp->pm1a_evt, facp->pm1_evt_len, "acpi");
- if (facp->pm1b_evt)
- request_region(facp->pm1b_evt, facp->pm1_evt_len, "acpi");
- if (facp->pm1a_cnt)
- request_region(facp->pm1a_cnt, facp->pm1_cnt_len, "acpi");
- if (facp->pm1b_cnt)
- request_region(facp->pm1b_cnt, facp->pm1_cnt_len, "acpi");
- if (facp->pm_tmr)
- request_region(facp->pm_tmr, facp->pm_tm_len, "acpi");
- if (facp->gpe0)
- request_region(facp->gpe0, facp->gpe0_len, "acpi");
- if (facp->gpe1)
- request_region(facp->gpe1, facp->gpe1_len, "acpi");
-
+ if (acpi_claim(facp->pm1a_evt, facp->pm1_evt_len)
+ || acpi_claim(facp->pm1b_evt, facp->pm1_evt_len)
+ || acpi_claim(facp->pm1a_cnt, facp->pm1_cnt_len)
+ || acpi_claim(facp->pm1b_cnt, facp->pm1_cnt_len)
+ || acpi_claim(facp->pm_tmr, facp->pm_tm_len)
+ || acpi_claim(facp->gpe0, facp->gpe0_len)
+ || acpi_claim(facp->gpe1, facp->gpe1_len))
+ return -EBUSY;
return 0;
}
+/*
+ * Release an ACPI I/O port range previously claimed by acpi_claim().
+ *
+ * start - first port of the range; 0 means "register block absent"
+ * size  - length of the range in ports; 0 likewise means absent
+ *
+ * A zero start or size means nothing was claimed for this block, so
+ * there is nothing to release.
+ */
+static void acpi_release(unsigned long start, unsigned long size)
+{
+ if (start && size)
+ release_region(start, size);
+}
+
/*
* Free ACPI I/O ports
*/
static int acpi_release_ioports(struct acpi_facp *facp)
{
// we don't get a guarantee of contiguity for any of the ACPI registers
- if (facp->pm1a_evt)
- release_region(facp->pm1a_evt, facp->pm1_evt_len);
- if (facp->pm1b_evt)
- release_region(facp->pm1b_evt, facp->pm1_evt_len);
- if (facp->pm1a_cnt)
- release_region(facp->pm1a_cnt, facp->pm1_cnt_len);
- if (facp->pm1b_cnt)
- release_region(facp->pm1b_cnt, facp->pm1_cnt_len);
- if (facp->pm_tmr)
- release_region(facp->pm_tmr, facp->pm_tm_len);
- if (facp->gpe0)
- release_region(facp->gpe0, facp->gpe0_len);
- if (facp->gpe1)
- release_region(facp->gpe1, facp->gpe1_len);
-
+ acpi_release(facp->gpe1, facp->gpe1_len);
+ acpi_release(facp->gpe0, facp->gpe0_len);
+ acpi_release(facp->pm_tmr, facp->pm_tm_len);
+ acpi_release(facp->pm1b_cnt, facp->pm1_cnt_len);
+ acpi_release(facp->pm1a_cnt, facp->pm1_cnt_len);
+ acpi_release(facp->pm1b_evt, facp->pm1_evt_len);
+ acpi_release(facp->pm1a_evt, facp->pm1_evt_len);
return 0;
}
= ACPI_uS_TO_TMR_TICKS(acpi_facp->p_lvl3_lat * 5);
}
+ if (acpi_claim_ioports(acpi_facp)) {
+ printk(KERN_ERR "ACPI: I/O port allocation failed\n");
+ if (pci_driver_registered)
+ pci_unregister_driver(&acpi_driver);
+ acpi_destroy_tables();
+ return -ENODEV;
+ }
+
if (acpi_facp->sci_int
&& request_irq(acpi_facp->sci_int,
acpi_irq,
return -ENODEV;
}
- acpi_claim_ioports(acpi_facp);
acpi_sysctl = register_sysctl_table(acpi_dir_table, 1);
pid = kernel_thread(acpi_control_thread,
NULL,
CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
- acpi_power_off = acpi_power_off_handler;
+ pm_power_off = acpi_power_off;
- acpi_active = 1;
+ pm_active = 1;
/*
* Set up the ACPI idle function. Note that we can't really
#endif
if (acpi_facp->pm_tmr)
- acpi_idle = acpi_idle_handler;
+ pm_idle = acpi_idle;
return 0;
}
*/
static void __exit acpi_exit(void)
{
- acpi_idle = NULL;
- acpi_power_off = NULL;
+ pm_idle = NULL;
+ pm_power_off = NULL;
unregister_sysctl_table(acpi_sysctl);
acpi_disable(acpi_facp);
#include <asm/uaccess.h>
#include <asm/desc.h>
-/*
- * Make APM look as much as just another ACPI module as possible..
- */
-#include <linux/acpi.h>
-
-EXPORT_SYMBOL(apm_register_callback);
-EXPORT_SYMBOL(apm_unregister_callback);
+#include <linux/pm.h>
extern unsigned long get_cmos_time(void);
extern void machine_real_restart(unsigned char *, int);
#define NR_APM_EVENT_NAME \
(sizeof(apm_event_name) / sizeof(apm_event_name[0]))
-typedef struct callback_list_t {
- int (* callback)(apm_event_t);
- struct callback_list_t * next;
-} callback_list_t;
-
-static callback_list_t * callback_list = NULL;
-
typedef struct lookup_t {
int key;
char * msg;
}
#endif
-int apm_register_callback(int (*callback)(apm_event_t))
-{
- callback_list_t * new;
-
- new = kmalloc(sizeof(callback_list_t), GFP_KERNEL);
- if (new == NULL)
- return -ENOMEM;
- new->callback = callback;
- new->next = callback_list;
- callback_list = new;
- return 0;
-}
-
-void apm_unregister_callback(int (*callback)(apm_event_t))
-{
- callback_list_t ** ptr;
- callback_list_t * old;
-
- for (ptr = &callback_list; *ptr != NULL; ptr = &(*ptr)->next)
- if ((*ptr)->callback == callback)
- break;
- old = *ptr;
- *ptr = old->next;
- kfree_s(old, sizeof(callback_list_t));
-}
-
static int queue_empty(struct apm_bios_struct * as)
{
return as->event_head == as->event_tail;
static int send_event(apm_event_t event, apm_event_t undo,
struct apm_bios_struct *sender)
{
- callback_list_t * call;
- callback_list_t * fix;
-
- for (call = callback_list; call != NULL; call = call->next) {
- if (call->callback(event) && undo) {
- for (fix = callback_list; fix != call; fix = fix->next)
- fix->callback(undo);
+ switch (event) {
+ case APM_SYS_SUSPEND:
+ case APM_CRITICAL_SUSPEND:
+ case APM_USER_SUSPEND:
+ /* map all suspends to ACPI D3 */
+ if (pm_send_request(PM_SUSPEND, (void*) 3)) {
+ if (apm_bios_info.version > 0x100)
+ apm_set_power_state(APM_STATE_REJECT);
+ return 0;
+ }
+ break;
+ case APM_NORMAL_RESUME:
+ case APM_CRITICAL_RESUME:
+ /* map all resumes to ACPI D0 */
+ if (pm_send_request(PM_RESUME, 0)) {
if (apm_bios_info.version > 0x100)
apm_set_power_state(APM_STATE_REJECT);
return 0;
}
+ break;
}
queue_event(event, sender);
/* Install our power off handler.. */
if (power_off_enabled)
- acpi_power_off = apm_power_off;
+ pm_power_off = apm_power_off;
#ifdef CONFIG_MAGIC_SYSRQ
sysrq_power_off = apm_power_off;
#endif
console_blank_hook = apm_console_blank;
#endif
+ pm_active = 1;
+
apm_mainloop();
return 0;
}
APM_INIT_ERROR_RETURN;
}
-#ifdef CONFIG_ACPI
- if (acpi_active) {
+ if (PM_IS_ACTIVE()) {
printk(KERN_NOTICE "apm: overridden by ACPI.\n");
APM_INIT_ERROR_RETURN;
}
-#endif
/*
* Set up a segment that references the real mode segment 0x40
return 0;
}
-module_init(apm_init)
+__initcall(apm_init);
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
-#include <linux/acpi.h>
+#include <linux/pm.h>
#include <linux/pci.h>
#include <asm/semaphore.h>
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(acpi_idle);
-EXPORT_SYMBOL(acpi_power_off);
+EXPORT_SYMBOL(pm_idle);
+EXPORT_SYMBOL(pm_power_off);
EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
static int __init pin_2_irq(int idx, int apic, int pin);
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
{
- int apic, i;
+ int apic, i, best_guess = -1;
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
(mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
!mp_irqs[i].mpc_irqtype &&
(bus == mp_bus_id_to_pci_bus[mp_irqs[i].mpc_srcbus]) &&
- (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f)) &&
- (pci_pin == (mp_irqs[i].mpc_srcbusirq & 3)))
+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
- return pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
+ if (pci_pin == (mp_irqs[i].mpc_srcbusirq & 3))
+ return irq;
+ /*
+ * Use the first all-but-pin matching entry as a
+ * best-guess fuzzy result for broken mptables.
+ */
+ if (best_guess < 0)
+ best_guess = irq;
+ }
}
return -1;
}
if (pin) {
pin--; /* interrupt pins are numbered starting from 1 */
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
+/*
+ * Will be removed completely if things work out well with fuzzy parsing
+ */
+#if 0
if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
struct pci_dev * bridge = dev->bus->self;
printk(KERN_WARNING "PCI: using PPB(B%d,I%d,P%d) to get irq %d\n",
bridge->bus->number, PCI_SLOT(bridge->devfn), pin, irq);
}
+#endif
if (irq >= 0) {
printk("PCI->APIC IRQ transform: (B%d,I%d,P%d) -> %d\n",
dev->bus->number, PCI_SLOT(dev->devfn), pin, irq);
#include <linux/slab.h>
#include <linux/pm.h>
+int pm_active = 0;
+
static spinlock_t pm_devs_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(pm_devs);
}
}
+/*
+ * Unregister every device whose request callback matches "callback".
+ *
+ * Provided mainly to ease porting of drivers from the old APM
+ * interface, which identified clients by callback function alone.
+ * A NULL callback matches nothing and returns immediately.
+ */
+void pm_unregister_all(pm_callback callback)
+{
+ struct list_head *entry;
+
+ if (!callback)
+ return;
+
+ entry = pm_devs.next;
+ while (entry != &pm_devs) {
+ struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
+ /* advance first: pm_unregister() unlinks (and frees) this node */
+ entry = entry->next;
+ if (dev->callback == callback)
+ pm_unregister(dev);
+ }
+}
+
+/*
+ * Deliver a single PM request to one device.
+ *
+ * dev  - target device, previously returned from pm_register
+ * rqst - request type (PM_SUSPEND, PM_RESUME, ...)
+ * data - request argument; for PM_SUSPEND/PM_RESUME this carries the
+ *        target state as an integer cast to a pointer
+ *
+ * For suspend/resume the device's recorded state is compared with the
+ * requested one, so the callback only runs on a real transition, and
+ * dev->state is updated only if the callback succeeds (returns 0).
+ * All other request types are passed straight through.
+ *
+ * Returns 0 on success, otherwise the callback's error code.
+ */
+static int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
+{
+ int status = 0;
+ int next_state;
+ switch (rqst) {
+ case PM_SUSPEND:
+ case PM_RESUME:
+ next_state = (int) data;
+ /* skip no-op transitions (already in the requested state) */
+ if (dev->state != next_state) {
+ if (dev->callback)
+ status = (*dev->callback)(dev, rqst, data);
+ if (!status)
+ dev->state = next_state;
+ }
+ break;
+ default:
+ if (dev->callback)
+ status = (*dev->callback)(dev, rqst, data);
+ break;
+ }
+ return status;
+}
+
/*
* Send a request to all devices
*/
while (entry != &pm_devs) {
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
if (dev->callback) {
- int status = (*dev->callback)(dev, rqst, data);
+ int status = pm_send(dev, rqst, data);
if (status)
return status;
}
EXPORT_SYMBOL(pm_register);
EXPORT_SYMBOL(pm_unregister);
+EXPORT_SYMBOL(pm_unregister_all);
EXPORT_SYMBOL(pm_send_request);
EXPORT_SYMBOL(pm_find);
/*
* Powermanagement idle function, if any..
*/
-void (*acpi_idle)(void) = NULL;
+void (*pm_idle)(void) = NULL;
/*
* Power off function, if any
*/
-void (*acpi_power_off)(void) = NULL;
+void (*pm_power_off)(void) = NULL;
/*
* We use this if we don't have any better
current->counter = -100;
while (1) {
- void (*idle)(void) = acpi_idle;
+ void (*idle)(void) = pm_idle;
if (!idle)
idle = default_idle;
while (!current->need_resched)
void machine_power_off(void)
{
- if (acpi_power_off)
- acpi_power_off();
+ if (pm_power_off)
+ pm_power_off();
}
return cfg;
}
-static inline int __prepare_ICR2 (unsigned int dest)
+static inline int __prepare_ICR2 (unsigned int mask)
{
unsigned int cfg;
cfg = __get_ICR2();
#if LOGICAL_DELIVERY
- cfg |= SET_APIC_DEST_FIELD((1<<dest));
+ cfg |= SET_APIC_DEST_FIELD(mask);
#else
- cfg |= SET_APIC_DEST_FIELD(dest);
+ cfg |= SET_APIC_DEST_FIELD(mask);
#endif
return cfg;
__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
-static inline void send_IPI_single(int dest, int vector)
+static inline void send_IPI_mask(int mask, int vector)
{
unsigned long cfg;
#if FORCE_READ_AROUND_WRITE
* prepare target chip field
*/
- cfg = __prepare_ICR2(dest);
+ cfg = __prepare_ICR2(mask);
apic_write(APIC_ICR2, cfg);
/*
*
* Optimizations Manfred Spraul <manfreds@colorfullife.com>
*/
-#define TLB_PARANOIA 1
static volatile unsigned long flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
-#define FLUSH_ALL 0xFFFFffff
+static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
+#define FLUSH_ALL 0xffffffff
-static void inline leave_mm(unsigned long cpu)
+static void inline leave_mm (unsigned long cpu)
{
-#ifdef TLB_PARANOIA
- if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
+ if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
BUG();
-#endif
clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
cpu_tlbstate[cpu].state = TLBSTATE_OLD;
}
* instead update cpu_tlbstate.
*/
-asmlinkage void smp_invalidate_interrupt(void)
+asmlinkage void smp_invalidate_interrupt (void)
{
unsigned long cpu = smp_processor_id();
if (flush_mm == cpu_tlbstate[cpu].active_mm) {
if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
- if(flush_va == FLUSH_ALL)
+ if (flush_va == FLUSH_ALL)
local_flush_tlb();
- else
- __flush_tlb_one(flush_va);
- } else {
+ else
+ __flush_tlb_one(flush_va);
+ } else
leave_mm(cpu);
- }
}
ack_APIC_irq();
clear_bit(cpu, &flush_cpumask);
}
-static void flush_tlb_others(unsigned long cpumask, struct mm_struct *mm, unsigned long va)
+static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ unsigned long va)
{
-#ifdef TLB_PARANOIA
- if(in_interrupt()) {
- printk(KERN_EMERG "tlb flush from interrupt: %d,%d",
- local_bh_count[smp_processor_id()],
- local_irq_count[smp_processor_id()]);
- }
- if(cpumask & (1<<smp_processor_id())) {
- printk(KERN_EMERG "flush_tlb_others: bad cpumask!");
- cpumask &= ~(1<<smp_processor_id());
- local_flush_tlb();
- }
- {
- int flags;
-
- save_flags(flags);
- if(flags != 1) {
-static int limit=10;
- if(limit > 0) {
- limit--;
- printk(KERN_EMERG "flush_tlb_others: possible lock-up, broken!(%d)",
- flags);
-/* show_stack(NULL);*/
- }
- sti();
- }
- }
-#endif
- cpumask &= cpu_online_map;
+ /*
+ * A couple of (to be removed) sanity checks:
+ *
+ * - we do not send IPIs to not-yet booted CPUs.
+ * - current CPU must not be in mask
+ * - mask must exist :)
+ */
+ if (!cpumask)
+ BUG();
+ if ((cpumask & cpu_online_map) != cpumask)
+ BUG();
+ if (cpumask & (1 << smp_processor_id()))
+ BUG();
/*
- * it's important that we do not generate any APIC traffic
- * until the AP CPUs have booted up!
+ * i'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
*/
- if (cpumask) {
-static spinlock_t lock = SPIN_LOCK_UNLOCKED;
- spin_lock(&lock);
-
- flush_mm = mm;
- flush_va = va;
- atomic_set_mask(cpumask, &flush_cpumask);
- send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
-
- while (flush_cpumask) {
- /* FIXME: lockup-detection, print backtrace on
- * lock-up
- */
- }
- flush_mm = NULL;
- flush_va = 0;
- spin_unlock(&lock);
- }
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+ atomic_set_mask(cpumask, &flush_cpumask);
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+ */
+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+
+ while (flush_cpumask)
+ /* nothing. lockup detection does not belong here */;
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
- unsigned long vm_mask = 1 << smp_processor_id();
struct mm_struct *mm = current->mm;
- unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+ unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
local_flush_tlb();
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ if (cpu_mask)
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
-void flush_tlb_mm(struct mm_struct * mm)
+void flush_tlb_mm (struct mm_struct * mm)
{
- unsigned long vm_mask = 1 << smp_processor_id();
- unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+ unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
if (current->active_mm == mm) {
- if(current->mm)
+ if (current->mm)
local_flush_tlb();
- else
+ else
leave_mm(smp_processor_id());
}
-
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ if (cpu_mask)
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
- unsigned long vm_mask = 1 << smp_processor_id();
struct mm_struct *mm = vma->vm_mm;
- unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+ unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
if (current->active_mm == mm) {
if(current->mm)
leave_mm(smp_processor_id());
}
- flush_tlb_others(cpu_mask, mm, va);
+ if (cpu_mask)
+ flush_tlb_others(cpu_mask, mm, va);
}
static inline void do_flush_tlb_all_local(void)
{
unsigned long cpu = smp_processor_id();
+
__flush_tlb_all();
if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
leave_mm(cpu);
void smp_send_reschedule(int cpu)
{
- send_IPI_single(cpu, RESCHEDULE_VECTOR);
+ send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
/*
*
* 01/07/99 S.Eranian modified to pass command line arguments to kernel
*/
+#include <linux/config.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
*/
+#include <linux/config.h>
#include <linux/posix_types.h>
#include <asm/signal.h>
* Derived from i386 and Alpha versions.
*/
-#include <linux/config.h>
-
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
/*
* IA32 helper functions
*/
-#include <linux/config.h>
-
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/utime.h>
#include <linux/resource.h>
#include <linux/times.h>
-#include <linux/utime.h>
#include <linux/utsname.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smb_mount.h>
#include <linux/ncp_fs.h>
#include <linux/quota.h>
-#include <linux/file.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
-#include <linux/module.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
-#include <linux/timex.h>
-
#include <linux/ipc.h>
-#include <linux/sem.h>
-#include <linux/shm.h>
#include <asm/types.h>
#include <asm/uaccess.h>
* Port to ia64
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/serial_reg.h>
* RSE support for ia64
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/config.h>
-#include <linux/init.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
int __initdata acpi_apic_map[32];
int __initdata cpu_cnt = 0;
-void (*acpi_idle) (void);
+void (*pm_idle) (void);
/*
* Identify usable CPU's and remember them for SMP bringup later.
*
* Implemented EFI runtime services and virtual mode calls. --davidm
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
-#include <asm/irq.h>
/*
* This is identical to IOSAPIC handle_irq. It may go away . . .
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/page.h>
#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
#include <linux/config.h>
-#include <linux/acpi.h>
+#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#endif
schedule();
check_pgt_cache();
- if (acpi_idle)
- (*acpi_idle)();
+ if (pm_idle)
+ (*pm_idle)();
}
}
* Derived from i386 and Alpha versions.
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
nops while maximizing parallelism
*/
-#include <linux/config.h>
#include <asm/break.h>
.text
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/irq.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
-#include <linux/config.h>
#include <asm/system.h>
#include <asm/page.h>
* Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <linux/config.h>
#include <linux/string.h>
#include <asm/oplib.h>
#include <asm/sun4prom.h>
* Copyright (C) 1998 Pete Zaitcev <zaitcev@metabyte.com>
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
* about or use it! It's simple and smelly anyway....
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/openprom.h>
endif
ifeq ($(CONFIG_MD_RAID5),y)
+LX_OBJS += xor.o
+CFLAGS_xor.o := $(PROFILING) -fomit-frame-pointer
L_OBJS += raid5.o
else
ifeq ($(CONFIG_MD_RAID5),m)
+ LX_OBJS += xor.o
+ CFLAGS_xor.o := $(PROFILING) -fomit-frame-pointer
M_OBJS += raid5.o
endif
endif
+ifeq ($(CONFIG_MD_TRANSLUCENT),y)
+L_OBJS += translucent.o
+else
+ ifeq ($(CONFIG_MD_TRANSLUCENT),m)
+ M_OBJS += translucent.o
+ endif
+endif
+
+ifeq ($(CONFIG_MD_HSM),y)
+L_OBJS += hsm.o
+else
+ ifeq ($(CONFIG_MD_HSM),m)
+ M_OBJS += hsm.o
+ endif
+endif
+
endif
ifeq ($(CONFIG_BLK_DEV_NBD),y)
return g;
}
-/* moved here from md.c - will be discarded later */
-char *partition_name (kdev_t dev) {
- static char name[40]; /* kdevname returns 32 bytes */
- /* disk_name requires 32 bytes */
- struct gendisk *hd = get_gendisk (dev);
-
- if (!hd) {
- sprintf (name, "[dev %s]", kdevname(dev));
- return (name);
- }
-
- return disk_name (hd, MINOR(dev), name); /* routine in genhd.c */
-}
-
/*
* Add a partition.
*
-
/*
linear.c : Multiple Devices driver for Linux
Copyright (C) 1994-96 Marc ZYNGIER
#include <linux/module.h>
-#include <linux/md.h>
+#include <linux/raid/md.h>
#include <linux/malloc.h>
-#include <linux/init.h>
-#include "linear.h"
+#include <linux/raid/linear.h>
#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY
-static int linear_run (int minor, struct md_dev *mddev)
+static int linear_run (mddev_t *mddev)
{
- int cur=0, i, size, dev0_size, nb_zone;
- struct linear_data *data;
-
- MOD_INC_USE_COUNT;
-
- mddev->private=kmalloc (sizeof (struct linear_data), GFP_KERNEL);
- data=(struct linear_data *) mddev->private;
-
- /*
- Find out the smallest device. This was previously done
- at registry time, but since it violates modularity,
- I moved it here... Any comment ? ;-)
- */
-
- data->smallest=mddev->devices;
- for (i=1; i<mddev->nb_dev; i++)
- if (data->smallest->size > mddev->devices[i].size)
- data->smallest=mddev->devices+i;
-
- nb_zone=data->nr_zones=
- md_size[minor]/data->smallest->size +
- (md_size[minor]%data->smallest->size ? 1 : 0);
+ linear_conf_t *conf;
+ struct linear_hash *table;
+ mdk_rdev_t *rdev;
+ int size, i, j, nb_zone;
+ unsigned int curr_offset;
+
+ MOD_INC_USE_COUNT;
+
+ conf = kmalloc (sizeof (*conf), GFP_KERNEL);
+ if (!conf)
+ goto out;
+ mddev->private = conf;
+
+ if (md_check_ordering(mddev)) {
+ printk("linear: disks are not ordered, aborting!\n");
+ goto out;
+ }
+ /*
+ * Find the smallest device.
+ */
+
+ conf->smallest = NULL;
+ curr_offset = 0;
+ ITERATE_RDEV_ORDERED(mddev,rdev,j) {
+ dev_info_t *disk = conf->disks + j;
+
+ disk->dev = rdev->dev;
+ disk->size = rdev->size;
+ disk->offset = curr_offset;
+
+ curr_offset += disk->size;
+
+ if (!conf->smallest || (disk->size < conf->smallest->size))
+ conf->smallest = disk;
+ }
+
+ nb_zone = conf->nr_zones =
+ md_size[mdidx(mddev)] / conf->smallest->size +
+ ((md_size[mdidx(mddev)] % conf->smallest->size) ? 1 : 0);
- data->hash_table=kmalloc (sizeof (struct linear_hash)*nb_zone, GFP_KERNEL);
-
- size=mddev->devices[cur].size;
-
- i=0;
- while (cur<mddev->nb_dev)
- {
- data->hash_table[i].dev0=mddev->devices+cur;
-
- if (size>=data->smallest->size) /* If we completely fill the slot */
- {
- data->hash_table[i++].dev1=NULL;
- size-=data->smallest->size;
-
- if (!size)
- {
- if (++cur==mddev->nb_dev) continue;
- size=mddev->devices[cur].size;
- }
-
- continue;
- }
-
- if (++cur==mddev->nb_dev) /* Last dev, set dev1 as NULL */
- {
- data->hash_table[i].dev1=NULL;
- continue;
- }
-
- dev0_size=size; /* Here, we use a 2nd dev to fill the slot */
- size=mddev->devices[cur].size;
- data->hash_table[i++].dev1=mddev->devices+cur;
- size-=(data->smallest->size - dev0_size);
- }
-
- return 0;
+ conf->hash_table = kmalloc (sizeof (struct linear_hash) * nb_zone,
+ GFP_KERNEL);
+ if (!conf->hash_table)
+ goto out;
+
+ /*
+ * Here we generate the linear hash table
+ */
+ table = conf->hash_table;
+ i = 0;
+ size = 0;
+ for (j = 0; j < mddev->nb_dev; j++) {
+ dev_info_t *disk = conf->disks + j;
+
+ if (size < 0) {
+ table->dev1 = disk;
+ table++;
+ }
+ size += disk->size;
+
+ while (size) {
+ table->dev0 = disk;
+ size -= conf->smallest->size;
+ if (size < 0)
+ break;
+ table->dev1 = NULL;
+ table++;
+ }
+ }
+ table->dev1 = NULL;
+
+ return 0;
+
+out:
+ if (conf)
+ kfree(conf);
+ MOD_DEC_USE_COUNT;
+ return 1;
}
-static int linear_stop (int minor, struct md_dev *mddev)
+static int linear_stop (mddev_t *mddev)
{
- struct linear_data *data=(struct linear_data *) mddev->private;
+ linear_conf_t *conf = mddev_to_conf(mddev);
- kfree (data->hash_table);
- kfree (data);
+ kfree(conf->hash_table);
+ kfree(conf);
- MOD_DEC_USE_COUNT;
+ MOD_DEC_USE_COUNT;
- return 0;
+ return 0;
}
-
-static int linear_map (struct md_dev *mddev, kdev_t *rdev,
- unsigned long *rsector, unsigned long size)
+static int linear_make_request (mddev_t *mddev, int rw, struct buffer_head * bh)
{
- struct linear_data *data=(struct linear_data *) mddev->private;
- struct linear_hash *hash;
- struct real_dev *tmp_dev;
- long block;
+ linear_conf_t *conf = mddev_to_conf(mddev);
+ struct linear_hash *hash;
+ dev_info_t *tmp_dev;
+ long block;
- block=*rsector >> 1;
- hash=data->hash_table+(block/data->smallest->size);
+ block = bh->b_blocknr * (bh->b_size >> 10);
+ hash = conf->hash_table + (block / conf->smallest->size);
- if (block >= (hash->dev0->size + hash->dev0->offset))
- {
- if (!hash->dev1)
- {
- printk ("linear_map : hash->dev1==NULL for block %ld\n", block);
- return (-1);
- }
-
- tmp_dev=hash->dev1;
- }
- else
- tmp_dev=hash->dev0;
+ if (block >= (hash->dev0->size + hash->dev0->offset)) {
+ if (!hash->dev1) {
+ printk ("linear_make_request : hash->dev1==NULL for block %ld\n",
+ block);
+ return -1;
+ }
+ tmp_dev = hash->dev1;
+ } else
+ tmp_dev = hash->dev0;
- if (block >= (tmp_dev->size + tmp_dev->offset) || block < tmp_dev->offset)
- printk ("Block %ld out of bounds on dev %s size %d offset %d\n",
- block, kdevname(tmp_dev->dev), tmp_dev->size, tmp_dev->offset);
-
- *rdev=tmp_dev->dev;
- *rsector=(block-(tmp_dev->offset)) << 1;
-
- return (0);
+ if (block >= (tmp_dev->size + tmp_dev->offset)
+ || block < tmp_dev->offset) {
+ printk ("linear_make_request: Block %ld out of bounds on dev %s size %d offset %d\n", block, kdevname(tmp_dev->dev), tmp_dev->size, tmp_dev->offset);
+ return -1;
+ }
+ bh->b_rdev = tmp_dev->dev;
+ bh->b_rsector = (block - tmp_dev->offset) << 1;
+
+ generic_make_request(rw, bh);
+ return 0;
}
-static int linear_status (char *page, int minor, struct md_dev *mddev)
+static int linear_status (char *page, mddev_t *mddev)
{
- int sz=0;
+ int sz = 0;
#undef MD_DEBUG
#ifdef MD_DEBUG
- int j;
- struct linear_data *data=(struct linear_data *) mddev->private;
+ int j;
+ linear_conf_t *conf = mddev_to_conf(mddev);
- sz+=sprintf (page+sz, " ");
- for (j=0; j<data->nr_zones; j++)
- {
- sz+=sprintf (page+sz, "[%s",
- partition_name (data->hash_table[j].dev0->dev));
-
- if (data->hash_table[j].dev1)
- sz+=sprintf (page+sz, "/%s] ",
- partition_name(data->hash_table[j].dev1->dev));
- else
- sz+=sprintf (page+sz, "] ");
- }
-
- sz+=sprintf (page+sz, "\n");
+ sz += sprintf(page+sz, " ");
+ for (j = 0; j < conf->nr_zones; j++)
+ {
+ sz += sprintf(page+sz, "[%s",
+ partition_name(conf->hash_table[j].dev0->dev));
+
+ if (conf->hash_table[j].dev1)
+ sz += sprintf(page+sz, "/%s] ",
+ partition_name(conf->hash_table[j].dev1->dev));
+ else
+ sz += sprintf(page+sz, "] ");
+ }
+ sz += sprintf(page+sz, "\n");
#endif
- sz+=sprintf (page+sz, " %dk rounding", 1<<FACTOR_SHIFT(FACTOR(mddev)));
- return sz;
+ sz += sprintf(page+sz, " %dk rounding", mddev->param.chunk_size/1024);
+ return sz;
}
-static struct md_personality linear_personality=
+static mdk_personality_t linear_personality=
{
- "linear",
- linear_map,
- NULL,
- NULL,
- linear_run,
- linear_stop,
- linear_status,
- NULL, /* no ioctls */
- 0
+ "linear",
+ NULL,
+ linear_make_request,
+ NULL,
+ linear_run,
+ linear_stop,
+ linear_status,
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL
};
-
#ifndef MODULE
-void __init linear_init (void)
+void md__init linear_init (void)
{
- register_md_personality (LINEAR, &linear_personality);
+ register_md_personality (LINEAR, &linear_personality);
}
#else
int init_module (void)
{
- return (register_md_personality (LINEAR, &linear_personality));
+ return (register_md_personality (LINEAR, &linear_personality));
}
void cleanup_module (void)
{
- unregister_md_personality (LINEAR);
+ unregister_md_personality (LINEAR);
}
#endif
+
+++ /dev/null
-#ifndef _LINEAR_H
-#define _LINEAR_H
-
-struct linear_hash
-{
- struct real_dev *dev0, *dev1;
-};
-
-struct linear_data
-{
- struct linear_hash *hash_table; /* Dynamically allocated */
- struct real_dev *smallest;
- int nr_zones;
-};
-
-#endif
#include <asm/io.h>
#include <linux/blk.h>
#include <linux/highmem.h>
+#include <linux/raid/md.h>
#include <linux/module.h>
*/
DECLARE_WAIT_QUEUE_HEAD(wait_for_request);
-/* This specifies how many sectors to read ahead on the disk. */
+/* This specifies how many sectors to read ahead on the disk. */
int read_ahead[MAX_BLKDEV] = {0, };
}
/*
- * Is called with the request spinlock aquired.
* NOTE: the device-specific queue() functions
* have to be atomic!
*/
-static inline request_queue_t *get_queue(kdev_t dev)
+request_queue_t * blk_get_queue (kdev_t dev)
{
int major = MAJOR(dev);
struct blk_dev_struct *bdev = blk_dev + major;
+ unsigned long flags;
+ request_queue_t *ret;
+ spin_lock_irqsave(&io_request_lock,flags);
if (bdev->queue)
- return bdev->queue(dev);
- return &blk_dev[major].request_queue;
+ ret = bdev->queue(dev);
+ else
+ ret = &blk_dev[major].request_queue;
+ spin_unlock_irqrestore(&io_request_lock,flags);
+
+ return ret;
}
void blk_cleanup_queue(request_queue_t * q)
void blk_queue_headactive(request_queue_t * q, int active)
{
- q->head_active = active;
+ q->head_active = active;
+}
+
+void blk_queue_pluggable (request_queue_t * q, plug_device_fn *plug)
+{
+ q->plug_device_fn = plug;
}
-void blk_queue_pluggable(request_queue_t * q, int use_plug)
+void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
- q->use_plug = use_plug;
+ q->make_request_fn = mfn;
}
static int ll_merge_fn(request_queue_t *q, struct request *req,
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
- q->request_fn = rfn;
+ q->request_fn = rfn;
q->current_request = NULL;
- q->merge_fn = ll_merge_fn;
+ q->merge_fn = ll_merge_fn;
q->merge_requests_fn = ll_merge_requests_fn;
- q->plug_tq.sync = 0;
- q->plug_tq.routine = unplug_device;
- q->plug_tq.data = q;
- q->plugged = 0;
+ q->make_request_fn = NULL;
+ q->plug_tq.sync = 0;
+ q->plug_tq.routine = &generic_unplug_device;
+ q->plug_tq.data = q;
+ q->plugged = 0;
/*
* These booleans describe the queue properties. We set the
* default (and most common) values here. Other drivers can
* use the appropriate functions to alter the queue properties.
* as appropriate.
*/
- q->use_plug = 1;
- q->head_active = 1;
-}
-
-/*
- * remove the plug and let it rip..
- */
-void unplug_device(void * data)
-{
- request_queue_t * q = (request_queue_t *) data;
- unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock,flags);
- if( q->plugged )
- {
- q->plugged = 0;
- if( q->current_request != NULL )
- {
- (q->request_fn)(q);
- }
- }
- spin_unlock_irqrestore(&io_request_lock,flags);
+ q->plug_device_fn = NULL;
+ q->head_active = 1;
}
/*
* This is called with interrupts off and no requests on the queue.
 * (and with the request spinlock acquired)
*/
-static inline void plug_device(request_queue_t * q)
+inline void generic_plug_device (request_queue_t *q, kdev_t dev)
{
+ if (MAJOR(dev) == MD_MAJOR) {
+ spin_unlock_irq(&io_request_lock);
+ BUG();
+ }
if (q->current_request)
return;
queue_task(&q->plug_tq, &tq_disk);
}
+/*
+ * remove the plug and let it rip..
+ */
+void generic_unplug_device(void * data)
+{
+ request_queue_t * q = (request_queue_t *) data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&io_request_lock,flags);
+ if (q->plugged) {
+ q->plugged = 0;
+ if (q->current_request)
+ (q->request_fn)(q);
+ }
+ spin_unlock_irqrestore(&io_request_lock,flags);
+}
+
/*
* look for a free request in the first N entries.
* NOTE: interrupts must be disabled on the way in (on SMP the request queue
}
static inline void drive_stat_acct(struct request *req,
- unsigned long nr_sectors, int new_io)
+ unsigned long nr_sectors, int new_io)
{
int major = MAJOR(req->rq_dev);
int minor = MINOR(req->rq_dev);
* which is important for drive_stat_acct() above.
*/
-static void add_request(request_queue_t * q, struct request * req)
+static inline void __add_request(request_queue_t * q, struct request * req)
{
int major = MAJOR(req->rq_dev);
struct request * tmp;
- unsigned long flags;
drive_stat_acct(req, req->nr_sectors, 1);
req->next = NULL;
- /*
- * We use the goto to reduce locking complexity
- */
- spin_lock_irqsave(&io_request_lock,flags);
-
if (!(tmp = q->current_request)) {
q->current_request = req;
- goto out;
+ return;
}
for ( ; tmp->next ; tmp = tmp->next) {
const int after_current = IN_ORDER(tmp,req);
/*
* FIXME(eric) I don't understand why there is a need for this
* special case code. It clearly doesn't fit any more with
- * the new queueing architecture, and it got added in 2.3.10.
+ * the new queueing architecture, and it got added in 2.3.10.
* I am leaving this in here until I hear back from the COMPAQ
* people.
*/
{
(q->request_fn)(q);
}
-
-out:
- spin_unlock_irqrestore(&io_request_lock,flags);
}
/*
 * Has to be called with the request spinlock acquired
*/
static inline void attempt_merge (request_queue_t * q,
- struct request *req,
+ struct request *req,
int max_sectors)
{
struct request *next = req->next;
return;
if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors)
return;
-
/*
* If we are not allowed to merge these requests, then
* return. If we are allowed to merge, then the count
wake_up (&wait_for_request);
}
-static void __make_request(request_queue_t * q,
- int major,
- int rw,
+static inline void __make_request(request_queue_t * q, int rw,
struct buffer_head * bh)
{
+ int major = MAJOR(bh->b_rdev);
unsigned int sector, count;
struct request * req;
int rw_ahead, max_req, max_sectors;
if (buffer_new(bh))
BUG();
- /* Only one thread can actually submit the I/O. */
- if (test_and_set_bit(BH_Lock, &bh->b_state))
- return;
-
if (blk_size[major]) {
unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1;
if (maxsector < count || maxsector - count < sector) {
bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);
- /* This may well happen - the kernel calls bread()
- without checking the size of the device, e.g.,
- when mounting a device. */
+ if (!blk_size[major][MINOR(bh->b_rdev)])
+ goto end_io;
+ /* This may well happen - the kernel calls bread()
+ without checking the size of the device, e.g.,
+ when mounting a device. */
printk(KERN_INFO
- "attempt to access beyond end of device\n");
+ "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
- kdevname(bh->b_rdev), rw,
- (sector + count)>>1,
- blk_size[major][MINOR(bh->b_rdev)]);
+ kdevname(bh->b_rdev), rw,
+ (sector + count)>>1,
+ blk_size[major][MINOR(bh->b_rdev)]);
goto end_io;
}
}
max_req = (NR_REQUEST * 2) / 3;
break;
default:
- printk(KERN_ERR "make_request: bad block dev cmd,"
- " must be R/W/RA/WA\n");
+ BUG();
goto end_io;
}
#endif
/* look for a free request. */
- /* Loop uses two requests, 1 for loop and 1 for the real device.
- * Cut max_req in half to avoid running out and deadlocking. */
+ /*
+ * Loop uses two requests, 1 for loop and 1 for the real device.
+ * Cut max_req in half to avoid running out and deadlocking.
+ */
if ((major == LOOP_MAJOR) || (major == NBD_MAJOR))
- max_req >>= 1;
+ max_req >>= 1;
/*
* Try to coalesce the new request with old requests
req = q->current_request;
if (!req) {
/* MD and loop can't handle plugging without deadlocking */
- if (major != MD_MAJOR && major != LOOP_MAJOR &&
- major != DDV_MAJOR && major != NBD_MAJOR
- && q->use_plug)
- plug_device(q); /* is atomic */
+ if (q->plug_device_fn)
+ q->plug_device_fn(q, bh->b_rdev); /* is atomic */
+ else
+ generic_plug_device(q, bh->b_rdev); /* is atomic */
goto get_rq;
}
get_rq:
req = get_request(max_req, bh->b_rdev);
- spin_unlock_irqrestore(&io_request_lock,flags);
-
-/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
+ /*
+ * if no request available: if rw_ahead, forget it,
+ * otherwise try again blocking..
+ */
if (!req) {
+ spin_unlock_irqrestore(&io_request_lock,flags);
if (rw_ahead)
goto end_io;
req = __get_request_wait(max_req, bh->b_rdev);
+ spin_lock_irqsave(&io_request_lock,flags);
+ }
+	/*
+	 * Don't start the IO if the buffer has been
+	 * invalidated meanwhile. (We have to do this
+	 * within the io request lock and atomically
+	 * before adding the request; see buffer.c's
+	 * insert_into_queues_exclusive() function.)
+	 */
+ if (!test_bit(BH_Req, &bh->b_state)) {
+ req->rq_status = RQ_INACTIVE;
+ spin_unlock_irqrestore(&io_request_lock,flags);
+ /*
+ * A fake 'everything went ok' completion event.
+	 * The bh doesn't matter anymore, but we should not
+ * signal errors to RAID levels.
+ */
+ bh->b_end_io(bh, 1);
+ return;
}
/* fill up the request-info, and add it to the queue */
req->bh = bh;
req->bhtail = bh;
req->next = NULL;
- add_request(q, req);
+ __add_request(q, req);
+ spin_unlock_irqrestore(&io_request_lock, flags);
return;
end_io:
bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
}
-void make_request(int major,int rw, struct buffer_head * bh)
+void generic_make_request(int rw, struct buffer_head * bh)
{
request_queue_t * q;
unsigned long flags;
-
- q = get_queue(bh->b_dev);
- __make_request(q, major, rw, bh);
+ q = blk_get_queue(bh->b_rdev);
+
+ __make_request(q, rw, bh);
spin_lock_irqsave(&io_request_lock,flags);
- if( !q->plugged )
+ if (q && !q->plugged)
(q->request_fn)(q);
spin_unlock_irqrestore(&io_request_lock,flags);
}
-
/* This function can be used to request a number of buffers from a block
device. Currently the only restriction is that all buffers must belong to
the same device */
-void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
+static void __ll_rw_block(int rw, int nr, struct buffer_head * bh[],int haslock)
{
unsigned int major;
int correct_size;
- request_queue_t * q;
- unsigned long flags;
+ request_queue_t *q;
int i;
-
major = MAJOR(bh[0]->b_dev);
- if (!(q = get_queue(bh[0]->b_dev))) {
+ q = blk_get_queue(bh[0]->b_dev);
+ if (!q) {
printk(KERN_ERR
"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
goto sorry;
}
- /* Determine correct block size for this device. */
+ /* Determine correct block size for this device. */
correct_size = BLOCK_SIZE;
if (blksize_size[major]) {
i = blksize_size[major][MINOR(bh[0]->b_dev)];
correct_size = i;
}
- /* Verify requested block sizes. */
+ /* Verify requested block sizes. */
for (i = 0; i < nr; i++) {
if (bh[i]->b_size != correct_size) {
printk(KERN_NOTICE "ll_rw_block: device %s: "
correct_size, bh[i]->b_size);
goto sorry;
}
-
- /* Md remaps blocks now */
- bh[i]->b_rdev = bh[i]->b_dev;
- bh[i]->b_rsector=bh[i]->b_blocknr*(bh[i]->b_size >> 9);
-#ifdef CONFIG_BLK_DEV_MD
- if (major==MD_MAJOR &&
- md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
- &bh[i]->b_rsector, bh[i]->b_size >> 9)) {
- printk (KERN_ERR
- "Bad md_map in ll_rw_block\n");
- goto sorry;
- }
-#endif
}
if ((rw & WRITE) && is_read_only(bh[0]->b_dev)) {
}
for (i = 0; i < nr; i++) {
+ /* Only one thread can actually submit the I/O. */
+ if (haslock) {
+ if (!buffer_locked(bh[i]))
+ BUG();
+ } else {
+ if (test_and_set_bit(BH_Lock, &bh[i]->b_state))
+ continue;
+ }
set_bit(BH_Req, &bh[i]->b_state);
-#ifdef CONFIG_BLK_DEV_MD
- if (MAJOR(bh[i]->b_dev) == MD_MAJOR) {
- md_make_request(MINOR (bh[i]->b_dev), rw, bh[i]);
- continue;
+
+ if (q->make_request_fn)
+ q->make_request_fn(rw, bh[i]);
+ else {
+ bh[i]->b_rdev = bh[i]->b_dev;
+ bh[i]->b_rsector = bh[i]->b_blocknr*(bh[i]->b_size>>9);
+
+ generic_make_request(rw, bh[i]);
}
-#endif
- __make_request(q, MAJOR(bh[i]->b_rdev), rw, bh[i]);
}
- spin_lock_irqsave(&io_request_lock,flags);
- if( !q->plugged )
- {
- (q->request_fn)(q);
- }
- spin_unlock_irqrestore(&io_request_lock,flags);
return;
- sorry:
+sorry:
for (i = 0; i < nr; i++) {
		mark_buffer_clean(bh[i]);	/* remember to refile it */
clear_bit(BH_Uptodate, &bh[i]->b_state);
return;
}
+void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
+{
+ __ll_rw_block(rw, nr, bh, 0);
+}
+
+void ll_rw_block_locked(int rw, int nr, struct buffer_head * bh[])
+{
+ __ll_rw_block(rw, nr, bh, 1);
+}
+
#ifdef CONFIG_STRAM_SWAP
-extern int stram_device_init( void );
+extern int stram_device_init (void);
#endif
/*
* 1 means we are done
*/
-int
-end_that_request_first( struct request *req, int uptodate, char *name )
+int end_that_request_first (struct request *req, int uptodate, char *name)
{
struct buffer_head * bh;
int nsect;
return 0;
}
-void
-end_that_request_last( struct request *req )
+void end_that_request_last(struct request *req)
{
if (req->sem != NULL)
up(req->sem);
struct blk_dev_struct *dev;
for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
- dev->queue = NULL;
+ dev->queue = NULL;
blk_init_queue(&dev->request_queue, NULL);
}
sbpcd_init();
#endif CONFIG_SBPCD
#ifdef CONFIG_AZTCD
- aztcd_init();
+ aztcd_init();
#endif CONFIG_AZTCD
#ifdef CONFIG_CDU535
sony535_init();
-
/*
md.c : Multiple Devices driver for Linux
- Copyright (C) 1994-96 Marc ZYNGIER
- <zyngier@ufr-info-p7.ibp.fr> or
- <maz@gloups.fdn.fr>
+ Copyright (C) 1998, 1999, 2000 Ingo Molnar
- A lot of inspiration came from hd.c ...
+ completely rewritten, based on the MD driver code from Marc Zyngier
- kerneld support by Boris Tobotras <boris@xtalk.msk.su>
- boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
+ Changes:
- RAID-1/RAID-5 extensions by:
- Ingo Molnar, Miguel de Icaza, Gadi Oxman
+ - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
+ - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
+ - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
+ - kmod support by: Cyrus Durgin
+ - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
- Changes for kmod by:
- Cyrus Durgin
-
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/*
- * Current RAID-1,4,5 parallel reconstruction speed limit is 1024 KB/sec, so
- * the extra system load does not show up that much. Increase it if your
- * system can take more.
- */
-#define SPEED_LIMIT 1024
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/malloc.h>
-#include <linux/mm.h>
-#include <linux/md.h>
-#include <linux/hdreg.h>
-#include <linux/stat.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/smp_lock.h>
+#include <linux/raid/md.h>
+#include <linux/raid/xor.h>
+
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
-#include <linux/errno.h>
-#include <linux/init.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
+#include <asm/unaligned.h>
+
+extern asmlinkage int sys_sched_yield(void);
+extern asmlinkage int sys_setsid(void);
+
+extern unsigned long io_events[MAX_BLKDEV];
+
#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#include <linux/blk.h>
-#include <linux/blkpg.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
-#include <asm/atomic.h>
#ifdef CONFIG_MD_BOOT
-extern kdev_t name_to_kdev_t(char *line) __init;
+extern kdev_t name_to_kdev_t(char *line) md__init;
#endif
-static struct hd_struct md_hd_struct[MAX_MD_DEV];
-static int md_blocksizes[MAX_MD_DEV];
-int md_maxreadahead[MAX_MD_DEV];
-#if SUPPORT_RECONSTRUCTION
-static struct md_thread *md_sync_thread = NULL;
-#endif /* SUPPORT_RECONSTRUCTION */
+#define DEBUG 0
+#if DEBUG
+# define dprintk(x...) printk(x)
+#else
+# define dprintk(x...) do { } while(0)
+#endif
+
+static mdk_personality_t *pers[MAX_PERSONALITY] = {NULL, };
+
+/*
+ * these have to be allocated separately because external
+ * subsystems want to have a pre-defined structure
+ */
+struct hd_struct md_hd_struct[MAX_MD_DEVS];
+static int md_blocksizes[MAX_MD_DEVS];
+static int md_maxreadahead[MAX_MD_DEVS];
+static mdk_thread_t *md_recovery_thread = NULL;
-int md_size[MAX_MD_DEV]={0, };
+int md_size[MAX_MD_DEVS] = {0, };
static struct gendisk md_gendisk=
{
- MD_MAJOR,
- "md",
- 0,
- 1,
- md_hd_struct,
- md_size,
- MAX_MD_DEV,
- NULL,
- NULL
+ MD_MAJOR,
+ "md",
+ 0,
+ 1,
+ md_hd_struct,
+ md_size,
+ MAX_MD_DEVS,
+ NULL,
+ NULL
};
-static struct md_personality *pers[MAX_PERSONALITY]={NULL, };
-struct md_dev md_dev[MAX_MD_DEV];
-
-int md_thread(void * arg);
+void md_plug_device (request_queue_t *mdqueue, kdev_t dev)
+{
+ mdk_rdev_t * rdev;
+ struct md_list_head *tmp;
+ request_queue_t *q;
+ mddev_t *mddev;
+
+ if (!md_test_and_set_bit(0, (atomic_t *)&mdqueue->plugged)) {
+ mddev = kdev_to_mddev(dev);
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ q = blk_get_queue(rdev->dev);
+ generic_unplug_device(q);
+ }
+ queue_task(&mdqueue->plug_tq, &tq_disk);
+ }
+}
-static int legacy_raid_sb (int minor, int pnum)
+static void md_unplug_device (void * data)
{
- int i, factor;
+ mdk_rdev_t * rdev;
+ struct md_list_head *tmp;
+ mddev_t *mddev = (mddev_t *)data;
+ request_queue_t *mdqueue = &mddev->queue, *q;
+
+ clear_bit(0, (atomic_t *)&mdqueue->plugged);
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ q = blk_get_queue(rdev->dev);
+ generic_unplug_device(q);
+ }
+}
- factor = 1 << FACTOR_SHIFT(FACTOR((md_dev+minor)));
+/*
+ * Enables to iterate over all existing md arrays
+ */
+static MD_LIST_HEAD(all_mddevs);
- /*****
- * do size and offset calculations.
- */
- for (i=0; i<md_dev[minor].nb_dev; i++) {
- md_dev[minor].devices[i].size &= ~(factor - 1);
- md_size[minor] += md_dev[minor].devices[i].size;
- md_dev[minor].devices[i].offset=i ? (md_dev[minor].devices[i-1].offset +
- md_dev[minor].devices[i-1].size) : 0;
- }
- if (pnum == RAID0 >> PERSONALITY_SHIFT)
- md_maxreadahead[minor] = MD_DEFAULT_DISK_READAHEAD * md_dev[minor].nb_dev;
- return 0;
+/*
+ * The mapping between kdev and mddev is not necessarily a simple
+ * one! Eg. HSM uses several sub-devices to implement Logical
+ * Volumes. All these sub-devices map to the same mddev.
+ */
+dev_mapping_t mddev_map [MAX_MD_DEVS] = { {NULL, 0}, };
+
+void add_mddev_mapping (mddev_t * mddev, kdev_t dev, void *data)
+{
+ unsigned int minor = MINOR(dev);
+
+ if (MAJOR(dev) != MD_MAJOR) {
+ MD_BUG();
+ return;
+ }
+ if (mddev_map[minor].mddev != NULL) {
+ MD_BUG();
+ return;
+ }
+ mddev_map[minor].mddev = mddev;
+ mddev_map[minor].data = data;
}
-static void free_sb (struct md_dev *mddev)
+void del_mddev_mapping (mddev_t * mddev, kdev_t dev)
{
- int i;
- struct real_dev *realdev;
+ unsigned int minor = MINOR(dev);
- if (mddev->sb) {
- free_page((unsigned long) mddev->sb);
- mddev->sb = NULL;
+ if (MAJOR(dev) != MD_MAJOR) {
+ MD_BUG();
+ return;
}
- for (i = 0; i <mddev->nb_dev; i++) {
- realdev = mddev->devices + i;
- if (realdev->sb) {
- free_page((unsigned long) realdev->sb);
- realdev->sb = NULL;
- }
+ if (mddev_map[minor].mddev != mddev) {
+ MD_BUG();
+ return;
}
+ mddev_map[minor].mddev = NULL;
+ mddev_map[minor].data = NULL;
}
-/*
- * Check one RAID superblock for generic plausibility
- */
+static request_queue_t *md_get_queue (kdev_t dev)
+{
+ mddev_t *mddev = kdev_to_mddev(dev);
-#define BAD_MAGIC KERN_ERR \
-"md: %s: invalid raid superblock magic (%x) on block %u\n"
+ if (!mddev)
+ return NULL;
+ return &mddev->queue;
+}
-#define OUT_OF_MEM KERN_ALERT \
-"md: out of memory.\n"
+static void do_md_request (request_queue_t * q)
+{
+ printk(KERN_ALERT "Got md request, not good...");
+ BUG();
+ return;
+}
-#define NO_DEVICE KERN_ERR \
-"md: disabled device %s\n"
+void md_make_request (int rw, struct buffer_head * bh)
+{
+ mddev_t *mddev = kdev_to_mddev(bh->b_dev);
-#define SUCCESS 0
-#define FAILURE -1
+ if (!mddev || !mddev->pers)
+ bh->b_end_io(bh, 0);
+ else {
+ if ((rw == READ || rw == READA) && buffer_uptodate(bh))
+ bh->b_end_io(bh, 1);
+ else
+ mddev->pers->make_request(mddev, rw, bh);
+ }
+}
-static int analyze_one_sb (struct real_dev * rdev)
+static mddev_t * alloc_mddev (kdev_t dev)
{
- int ret = FAILURE;
- struct buffer_head *bh;
- kdev_t dev = rdev->dev;
- md_superblock_t *sb;
+ request_queue_t *q;
+ mddev_t *mddev;
+
+ if (MAJOR(dev) != MD_MAJOR) {
+ MD_BUG();
+ return 0;
+ }
+ mddev = (mddev_t *) kmalloc(sizeof(*mddev), GFP_KERNEL);
+ if (!mddev)
+ return NULL;
+
+ memset(mddev, 0, sizeof(*mddev));
+
+ mddev->__minor = MINOR(dev);
+ init_MUTEX(&mddev->reconfig_sem);
+ init_MUTEX(&mddev->recovery_sem);
+ init_MUTEX(&mddev->resync_sem);
+ MD_INIT_LIST_HEAD(&mddev->disks);
+ MD_INIT_LIST_HEAD(&mddev->all_mddevs);
+
+ q = &mddev->queue;
+ blk_init_queue(q, DEVICE_REQUEST);
+ blk_queue_pluggable(q, md_plug_device);
+ blk_queue_make_request(q, md_make_request);
+
+ q->plug_tq.sync = 0;
+ q->plug_tq.routine = &md_unplug_device;
+ q->plug_tq.data = mddev;
/*
- * Read the superblock, it's at the end of the disk
+ * The 'base' mddev is the one with data NULL.
+ * personalities can create additional mddevs
+ * if necessary.
*/
- rdev->sb_offset = MD_NEW_SIZE_BLOCKS (blk_size[MAJOR(dev)][MINOR(dev)]);
- set_blocksize (dev, MD_SB_BYTES);
- bh = bread (dev, rdev->sb_offset / MD_SB_BLOCKS, MD_SB_BYTES);
-
- if (bh) {
- sb = (md_superblock_t *) bh->b_data;
- if (sb->md_magic != MD_SB_MAGIC) {
- printk (BAD_MAGIC, kdevname(dev),
- sb->md_magic, rdev->sb_offset);
- goto abort;
- }
- rdev->sb = (md_superblock_t *) __get_free_page(GFP_KERNEL);
- if (!rdev->sb) {
- printk (OUT_OF_MEM);
- goto abort;
- }
- memcpy (rdev->sb, bh->b_data, MD_SB_BYTES);
+ add_mddev_mapping(mddev, dev, 0);
+ md_list_add(&mddev->all_mddevs, &all_mddevs);
- rdev->size = sb->size;
- } else
- printk (NO_DEVICE,kdevname(rdev->dev));
- ret = SUCCESS;
-abort:
- if (bh)
- brelse (bh);
- return ret;
+ return mddev;
}
-#undef SUCCESS
-#undef FAILURE
+static void free_mddev (mddev_t *mddev)
+{
+ if (!mddev) {
+ MD_BUG();
+ return;
+ }
-#undef BAD_MAGIC
-#undef OUT_OF_MEM
-#undef NO_DEVICE
+ /*
+ * Make sure nobody else is using this mddev
+ * (careful, we rely on the global kernel lock here)
+ */
+ while (md_atomic_read(&mddev->resync_sem.count) != 1)
+ schedule();
+ while (md_atomic_read(&mddev->recovery_sem.count) != 1)
+ schedule();
+
+ del_mddev_mapping(mddev, MKDEV(MD_MAJOR, mdidx(mddev)));
+ md_list_del(&mddev->all_mddevs);
+ MD_INIT_LIST_HEAD(&mddev->all_mddevs);
+ kfree(mddev);
+}
-/*
- * Check a full RAID array for plausibility
- */
+struct gendisk * find_gendisk (kdev_t dev)
+{
+ struct gendisk *tmp = gendisk_head;
-#define INCONSISTENT KERN_ERR \
-"md: superblock inconsistency -- run ckraid\n"
+ while (tmp != NULL) {
+ if (tmp->major == MAJOR(dev))
+ return (tmp);
+ tmp = tmp->next;
+ }
+ return (NULL);
+}
-#define OUT_OF_DATE KERN_ERR \
-"md: superblock update time inconsistenty -- using the most recent one\n"
+mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
+{
+ mdk_rdev_t * rdev;
+ struct md_list_head *tmp;
-#define OLD_VERSION KERN_ALERT \
-"md: %s: unsupported raid array version %d.%d.%d\n"
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ if (rdev->desc_nr == nr)
+ return rdev;
+ }
+ return NULL;
+}
-#define NOT_CLEAN KERN_ERR \
-"md: %s: raid array is not clean -- run ckraid\n"
+mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev)
+{
+ struct md_list_head *tmp;
+ mdk_rdev_t *rdev;
-#define NOT_CLEAN_IGNORE KERN_ERR \
-"md: %s: raid array is not clean -- reconstructing parity\n"
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ if (rdev->dev == dev)
+ return rdev;
+ }
+ return NULL;
+}
-#define UNKNOWN_LEVEL KERN_ERR \
-"md: %s: unsupported raid level %d\n"
+static MD_LIST_HEAD(device_names);
-static int analyze_sbs (int minor, int pnum)
+char * partition_name (kdev_t dev)
{
- struct md_dev *mddev = md_dev + minor;
- int i, N = mddev->nb_dev, out_of_date = 0;
- struct real_dev * disks = mddev->devices;
- md_superblock_t *sb, *freshest = NULL;
+ struct gendisk *hd;
+ static char nomem [] = "<nomem>";
+ dev_name_t *dname;
+ struct md_list_head *tmp = device_names.next;
+
+ while (tmp != &device_names) {
+ dname = md_list_entry(tmp, dev_name_t, list);
+ if (dname->dev == dev)
+ return dname->name;
+ tmp = tmp->next;
+ }
+
+ dname = (dev_name_t *) kmalloc(sizeof(*dname), GFP_KERNEL);
+ if (!dname)
+ return nomem;
/*
- * RAID-0 and linear don't use a RAID superblock
+ * ok, add this new device name to the list
*/
- if (pnum == RAID0 >> PERSONALITY_SHIFT ||
- pnum == LINEAR >> PERSONALITY_SHIFT)
- return legacy_raid_sb (minor, pnum);
+ hd = find_gendisk (dev);
+
+ if (!hd)
+ sprintf (dname->name, "[dev %s]", kdevname(dev));
+ else
+ disk_name (hd, MINOR(dev), dname->name);
+
+ dname->dev = dev;
+ MD_INIT_LIST_HEAD(&dname->list);
+ md_list_add(&dname->list, &device_names);
+
+ return dname->name;
+}
+
+static unsigned int calc_dev_sboffset (kdev_t dev, mddev_t *mddev,
+ int persistent)
+{
+ unsigned int size = 0;
+
+ if (blk_size[MAJOR(dev)])
+ size = blk_size[MAJOR(dev)][MINOR(dev)];
+ if (persistent)
+ size = MD_NEW_SIZE_BLOCKS(size);
+ return size;
+}
+
+static unsigned int calc_dev_size (kdev_t dev, mddev_t *mddev, int persistent)
+{
+ unsigned int size;
+
+ size = calc_dev_sboffset(dev, mddev, persistent);
+ if (!mddev->sb) {
+ MD_BUG();
+ return size;
+ }
+ if (mddev->sb->chunk_size)
+ size &= ~(mddev->sb->chunk_size/1024 - 1);
+ return size;
+}
+
+static unsigned int zoned_raid_size (mddev_t *mddev)
+{
+ unsigned int mask;
+ mdk_rdev_t * rdev;
+ struct md_list_head *tmp;
+ if (!mddev->sb) {
+ MD_BUG();
+ return -EINVAL;
+ }
/*
- * Verify the RAID superblock on each real device
+ * do size and offset calculations.
*/
- for (i = 0; i < N; i++)
- if (analyze_one_sb(disks+i))
- goto abort;
+ mask = ~(mddev->sb->chunk_size/1024 - 1);
+
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev->size &= mask;
+ md_size[mdidx(mddev)] += rdev->size;
+ }
+ return 0;
+}
+
+/*
+ * We check whether all devices are numbered from 0 to nb_dev-1. The
+ * order is guaranteed even after device name changes.
+ *
+ * Some personalities (raid0, linear) use this. Personalities that
+ * provide data have to be able to deal with loss of individual
+ * disks, so they do their checking themselves.
+ */
+int md_check_ordering (mddev_t *mddev)
+{
+ int i, c;
+ mdk_rdev_t *rdev;
+ struct md_list_head *tmp;
/*
- * The superblock constant part has to be the same
- * for all disks in the array.
+ * First, all devices must be fully functional
*/
- sb = NULL;
- for (i = 0; i < N; i++) {
- if (!disks[i].sb)
- continue;
- if (!sb) {
- sb = disks[i].sb;
- continue;
- }
- if (memcmp(sb,
- disks[i].sb, MD_SB_GENERIC_CONSTANT_WORDS * 4)) {
- printk (INCONSISTENT);
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ if (rdev->faulty) {
+ printk("md: md%d's device %s faulty, aborting.\n",
+ mdidx(mddev), partition_name(rdev->dev));
goto abort;
}
}
- /*
- * OK, we have all disks and the array is ready to run. Let's
- * find the freshest superblock, that one will be the superblock
- * that represents the whole array.
- */
- if ((sb = mddev->sb = (md_superblock_t *) __get_free_page (GFP_KERNEL)) == NULL)
+ c = 0;
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ c++;
+ }
+ if (c != mddev->nb_dev) {
+ MD_BUG();
goto abort;
- freshest = NULL;
- for (i = 0; i < N; i++) {
- if (!disks[i].sb)
- continue;
- if (!freshest) {
- freshest = disks[i].sb;
- continue;
- }
- /*
- * Find the newest superblock version
- */
- if (disks[i].sb->utime != freshest->utime) {
- out_of_date = 1;
- if (disks[i].sb->utime > freshest->utime)
- freshest = disks[i].sb;
- }
}
- if (out_of_date)
- printk(OUT_OF_DATE);
- memcpy (sb, freshest, sizeof(*freshest));
-
- /*
- * Check if we can support this RAID array
- */
- if (sb->major_version != MD_MAJOR_VERSION ||
- sb->minor_version > MD_MINOR_VERSION) {
-
- printk (OLD_VERSION, kdevname(MKDEV(MD_MAJOR, minor)),
- sb->major_version, sb->minor_version,
- sb->patch_version);
+ if (mddev->nb_dev != mddev->sb->raid_disks) {
+ printk("md: md%d, array needs %d disks, has %d, aborting.\n",
+ mdidx(mddev), mddev->sb->raid_disks, mddev->nb_dev);
goto abort;
}
-
/*
- * We need to add this as a superblock option.
+ * Now the numbering check
*/
-#if SUPPORT_RECONSTRUCTION
- if (sb->state != (1 << MD_SB_CLEAN)) {
- if (sb->level == 1) {
- printk (NOT_CLEAN, kdevname(MKDEV(MD_MAJOR, minor)));
+ for (i = 0; i < mddev->nb_dev; i++) {
+ c = 0;
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ if (rdev->desc_nr == i)
+ c++;
+ }
+ if (c == 0) {
+ printk("md: md%d, missing disk #%d, aborting.\n",
+ mdidx(mddev), i);
goto abort;
- } else
- printk (NOT_CLEAN_IGNORE, kdevname(MKDEV(MD_MAJOR, minor)));
- }
-#else
- if (sb->state != (1 << MD_SB_CLEAN)) {
- printk (NOT_CLEAN, kdevname(MKDEV(MD_MAJOR, minor)));
- goto abort;
- }
-#endif /* SUPPORT_RECONSTRUCTION */
-
- switch (sb->level) {
- case 1:
- md_size[minor] = sb->size;
- md_maxreadahead[minor] = MD_DEFAULT_DISK_READAHEAD;
- break;
- case 4:
- case 5:
- md_size[minor] = sb->size * (sb->raid_disks - 1);
- md_maxreadahead[minor] = MD_DEFAULT_DISK_READAHEAD * (sb->raid_disks - 1);
- break;
- default:
- printk (UNKNOWN_LEVEL, kdevname(MKDEV(MD_MAJOR, minor)),
- sb->level);
+ }
+ if (c > 1) {
+ printk("md: md%d, too many disks #%d, aborting.\n",
+ mdidx(mddev), i);
goto abort;
+ }
}
return 0;
abort:
- free_sb(mddev);
return 1;
}
-#undef INCONSISTENT
-#undef OUT_OF_DATE
-#undef OLD_VERSION
-#undef NOT_CLEAN
-#undef OLD_LEVEL
-
-int md_update_sb(int minor)
+static void remove_descriptor (mdp_disk_t *disk, mdp_super_t *sb)
{
- struct md_dev *mddev = md_dev + minor;
- struct buffer_head *bh;
- md_superblock_t *sb = mddev->sb;
- struct real_dev *realdev;
- kdev_t dev;
- int i;
- u32 sb_offset;
-
- sb->utime = CURRENT_TIME;
- for (i = 0; i < mddev->nb_dev; i++) {
- realdev = mddev->devices + i;
- if (!realdev->sb)
- continue;
- dev = realdev->dev;
- sb_offset = realdev->sb_offset;
- set_blocksize(dev, MD_SB_BYTES);
- printk("md: updating raid superblock on device %s, sb_offset == %u\n", kdevname(dev), sb_offset);
- bh = getblk(dev, sb_offset / MD_SB_BLOCKS, MD_SB_BYTES);
- if (bh) {
- sb = (md_superblock_t *) bh->b_data;
- memcpy(sb, mddev->sb, MD_SB_BYTES);
- memcpy(&sb->descriptor, sb->disks + realdev->sb->descriptor.number, MD_SB_DESCRIPTOR_WORDS * 4);
- mark_buffer_uptodate(bh, 1);
- mark_buffer_dirty(bh, 1);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- bforget(bh);
- fsync_dev(dev);
- invalidate_buffers(dev);
- } else
- printk(KERN_ERR "md: getblk failed for device %s\n", kdevname(dev));
+ if (disk_active(disk)) {
+ sb->working_disks--;
+ } else {
+ if (disk_spare(disk)) {
+ sb->spare_disks--;
+ sb->working_disks--;
+ } else {
+ sb->failed_disks--;
+ }
}
- return 0;
+ sb->nr_disks--;
+ disk->major = 0;
+ disk->minor = 0;
+ mark_disk_removed(disk);
}
-static int do_md_run (int minor, int repart)
+#define BAD_MAGIC KERN_ERR \
+"md: invalid raid superblock magic on %s\n"
+
+#define BAD_MINOR KERN_ERR \
+"md: %s: invalid raid minor (%x)\n"
+
+#define OUT_OF_MEM KERN_ALERT \
+"md: out of memory.\n"
+
+#define NO_SB KERN_ERR \
+"md: disabled device %s, could not read superblock.\n"
+
+#define BAD_CSUM KERN_WARNING \
+"md: invalid superblock checksum on %s\n"
+
+static int alloc_array_sb (mddev_t * mddev)
{
- int pnum, i, min, factor, err;
+ if (mddev->sb) {
+ MD_BUG();
+ return 0;
+ }
- if (!md_dev[minor].nb_dev)
- return -EINVAL;
-
- if (md_dev[minor].pers)
- return -EBUSY;
+ mddev->sb = (mdp_super_t *) __get_free_page (GFP_KERNEL);
+ if (!mddev->sb)
+ return -ENOMEM;
+ md_clear_page((unsigned long)mddev->sb);
+ return 0;
+}
- md_dev[minor].repartition=repart;
-
- if ((pnum=PERSONALITY(&md_dev[minor]) >> (PERSONALITY_SHIFT))
- >= MAX_PERSONALITY)
- return -EINVAL;
-
- /* Only RAID-1 and RAID-5 can have MD devices as underlying devices */
- if (pnum != (RAID1 >> PERSONALITY_SHIFT) && pnum != (RAID5 >> PERSONALITY_SHIFT)){
- for (i = 0; i < md_dev [minor].nb_dev; i++)
- if (MAJOR (md_dev [minor].devices [i].dev) == MD_MAJOR)
- return -EINVAL;
- }
- if (!pers[pnum])
- {
-#ifdef CONFIG_KMOD
- char module_name[80];
- sprintf (module_name, "md-personality-%d", pnum);
- request_module (module_name);
- if (!pers[pnum])
-#endif
- return -EINVAL;
- }
-
- factor = min = 1 << FACTOR_SHIFT(FACTOR((md_dev+minor)));
-
- for (i=0; i<md_dev[minor].nb_dev; i++)
- if (md_dev[minor].devices[i].size<min)
- {
- printk ("Dev %s smaller than %dk, cannot shrink\n",
- partition_name (md_dev[minor].devices[i].dev), min);
- return -EINVAL;
- }
-
- for (i=0; i<md_dev[minor].nb_dev; i++) {
- fsync_dev(md_dev[minor].devices[i].dev);
- invalidate_buffers(md_dev[minor].devices[i].dev);
- }
-
- /* Resize devices according to the factor. It is used to align
- partitions size on a given chunk size. */
- md_size[minor]=0;
+static int alloc_disk_sb (mdk_rdev_t * rdev)
+{
+ if (rdev->sb)
+ MD_BUG();
- /*
- * Analyze the raid superblock
- */
- if (analyze_sbs(minor, pnum))
- return -EINVAL;
+ rdev->sb = (mdp_super_t *) __get_free_page(GFP_KERNEL);
+ if (!rdev->sb) {
+ printk (OUT_OF_MEM);
+ return -EINVAL;
+ }
+ md_clear_page((unsigned long)rdev->sb);
- md_dev[minor].pers=pers[pnum];
-
- if ((err=md_dev[minor].pers->run (minor, md_dev+minor)))
- {
- md_dev[minor].pers=NULL;
- free_sb(md_dev + minor);
- return (err);
- }
-
- if (pnum != RAID0 >> PERSONALITY_SHIFT && pnum != LINEAR >> PERSONALITY_SHIFT)
- {
- md_dev[minor].sb->state &= ~(1 << MD_SB_CLEAN);
- md_update_sb(minor);
- }
-
- /* FIXME : We assume here we have blocks
- that are twice as large as sectors.
- THIS MAY NOT BE TRUE !!! */
- md_hd_struct[minor].start_sect=0;
- md_hd_struct[minor].nr_sects=md_size[minor]<<1;
-
- read_ahead[MD_MAJOR] = 128;
- return (0);
+ return 0;
}
-static int do_md_stop (int minor, struct inode *inode)
+static void free_disk_sb (mdk_rdev_t * rdev)
{
- int i;
-
- if (inode->i_count>1 || md_dev[minor].busy>1) {
- /*
- * ioctl : one open channel
- */
- printk ("STOP_MD md%x failed : i_count=%d, busy=%d\n",
- minor, inode->i_count, md_dev[minor].busy);
- return -EBUSY;
+ if (rdev->sb) {
+ free_page((unsigned long) rdev->sb);
+ rdev->sb = NULL;
+ rdev->sb_offset = 0;
+ rdev->size = 0;
+ } else {
+ if (!rdev->faulty)
+ MD_BUG();
}
-
- if (md_dev[minor].pers) {
- /*
- * It is safe to call stop here, it only frees private
- * data. Also, it tells us if a device is unstoppable
- * (eg. resyncing is in progress)
- */
- if (md_dev[minor].pers->stop (minor, md_dev+minor))
- return -EBUSY;
- /*
- * The device won't exist anymore -> flush it now
- */
- fsync_dev (inode->i_rdev);
- invalidate_buffers (inode->i_rdev);
- if (md_dev[minor].sb) {
- md_dev[minor].sb->state |= 1 << MD_SB_CLEAN;
- md_update_sb(minor);
- }
+}
+
+static void mark_rdev_faulty (mdk_rdev_t * rdev)
+{
+ if (!rdev) {
+ MD_BUG();
+ return;
}
-
- /* Remove locks. */
- if (md_dev[minor].sb)
- free_sb(md_dev + minor);
- for (i=0; i<md_dev[minor].nb_dev; i++)
- clear_inode (md_dev[minor].devices[i].inode);
-
- md_dev[minor].nb_dev=md_size[minor]=0;
- md_hd_struct[minor].nr_sects=0;
- md_dev[minor].pers=NULL;
-
- read_ahead[MD_MAJOR] = 128;
-
- return (0);
+ free_disk_sb(rdev);
+ rdev->faulty = 1;
}
-static int do_md_add (int minor, kdev_t dev)
-{
- int i;
- int hot_add=0;
- struct real_dev *realdev;
+static int read_disk_sb (mdk_rdev_t * rdev)
+{
+ int ret = -EINVAL;
+ struct buffer_head *bh = NULL;
+ kdev_t dev = rdev->dev;
+ mdp_super_t *sb;
+ u32 sb_offset;
+
+ if (!rdev->sb) {
+ MD_BUG();
+ goto abort;
+ }
+
+ /*
+ * Calculate the position of the superblock,
+ * it's at the end of the disk
+ */
+ sb_offset = calc_dev_sboffset(rdev->dev, rdev->mddev, 1);
+ rdev->sb_offset = sb_offset;
+ printk("(read) %s's sb offset: %d", partition_name(dev),
+ sb_offset);
+ fsync_dev(dev);
+ set_blocksize (dev, MD_SB_BYTES);
+ bh = bread (dev, sb_offset / MD_SB_BLOCKS, MD_SB_BYTES);
+
+ if (bh) {
+ sb = (mdp_super_t *) bh->b_data;
+ memcpy (rdev->sb, sb, MD_SB_BYTES);
+ } else {
+ printk (NO_SB,partition_name(rdev->dev));
+ goto abort;
+ }
+ printk(" [events: %08lx]\n", (unsigned long)get_unaligned(&rdev->sb->events));
+ ret = 0;
+abort:
+ if (bh)
+ brelse (bh);
+ return ret;
+}
+
+static unsigned int calc_sb_csum (mdp_super_t * sb)
+{
+ unsigned int disk_csum, csum;
+
+ disk_csum = sb->sb_csum;
+ sb->sb_csum = 0;
+ csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
+ sb->sb_csum = disk_csum;
+ return csum;
+}
+
+/*
+ * Check one RAID superblock for generic plausibility
+ */
+
+static int check_disk_sb (mdk_rdev_t * rdev)
+{
+ mdp_super_t *sb;
+ int ret = -EINVAL;
+
+ sb = rdev->sb;
+ if (!sb) {
+ MD_BUG();
+ goto abort;
+ }
+
+ if (sb->md_magic != MD_SB_MAGIC) {
+ printk (BAD_MAGIC, partition_name(rdev->dev));
+ goto abort;
+ }
+
+ if (sb->md_minor >= MAX_MD_DEVS) {
+ printk (BAD_MINOR, partition_name(rdev->dev),
+ sb->md_minor);
+ goto abort;
+ }
+
+ if (calc_sb_csum(sb) != sb->sb_csum)
+ printk(BAD_CSUM, partition_name(rdev->dev));
+ ret = 0;
+abort:
+ return ret;
+}
+
+static kdev_t dev_unit(kdev_t dev)
+{
+ unsigned int mask;
+ struct gendisk *hd = find_gendisk(dev);
+
+ if (!hd)
+ return 0;
+ mask = ~((1 << hd->minor_shift) - 1);
+
+ return MKDEV(MAJOR(dev), MINOR(dev) & mask);
+}
+
+static mdk_rdev_t * match_dev_unit(mddev_t *mddev, kdev_t dev)
+{
+ struct md_list_head *tmp;
+ mdk_rdev_t *rdev;
+
+ ITERATE_RDEV(mddev,rdev,tmp)
+ if (dev_unit(rdev->dev) == dev_unit(dev))
+ return rdev;
+
+ return NULL;
+}
+
+static MD_LIST_HEAD(all_raid_disks);
+static MD_LIST_HEAD(pending_raid_disks);
+
+static void bind_rdev_to_array (mdk_rdev_t * rdev, mddev_t * mddev)
+{
+ mdk_rdev_t *same_pdev;
+
+ if (rdev->mddev) {
+ MD_BUG();
+ return;
+ }
+ same_pdev = match_dev_unit(mddev, rdev->dev);
+ if (same_pdev)
+ printk( KERN_WARNING
+"md%d: WARNING: %s appears to be on the same physical disk as %s. True\n"
+" protection against single-disk failure might be compromised.\n",
+ mdidx(mddev), partition_name(rdev->dev),
+ partition_name(same_pdev->dev));
+
+ md_list_add(&rdev->same_set, &mddev->disks);
+ rdev->mddev = mddev;
+ mddev->nb_dev++;
+ printk("bind<%s,%d>\n", partition_name(rdev->dev), mddev->nb_dev);
+}
+
+static void unbind_rdev_from_array (mdk_rdev_t * rdev)
+{
+ if (!rdev->mddev) {
+ MD_BUG();
+ return;
+ }
+ md_list_del(&rdev->same_set);
+ MD_INIT_LIST_HEAD(&rdev->same_set);
+ rdev->mddev->nb_dev--;
+ printk("unbind<%s,%d>\n", partition_name(rdev->dev),
+ rdev->mddev->nb_dev);
+ rdev->mddev = NULL;
+}
+
+/*
+ * prevent the device from being mounted, repartitioned or
+ * otherwise reused by a RAID array (or any other kernel
+ * subsystem), by opening the device. [simply getting an
+ * inode is not enough, the SCSI module usage code needs
+ * an explicit open() on the device]
+ */
+static int lock_rdev (mdk_rdev_t *rdev)
+{
+ int err = 0;
+
+ /*
+ * First insert a dummy inode.
+ */
+ if (rdev->inode)
+ MD_BUG();
+ rdev->inode = get_empty_inode();
+ if (!rdev->inode)
+ return -ENOMEM;
+ /*
+ * we dont care about any other fields
+ */
+ rdev->inode->i_dev = rdev->inode->i_rdev = rdev->dev;
+ insert_inode_hash(rdev->inode);
+
+ memset(&rdev->filp, 0, sizeof(rdev->filp));
+ rdev->filp.f_mode = 3; /* read write */
+ return err;
+}
+
+static void unlock_rdev (mdk_rdev_t *rdev)
+{
+ if (!rdev->inode)
+ MD_BUG();
+ iput(rdev->inode);
+ rdev->inode = NULL;
+}
+
+static void export_rdev (mdk_rdev_t * rdev)
+{
+ printk("export_rdev(%s)\n",partition_name(rdev->dev));
+ if (rdev->mddev)
+ MD_BUG();
+ unlock_rdev(rdev);
+ free_disk_sb(rdev);
+ md_list_del(&rdev->all);
+ MD_INIT_LIST_HEAD(&rdev->all);
+ if (rdev->pending.next != &rdev->pending) {
+ printk("(%s was pending)\n",partition_name(rdev->dev));
+ md_list_del(&rdev->pending);
+ MD_INIT_LIST_HEAD(&rdev->pending);
+ }
+ rdev->dev = 0;
+ rdev->faulty = 0;
+ kfree(rdev);
+}
+
+static void kick_rdev_from_array (mdk_rdev_t * rdev)
+{
+ unbind_rdev_from_array(rdev);
+ export_rdev(rdev);
+}
+
+static void export_array (mddev_t *mddev)
+{
+ struct md_list_head *tmp;
+ mdk_rdev_t *rdev;
+ mdp_super_t *sb = mddev->sb;
+
+ if (mddev->sb) {
+ mddev->sb = NULL;
+ free_page((unsigned long) sb);
+ }
+
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ if (!rdev->mddev) {
+ MD_BUG();
+ continue;
+ }
+ kick_rdev_from_array(rdev);
+ }
+ if (mddev->nb_dev)
+ MD_BUG();
+}
+
+#undef BAD_CSUM
+#undef BAD_MAGIC
+#undef OUT_OF_MEM
+#undef NO_SB
+
+static void print_desc(mdp_disk_t *desc)
+{
+ printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number,
+ partition_name(MKDEV(desc->major,desc->minor)),
+ desc->major,desc->minor,desc->raid_disk,desc->state);
+}
+
+static void print_sb(mdp_super_t *sb)
+{
+ int i;
+
+ printk(" SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
+ sb->major_version, sb->minor_version, sb->patch_version,
+ sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
+ sb->ctime);
+ printk(" L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", sb->level,
+ sb->size, sb->nr_disks, sb->raid_disks, sb->md_minor,
+ sb->layout, sb->chunk_size);
+ printk(" UT:%08x ST:%d AD:%d WD:%d FD:%d SD:%d CSUM:%08x E:%08lx\n",
+ sb->utime, sb->state, sb->active_disks, sb->working_disks,
+ sb->failed_disks, sb->spare_disks,
+ sb->sb_csum, (unsigned long)get_unaligned(&sb->events));
+
+ for (i = 0; i < MD_SB_DISKS; i++) {
+ mdp_disk_t *desc;
+
+ desc = sb->disks + i;
+ printk(" D %2d: ", i);
+ print_desc(desc);
+ }
+ printk(" THIS: ");
+ print_desc(&sb->this_disk);
+
+}
+
+static void print_rdev(mdk_rdev_t *rdev)
+{
+ printk(" rdev %s: O:%s, SZ:%08d F:%d DN:%d ",
+ partition_name(rdev->dev), partition_name(rdev->old_dev),
+ rdev->size, rdev->faulty, rdev->desc_nr);
+ if (rdev->sb) {
+ printk("rdev superblock:\n");
+ print_sb(rdev->sb);
+ } else
+ printk("no rdev superblock!\n");
+}
+
+void md_print_devices (void)
+{
+ struct md_list_head *tmp, *tmp2;
+ mdk_rdev_t *rdev;
+ mddev_t *mddev;
+
+ printk("\n");
+ printk(" **********************************\n");
+ printk(" * <COMPLETE RAID STATE PRINTOUT> *\n");
+ printk(" **********************************\n");
+ ITERATE_MDDEV(mddev,tmp) {
+ printk("md%d: ", mdidx(mddev));
+
+ ITERATE_RDEV(mddev,rdev,tmp2)
+ printk("<%s>", partition_name(rdev->dev));
+
+ if (mddev->sb) {
+ printk(" array superblock:\n");
+ print_sb(mddev->sb);
+ } else
+ printk(" no array superblock.\n");
+
+ ITERATE_RDEV(mddev,rdev,tmp2)
+ print_rdev(rdev);
+ }
+ printk(" **********************************\n");
+ printk("\n");
+}
+
+static int sb_equal ( mdp_super_t *sb1, mdp_super_t *sb2)
+{
+ int ret;
+ mdp_super_t *tmp1, *tmp2;
+
+ tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
+ tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
+
+ if (!tmp1 || !tmp2) {
+ ret = 0;
+ goto abort;
+ }
+
+ *tmp1 = *sb1;
+ *tmp2 = *sb2;
+
+ /*
+ * nr_disks is not constant
+ */
+ tmp1->nr_disks = 0;
+ tmp2->nr_disks = 0;
+
+ if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
+ ret = 0;
+ else
+ ret = 1;
+
+abort:
+ if (tmp1)
+ kfree(tmp1);
+ if (tmp2)
+ kfree(tmp2);
+
+ return ret;
+}
+
+static int uuid_equal(mdk_rdev_t *rdev1, mdk_rdev_t *rdev2)
+{
+ if ( (rdev1->sb->set_uuid0 == rdev2->sb->set_uuid0) &&
+ (rdev1->sb->set_uuid1 == rdev2->sb->set_uuid1) &&
+ (rdev1->sb->set_uuid2 == rdev2->sb->set_uuid2) &&
+ (rdev1->sb->set_uuid3 == rdev2->sb->set_uuid3))
+
+ return 1;
+
+ return 0;
+}
+
+static mdk_rdev_t * find_rdev_all (kdev_t dev)
+{
+ struct md_list_head *tmp;
+ mdk_rdev_t *rdev;
+
+ tmp = all_raid_disks.next;
+ while (tmp != &all_raid_disks) {
+ rdev = md_list_entry(tmp, mdk_rdev_t, all);
+ if (rdev->dev == dev)
+ return rdev;
+ tmp = tmp->next;
+ }
+ return NULL;
+}
+
+#define GETBLK_FAILED KERN_ERR \
+"md: getblk failed for device %s\n"
+
+static int write_disk_sb(mdk_rdev_t * rdev)
+{
+ struct buffer_head *bh;
+ kdev_t dev;
+ u32 sb_offset, size;
+ mdp_super_t *sb;
+
+ if (!rdev->sb) {
+ MD_BUG();
+ return -1;
+ }
+ if (rdev->faulty) {
+ MD_BUG();
+ return -1;
+ }
+ if (rdev->sb->md_magic != MD_SB_MAGIC) {
+ MD_BUG();
+ return -1;
+ }
+
+ dev = rdev->dev;
+ sb_offset = calc_dev_sboffset(dev, rdev->mddev, 1);
+ if (rdev->sb_offset != sb_offset) {
+ printk("%s's sb offset has changed from %d to %d, skipping\n", partition_name(dev), rdev->sb_offset, sb_offset);
+ goto skip;
+ }
+ /*
+ * If the disk went offline meanwhile and it's just a spare, then
+ * its size has changed to zero silently, and the MD code does
+ * not yet know that it's faulty.
+ */
+ size = calc_dev_size(dev, rdev->mddev, 1);
+ if (size != rdev->size) {
+ printk("%s's size has changed from %d to %d since import, skipping\n", partition_name(dev), rdev->size, size);
+ goto skip;
+ }
+
+ printk("(write) %s's sb offset: %d\n", partition_name(dev), sb_offset);
+ fsync_dev(dev);
+ set_blocksize(dev, MD_SB_BYTES);
+ bh = getblk(dev, sb_offset / MD_SB_BLOCKS, MD_SB_BYTES);
+ if (!bh) {
+ printk(GETBLK_FAILED, partition_name(dev));
+ return 1;
+ }
+ memset(bh->b_data,0,bh->b_size);
+ sb = (mdp_super_t *) bh->b_data;
+ memcpy(sb, rdev->sb, MD_SB_BYTES);
+
+ mark_buffer_uptodate(bh, 1);
+ mark_buffer_dirty(bh, 1);
+ ll_rw_block(WRITE, 1, &bh);
+ wait_on_buffer(bh);
+ brelse(bh);
+ fsync_dev(dev);
+skip:
+ return 0;
+}
+#undef GETBLK_FAILED
+
+static void set_this_disk(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ int i, ok = 0;
+ mdp_disk_t *desc;
+
+ for (i = 0; i < MD_SB_DISKS; i++) {
+ desc = mddev->sb->disks + i;
+#if 0
+ if (disk_faulty(desc)) {
+ if (MKDEV(desc->major,desc->minor) == rdev->dev)
+ ok = 1;
+ continue;
+ }
+#endif
+ if (MKDEV(desc->major,desc->minor) == rdev->dev) {
+ rdev->sb->this_disk = *desc;
+ rdev->desc_nr = desc->number;
+ ok = 1;
+ break;
+ }
+ }
+
+ if (!ok) {
+ MD_BUG();
+ }
+}
+
+static int sync_sbs(mddev_t * mddev)
+{
+ mdk_rdev_t *rdev;
+ mdp_super_t *sb;
+ struct md_list_head *tmp;
+
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ if (rdev->faulty)
+ continue;
+ sb = rdev->sb;
+ *sb = *mddev->sb;
+ set_this_disk(mddev, rdev);
+ sb->sb_csum = calc_sb_csum(sb);
+ }
+ return 0;
+}
+
+int md_update_sb(mddev_t * mddev)
+{
+ int first, err, count = 100;
+ struct md_list_head *tmp;
+ mdk_rdev_t *rdev;
+ __u64 ev;
+
+repeat:
+ mddev->sb->utime = CURRENT_TIME;
+ ev = get_unaligned(&mddev->sb->events);
+ ++ev;
+ put_unaligned(ev,&mddev->sb->events);
+ if (ev == (__u64)0) {
+ /*
+ * oops, this 64-bit counter should never wrap.
+ * Either we are in around ~1 trillion A.C., assuming
+ * 1 reboot per second, or we have a bug:
+ */
+ MD_BUG();
+ --ev;
+ put_unaligned(ev,&mddev->sb->events);
+ }
+ sync_sbs(mddev);
+
+ /*
+ * do not write anything to disk if using
+ * nonpersistent superblocks
+ */
+ if (mddev->sb->not_persistent)
+ return 0;
+
+ printk(KERN_INFO "md: updating md%d RAID superblock on device\n",
+ mdidx(mddev));
+
+ first = 1;
+ err = 0;
+ ITERATE_RDEV(mddev,rdev,tmp) {
+		if (!first)
+			printk(", ");
+		else
+			first = 0;
+ if (rdev->faulty)
+ printk("(skipping faulty ");
+ printk("%s ", partition_name(rdev->dev));
+ if (!rdev->faulty) {
+ printk("[events: %08lx]",
+ (unsigned long)get_unaligned(&rdev->sb->events));
+ err += write_disk_sb(rdev);
+ } else
+ printk(")\n");
+ }
+ printk(".\n");
+ if (err) {
+		printk("errors occurred during superblock update, repeating\n");
+		if (--count)
+			goto repeat;
+		printk("excessive errors occurred during superblock update, exiting\n");
+ }
+ return 0;
+}
+
+/*
+ * Import a device. If 'on_disk', then sanity check the superblock
+ *
+ * mark the device faulty if:
+ *
+ * - the device is nonexistent (zero size)
+ * - the device has no valid superblock
+ *
+ * a faulty rdev _never_ has rdev->sb set.
+ *
+ * Returns 0 on success; -EEXIST if already imported, -ENOMEM,
+ * -EBUSY if the device has active inodes, or -EINVAL for a
+ * zero-size device or a bad/unreadable superblock.
+ */
+static int md_import_device (kdev_t newdev, int on_disk)
+{
+	int err;
+	mdk_rdev_t *rdev;
+	unsigned int size;
+
+	/* Refuse to import the same device twice. */
+	if (find_rdev_all(newdev))
+		return -EEXIST;
+
+	rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
+	if (!rdev) {
+		printk("could not alloc mem for %s!\n", partition_name(newdev));
+		return -ENOMEM;
+	}
+	memset(rdev, 0, sizeof(*rdev));
+
+	/* The device must not be in use by a mounted filesystem. */
+	if (!fs_may_mount(newdev)) {
+		printk("md: can not import %s, has active inodes!\n",
+			partition_name(newdev));
+		err = -EBUSY;
+		goto abort_free;
+	}
+
+	if ((err = alloc_disk_sb(rdev)))
+		goto abort_free;
+
+	rdev->dev = newdev;
+	if (lock_rdev(rdev)) {
+		printk("md: could not lock %s, zero-size? Marking faulty.\n",
+			partition_name(newdev));
+		err = -EINVAL;
+		goto abort_free;
+	}
+	rdev->desc_nr = -1;
+	rdev->faulty = 0;
+
+	/* Zero-size devices cannot hold data, let alone a superblock. */
+	size = 0;
+	if (blk_size[MAJOR(newdev)])
+		size = blk_size[MAJOR(newdev)][MINOR(newdev)];
+	if (!size) {
+		printk("md: %s has zero size, marking faulty!\n",
+			partition_name(newdev));
+		err = -EINVAL;
+		goto abort_free;
+	}
+
+	/* For autostarted/persistent arrays, read and validate the sb now. */
+	if (on_disk) {
+		if ((err = read_disk_sb(rdev))) {
+			printk("md: could not read %s's sb, not importing!\n",
+				partition_name(newdev));
+			goto abort_free;
+		}
+		if ((err = check_disk_sb(rdev))) {
+			printk("md: %s has invalid sb, not importing!\n",
+				partition_name(newdev));
+			goto abort_free;
+		}
+
+		/* Remember the device the sb says it was on, to detect renames. */
+		rdev->old_dev = MKDEV(rdev->sb->this_disk.major,
+					rdev->sb->this_disk.minor);
+		rdev->desc_nr = rdev->sb->this_disk.number;
+	}
+	md_list_add(&rdev->all, &all_raid_disks);
+	MD_INIT_LIST_HEAD(&rdev->pending);
+
+	/*
+	 * NOTE(review): rdev->faulty is cleared above and never set in this
+	 * function, so this branch looks unreachable here — confirm.
+	 */
+	if (rdev->faulty && rdev->sb)
+		free_disk_sb(rdev);
+	return 0;
+
+abort_free:
+	/* If the sb page was allocated, release it (and the lock, if taken). */
+	if (rdev->sb) {
+		if (rdev->inode)
+			unlock_rdev(rdev);
+		free_disk_sb(rdev);
+	}
+	kfree(rdev);
+	return err;
+}
+
+/*
+ * Check a full RAID array for plausibility
+ */
+
+#define INCONSISTENT KERN_ERR \
+"md: fatal superblock inconsistency in %s -- removing from array\n"
+
+#define OUT_OF_DATE KERN_ERR \
+"md: superblock update time inconsistency -- using the most recent one\n"
+
+#define OLD_VERSION KERN_ALERT \
+"md: md%d: unsupported raid array version %d.%d.%d\n"
+
+#define NOT_CLEAN_IGNORE KERN_ERR \
+"md: md%d: raid array is not clean -- starting background reconstruction\n"
+
+#define UNKNOWN_LEVEL KERN_ERR \
+"md: md%d: unsupported raid level %d\n"
+
+/*
+ * Validate the per-device superblocks of an assembled array, pick the
+ * freshest one as the array superblock, and kick out stale, faulty or
+ * unavailable members.  Returns 0 if the array is plausible, 1 on a
+ * fatal inconsistency (caller aborts the array start).
+ */
+static int analyze_sbs (mddev_t * mddev)
+{
+	int out_of_date = 0, i;
+	struct md_list_head *tmp, *tmp2;
+	mdk_rdev_t *rdev, *rdev2, *freshest;
+	mdp_super_t *sb;
+
+	/*
+	 * Verify the RAID superblock on each real device
+	 */
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		if (rdev->faulty) {
+			MD_BUG();
+			goto abort;
+		}
+		if (!rdev->sb) {
+			MD_BUG();
+			goto abort;
+		}
+		if (check_disk_sb(rdev))
+			goto abort;
+	}
+
+	/*
+	 * The superblock constant part has to be the same
+	 * for all disks in the array.
+	 */
+	sb = NULL;
+
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		if (!sb) {
+			/* first device seen becomes the reference sb */
+			sb = rdev->sb;
+			continue;
+		}
+		if (!sb_equal(sb, rdev->sb)) {
+			printk (INCONSISTENT, partition_name(rdev->dev));
+			kick_rdev_from_array(rdev);
+			continue;
+		}
+	}
+
+	/*
+	 * OK, we have all disks and the array is ready to run. Let's
+	 * find the freshest superblock, that one will be the superblock
+	 * that represents the whole array.
+	 */
+	if (!mddev->sb)
+		if (alloc_array_sb(mddev))
+			goto abort;
+	sb = mddev->sb;
+	freshest = NULL;
+
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		__u64 ev1, ev2;
+		/*
+		 * if the checksum is invalid, use the superblock
+		 * only as a last resort. (decrease its age by
+		 * one event)
+		 */
+		if (calc_sb_csum(rdev->sb) != rdev->sb->sb_csum) {
+			__u64 ev = get_unaligned(&rdev->sb->events);
+			if (ev != (__u64)0) {
+				--ev;
+				put_unaligned(ev,&rdev->sb->events);
+			}
+		}
+
+		printk("%s's event counter: %08lx\n", partition_name(rdev->dev),
+			(unsigned long)get_unaligned(&rdev->sb->events));
+		if (!freshest) {
+			freshest = rdev;
+			continue;
+		}
+		/*
+		 * Find the newest superblock version
+		 */
+		ev1 = get_unaligned(&rdev->sb->events);
+		ev2 = get_unaligned(&freshest->sb->events);
+		if (ev1 != ev2) {
+			out_of_date = 1;
+			if (ev1 > ev2)
+				freshest = rdev;
+		}
+	}
+	if (out_of_date) {
+		printk(OUT_OF_DATE);
+		printk("freshest: %s\n", partition_name(freshest->dev));
+	}
+	/* the freshest sb becomes the array-wide superblock */
+	memcpy (sb, freshest->sb, sizeof(*sb));
+
+	/*
+	 * at this point we have picked the 'best' superblock
+	 * from all available superblocks.
+	 * now we validate this superblock and kick out possibly
+	 * failed disks.
+	 */
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		/*
+		 * Kick all non-fresh devices faulty
+		 */
+		__u64 ev1, ev2;
+		ev1 = get_unaligned(&rdev->sb->events);
+		ev2 = get_unaligned(&sb->events);
+		/* a device one event behind is still considered fresh */
+		++ev1;
+		if (ev1 < ev2) {
+			printk("md: kicking non-fresh %s from array!\n",
+				partition_name(rdev->dev));
+			kick_rdev_from_array(rdev);
+			continue;
+		}
+	}
+
+	/*
+	 * Fix up changed device names ... but only if this disk has a
+	 * recent update time. Use faulty checksum ones too.
+	 */
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		__u64 ev1, ev2, ev3;
+		if (rdev->faulty) { /* REMOVEME */
+			MD_BUG();
+			goto abort;
+		}
+		ev1 = get_unaligned(&rdev->sb->events);
+		ev2 = get_unaligned(&sb->events);
+		ev3 = ev2;
+		--ev3;
+		if ((rdev->dev != rdev->old_dev) &&
+			((ev1 == ev2) || (ev1 == ev3))) {
+			mdp_disk_t *desc;
+
+			printk("md: device name has changed from %s to %s since last import!\n", partition_name(rdev->old_dev), partition_name(rdev->dev));
+			if (rdev->desc_nr == -1) {
+				MD_BUG();
+				goto abort;
+			}
+			/* update both the array sb and the device's own sb */
+			desc = &sb->disks[rdev->desc_nr];
+			if (rdev->old_dev != MKDEV(desc->major, desc->minor)) {
+				MD_BUG();
+				goto abort;
+			}
+			desc->major = MAJOR(rdev->dev);
+			desc->minor = MINOR(rdev->dev);
+			desc = &rdev->sb->this_disk;
+			desc->major = MAJOR(rdev->dev);
+			desc->minor = MINOR(rdev->dev);
+		}
+	}
+
+	/*
+	 * Remove unavailable and faulty devices ...
+	 *
+	 * note that if an array becomes completely unrunnable due to
+	 * missing devices, we do not write the superblock back, so the
+	 * administrator has a chance to fix things up. The removal thus
+	 * only happens if it's nonfatal to the contents of the array.
+	 */
+	for (i = 0; i < MD_SB_DISKS; i++) {
+		int found;
+		mdp_disk_t *desc;
+		kdev_t dev;
+
+		desc = sb->disks + i;
+		dev = MKDEV(desc->major, desc->minor);
+
+		/*
+		 * We kick faulty devices/descriptors immediately.
+		 */
+		if (disk_faulty(desc)) {
+			found = 0;
+			ITERATE_RDEV(mddev,rdev,tmp) {
+				if (rdev->desc_nr != desc->number)
+					continue;
+				printk("md%d: kicking faulty %s!\n",
+					mdidx(mddev),partition_name(rdev->dev));
+				kick_rdev_from_array(rdev);
+				found = 1;
+				break;
+			}
+			if (!found) {
+				if (dev == MKDEV(0,0))
+					continue;
+				printk("md%d: removing former faulty %s!\n",
+					mdidx(mddev), partition_name(dev));
+			}
+			remove_descriptor(desc, sb);
+			continue;
+		}
+
+		if (dev == MKDEV(0,0))
+			continue;
+		/*
+		 * Is this device present in the rdev ring?
+		 */
+		found = 0;
+		ITERATE_RDEV(mddev,rdev,tmp) {
+			if (rdev->desc_nr == desc->number) {
+				found = 1;
+				break;
+			}
+		}
+		if (found)
+			continue;
+
+		printk("md%d: former device %s is unavailable, removing from array!\n", mdidx(mddev), partition_name(dev));
+		remove_descriptor(desc, sb);
+	}
+
+	/*
+	 * Double check whether all devices mentioned in the
+	 * superblock are in the rdev ring.
+	 */
+	for (i = 0; i < MD_SB_DISKS; i++) {
+		mdp_disk_t *desc;
+		kdev_t dev;
+
+		desc = sb->disks + i;
+		dev = MKDEV(desc->major, desc->minor);
+
+		if (dev == MKDEV(0,0))
+			continue;
+
+		if (disk_faulty(desc)) {
+			MD_BUG();
+			goto abort;
+		}
+
+		rdev = find_rdev(mddev, dev);
+		if (!rdev) {
+			MD_BUG();
+			goto abort;
+		}
+	}
+
+	/*
+	 * Do a final reality check.
+	 */
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		if (rdev->desc_nr == -1) {
+			MD_BUG();
+			goto abort;
+		}
+		/*
+		 * is the desc_nr unique?
+		 */
+		ITERATE_RDEV(mddev,rdev2,tmp2) {
+			if ((rdev2 != rdev) &&
+					(rdev2->desc_nr == rdev->desc_nr)) {
+				MD_BUG();
+				goto abort;
+			}
+		}
+		/*
+		 * is the device unique?
+		 */
+		ITERATE_RDEV(mddev,rdev2,tmp2) {
+			if ((rdev2 != rdev) &&
+					(rdev2->dev == rdev->dev)) {
+				MD_BUG();
+				goto abort;
+			}
+		}
+	}
+
+	/*
+	 * Check if we can support this RAID array
+	 */
+	if (sb->major_version != MD_MAJOR_VERSION ||
+			sb->minor_version > MD_MINOR_VERSION) {
+
+		printk (OLD_VERSION, mdidx(mddev), sb->major_version,
+				sb->minor_version, sb->patch_version);
+		goto abort;
+	}
+
+	if ((sb->state != (1 << MD_SB_CLEAN)) && ((sb->level == 1) ||
+			(sb->level == 4) || (sb->level == 5)))
+		printk (NOT_CLEAN_IGNORE, mdidx(mddev));
+
+	return 0;
+abort:
+	return 1;
+}
+
+/*
+ * NOTE(review): OLD_LEVEL is undefined here but was never defined above;
+ * UNKNOWN_LEVEL and NOT_CLEAN_IGNORE stay defined on purpose — UNKNOWN_LEVEL
+ * is still used by device_size_calculation() below.
+ */
+#undef INCONSISTENT
+#undef OUT_OF_DATE
+#undef OLD_VERSION
+#undef OLD_LEVEL
+
+/*
+ * Compute per-device usable sizes, the total array size and the
+ * readahead window for an array about to be started.
+ * Returns 0 on success, -EINVAL for a too-small member device,
+ * 1 for an unknown raid level.
+ */
+static int device_size_calculation (mddev_t * mddev)
+{
+	int data_disks = 0, persistent;
+	unsigned int readahead;
+	mdp_super_t *sb = mddev->sb;
+	struct md_list_head *tmp;
+	mdk_rdev_t *rdev;
+
+	/*
+	 * Do device size calculation. Bail out if too small.
+	 * (we have to do this after having validated chunk_size,
+	 * because device size has to be modulo chunk_size)
+	 */
+	persistent = !mddev->sb->not_persistent;
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		if (rdev->faulty)
+			continue;
+		if (rdev->size) {
+			/* size should not have been set yet at this point */
+			MD_BUG();
+			continue;
+		}
+		rdev->size = calc_dev_size(rdev->dev, mddev, persistent);
+		if (rdev->size < sb->chunk_size / 1024) {
+			printk (KERN_WARNING
+				"Dev %s smaller than chunk_size: %dk < %dk\n",
+				partition_name(rdev->dev),
+				rdev->size, sb->chunk_size / 1024);
+			return -EINVAL;
+		}
+	}
+
+	/* number of disks that carry data, per raid level */
+	switch (sb->level) {
+		case -3:
+			data_disks = 1;
+			break;
+		case -2:
+			data_disks = 1;
+			break;
+		case -1:
+			zoned_raid_size(mddev);
+			data_disks = 1;
+			break;
+		case 0:
+			zoned_raid_size(mddev);
+			data_disks = sb->raid_disks;
+			break;
+		case 1:
+			data_disks = 1;
+			break;
+		case 4:
+		case 5:
+			data_disks = sb->raid_disks-1;
+			break;
+		default:
+			printk (UNKNOWN_LEVEL, mdidx(mddev), sb->level);
+			goto abort;
+	}
+	if (!md_size[mdidx(mddev)])
+		md_size[mdidx(mddev)] = sb->size * data_disks;
+
+	readahead = MD_READAHEAD;
+	if ((sb->level == 0) || (sb->level == 4) || (sb->level == 5))
+		readahead = mddev->sb->chunk_size * 4 * data_disks;
+	/*
+	 * NOTE(review): if data_disks were 0 (raid4/5 with raid_disks == 1)
+	 * the final printk divides by zero — presumably callers never allow
+	 * that configuration; confirm.
+	 */
+	if (readahead < data_disks * MAX_SECTORS*512*2)
+		readahead = data_disks * MAX_SECTORS*512*2;
+	else {
+		if (sb->level == -3)
+			readahead = 0;
+	}
+	md_maxreadahead[mdidx(mddev)] = readahead;
+
+	printk(KERN_INFO "md%d: max total readahead window set to %dk\n",
+		mdidx(mddev), readahead/1024);
+
+	printk(KERN_INFO
+		"md%d: %d data-disks, max readahead per data-disk: %dk\n",
+			mdidx(mddev), data_disks, readahead/data_disks/1024);
+	return 0;
+abort:
+	return 1;
+}
+
+
+#define TOO_BIG_CHUNKSIZE KERN_ERR \
+"too big chunk_size: %d > %d\n"
+
+#define TOO_SMALL_CHUNKSIZE KERN_ERR \
+"too small chunk_size: %d < %ld\n"
+
+#define BAD_CHUNKSIZE KERN_ERR \
+"no chunksize specified, see 'man raidtab'\n"
+
+/*
+ * Start an assembled array: validate the superblocks and chunk size,
+ * load the personality module if needed, hand the array to the
+ * personality's run() method and publish the device size.
+ * Returns 0 on success, -EBUSY if already running, -EINVAL otherwise.
+ */
+static int do_md_run (mddev_t * mddev)
+{
+	int pnum, err;
+	int chunk_size;
+	struct md_list_head *tmp;
+	mdk_rdev_t *rdev;
+
+
+	if (!mddev->nb_dev) {
+		MD_BUG();
+		return -EINVAL;
+	}
+
+	if (mddev->pers)
+		return -EBUSY;
+
+	/*
+	 * Resize disks to align partitions size on a given
+	 * chunk size.
+	 */
+	md_size[mdidx(mddev)] = 0;
+
+	/*
+	 * Analyze all RAID superblock(s)
+	 */
+	if (analyze_sbs(mddev)) {
+		MD_BUG();
+		return -EINVAL;
+	}
+
+	chunk_size = mddev->sb->chunk_size;
+	pnum = level_to_pers(mddev->sb->level);
+
+	mddev->param.chunk_size = chunk_size;
+	mddev->param.personality = pnum;
+
+	if (chunk_size > MAX_CHUNK_SIZE) {
+		printk(TOO_BIG_CHUNKSIZE, chunk_size, MAX_CHUNK_SIZE);
+		return -EINVAL;
+	}
+	/*
+	 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
+	 */
+	if ( (1 << ffz(~chunk_size)) != chunk_size) {
+		MD_BUG();
+		return -EINVAL;
+	}
+	if (chunk_size < PAGE_SIZE) {
+		printk(TOO_SMALL_CHUNKSIZE, chunk_size, PAGE_SIZE);
+		return -EINVAL;
+	}
+
+	if (pnum >= MAX_PERSONALITY) {
+		MD_BUG();
+		return -EINVAL;
+	}
+
+	if ((pnum != RAID1) && (pnum != LINEAR) && !chunk_size) {
+		/*
+		 * 'default chunksize' in the old md code used to
+		 * be PAGE_SIZE, baaad.
+		 * we abort here to be on the safe side. We dont
+		 * want to continue the bad practice.
+		 */
+		printk(BAD_CHUNKSIZE);
+		return -EINVAL;
+	}
+
+	/* auto-load the personality module if it is not built in */
+	if (!pers[pnum])
+	{
+#ifdef CONFIG_KMOD
+		char module_name[80];
+		sprintf (module_name, "md-personality-%d", pnum);
+		request_module (module_name);
+		if (!pers[pnum])
+#endif
+			return -EINVAL;
+	}
+
+	if (device_size_calculation(mddev))
+		return -EINVAL;
+
+	/*
+	 * Drop all container device buffers, from now on
+	 * the only valid external interface is through the md
+	 * device.
+	 */
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		if (rdev->faulty)
+			continue;
+		fsync_dev(rdev->dev);
+		invalidate_buffers(rdev->dev);
+	}
+
+	mddev->pers = pers[pnum];
+
+	err = mddev->pers->run(mddev);
+	if (err) {
+		printk("pers->run() failed ...\n");
+		mddev->pers = NULL;
+		return -EINVAL;
+	}
+
+	/* array is live now: mark it dirty and write the sb back */
+	mddev->sb->state &= ~(1 << MD_SB_CLEAN);
+	md_update_sb(mddev);
+
+	/*
+	 * md_size has units of 1K blocks, which are
+	 * twice as large as sectors.
+	 */
+	md_hd_struct[mdidx(mddev)].start_sect = 0;
+	md_hd_struct[mdidx(mddev)].nr_sects = md_size[mdidx(mddev)] << 1;
+
+	read_ahead[MD_MAJOR] = 1024;
+	return (0);
+}
+
+/* NOTE(review): TOO_SMALL_CHUNKSIZE is not undefined here — confirm intent. */
+#undef TOO_BIG_CHUNKSIZE
+#undef BAD_CHUNKSIZE
+
+#define OUT(x) do { err = (x); goto out; } while (0)
+
+/*
+ * Switch a running, read-only array back to read-write mode and
+ * kick off any pending recovery/resync.
+ * Returns 0 on success, -ENXIO if the array has no devices,
+ * -EINVAL if it is not running, -EBUSY if it is already read-write.
+ */
+static int restart_array (mddev_t *mddev)
+{
+	/*
+	 * Complain if it has no devices
+	 */
+	if (!mddev->nb_dev)
+		return -ENXIO;
+	if (!mddev->pers)
+		return -EINVAL;
+	if (!mddev->ro)
+		return -EBUSY;
+
+	mddev->ro = 0;
+	set_device_ro(mddev_to_kdev(mddev), 0);
+
+	printk (KERN_INFO
+		"md%d switched to read-write mode.\n", mdidx(mddev));
+	/*
+	 * Kick recovery or resync if necessary
+	 */
+	md_recover_arrays();
+	if (mddev->pers->restart_resync)
+		mddev->pers->restart_resync(mddev);
+
+	return 0;
+}
+
+#define STILL_MOUNTED KERN_WARNING \
+"md: md%d still mounted.\n"
+
+/*
+ * Stop an array, either completely (ro == 0: personality torn down and
+ * all resources freed) or by switching it to read-only (ro != 0).
+ * Returns 0 on success, -EBUSY if mounted or unstoppable, -ENXIO if
+ * already stopped / already read-only.
+ */
+static int do_md_stop (mddev_t * mddev, int ro)
+{
+	int err = 0, resync_interrupted = 0;
+	kdev_t dev = mddev_to_kdev(mddev);
+
+	/* a full stop is refused while a filesystem is mounted on it */
+	if (!ro && !fs_may_mount (dev)) {
+		printk (STILL_MOUNTED, mdidx(mddev));
+		OUT(-EBUSY);
+	}
+
+	/*
+	 * complain if it's already stopped
+	 */
+	if (!mddev->nb_dev)
+		OUT(-ENXIO);
+
+	if (mddev->pers) {
+		/*
+		 * It is safe to call stop here, it only frees private
+		 * data. Also, it tells us if a device is unstoppable
+		 * (eg. resyncing is in progress)
+		 */
+		if (mddev->pers->stop_resync)
+			if (mddev->pers->stop_resync(mddev))
+				resync_interrupted = 1;
+
+		if (mddev->recovery_running)
+			md_interrupt_thread(md_recovery_thread);
+
+		/*
+		 * This synchronizes with signal delivery to the
+		 * resync or reconstruction thread. It also nicely
+		 * hangs the process if some reconstruction has not
+		 * finished.
+		 */
+		down(&mddev->recovery_sem);
+		up(&mddev->recovery_sem);
+
+		/*
+		 * sync and invalidate buffers because we cannot kill the
+		 * main thread with valid IO transfers still around.
+		 * the kernel lock protects us from new requests being
+		 * added after invalidate_buffers().
+		 */
+		fsync_dev (mddev_to_kdev(mddev));
+		fsync_dev (dev);
+		invalidate_buffers (dev);
+
+		if (ro) {
+			if (mddev->ro)
+				OUT(-ENXIO);
+			mddev->ro = 1;
+		} else {
+			/* temporarily lift ro so the personality can stop */
+			if (mddev->ro)
+				set_device_ro(dev, 0);
+			if (mddev->pers->stop(mddev)) {
+				if (mddev->ro)
+					set_device_ro(dev, 1);
+				OUT(-EBUSY);
+			}
+			if (mddev->ro)
+				mddev->ro = 0;
+		}
+		if (mddev->sb) {
+			/*
+			 * mark it clean only if there was no resync
+			 * interrupted.
+			 */
+			if (!mddev->recovery_running && !resync_interrupted) {
+				printk("marking sb clean...\n");
+				mddev->sb->state |= 1 << MD_SB_CLEAN;
+			}
+			md_update_sb(mddev);
+		}
+		if (ro)
+			set_device_ro(dev, 1);
+	}
+
+	/*
+	 * Free resources if final stop
+	 */
+	if (!ro) {
+		export_array(mddev);
+		md_size[mdidx(mddev)] = 0;
+		md_hd_struct[mdidx(mddev)].nr_sects = 0;
+		free_mddev(mddev);
+
+		printk (KERN_INFO "md%d stopped.\n", mdidx(mddev));
+	} else
+		printk (KERN_INFO
+			"md%d switched to read-only mode.\n", mdidx(mddev));
+out:
+	return err;
+}
+
+#undef OUT
+
+/*
+ * We have to safely support old arrays too.
+ *
+ * Returns 0 when the superblock version is understood by this driver
+ * (any major version > 0, or 0.x with x >= 90), -EINVAL otherwise.
+ */
+int detect_old_array (mdp_super_t *sb)
+{
+	if ((sb->major_version > 0) || (sb->minor_version >= 90))
+		return 0;
+
+	return -EINVAL;
+}
+
+
+/*
+ * Try to start an assembled array; on failure the array is stopped
+ * again without writing the (unrunnable) superblock back.
+ */
+static void autorun_array (mddev_t *mddev)
+{
+	mdk_rdev_t *rdev;
+	struct md_list_head *tmp;
+	int err;
+
+	/* an empty disk list here means the caller messed up */
+	if (mddev->disks.prev == &mddev->disks) {
+		MD_BUG();
+		return;
+	}
+
+	printk("running: ");
+
+	ITERATE_RDEV(mddev,rdev,tmp) {
+		printk("<%s>", partition_name(rdev->dev));
+	}
+	printk("\nnow!\n");
+
+	err = do_md_run (mddev);
+	if (err) {
+		printk("do_md_run() returned %d\n", err);
+		/*
+		 * prevent the writeback of an unrunnable array
+		 */
+		mddev->sb_dirty = 0;
+		do_md_stop (mddev, 0);
+	}
+}
+
+/*
+ * lets try to run arrays based on all disks that have arrived
+ * until now. (those are in the ->pending list)
+ *
+ * the method: pick the first pending disk, collect all disks with
+ * the same UUID, remove all from the pending list and put them into
+ * the 'same_array' list. Then order this list based on superblock
+ * update time (freshest comes first), kick out 'old' disks and
+ * compare superblocks. If everything's fine then run it.
+ */
+static void autorun_devices (void)
+{
+	struct md_list_head candidates;
+	struct md_list_head *tmp;
+	mdk_rdev_t *rdev0, *rdev;
+	mddev_t *mddev;
+	kdev_t md_kdev;
+
+
+	printk("autorun ...\n");
+	while (pending_raid_disks.next != &pending_raid_disks) {
+		rdev0 = md_list_entry(pending_raid_disks.next,
+					 mdk_rdev_t, pending);
+
+		printk("considering %s ...\n", partition_name(rdev0->dev));
+		MD_INIT_LIST_HEAD(&candidates);
+		/* collect all pending disks that share rdev0's UUID */
+		ITERATE_RDEV_PENDING(rdev,tmp) {
+			if (uuid_equal(rdev0, rdev)) {
+				if (!sb_equal(rdev0->sb, rdev->sb)) {
+					printk("%s has same UUID as %s, but superblocks differ ...\n", partition_name(rdev->dev), partition_name(rdev0->dev));
+					continue;
+				}
+				printk("  adding %s ...\n", partition_name(rdev->dev));
+				md_list_del(&rdev->pending);
+				md_list_add(&rdev->pending, &candidates);
+			}
+		}
+		/*
+		 * now we have a set of devices, with all of them having
+		 * mostly sane superblocks. It's time to allocate the
+		 * mddev.
+		 */
+		md_kdev = MKDEV(MD_MAJOR, rdev0->sb->md_minor);
+		mddev = kdev_to_mddev(md_kdev);
+		if (mddev) {
+			printk("md%d already running, cannot run %s\n",
+				 mdidx(mddev), partition_name(rdev0->dev));
+			ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp)
+				export_rdev(rdev);
+			continue;
+		}
+		mddev = alloc_mddev(md_kdev);
+		/*
+		 * FIX: alloc_mddev() can fail; the old code dereferenced
+		 * the result unconditionally. Drop the candidates and
+		 * carry on with the remaining pending disks instead.
+		 */
+		if (!mddev) {
+			printk("md: cannot allocate md%d, skipping\n",
+				rdev0->sb->md_minor);
+			ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp)
+				export_rdev(rdev);
+			continue;
+		}
+		printk("created md%d\n", mdidx(mddev));
+		/* move the candidates into the new array and start it */
+		ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) {
+			bind_rdev_to_array(rdev, mddev);
+			md_list_del(&rdev->pending);
+			MD_INIT_LIST_HEAD(&rdev->pending);
+		}
+		autorun_array(mddev);
+	}
+	printk("... autorun DONE.\n");
+}
+
+/*
+ * import RAID devices based on one partition
+ * if possible, the array gets run as well.
+ */
+
+#define BAD_VERSION KERN_ERR \
+"md: %s has RAID superblock version 0.%d, autodetect needs v0.90 or higher\n"
+
+#define OUT_OF_MEM KERN_ALERT \
+"md: out of memory.\n"
+
+#define NO_DEVICE KERN_ERR \
+"md: disabled device %s\n"
+
+#define AUTOADD_FAILED KERN_ERR \
+"md: auto-adding devices to md%d FAILED (error %d).\n"
+
+#define AUTOADD_FAILED_USED KERN_ERR \
+"md: cannot auto-add device %s to md%d, already used.\n"
+
+#define AUTORUN_FAILED KERN_ERR \
+"md: auto-running md%d FAILED (error %d).\n"
+
+#define MDDEV_BUSY KERN_ERR \
+"md: cannot auto-add to md%d, already running.\n"
+
+#define AUTOADDING KERN_INFO \
+"md: auto-adding devices to md%d, based on %s's superblock.\n"
+
+#define AUTORUNNING KERN_INFO \
+"md: auto-running md%d.\n"
+
+/*
+ * Start an array from a single member device: import it, then import
+ * every other device its superblock mentions, queue them all on the
+ * pending list and let autorun_devices() assemble and run the array.
+ * Returns 0 on success, -EINVAL (or an import error) on failure.
+ */
+static int autostart_array (kdev_t startdev)
+{
+	int err = -EINVAL, i;
+	mdp_super_t *sb = NULL;
+	mdk_rdev_t *start_rdev = NULL, *rdev;
+
+	if (md_import_device(startdev, 1)) {
+		printk("could not import %s!\n", partition_name(startdev));
+		goto abort;
+	}
+
+	start_rdev = find_rdev_all(startdev);
+	if (!start_rdev) {
+		MD_BUG();
+		goto abort;
+	}
+	if (start_rdev->faulty) {
+		printk("can not autostart based on faulty %s!\n",
+						partition_name(startdev));
+		goto abort;
+	}
+	md_list_add(&start_rdev->pending, &pending_raid_disks);
+
+	sb = start_rdev->sb;
+
+	err = detect_old_array(sb);
+	if (err) {
+		printk("array version is too old to be autostarted, use raidtools 0.90 mkraid --upgrade\nto upgrade the array without data loss!\n");
+		goto abort;
+	}
+
+	/* import every other member listed in the start device's sb */
+	for (i = 0; i < MD_SB_DISKS; i++) {
+		mdp_disk_t *desc;
+		kdev_t dev;
+
+		desc = sb->disks + i;
+		dev = MKDEV(desc->major, desc->minor);
+
+		if (dev == MKDEV(0,0))
+			continue;
+		if (dev == startdev)
+			continue;
+		if (md_import_device(dev, 1)) {
+			printk("could not import %s, trying to run array nevertheless.\n", partition_name(dev));
+			continue;
+		}
+		rdev = find_rdev_all(dev);
+		if (!rdev) {
+			MD_BUG();
+			goto abort;
+		}
+		md_list_add(&rdev->pending, &pending_raid_disks);
+	}
+
+	/*
+	 * possibly return codes
+	 */
+	autorun_devices();
+	return 0;
+
+abort:
+	if (start_rdev)
+		export_rdev(start_rdev);
+	return err;
+}
+
+#undef BAD_VERSION
+#undef OUT_OF_MEM
+#undef NO_DEVICE
+#undef AUTOADD_FAILED_USED
+#undef AUTOADD_FAILED
+#undef AUTORUN_FAILED
+#undef AUTOADDING
+#undef AUTORUNNING
+
+/* Boot-time "raid=" option state — presumably filled in by a __setup
+ * handler elsewhere in this file; only noautodetect is read below. */
+struct {
+	int set;
+	int noautodetect;
+
+} raid_setup_args md__initdata = { 0, 0 };
+
+/*
+ * Searches all registered partitions for autorun RAID arrays
+ * at boot time.
+ *
+ * Only compiled in with CONFIG_AUTODETECT_RAID; honours the
+ * "raid=noautodetect" boot option.
+ */
+void md__init autodetect_raid(void)
+{
+#ifdef CONFIG_AUTODETECT_RAID
+	struct gendisk *disk;
+	mdk_rdev_t *rdev;
+	int i;
+
+	if (raid_setup_args.noautodetect) {
+		printk(KERN_INFO "skipping autodetection of RAID arrays\n");
+		return;
+	}
+	printk(KERN_INFO "autodetecting RAID arrays\n");
+
+	/* scan every partition of every registered disk */
+	for (disk = gendisk_head ; disk ; disk = disk->next) {
+		for (i = 0; i < disk->max_p*disk->nr_real; i++) {
+			kdev_t dev = MKDEV(disk->major,i);
+
+			/* only partitions explicitly typed as RAID members */
+			if (disk->part[i].type != LINUX_RAID_PARTITION)
+				continue;
+
+			if (md_import_device(dev,1)) {
+				printk(KERN_ALERT "could not import %s!\n",
+					partition_name(dev));
+				continue;
+			}
+			/*
+			 * Sanity checks:
+			 */
+			rdev = find_rdev_all(dev);
+			if (!rdev) {
+				MD_BUG();
+				continue;
+			}
+			if (rdev->faulty) {
+				MD_BUG();
+				continue;
+			}
+			md_list_add(&rdev->pending, &pending_raid_disks);
+		}
+	}
+
+	autorun_devices();
+#endif
+}
+
+/*
+ * RAID_VERSION ioctl: copy the driver version triplet out to the
+ * user buffer at 'arg'.  Returns 0, or -EFAULT if the copy faults.
+ */
+static int get_version (void * arg)
+{
+	mdu_version_t ver;
+
+	ver.major = MD_MAJOR_VERSION;
+	ver.minor = MD_MINOR_VERSION;
+	ver.patchlevel = MD_PATCHLEVEL_VERSION;
+
+	return md_copy_to_user(arg, &ver, sizeof(ver)) ? -EFAULT : 0;
+}
+
+/* copy one field from the array superblock into the user-visible info */
+#define SET_FROM_SB(x) info.x = mddev->sb->x
+/*
+ * GET_ARRAY_INFO ioctl: fill an mdu_array_info_t from the array
+ * superblock and copy it to user space.
+ * Returns 0, -EINVAL if the array has no sb, -EFAULT on copy fault.
+ */
+static int get_array_info (mddev_t * mddev, void * arg)
+{
+	mdu_array_info_t info;
+
+	if (!mddev->sb)
+		return -EINVAL;
+
+	SET_FROM_SB(major_version);
+	SET_FROM_SB(minor_version);
+	SET_FROM_SB(patch_version);
+	SET_FROM_SB(ctime);
+	SET_FROM_SB(level);
+	SET_FROM_SB(size);
+	SET_FROM_SB(nr_disks);
+	SET_FROM_SB(raid_disks);
+	SET_FROM_SB(md_minor);
+	SET_FROM_SB(not_persistent);
+
+	SET_FROM_SB(utime);
+	SET_FROM_SB(state);
+	SET_FROM_SB(active_disks);
+	SET_FROM_SB(working_disks);
+	SET_FROM_SB(failed_disks);
+	SET_FROM_SB(spare_disks);
+
+	SET_FROM_SB(layout);
+	SET_FROM_SB(chunk_size);
+
+	if (md_copy_to_user(arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+#undef SET_FROM_SB
+
+/* copy one field of disk descriptor 'nr' into the user-visible info */
+#define SET_FROM_SB(x) info.x = mddev->sb->disks[nr].x
+/*
+ * GET_DISK_INFO ioctl: the user passes an mdu_disk_info_t with
+ * 'number' set; we fill in the rest from that disk's sb descriptor.
+ * Returns 0, -EINVAL for no sb / bad index, -EFAULT on copy fault.
+ */
+static int get_disk_info (mddev_t * mddev, void * arg)
+{
+	mdu_disk_info_t info;
+	unsigned int nr;
+
+	if (!mddev->sb)
+		return -EINVAL;
+
+	if (md_copy_from_user(&info, arg, sizeof(info)))
+		return -EFAULT;
+
+	nr = info.number;
+	if (nr >= mddev->sb->nr_disks)
+		return -EINVAL;
+
+	SET_FROM_SB(major);
+	SET_FROM_SB(minor);
+	SET_FROM_SB(raid_disk);
+	SET_FROM_SB(state);
+
+	if (md_copy_to_user(arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+#undef SET_FROM_SB
+
+/* copy one field from the user info into disk descriptor 'nr' */
+#define SET_SB(x) mddev->sb->disks[nr].x = info.x
+
+/*
+ * ADD_NEW_DISK ioctl: record a new disk descriptor in the array sb
+ * and, unless it is marked faulty, import the device and bind it to
+ * the array.  Returns 0, -EINVAL/-EFAULT/-EBUSY on failure.
+ */
+static int add_new_disk (mddev_t * mddev, void * arg)
+{
+	int err, size, persistent;
+	mdu_disk_info_t info;
+	mdk_rdev_t *rdev;
+	unsigned int nr;
+	kdev_t dev;
+
+	if (!mddev->sb)
+		return -EINVAL;
+
+	if (md_copy_from_user(&info, arg, sizeof(info)))
+		return -EFAULT;
+
+	nr = info.number;
+	if (nr >= mddev->sb->nr_disks)
+		return -EINVAL;
+
+	dev = MKDEV(info.major,info.minor);
+
+	if (find_rdev_all(dev)) {
+		printk("device %s already used in a RAID array!\n",
+				partition_name(dev));
+		return -EBUSY;
+	}
+
+	/*
+	 * NOTE(review): the descriptor is written before the import below
+	 * is known to succeed; a failed import leaves the sb modified.
+	 */
+	SET_SB(number);
+	SET_SB(major);
+	SET_SB(minor);
+	SET_SB(raid_disk);
+	SET_SB(state);
+
+	if ((info.state & (1<<MD_DISK_FAULTY))==0) {
+		err = md_import_device (dev, 0);
+		if (err) {
+			printk("md: error, md_import_device() returned %d\n", err);
+			return -EINVAL;
+		}
+		rdev = find_rdev_all(dev);
+		if (!rdev) {
+			MD_BUG();
+			return -EINVAL;
+		}
+
+		rdev->old_dev = dev;
+		rdev->desc_nr = info.number;
+
+		bind_rdev_to_array(rdev, mddev);
+
+		persistent = !mddev->sb->not_persistent;
+		if (!persistent)
+			printk("nonpersistent superblock ...\n");
+		if (!mddev->sb->chunk_size)
+			printk("no chunksize?\n");
+
+		size = calc_dev_size(dev, mddev, persistent);
+		rdev->sb_offset = calc_dev_sboffset(dev, mddev, persistent);
+
+		/* the array can never be bigger than its smallest member */
+		if (!mddev->sb->size || (mddev->sb->size > size))
+			mddev->sb->size = size;
+	}
+
+	/*
+	 * sync all other superblocks with the main superblock
+	 */
+	sync_sbs(mddev);
+
+	return 0;
+}
+#undef SET_SB
+
+/*
+ * HOT_REMOVE_DISK ioctl: remove an inactive member device from a
+ * running array via the personality's diskop() hook.
+ * Returns 0, -ENODEV/-ENXIO/-EINVAL on bad state, -EBUSY if active.
+ */
+static int hot_remove_disk (mddev_t * mddev, kdev_t dev)
+{
+	int err;
+	mdk_rdev_t *rdev;
+	mdp_disk_t *disk;
+
+	if (!mddev->pers)
+		return -ENODEV;
+
+	printk("trying to remove %s from md%d ... \n",
+		partition_name(dev), mdidx(mddev));
+
+	if (!mddev->pers->diskop) {
+		printk("md%d: personality does not support diskops!\n",
+								 mdidx(mddev));
+		return -EINVAL;
+	}
+
+	rdev = find_rdev(mddev, dev);
+	if (!rdev)
+		return -ENXIO;
+
+	if (rdev->desc_nr == -1) {
+		MD_BUG();
+		return -EINVAL;
+	}
+	disk = &mddev->sb->disks[rdev->desc_nr];
+	/* only inactive (spare/removed) disks may be hot-removed */
+	if (disk_active(disk))
+		goto busy;
+	if (disk_removed(disk)) {
+		MD_BUG();
+		return -EINVAL;
+	}
+
+	/* let the personality veto or perform the removal */
+	err = mddev->pers->diskop(mddev, &disk, DISKOP_HOT_REMOVE_DISK);
+	if (err == -EBUSY)
+		goto busy;
+	if (err) {
+		MD_BUG();
+		return -EINVAL;
+	}
+
+	remove_descriptor(disk, mddev->sb);
+	kick_rdev_from_array(rdev);
+	mddev->sb_dirty = 1;
+	md_update_sb(mddev);
+
+	return 0;
+busy:
+	printk("cannot remove active disk %s from md%d ... \n",
+		partition_name(dev), mdidx(mddev));
+	return -EBUSY;
+}
+
+/*
+ * HOT_ADD_DISK ioctl: import a device, bind it to a running array,
+ * find a free descriptor slot and register it as a spare via the
+ * personality's diskop() hook, then kick recovery.
+ * Returns 0 on success or a negative errno.
+ */
+static int hot_add_disk (mddev_t * mddev, kdev_t dev)
+{
+	int i, err, persistent;
+	unsigned int size;
+	mdk_rdev_t *rdev;
+	mdp_disk_t *disk;
+
+	if (!mddev->pers)
+		return -ENODEV;
+
+	printk("trying to hot-add %s to md%d ... \n",
+		partition_name(dev), mdidx(mddev));
+
+	if (!mddev->pers->diskop) {
+		printk("md%d: personality does not support diskops!\n",
+								 mdidx(mddev));
+		return -EINVAL;
+	}
+
+	persistent = !mddev->sb->not_persistent;
+	size = calc_dev_size(dev, mddev, persistent);
+
+	/* the new device must be at least as large as the array size */
+	if (size < mddev->sb->size) {
+		printk("md%d: disk size %d blocks < array size %d\n",
+				mdidx(mddev), size, mddev->sb->size);
+		return -ENOSPC;
+	}
+
+	rdev = find_rdev(mddev, dev);
+	if (rdev)
+		return -EBUSY;
+
+	err = md_import_device (dev, 0);
+	if (err) {
+		printk("md: error, md_import_device() returned %d\n", err);
+		return -EINVAL;
+	}
+	rdev = find_rdev_all(dev);
+	if (!rdev) {
+		MD_BUG();
+		return -EINVAL;
+	}
+	if (rdev->faulty) {
+		printk("md: can not hot-add faulty %s disk to md%d!\n",
+				partition_name(dev), mdidx(mddev));
+		err = -EINVAL;
+		goto abort_export;
+	}
+	bind_rdev_to_array(rdev, mddev);
+
+	/*
+	 * The rest should better be atomic, we can have disk failures
+	 * noticed in interrupt contexts ...
+	 */
+	rdev->old_dev = dev;
+	rdev->size = size;
+	rdev->sb_offset = calc_dev_sboffset(dev, mddev, persistent);
+
+	/* find a free (empty or removed) descriptor slot past the raid disks */
+	disk = mddev->sb->disks + mddev->sb->raid_disks;
+	for (i = mddev->sb->raid_disks; i < MD_SB_DISKS; i++) {
+		disk = mddev->sb->disks + i;
+
+		if (!disk->major && !disk->minor)
+			break;
+		if (disk_removed(disk))
+			break;
+	}
+	if (i == MD_SB_DISKS) {
+		printk("md%d: can not hot-add to full array!\n", mdidx(mddev));
+		err = -EBUSY;
+		goto abort_unbind_export;
+	}
+
+	if (disk_removed(disk)) {
+		/*
+		 * reuse slot
+		 */
+		if (disk->number != i) {
+			MD_BUG();
+			err = -EINVAL;
+			goto abort_unbind_export;
+		}
+	} else {
+		disk->number = i;
+	}
+
+	disk->raid_disk = disk->number;
+	disk->major = MAJOR(dev);
+	disk->minor = MINOR(dev);
+
+	if (mddev->pers->diskop(mddev, &disk, DISKOP_HOT_ADD_DISK)) {
+		MD_BUG();
+		err = -EINVAL;
+		goto abort_unbind_export;
+	}
+
+	/* account the new device as a spare and write the sb back */
+	mark_disk_spare(disk);
+	mddev->sb->nr_disks++;
+	mddev->sb->spare_disks++;
+	mddev->sb->working_disks++;
+
+	mddev->sb_dirty = 1;
+
+	md_update_sb(mddev);
+
+	/*
+	 * Kick recovery, maybe this spare has to be added to the
+	 * array immediately.
+	 */
+	md_recover_arrays();
+
+	return 0;
+
+abort_unbind_export:
+	unbind_rdev_from_array(rdev);
+
+abort_export:
+	export_rdev(rdev);
+	return err;
+}
+
+/* copy one field from the user info into the array superblock */
+#define SET_SB(x) mddev->sb->x = info.x
+/*
+ * SET_ARRAY_INFO ioctl: create a brand-new array superblock from a
+ * user-supplied mdu_array_info_t and give the array a random UUID.
+ * Returns 0, -EBUSY if an sb already exists, -EFAULT/-ENOMEM on error.
+ */
+static int set_array_info (mddev_t * mddev, void * arg)
+{
+	mdu_array_info_t info;
+
+	if (mddev->sb) {
+		printk("array md%d already has a superblock!\n",
+				mdidx(mddev));
+		return -EBUSY;
+	}
+
+	if (md_copy_from_user(&info, arg, sizeof(info)))
+		return -EFAULT;
+
+	if (alloc_array_sb(mddev))
+		return -ENOMEM;
+
+	/* version and creation time come from the running driver, not the user */
+	mddev->sb->major_version = MD_MAJOR_VERSION;
+	mddev->sb->minor_version = MD_MINOR_VERSION;
+	mddev->sb->patch_version = MD_PATCHLEVEL_VERSION;
+	mddev->sb->ctime = CURRENT_TIME;
+
+	SET_SB(level);
+	SET_SB(size);
+	SET_SB(nr_disks);
+	SET_SB(raid_disks);
+	SET_SB(md_minor);
+	SET_SB(not_persistent);
+
+	SET_SB(state);
+	SET_SB(active_disks);
+	SET_SB(working_disks);
+	SET_SB(failed_disks);
+	SET_SB(spare_disks);
+
+	SET_SB(layout);
+	SET_SB(chunk_size);
+
+	mddev->sb->md_magic = MD_SB_MAGIC;
+
+	/*
+	 * Generate a 128 bit UUID
+	 */
+	get_random_bytes(&mddev->sb->set_uuid0, 4);
+	get_random_bytes(&mddev->sb->set_uuid1, 4);
+	get_random_bytes(&mddev->sb->set_uuid2, 4);
+	get_random_bytes(&mddev->sb->set_uuid3, 4);
+
+	return 0;
+}
+#undef SET_SB
+
+/* SET_DISK_INFO ioctl: not implemented yet. */
+static int set_disk_info (mddev_t * mddev, void * arg)
+{
+	printk("not yet");
+	return -EINVAL;
+}
+
+/* CLEAR_ARRAY ioctl: not implemented yet. */
+static int clear_array (mddev_t * mddev)
+{
+	printk("not yet");
+	return -EINVAL;
+}
+
+/* WRITE_RAID_INFO ioctl: not implemented yet. */
+static int write_raid_info (mddev_t * mddev)
+{
+	printk("not yet");
+	return -EINVAL;
+}
+
+/* PROTECT_ARRAY ioctl: not implemented yet. */
+static int protect_array (mddev_t * mddev)
+{
+	printk("not yet");
+	return -EINVAL;
+}
+
+/* UNPROTECT_ARRAY ioctl: not implemented yet. */
+static int unprotect_array (mddev_t * mddev)
+{
+	printk("not yet");
+	return -EINVAL;
+}
+
+/*
+ * SET_DISK_FAULTY ioctl: flush outstanding I/O on the array, then
+ * report device 'dev' as failed via md_error() and return its result.
+ */
+static int set_disk_faulty (mddev_t *mddev, kdev_t dev)
+{
+	fsync_dev(mddev_to_kdev(mddev));
+	return md_error(mddev_to_kdev(mddev), dev);
+}
+
+static int md_ioctl (struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int minor;
+ int err = 0;
+ struct hd_geometry *loc = (struct hd_geometry *) arg;
+ mddev_t *mddev = NULL;
+ kdev_t dev;
+
+ if (!md_capable_admin())
+ return -EACCES;
+
+ dev = inode->i_rdev;
+ minor = MINOR(dev);
+ if (minor >= MAX_MD_DEVS)
+ return -EINVAL;
+
+ /*
+ * Commands dealing with the RAID driver but not any
+ * particular array:
+ */
+ switch (cmd)
+ {
+ case RAID_VERSION:
+ err = get_version((void *)arg);
+ goto done;
+
+ case PRINT_RAID_DEBUG:
+ err = 0;
+ md_print_devices();
+ goto done_unlock;
+
+ case BLKGETSIZE: /* Return device size */
+ if (!arg) {
+ err = -EINVAL;
+ goto abort;
+ }
+ err = md_put_user(md_hd_struct[minor].nr_sects,
+ (long *) arg);
+ goto done;
+
+ case BLKFLSBUF:
+ fsync_dev(dev);
+ invalidate_buffers(dev);
+ goto done;
+
+ case BLKRASET:
+ if (arg > 0xff) {
+ err = -EINVAL;
+ goto abort;
+ }
+ read_ahead[MAJOR(dev)] = arg;
+ goto done;
+
+ case BLKRAGET:
+ if (!arg) {
+ err = -EINVAL;
+ goto abort;
+ }
+ err = md_put_user (read_ahead[
+ MAJOR(dev)], (long *) arg);
+ goto done;
+ default:
+ }
+
+ /*
+ * Commands creating/starting a new array:
+ */
+
+ mddev = kdev_to_mddev(dev);
+
+ switch (cmd)
+ {
+ case SET_ARRAY_INFO:
+ case START_ARRAY:
+ if (mddev) {
+ printk("array md%d already exists!\n",
+ mdidx(mddev));
+ err = -EEXIST;
+ goto abort;
+ }
+ default:
+ }
+
+ switch (cmd)
+ {
+ case SET_ARRAY_INFO:
+ mddev = alloc_mddev(dev);
+ if (!mddev) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ /*
+ * alloc_mddev() should possibly self-lock.
+ */
+ err = lock_mddev(mddev);
+ if (err) {
+ printk("ioctl, reason %d, cmd %d\n", err, cmd);
+ goto abort;
+ }
+ err = set_array_info(mddev, (void *)arg);
+ if (err) {
+ printk("couldnt set array info. %d\n", err);
+ goto abort;
+ }
+ goto done_unlock;
+
+ case START_ARRAY:
+ /*
+ * possibly make it lock the array ...
+ */
+ err = autostart_array((kdev_t)arg);
+ if (err) {
+ printk("autostart %s failed!\n",
+ partition_name((kdev_t)arg));
+ goto abort;
+ }
+ goto done;
+
+ default:
+ }
+
+ /*
+ * Commands querying/configuring an existing array:
+ */
+
+ if (!mddev) {
+ err = -ENODEV;
+ goto abort;
+ }
+ err = lock_mddev(mddev);
+ if (err) {
+ printk("ioctl lock interrupted, reason %d, cmd %d\n",err, cmd);
+ goto abort;
+ }
+
+ /*
+ * Commands even a read-only array can execute:
+ */
+ switch (cmd)
+ {
+ case GET_ARRAY_INFO:
+ err = get_array_info(mddev, (void *)arg);
+ goto done_unlock;
+
+ case GET_DISK_INFO:
+ err = get_disk_info(mddev, (void *)arg);
+ goto done_unlock;
+
+ case RESTART_ARRAY_RW:
+ err = restart_array(mddev);
+ goto done_unlock;
+
+ case STOP_ARRAY:
+ err = do_md_stop (mddev, 0);
+ goto done_unlock;
+
+ case STOP_ARRAY_RO:
+ err = do_md_stop (mddev, 1);
+ goto done_unlock;
+
+ /*
+ * We have a problem here : there is no easy way to give a CHS
+ * virtual geometry. We currently pretend that we have a 2 heads
+ * 4 sectors (with a BIG number of cylinders...). This drives
+ * dosfs just mad... ;-)
+ */
+ case HDIO_GETGEO:
+ if (!loc) {
+ err = -EINVAL;
+ goto abort_unlock;
+ }
+ err = md_put_user (2, (char *) &loc->heads);
+ if (err)
+ goto abort_unlock;
+ err = md_put_user (4, (char *) &loc->sectors);
+ if (err)
+ goto abort_unlock;
+ err = md_put_user (md_hd_struct[mdidx(mddev)].nr_sects/8,
+ (short *) &loc->cylinders);
+ if (err)
+ goto abort_unlock;
+ err = md_put_user (md_hd_struct[minor].start_sect,
+ (long *) &loc->start);
+ goto done_unlock;
+ }
+
+ /*
+ * The remaining ioctls are changing the state of the
+ * superblock, so we do not allow read-only arrays
+ * here:
+ */
+ if (mddev->ro) {
+ err = -EROFS;
+ goto abort_unlock;
+ }
+
+ switch (cmd)
+ {
+ case CLEAR_ARRAY:
+ err = clear_array(mddev);
+ goto done_unlock;
+
+ case ADD_NEW_DISK:
+ err = add_new_disk(mddev, (void *)arg);
+ goto done_unlock;
+
+ case HOT_REMOVE_DISK:
+ err = hot_remove_disk(mddev, (kdev_t)arg);
+ goto done_unlock;
+
+ case HOT_ADD_DISK:
+ err = hot_add_disk(mddev, (kdev_t)arg);
+ goto done_unlock;
- if (md_dev[minor].nb_dev==MAX_REAL)
- return -EINVAL;
+ case SET_DISK_INFO:
+ err = set_disk_info(mddev, (void *)arg);
+ goto done_unlock;
- if (!fs_may_mount (dev))
- return -EBUSY;
+ case WRITE_RAID_INFO:
+ err = write_raid_info(mddev);
+ goto done_unlock;
- if (blk_size[MAJOR(dev)] == NULL || blk_size[MAJOR(dev)][MINOR(dev)] == 0) {
- printk("md_add(): zero device size, huh, bailing out.\n");
- return -EINVAL;
- }
+ case UNPROTECT_ARRAY:
+ err = unprotect_array(mddev);
+ goto done_unlock;
- if (md_dev[minor].pers) {
- /*
- * The array is already running, hot-add the drive, or
- * bail out:
- */
- if (!md_dev[minor].pers->hot_add_disk)
- return -EBUSY;
- else
- hot_add=1;
- }
+ case PROTECT_ARRAY:
+ err = protect_array(mddev);
+ goto done_unlock;
- /*
- * Careful. We cannot increase nb_dev for a running array.
- */
- i=md_dev[minor].nb_dev;
- realdev = &md_dev[minor].devices[i];
- realdev->dev=dev;
-
- /* Lock the device by inserting a dummy inode. This doesn't
- smell very good, but I need to be consistent with the
- mount stuff, specially with fs_may_mount. If someone have
- a better idea, please help ! */
-
- realdev->inode=get_empty_inode ();
- if (!realdev->inode)
- return -ENOMEM;
- realdev->inode->i_dev=dev; /* don't care about other fields */
- insert_inode_hash (realdev->inode);
-
- /* Sizes are now rounded at run time */
-
-/* md_dev[minor].devices[i].size=gen_real->sizes[MINOR(dev)]; HACKHACK*/
+ case SET_DISK_FAULTY:
+ err = set_disk_faulty(mddev, (kdev_t)arg);
+ goto done_unlock;
- realdev->size=blk_size[MAJOR(dev)][MINOR(dev)];
+ case RUN_ARRAY:
+ {
+ mdu_param_t param;
- if (hot_add) {
- /*
- * Check the superblock for consistency.
- * The personality itself has to check whether it's getting
- * added with the proper flags. The personality has to be
- * checked too. ;)
- */
- if (analyze_one_sb (realdev))
- return -EINVAL;
- /*
- * hot_add has to bump up nb_dev itself
- */
- if (md_dev[minor].pers->hot_add_disk (&md_dev[minor], dev)) {
+ err = md_copy_from_user(¶m, (mdu_param_t *)arg,
+ sizeof(param));
+ if (err)
+ goto abort_unlock;
+
+ err = do_md_run (mddev);
/*
- * FIXME: here we should free up the inode and stuff
+ * we have to clean up the mess if
+ * the array cannot be run for some
+ * reason ...
*/
- printk ("FIXME\n");
- return -EINVAL;
+ if (err) {
+ mddev->sb_dirty = 0;
+ do_md_stop (mddev, 0);
+ }
+ goto done_unlock;
}
- } else
- md_dev[minor].nb_dev++;
-
- printk ("REGISTER_DEV %s to md%x done\n", partition_name(dev), minor);
- return (0);
-}
-static int md_ioctl (struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int minor, err;
- struct hd_geometry *loc = (struct hd_geometry *) arg;
+ default:
+			printk(KERN_WARNING "%s(pid %d) used obsolete MD ioctl, upgrade your software to use new ioctls.\n", current->comm, current->pid);
+ err = -EINVAL;
+ goto abort_unlock;
+ }
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+done_unlock:
+abort_unlock:
+ if (mddev)
+ unlock_mddev(mddev);
+ else
+ printk("huh11?\n");
- if (((minor=MINOR(inode->i_rdev)) & 0x80) &&
- (minor & 0x7f) < MAX_PERSONALITY &&
- pers[minor & 0x7f] &&
- pers[minor & 0x7f]->ioctl)
- return (pers[minor & 0x7f]->ioctl (inode, file, cmd, arg));
-
- if (minor >= MAX_MD_DEV)
- return -EINVAL;
-
- switch (cmd)
- {
- case REGISTER_DEV:
- return do_md_add (minor, to_kdev_t ((dev_t) arg));
-
- case START_MD:
- return do_md_run (minor, (int) arg);
-
- case STOP_MD:
- return do_md_stop (minor, inode);
-
- case BLKGETSIZE: /* Return device size */
- if (!arg) return -EINVAL;
- err = put_user (md_hd_struct[MINOR(inode->i_rdev)].nr_sects, (long *) arg);
- if (err)
- return err;
- break;
-
-
- /* We have a problem here : there is no easy way to give a CHS
- virtual geometry. We currently pretend that we have a 2 heads
- 4 sectors (with a BIG number of cylinders...). This drives dosfs
- just mad... ;-) */
-
- case HDIO_GETGEO:
- if (!loc) return -EINVAL;
- err = put_user (2, (char *) &loc->heads);
- if (err)
- return err;
- err = put_user (4, (char *) &loc->sectors);
- if (err)
- return err;
- err = put_user (md_hd_struct[minor].nr_sects/8, (short *) &loc->cylinders);
- if (err)
- return err;
- err = put_user (md_hd_struct[MINOR(inode->i_rdev)].start_sect,
- (long *) &loc->start);
- if (err)
- return err;
- break;
-
- case BLKROSET:
- case BLKROGET:
- case BLKRAGET:
- case BLKRASET:
- case BLKFLSBUF:
- return blk_ioctl(inode->i_rdev, cmd, arg);
-
- default:
- return -EINVAL;
- }
-
- return (0);
+ return err;
+done:
+ if (err)
+ printk("huh12?\n");
+abort:
+ return err;
}
static int md_open (struct inode *inode, struct file *file)
{
- int minor=MINOR(inode->i_rdev);
-
- md_dev[minor].busy++;
- return (0); /* Always succeed */
-}
-
-
-static int md_release (struct inode *inode, struct file *file)
-{
- int minor=MINOR(inode->i_rdev);
- md_dev[minor].busy--;
- return 0;
+ /*
+ * Always succeed
+ */
+ return (0);
}
static struct block_device_operations md_fops=
{
open: md_open,
- release: md_release,
ioctl: md_ioctl,
};
+
-int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size)
+int md_thread(void * arg)
{
- if ((unsigned int) minor >= MAX_MD_DEV)
- {
- printk ("Bad md device %d\n", minor);
- return (-1);
- }
-
- if (!md_dev[minor].pers)
- {
- printk ("Oops ! md%d not running, giving up !\n", minor);
- return (-1);
- }
+ mdk_thread_t *thread = arg;
- return (md_dev[minor].pers->map(md_dev+minor, rdev, rsector, size));
-}
-
-int md_make_request (int minor, int rw, struct buffer_head * bh)
-{
- if (md_dev [minor].pers->make_request) {
- if (buffer_locked(bh))
- return 0;
- set_bit(BH_Lock, &bh->b_state);
- if (rw == WRITE) {
- if (!buffer_dirty(bh)) {
- bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
- return 0;
- }
+ md_lock_kernel();
+ exit_mm(current);
+ exit_files(current);
+ exit_fs(current);
+
+ /*
+ * Detach thread
+ */
+ sys_setsid();
+ sprintf(current->comm, thread->name);
+ md_init_signals();
+ md_flush_signals();
+ thread->tsk = current;
+
+ /*
+	 * md_thread is a 'system-thread', its priority should be very
+ * high. We avoid resource deadlocks individually in each
+ * raid personality. (RAID5 does preallocation) We also use RR and
+ * the very same RT priority as kswapd, thus we will never get
+ * into a priority inversion deadlock.
+ *
+ * we definitely have to have equal or higher priority than
+ * bdflush, otherwise bdflush will deadlock if there are too
+ * many dirty RAID5 blocks.
+ */
+ current->policy = SCHED_OTHER;
+ current->priority = 40;
+// md_unlock_kernel();
+
+ up(thread->sem);
+
+ for (;;) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue(&thread->wqueue, &wait);
+ if (!test_bit(THREAD_WAKEUP, &thread->flags)) {
+ set_task_state(current, TASK_INTERRUPTIBLE);
+ dprintk("thread %p went to sleep.\n", thread);
+ schedule();
+ dprintk("thread %p woke up.\n", thread);
+ current->state = TASK_RUNNING;
}
- if (rw == READ || rw == READA) {
- if (buffer_uptodate(bh)) {
- bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
- return 0;
- }
+ remove_wait_queue(&thread->wqueue, &wait);
+ clear_bit(THREAD_WAKEUP, &thread->flags);
+
+ if (thread->run) {
+ thread->run(thread->data);
+ run_task_queue(&tq_disk);
+ } else
+ break;
+ if (md_signal_pending(current)) {
+ printk("%8s(%d) flushing signals.\n", current->comm,
+ current->pid);
+ md_flush_signals();
}
- return (md_dev[minor].pers->make_request(md_dev+minor, rw, bh));
- } else {
- make_request (MAJOR(bh->b_rdev), rw, bh);
- return 0;
}
+ up(thread->sem);
+ return 0;
}
-static void do_md_request (request_queue_t * q)
-{
- printk ("Got md request, not good...");
- return;
-}
-
-void md_wakeup_thread(struct md_thread *thread)
+void md_wakeup_thread(mdk_thread_t *thread)
{
+ dprintk("waking up MD thread %p.\n", thread);
set_bit(THREAD_WAKEUP, &thread->flags);
wake_up(&thread->wqueue);
}
-struct md_thread *md_register_thread (void (*run) (void *), void *data)
+mdk_thread_t *md_register_thread (void (*run) (void *),
+ void *data, const char *name)
{
- struct md_thread *thread = (struct md_thread *)
- kmalloc(sizeof(struct md_thread), GFP_KERNEL);
+ mdk_thread_t *thread;
int ret;
DECLARE_MUTEX_LOCKED(sem);
- if (!thread) return NULL;
+ thread = (mdk_thread_t *) kmalloc
+ (sizeof(mdk_thread_t), GFP_KERNEL);
+ if (!thread)
+ return NULL;
- memset(thread, 0, sizeof(struct md_thread));
- init_waitqueue_head(&thread->wqueue);
+ memset(thread, 0, sizeof(mdk_thread_t));
+ md_init_waitqueue_head(&thread->wqueue);
thread->sem = &sem;
thread->run = run;
thread->data = data;
+ thread->name = name;
ret = kernel_thread(md_thread, thread, 0);
if (ret < 0) {
kfree(thread);
return thread;
}
-void md_unregister_thread (struct md_thread *thread)
+void md_interrupt_thread (mdk_thread_t *thread)
+{
+ if (!thread->tsk) {
+ MD_BUG();
+ return;
+ }
+ printk("interrupting MD-thread pid %d\n", thread->tsk->pid);
+ send_sig(SIGKILL, thread->tsk, 1);
+}
+
+void md_unregister_thread (mdk_thread_t *thread)
{
DECLARE_MUTEX_LOCKED(sem);
thread->sem = &sem;
thread->run = NULL;
- if (thread->tsk)
- printk("Killing md_thread %d %p %s\n",
- thread->tsk->pid, thread->tsk, thread->tsk->comm);
- else
- printk("Aiee. md_thread has 0 tsk\n");
- send_sig(SIGKILL, thread->tsk, 1);
- printk("downing on %p\n", &sem);
+ thread->name = NULL;
+ if (!thread->tsk) {
+ MD_BUG();
+ return;
+ }
+ md_interrupt_thread(thread);
down(&sem);
}
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
+void md_recover_arrays (void)
+{
+ if (!md_recovery_thread) {
+ MD_BUG();
+ return;
+ }
+ md_wakeup_thread(md_recovery_thread);
+}
+
-int md_thread(void * arg)
+int md_error (kdev_t dev, kdev_t rdev)
{
- struct md_thread *thread = arg;
+ mddev_t *mddev = kdev_to_mddev(dev);
+ mdk_rdev_t * rrdev;
+ int rc;
- lock_kernel();
- exit_mm(current);
- exit_files(current);
- exit_fs(current);
-
- current->session = 1;
- current->pgrp = 1;
- sprintf(current->comm, "md_thread");
- siginitsetinv(¤t->blocked, SHUTDOWN_SIGS);
- thread->tsk = current;
- up(thread->sem);
+ printk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",MAJOR(dev),MINOR(dev),MAJOR(rdev),MINOR(rdev), __builtin_return_address(0),__builtin_return_address(1),__builtin_return_address(2),__builtin_return_address(3));
- for (;;) {
- cli();
- if (!test_bit(THREAD_WAKEUP, &thread->flags)) {
- do {
- spin_lock(¤t->sigmask_lock);
- flush_signals(current);
- spin_unlock(¤t->sigmask_lock);
- interruptible_sleep_on(&thread->wqueue);
- cli();
- if (test_bit(THREAD_WAKEUP, &thread->flags))
- break;
- if (!thread->run) {
- sti();
- up(thread->sem);
- return 0;
- }
- } while (signal_pending(current));
- }
- sti();
- clear_bit(THREAD_WAKEUP, &thread->flags);
- if (thread->run) {
- thread->run(thread->data);
- run_task_queue(&tq_disk);
+ if (!mddev) {
+ MD_BUG();
+ return 0;
+ }
+ rrdev = find_rdev(mddev, rdev);
+ mark_rdev_faulty(rrdev);
+ /*
+ * if recovery was running, stop it now.
+ */
+ if (mddev->pers->stop_resync)
+ mddev->pers->stop_resync(mddev);
+ if (mddev->recovery_running)
+ md_interrupt_thread(md_recovery_thread);
+ if (mddev->pers->error_handler) {
+ rc = mddev->pers->error_handler(mddev, rdev);
+ md_recover_arrays();
+ return rc;
+ }
+ return 0;
+}
+
+static int status_unused (char * page)
+{
+ int sz = 0, i = 0;
+ mdk_rdev_t *rdev;
+ struct md_list_head *tmp;
+
+ sz += sprintf(page + sz, "unused devices: ");
+
+ ITERATE_RDEV_ALL(rdev,tmp) {
+ if (!rdev->same_set.next && !rdev->same_set.prev) {
+ /*
+ * The device is not yet used by any array.
+ */
+ i++;
+ sz += sprintf(page + sz, "%s ",
+ partition_name(rdev->dev));
}
}
+ if (!i)
+ sz += sprintf(page + sz, "<none>");
+
+ sz += sprintf(page + sz, "\n");
+ return sz;
}
-EXPORT_SYMBOL(md_size);
-EXPORT_SYMBOL(md_maxreadahead);
-EXPORT_SYMBOL(register_md_personality);
-EXPORT_SYMBOL(unregister_md_personality);
-EXPORT_SYMBOL(md_dev);
-EXPORT_SYMBOL(md_error);
-EXPORT_SYMBOL(md_register_thread);
-EXPORT_SYMBOL(md_unregister_thread);
-EXPORT_SYMBOL(md_update_sb);
-EXPORT_SYMBOL(md_map);
-EXPORT_SYMBOL(md_wakeup_thread);
-EXPORT_SYMBOL(md_do_sync);
-#ifdef CONFIG_PROC_FS
+static int status_resync (char * page, mddev_t * mddev)
+{
+ int sz = 0;
+ unsigned int blocksize, max_blocks, resync, res, dt, tt, et;
+
+ resync = mddev->curr_resync;
+ blocksize = blksize_size[MD_MAJOR][mdidx(mddev)];
+ max_blocks = blk_size[MD_MAJOR][mdidx(mddev)] / (blocksize >> 10);
+
+ /*
+ * Should not happen.
+ */
+ if (!max_blocks) {
+ MD_BUG();
+ return 0;
+ }
+ res = (resync/1024)*1000/(max_blocks/1024 + 1);
+ {
+ int i, x = res/50, y = 20-x;
+ sz += sprintf(page + sz, "[");
+ for (i = 0; i < x; i++)
+ sz += sprintf(page + sz, "=");
+ sz += sprintf(page + sz, ">");
+ for (i = 0; i < y; i++)
+ sz += sprintf(page + sz, ".");
+ sz += sprintf(page + sz, "] ");
+ }
+ if (!mddev->recovery_running)
+ /*
+ * true resync
+ */
+ sz += sprintf(page + sz, " resync =%3u.%u%% (%u/%u)",
+ res/10, res % 10, resync, max_blocks);
+ else
+ /*
+ * recovery ...
+ */
+ sz += sprintf(page + sz, " recovery =%3u.%u%% (%u/%u)",
+ res/10, res % 10, resync, max_blocks);
+
+ /*
+ * We do not want to overflow, so the order of operands and
+ * the * 100 / 100 trick are important. We do a +1 to be
+ * safe against division by zero. We only estimate anyway.
+ *
+ * dt: time until now
+ * tt: total time
+ * et: estimated finish time
+ */
+ dt = ((jiffies - mddev->resync_start) / HZ);
+ tt = (dt * (max_blocks / (resync/100+1)))/100;
+ if (tt > dt)
+ et = tt - dt;
+ else
+ /*
+ * ignore rounding effects near finish time
+ */
+ et = 0;
+
+ sz += sprintf(page + sz, " finish=%u.%umin", et / 60, (et % 60)/6);
+
+ return sz;
+}
+
static int md_status_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- int sz = 0, i, j, size;
- int begin = 0;
+ int sz = 0, j, size;
+ struct md_list_head *tmp, *tmp2;
+ mdk_rdev_t *rdev;
+ mddev_t *mddev;
- sz=sprintf( page, "Personalities : ");
- for (i=0; i<MAX_PERSONALITY; i++)
- if (pers[i])
- sz+=sprintf (page+sz, "[%d %s] ", i, pers[i]->name);
- page[sz-1]='\n';
+ sz += sprintf(page + sz, "Personalities : ");
+ for (j = 0; j < MAX_PERSONALITY; j++)
+ if (pers[j])
+ sz += sprintf(page+sz, "[%s] ", pers[j]->name);
- sz+=sprintf (page+sz, "read_ahead ");
- if (read_ahead[MD_MAJOR]==INT_MAX)
- sz+=sprintf (page+sz, "not set\n");
- else
- sz+=sprintf (page+sz, "%d sectors\n", read_ahead[MD_MAJOR]);
+ sz += sprintf(page+sz, "\n");
- for (i=0; i<MAX_MD_DEV; i++) {
- if (sz < off) {
- begin += sz;
- off -= sz;
- sz = 0;
- }
- if (sz >= off+count) {
- *eof = 1;
- break;
- }
- sz+=sprintf (page+sz, "md%d : %sactive",
- i, md_dev[i].pers ? "" : "in");
- if (md_dev[i].pers)
- sz+=sprintf (page+sz, " %s", md_dev[i].pers->name);
+ sz += sprintf(page+sz, "read_ahead ");
+ if (read_ahead[MD_MAJOR] == INT_MAX)
+ sz += sprintf(page+sz, "not set\n");
+ else
+ sz += sprintf(page+sz, "%d sectors\n", read_ahead[MD_MAJOR]);
+
+ ITERATE_MDDEV(mddev,tmp) {
+ sz += sprintf(page + sz, "md%d : %sactive", mdidx(mddev),
+ mddev->pers ? "" : "in");
+ if (mddev->pers) {
+ if (mddev->ro)
+ sz += sprintf(page + sz, " (read-only)");
+ sz += sprintf(page + sz, " %s", mddev->pers->name);
+ }
- for (j=0, size=0; j<md_dev[i].nb_dev; j++) {
- sz+=sprintf (page+sz, " %s",
- partition_name(md_dev[i].devices[j].dev));
- size+=md_dev[i].devices[j].size;
+ size = 0;
+ ITERATE_RDEV(mddev,rdev,tmp2) {
+ sz += sprintf(page + sz, " %s[%d]",
+ partition_name(rdev->dev), rdev->desc_nr);
+ if (rdev->faulty) {
+ sz += sprintf(page + sz, "(F)");
+ continue;
+ }
+ size += rdev->size;
}
- if (md_dev[i].nb_dev) {
- if (md_dev[i].pers)
- sz+=sprintf (page+sz, " %d blocks", md_size[i]);
+ if (mddev->nb_dev) {
+ if (mddev->pers)
+ sz += sprintf(page + sz, "\n %d blocks",
+ md_size[mdidx(mddev)]);
else
- sz+=sprintf (page+sz, " %d blocks", size);
+ sz += sprintf(page + sz, "\n %d blocks", size);
}
- if (!md_dev[i].pers) {
- sz+=sprintf (page+sz, "\n");
+ if (!mddev->pers) {
+ sz += sprintf(page+sz, "\n");
continue;
}
- if (md_dev[i].pers->max_invalid_dev)
- sz+=sprintf (page+sz, " maxfault=%ld",
- MAX_FAULT(md_dev+i));
+ sz += mddev->pers->status (page+sz, mddev);
- sz+=md_dev[i].pers->status (page+sz, i, md_dev+i);
- sz+=sprintf (page+sz, "\n");
+ sz += sprintf(page+sz, "\n ");
+ if (mddev->curr_resync) {
+ sz += status_resync (page+sz, mddev);
+ } else {
+ if (md_atomic_read(&mddev->resync_sem.count) != 1)
+ sz += sprintf(page + sz, " resync=DELAYED");
+ }
+ sz += sprintf(page + sz, "\n");
}
+ sz += status_unused (page + sz);
- sz -= off;
- *start = page + off;
- if (sz>count)
- sz = count;
- if (sz<0)
- sz = 0;
return sz;
}
-#endif
-
-static void md_geninit (void)
-{
- int i;
-
- blksize_size[MD_MAJOR] = md_blocksizes;
- max_readahead[MD_MAJOR] = md_maxreadahead;
- for(i=0;i<MAX_MD_DEV;i++)
- {
- md_blocksizes[i] = 1024;
- md_maxreadahead[i] = MD_DEFAULT_DISK_READAHEAD;
- md_dev[i].pers=NULL;
- register_disk(&md_gendisk, MKDEV(MAJOR_NR,i), 1, &md_fops, 0);
- }
-
-#ifdef CONFIG_PROC_FS
- create_proc_read_entry("mdstat", 0, NULL, md_status_read_proc, NULL);
-#endif
-}
-
-int md_error (kdev_t mddev, kdev_t rdev)
-{
- unsigned int minor = MINOR (mddev);
- int rc;
-
- if (MAJOR(mddev) != MD_MAJOR || minor > MAX_MD_DEV)
- panic ("md_error gets unknown device\n");
- if (!md_dev [minor].pers)
- panic ("md_error gets an error for an unknown device\n");
- if (md_dev [minor].pers->error_handler) {
- rc = md_dev [minor].pers->error_handler (md_dev+minor, rdev);
-#if SUPPORT_RECONSTRUCTION
- md_wakeup_thread(md_sync_thread);
-#endif /* SUPPORT_RECONSTRUCTION */
- return rc;
- }
- return 0;
-}
-int register_md_personality (int p_num, struct md_personality *p)
+int register_md_personality (int pnum, mdk_personality_t *p)
{
- int i=(p_num >> PERSONALITY_SHIFT);
-
- if (i >= MAX_PERSONALITY)
- return -EINVAL;
+ if (pnum >= MAX_PERSONALITY)
+ return -EINVAL;
- if (pers[i])
- return -EBUSY;
+ if (pers[pnum])
+ return -EBUSY;
- pers[i]=p;
- printk ("%s personality registered\n", p->name);
- return 0;
+ pers[pnum] = p;
+ printk(KERN_INFO "%s personality registered\n", p->name);
+ return 0;
}
-int unregister_md_personality (int p_num)
+int unregister_md_personality (int pnum)
{
- int i=(p_num >> PERSONALITY_SHIFT);
-
- if (i >= MAX_PERSONALITY)
- return -EINVAL;
+ if (pnum >= MAX_PERSONALITY)
+ return -EINVAL;
- printk ("%s personality unregistered\n", pers[i]->name);
- pers[i]=NULL;
- return 0;
+ printk(KERN_INFO "%s personality unregistered\n", pers[pnum]->name);
+ pers[pnum] = NULL;
+ return 0;
}
-static md_descriptor_t *get_spare(struct md_dev *mddev)
-{
- int i;
- md_superblock_t *sb = mddev->sb;
- md_descriptor_t *descriptor;
- struct real_dev *realdev;
-
- for (i = 0; i < mddev->nb_dev; i++) {
- realdev = &mddev->devices[i];
- if (!realdev->sb)
- continue;
- descriptor = &sb->disks[realdev->sb->descriptor.number];
- if (descriptor->state & (1 << MD_FAULTY_DEVICE))
- continue;
- if (descriptor->state & (1 << MD_ACTIVE_DEVICE))
- continue;
- return descriptor;
- }
- return NULL;
-}
-
-/*
- * parallel resyncing thread.
- *
- * FIXME: - make it abort with a dirty array on mdstop, now it just blocks
- * - fix read error handing
- */
-
-int md_do_sync(struct md_dev *mddev)
+int md_notify_reboot(struct notifier_block *this,
+ unsigned long code, void *x)
{
- struct buffer_head *bh;
- int max_blocks, blocksize, curr_bsize, percent=1, j;
- kdev_t read_disk = MKDEV(MD_MAJOR, mddev - md_dev);
- int major = MAJOR(read_disk), minor = MINOR(read_disk);
- unsigned long starttime;
-
- blocksize = blksize_size[major][minor];
- max_blocks = blk_size[major][minor] / (blocksize >> 10);
-
- printk("... resync log\n");
- printk(" .... mddev->nb_dev: %d\n", mddev->nb_dev);
- printk(" .... raid array: %s\n", kdevname(read_disk));
- printk(" .... max_blocks: %d blocksize: %d\n", max_blocks, blocksize);
- printk("md: syncing RAID array %s\n", kdevname(read_disk));
-
- mddev->busy++;
-
- starttime=jiffies;
- for (j = 0; j < max_blocks; j++) {
-
- /*
- * B careful. When some1 mounts a non-'blocksize' filesystem
- * then we get the blocksize changed right under us. Go deal
- * with it transparently, recalculate 'blocksize', 'j' and
- * 'max_blocks':
- */
- curr_bsize = blksize_size[major][minor];
- if (curr_bsize != blocksize) {
- diff_blocksize:
- if (curr_bsize > blocksize)
- /*
- * this is safe, rounds downwards.
- */
- j /= curr_bsize/blocksize;
- else
- j *= blocksize/curr_bsize;
-
- blocksize = curr_bsize;
- max_blocks = blk_size[major][minor] / (blocksize >> 10);
- }
- if ((bh = breada (read_disk, j, blocksize, j * blocksize,
- max_blocks * blocksize)) != NULL) {
- mark_buffer_dirty(bh, 1);
- brelse(bh);
- } else {
- /*
- * FIXME: Ugly, but set_blocksize() isnt safe ...
- */
- curr_bsize = blksize_size[major][minor];
- if (curr_bsize != blocksize)
- goto diff_blocksize;
+ struct md_list_head *tmp;
+ mddev_t *mddev;
- /*
- * It's a real read problem. FIXME, handle this
- * a better way.
- */
- printk ( KERN_ALERT
- "read error, stopping reconstruction.\n");
- mddev->busy--;
- return 1;
- }
+ if ((code == MD_SYS_DOWN) || (code == MD_SYS_HALT)
+ || (code == MD_SYS_POWER_OFF)) {
- /*
- * Let's sleep some if we are faster than our speed limit:
- */
- while (blocksize*j/(jiffies-starttime+1)*HZ/1024 > SPEED_LIMIT)
- {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
- }
+ printk(KERN_INFO "stopping all md devices.\n");
+ ITERATE_MDDEV(mddev,tmp)
+ do_md_stop (mddev, 1);
/*
- * FIXME: put this status bar thing into /proc
+ * certain more exotic SCSI devices are known to be
+ * volatile wrt too early system reboots. While the
+ * right place to handle this issue is the given
+ * driver, we do want to have a safe RAID driver ...
*/
- if (!(j%(max_blocks/100))) {
- if (!(percent%10))
- printk (" %03d%% done.\n",percent);
- else
- printk (".");
- percent++;
- }
+ md_mdelay(1000*1);
}
- fsync_dev(read_disk);
- printk("md: %s: sync done.\n", kdevname(read_disk));
- mddev->busy--;
- return 0;
+ return NOTIFY_DONE;
}
-/*
- * This is a kernel thread which: syncs a spare disk with the active array
- *
- * the amount of foolproofing might seem to be a tad excessive, but an
- * early (not so error-safe) version of raid1syncd synced the first 0.5 gigs
- * of my root partition with the first 0.5 gigs of my /home partition ... so
- * i'm a bit nervous ;)
- */
-void mdsyncd (void *data)
-{
- int i;
- struct md_dev *mddev;
- md_superblock_t *sb;
- md_descriptor_t *spare;
- unsigned long flags;
+struct notifier_block md_notifier = {
+ md_notify_reboot,
+ NULL,
+ 0
+};
- for (i = 0, mddev = md_dev; i < MAX_MD_DEV; i++, mddev++) {
- if ((sb = mddev->sb) == NULL)
- continue;
- if (sb->active_disks == sb->raid_disks)
- continue;
- if (!sb->spare_disks)
- continue;
- if ((spare = get_spare(mddev)) == NULL)
- continue;
- if (!mddev->pers->mark_spare)
- continue;
- if (mddev->pers->mark_spare(mddev, spare, SPARE_WRITE))
- continue;
- if (md_do_sync(mddev) || (spare->state & (1 << MD_FAULTY_DEVICE))) {
- mddev->pers->mark_spare(mddev, spare, SPARE_INACTIVE);
+void md__init raid_setup(char *str, int *ints)
+{
+ char tmpline[100];
+ int len, pos, nr, i;
+
+ len = strlen(str) + 1;
+ nr = 0;
+ pos = 0;
+
+ for (i = 0; i < len; i++) {
+ char c = str[i];
+
+ if (c == ',' || !c) {
+ tmpline[pos] = 0;
+ if (!strcmp(tmpline,"noautodetect"))
+ raid_setup_args.noautodetect = 1;
+ nr++;
+ pos = 0;
continue;
}
- save_flags(flags);
- cli();
- mddev->pers->mark_spare(mddev, spare, SPARE_ACTIVE);
- spare->state |= (1 << MD_SYNC_DEVICE);
- spare->state |= (1 << MD_ACTIVE_DEVICE);
- sb->spare_disks--;
- sb->active_disks++;
- mddev->sb_dirty = 1;
- md_update_sb(mddev - md_dev);
- restore_flags(flags);
+ tmpline[pos] = c;
+ pos++;
}
-
+ raid_setup_args.set = 1;
+ return;
}
#ifdef CONFIG_MD_BOOT
struct {
unsigned long set;
- int pers[MAX_MD_DEV];
- kdev_t devices[MAX_MD_DEV][MAX_REAL];
-} md_setup_args __initdata = {
+ int pers[MAX_MD_DEVS];
+ kdev_t devices[MAX_MD_DEVS][MAX_REAL];
+} md_setup_args md__initdata = {
0,{0},{{0}}
};
* the MD devices (by specifying multiple "md=" lines)
* instead of just one. -- KTK
*/
-int __init md_setup(char *str)
+static int __init md_setup(char *str)
{
int minor, level, factor, fault, i;
kdev_t device;
get_option(&str, &fault) != 2) {
printk("md: Too few arguments supplied to md=.\n");
return 0;
- } else if (minor >= MAX_MD_DEV) {
- printk ("md: Minor device number too high.\n");
+ } else if (minor >= MAX_MD_DEVS) {
+ printk ("md: Minor device number too high.\n");
return 0;
} else if (md_setup_args.set & (1 << minor)) {
printk ("md: Warning - md=%d,... has been specified twice;\n"
" will discard the first definition.\n", minor);
- }
+ }
switch(level) {
#ifdef CONFIG_MD_LINEAR
case -1:
level = LINEAR;
pername = "linear";
- break;
+ break;
#endif
#ifdef CONFIG_MD_STRIPED
case 0:
level = STRIPED;
pername = "striped";
- break;
+ break;
#endif
default:
printk ("md: The kernel has not been configured for raid%d"
" support!\n", level);
return 0;
- }
+ }
devnames = str;
for (i = 0; str; i++) {
if ((device = name_to_kdev_t(str))) {
md_setup_args.set |= (1 << minor);
return 0;
}
-
#endif
+static void md_geninit (void)
+{
+ int i;
+
+ blksize_size[MD_MAJOR] = md_blocksizes;
+ max_readahead[MD_MAJOR] = md_maxreadahead;
+
+ for(i = 0; i < MAX_MD_DEVS; i++) {
+ md_blocksizes[i] = 1024;
+ md_maxreadahead[i] = MD_READAHEAD;
+ register_disk(&md_gendisk, MKDEV(MAJOR_NR,i), 1, &md_fops, 0);
+
+ }
+
+ printk("md.c: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
+
+#ifdef CONFIG_PROC_FS
+ create_proc_read_entry("mdstat", 0, NULL, md_status_read_proc, NULL);
+#endif
+}
+void hsm_init (void);
+void translucent_init (void);
void linear_init (void);
void raid0_init (void);
void raid1_init (void);
void raid5_init (void);
-int __init md_init (void)
+int md__init md_init (void)
{
- printk ("md driver %d.%d.%d MAX_MD_DEV=%d, MAX_REAL=%d\n",
- MD_MAJOR_VERSION, MD_MINOR_VERSION, MD_PATCHLEVEL_VERSION,
- MAX_MD_DEV, MAX_REAL);
+ printk (KERN_INFO "md driver %d.%d.%d MAX_MD_DEVS=%d, MAX_REAL=%d\n",
+ MD_MAJOR_VERSION, MD_MINOR_VERSION,
+ MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MAX_REAL);
+
+ if (register_blkdev (MD_MAJOR, "md", &md_fops))
+ {
+ printk (KERN_ALERT "Unable to get major %d for md\n", MD_MAJOR);
+ return (-1);
+ }
- if (register_blkdev (MD_MAJOR, "md", &md_fops))
- {
- printk ("Unable to get major %d for md\n", MD_MAJOR);
- return (-1);
- }
+ blk_dev[MD_MAJOR].queue = md_get_queue;
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
- read_ahead[MD_MAJOR]=INT_MAX;
- memset(md_dev, 0, MAX_MD_DEV * sizeof (struct md_dev));
- md_gendisk.next=gendisk_head;
+ read_ahead[MD_MAJOR] = INT_MAX;
+ md_gendisk.next = gendisk_head;
- gendisk_head=&md_gendisk;
+ gendisk_head = &md_gendisk;
-#if SUPPORT_RECONSTRUCTION
- if ((md_sync_thread = md_register_thread(mdsyncd, NULL)) == NULL)
- printk("md: bug: md_sync_thread == NULL\n");
-#endif /* SUPPORT_RECONSTRUCTION */
+ md_register_reboot_notifier(&md_notifier);
+#ifdef CONFIG_MD_HSM
+ hsm_init ();
+#endif
+#ifdef CONFIG_MD_TRANSLUCENT
+ translucent_init ();
+#endif
#ifdef CONFIG_MD_LINEAR
- linear_init ();
+ linear_init ();
#endif
#ifdef CONFIG_MD_STRIPED
- raid0_init ();
+ raid0_init ();
#endif
#ifdef CONFIG_MD_MIRRORING
- raid1_init ();
+ raid1_init ();
#endif
#ifdef CONFIG_MD_RAID5
- raid5_init ();
+ raid5_init ();
#endif
- md_geninit();
- return (0);
+#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+ /*
+ * pick a XOR routine, runtime.
+ */
+ calibrate_xor_block();
+#endif
+ md_geninit();
+ return (0);
}
#ifdef CONFIG_MD_BOOT
-void __init md_setup_drive(void)
+static void __init md_setup_drive(void)
{
+ if(md_setup_args.set)
+ do_md_setup(md_setup_args.str, md_setup_args.ints);
int minor, i;
kdev_t dev;
- for (minor = 0; minor < MAX_MD_DEV; minor++) {
+ for (minor = 0; minor < MAX_MD_DEVS; minor++) {
if ((md_setup_args.set & (1 << minor)) == 0)
continue;
printk("md: Loading md%d.\n", minor);
__setup("md=", md_setup);
#endif
+
+MD_EXPORT_SYMBOL(md_size);
+MD_EXPORT_SYMBOL(register_md_personality);
+MD_EXPORT_SYMBOL(unregister_md_personality);
+MD_EXPORT_SYMBOL(partition_name);
+MD_EXPORT_SYMBOL(md_error);
+MD_EXPORT_SYMBOL(md_recover_arrays);
+MD_EXPORT_SYMBOL(md_register_thread);
+MD_EXPORT_SYMBOL(md_unregister_thread);
+MD_EXPORT_SYMBOL(md_update_sb);
+MD_EXPORT_SYMBOL(md_wakeup_thread);
+MD_EXPORT_SYMBOL(md_print_devices);
+MD_EXPORT_SYMBOL(find_rdev_nr);
+MD_EXPORT_SYMBOL(md_interrupt_thread);
+MD_EXPORT_SYMBOL(mddev_map);
+
-
/*
raid0.c : Multiple Devices driver for Linux
Copyright (C) 1994-96 Marc ZYNGIER
<zyngier@ufr-info-p7.ibp.fr> or
<maz@gloups.fdn.fr>
+ Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
+
RAID-0 management functions.
*/
#include <linux/module.h>
-#include <linux/md.h>
-#include <linux/raid0.h>
-#include <linux/vmalloc.h>
+#include <linux/raid/raid0.h>
#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY
-static int create_strip_zones (int minor, struct md_dev *mddev)
+static int create_strip_zones (mddev_t *mddev)
{
- int i, j, c=0;
- int current_offset=0;
- struct real_dev *smallest_by_zone;
- struct raid0_data *data=(struct raid0_data *) mddev->private;
-
- data->nr_strip_zones=1;
-
- for (i=1; i<mddev->nb_dev; i++)
- {
- for (j=0; j<i; j++)
- if (mddev->devices[i].size==mddev->devices[j].size)
- {
- c=1;
- break;
- }
-
- if (!c)
- data->nr_strip_zones++;
-
- c=0;
- }
-
- if ((data->strip_zone=vmalloc(sizeof(struct strip_zone)*data->nr_strip_zones)) == NULL)
- return 1;
-
- data->smallest=NULL;
-
- for (i=0; i<data->nr_strip_zones; i++)
- {
- data->strip_zone[i].dev_offset=current_offset;
- smallest_by_zone=NULL;
- c=0;
-
- for (j=0; j<mddev->nb_dev; j++)
- if (mddev->devices[j].size>current_offset)
- {
- data->strip_zone[i].dev[c++]=mddev->devices+j;
- if (!smallest_by_zone ||
- smallest_by_zone->size > mddev->devices[j].size)
- smallest_by_zone=mddev->devices+j;
- }
-
- data->strip_zone[i].nb_dev=c;
- data->strip_zone[i].size=(smallest_by_zone->size-current_offset)*c;
-
- if (!data->smallest ||
- data->smallest->size > data->strip_zone[i].size)
- data->smallest=data->strip_zone+i;
-
- data->strip_zone[i].zone_offset=i ? (data->strip_zone[i-1].zone_offset+
- data->strip_zone[i-1].size) : 0;
- current_offset=smallest_by_zone->size;
- }
- return 0;
+ int i, c, j, j1, j2;
+ int current_offset, curr_zone_offset;
+ raid0_conf_t *conf = mddev_to_conf(mddev);
+ mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
+
+ /*
+ * The number of 'same size groups'
+ */
+ conf->nr_strip_zones = 0;
+
+ ITERATE_RDEV_ORDERED(mddev,rdev1,j1) {
+ printk("raid0: looking at %s\n", partition_name(rdev1->dev));
+ c = 0;
+ ITERATE_RDEV_ORDERED(mddev,rdev2,j2) {
+ printk("raid0: comparing %s(%d) with %s(%d)\n", partition_name(rdev1->dev), rdev1->size, partition_name(rdev2->dev), rdev2->size);
+ if (rdev2 == rdev1) {
+ printk("raid0: END\n");
+ break;
+ }
+ if (rdev2->size == rdev1->size)
+ {
+ /*
+				 * Not unique, don't count it as a new
+ * group
+ */
+ printk("raid0: EQUAL\n");
+ c = 1;
+ break;
+ }
+ printk("raid0: NOT EQUAL\n");
+ }
+ if (!c) {
+ printk("raid0: ==> UNIQUE\n");
+ conf->nr_strip_zones++;
+ printk("raid0: %d zones\n", conf->nr_strip_zones);
+ }
+ }
+ printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
+
+ conf->strip_zone = vmalloc(sizeof(struct strip_zone)*
+ conf->nr_strip_zones);
+ if (!conf->strip_zone)
+ return 1;
+
+
+ conf->smallest = NULL;
+ current_offset = 0;
+ curr_zone_offset = 0;
+
+ for (i = 0; i < conf->nr_strip_zones; i++)
+ {
+ struct strip_zone *zone = conf->strip_zone + i;
+
+ printk("zone %d\n", i);
+ zone->dev_offset = current_offset;
+ smallest = NULL;
+ c = 0;
+
+ ITERATE_RDEV_ORDERED(mddev,rdev,j) {
+
+ printk(" checking %s ...", partition_name(rdev->dev));
+ if (rdev->size > current_offset)
+ {
+ printk(" contained as device %d\n", c);
+ zone->dev[c] = rdev;
+ c++;
+ if (!smallest || (rdev->size <smallest->size)) {
+ smallest = rdev;
+ printk(" (%d) is smallest!.\n", rdev->size);
+ }
+ } else
+ printk(" nope.\n");
+ }
+
+ zone->nb_dev = c;
+ zone->size = (smallest->size - current_offset) * c;
+ printk(" zone->nb_dev: %d, size: %d\n",zone->nb_dev,zone->size);
+
+ if (!conf->smallest || (zone->size < conf->smallest->size))
+ conf->smallest = zone;
+
+ zone->zone_offset = curr_zone_offset;
+ curr_zone_offset += zone->size;
+
+ current_offset = smallest->size;
+ printk("current zone offset: %d\n", current_offset);
+ }
+ printk("done.\n");
+ return 0;
}
-static int raid0_run (int minor, struct md_dev *mddev)
+static int raid0_run (mddev_t *mddev)
{
- int cur=0, i=0, size, zone0_size, nb_zone;
- struct raid0_data *data;
-
- MOD_INC_USE_COUNT;
-
- if ((mddev->private=vmalloc (sizeof (struct raid0_data))) == NULL) return 1;
- data=(struct raid0_data *) mddev->private;
-
- if (create_strip_zones (minor, mddev))
- {
- vfree(data);
- return 1;
- }
-
- nb_zone=data->nr_zones=
- md_size[minor]/data->smallest->size +
- (md_size[minor]%data->smallest->size ? 1 : 0);
-
- printk ("raid0 : Allocating %ld bytes for hash.\n",(long)sizeof(struct raid0_hash)*nb_zone);
- if ((data->hash_table=vmalloc (sizeof (struct raid0_hash)*nb_zone)) == NULL)
- {
- vfree(data->strip_zone);
- vfree(data);
- return 1;
- }
- size=data->strip_zone[cur].size;
-
- i=0;
- while (cur<data->nr_strip_zones)
- {
- data->hash_table[i].zone0=data->strip_zone+cur;
-
- if (size>=data->smallest->size)/* If we completely fill the slot */
- {
- data->hash_table[i++].zone1=NULL;
- size-=data->smallest->size;
-
- if (!size)
- {
- if (++cur==data->nr_strip_zones) continue;
- size=data->strip_zone[cur].size;
- }
-
- continue;
- }
-
- if (++cur==data->nr_strip_zones) /* Last dev, set unit1 as NULL */
- {
- data->hash_table[i].zone1=NULL;
- continue;
- }
-
- zone0_size=size; /* Here, we use a 2nd dev to fill the slot */
- size=data->strip_zone[cur].size;
- data->hash_table[i++].zone1=data->strip_zone+cur;
- size-=(data->smallest->size - zone0_size);
- }
-
- return (0);
+ int cur=0, i=0, size, zone0_size, nb_zone;
+ raid0_conf_t *conf;
+
+ MOD_INC_USE_COUNT;
+
+ conf = vmalloc(sizeof (raid0_conf_t));
+ if (!conf)
+ goto out;
+ mddev->private = (void *)conf;
+
+ if (md_check_ordering(mddev)) {
+ printk("raid0: disks are not ordered, aborting!\n");
+ goto out_free_conf;
+ }
+
+ if (create_strip_zones (mddev))
+ goto out_free_conf;
+
+ printk("raid0 : md_size is %d blocks.\n", md_size[mdidx(mddev)]);
+ printk("raid0 : conf->smallest->size is %d blocks.\n", conf->smallest->size);
+ nb_zone = md_size[mdidx(mddev)]/conf->smallest->size +
+ (md_size[mdidx(mddev)] % conf->smallest->size ? 1 : 0);
+ printk("raid0 : nb_zone is %d.\n", nb_zone);
+ conf->nr_zones = nb_zone;
+
+ printk("raid0 : Allocating %d bytes for hash.\n",
+ sizeof(struct raid0_hash)*nb_zone);
+
+ conf->hash_table = vmalloc (sizeof (struct raid0_hash)*nb_zone);
+ if (!conf->hash_table)
+ goto out_free_zone_conf;
+ size = conf->strip_zone[cur].size;
+
+ i = 0;
+ while (cur < conf->nr_strip_zones) {
+ conf->hash_table[i].zone0 = conf->strip_zone + cur;
+
+ /*
+ * If we completely fill the slot
+ */
+ if (size >= conf->smallest->size) {
+ conf->hash_table[i++].zone1 = NULL;
+ size -= conf->smallest->size;
+
+ if (!size) {
+ if (++cur == conf->nr_strip_zones)
+ continue;
+ size = conf->strip_zone[cur].size;
+ }
+ continue;
+ }
+ if (++cur == conf->nr_strip_zones) {
+ /*
+ * Last dev, set unit1 as NULL
+ */
+ conf->hash_table[i].zone1=NULL;
+ continue;
+ }
+
+ /*
+ * Here we use a 2nd dev to fill the slot
+ */
+ zone0_size = size;
+ size = conf->strip_zone[cur].size;
+ conf->hash_table[i++].zone1 = conf->strip_zone + cur;
+ size -= (conf->smallest->size - zone0_size);
+ }
+ return 0;
+
+out_free_zone_conf:
+ vfree(conf->strip_zone);
+ conf->strip_zone = NULL;
+
+out_free_conf:
+ vfree(conf);
+ mddev->private = NULL;
+out:
+ MOD_DEC_USE_COUNT;
+ return 1;
}
-
-static int raid0_stop (int minor, struct md_dev *mddev)
+static int raid0_stop (mddev_t *mddev)
{
- struct raid0_data *data=(struct raid0_data *) mddev->private;
+ raid0_conf_t *conf = mddev_to_conf(mddev);
- vfree (data->hash_table);
- vfree (data->strip_zone);
- vfree (data);
+ vfree (conf->hash_table);
+ conf->hash_table = NULL;
+ vfree (conf->strip_zone);
+ conf->strip_zone = NULL;
+ vfree (conf);
+ mddev->private = NULL;
- MOD_DEC_USE_COUNT;
- return 0;
+ MOD_DEC_USE_COUNT;
+ return 0;
}
/*
* Of course, those facts may not be valid anymore (and surely won't...)
* Hey guys, there's some work out there ;-)
*/
-static int raid0_map (struct md_dev *mddev, kdev_t *rdev,
- unsigned long *rsector, unsigned long size)
+static int raid0_make_request (mddev_t *mddev, int rw, struct buffer_head * bh)
{
- struct raid0_data *data=(struct raid0_data *) mddev->private;
- static struct raid0_hash *hash;
- struct strip_zone *zone;
- struct real_dev *tmp_dev;
- int blk_in_chunk, factor, chunk, chunk_size;
- long block, rblock;
-
- factor=FACTOR(mddev);
- chunk_size=(1UL << FACTOR_SHIFT(factor));
- block=*rsector >> 1;
- hash=data->hash_table+(block/data->smallest->size);
-
- if (hash - data->hash_table > data->nr_zones)
- {
- printk(KERN_DEBUG "raid0_map: invalid block %ul\n", block);
- return -1;
- }
-
- /* Sanity check */
- if ((chunk_size*2)<(*rsector % (chunk_size*2))+size)
- {
- printk ("raid0_convert : can't convert block across chunks or bigger than %dk %ld %ld\n", chunk_size, *rsector, size);
- return (-1);
- }
-
- if (block >= (hash->zone0->size +
- hash->zone0->zone_offset))
- {
- if (!hash->zone1)
- {
- printk ("raid0_convert : hash->zone1==NULL for block %ld\n", block);
- return (-1);
- }
+ unsigned long size = bh->b_size >> 10;
+ raid0_conf_t *conf = mddev_to_conf(mddev);
+ struct raid0_hash *hash;
+ struct strip_zone *zone;
+ mdk_rdev_t *tmp_dev;
+ int blk_in_chunk, chunksize_bits, chunk, chunk_size;
+ long block, rblock;
+
+ chunk_size = mddev->param.chunk_size >> 10;
+ chunksize_bits = ffz(~chunk_size);
+ block = bh->b_blocknr * size;
+ hash = conf->hash_table + block / conf->smallest->size;
+
+ /* Sanity check */
+ if (chunk_size < (block % chunk_size) + size)
+ goto bad_map;
+
+ if (!hash)
+ goto bad_hash;
+
+ if (!hash->zone0)
+ goto bad_zone0;
+
+ if (block >= (hash->zone0->size + hash->zone0->zone_offset)) {
+ if (!hash->zone1)
+ goto bad_zone1;
+ zone = hash->zone1;
+ } else
+ zone = hash->zone0;
- zone=hash->zone1;
- }
- else
- zone=hash->zone0;
-
- blk_in_chunk=block & (chunk_size -1);
- chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
- tmp_dev=zone->dev[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
- rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;
-
- *rdev=tmp_dev->dev;
- *rsector=rblock<<1;
-
- return (0);
+ blk_in_chunk = block & (chunk_size -1);
+ chunk = (block - zone->zone_offset) / (zone->nb_dev << chunksize_bits);
+ tmp_dev = zone->dev[(block >> chunksize_bits) % zone->nb_dev];
+ rblock = (chunk << chunksize_bits) + blk_in_chunk + zone->dev_offset;
+
+ /*
+ * Important, at this point we are not guaranteed to be the only
+ * CPU modifying b_rdev and b_rsector! Only __make_request() later
+ * on serializes the IO. So in 2.4 we must never write temporary
+ * values to bh->b_rdev, like 2.2 and 2.0 did.
+ */
+ bh->b_rdev = tmp_dev->dev;
+ bh->b_rsector = rblock << 1;
+
+ generic_make_request(rw, bh);
+
+ return 0;
+
+bad_map:
+ printk ("raid0_make_request bug: can't convert block across chunks or bigger than %dk %ld %ld\n", chunk_size, bh->b_rsector, size);
+ return -1;
+bad_hash:
+ printk("raid0_make_request bug: hash==NULL for block %ld\n", block);
+ return -1;
+bad_zone0:
+ printk ("raid0_make_request bug: hash->zone0==NULL for block %ld\n", block);
+ return -1;
+bad_zone1:
+ printk ("raid0_make_request bug: hash->zone1==NULL for block %ld\n", block);
+ return -1;
}
-
-static int raid0_status (char *page, int minor, struct md_dev *mddev)
+static int raid0_status (char *page, mddev_t *mddev)
{
- int sz=0;
+ int sz = 0;
#undef MD_DEBUG
#ifdef MD_DEBUG
- int j, k;
- struct raid0_data *data=(struct raid0_data *) mddev->private;
+ int j, k;
+ raid0_conf_t *conf = mddev_to_conf(mddev);
- sz+=sprintf (page+sz, " ");
- for (j=0; j<data->nr_zones; j++)
- {
- sz+=sprintf (page+sz, "[z%d",
- data->hash_table[j].zone0-data->strip_zone);
- if (data->hash_table[j].zone1)
- sz+=sprintf (page+sz, "/z%d] ",
- data->hash_table[j].zone1-data->strip_zone);
- else
- sz+=sprintf (page+sz, "] ");
- }
+ sz += sprintf(page + sz, " ");
+ for (j = 0; j < conf->nr_zones; j++) {
+ sz += sprintf(page + sz, "[z%d",
+ conf->hash_table[j].zone0 - conf->strip_zone);
+ if (conf->hash_table[j].zone1)
+ sz += sprintf(page+sz, "/z%d] ",
+ conf->hash_table[j].zone1 - conf->strip_zone);
+ else
+ sz += sprintf(page+sz, "] ");
+ }
- sz+=sprintf (page+sz, "\n");
+ sz += sprintf(page + sz, "\n");
- for (j=0; j<data->nr_strip_zones; j++)
- {
- sz+=sprintf (page+sz, " z%d=[", j);
- for (k=0; k<data->strip_zone[j].nb_dev; k++)
- sz+=sprintf (page+sz, "%s/",
- partition_name(data->strip_zone[j].dev[k]->dev));
- sz--;
- sz+=sprintf (page+sz, "] zo=%d do=%d s=%d\n",
- data->strip_zone[j].zone_offset,
- data->strip_zone[j].dev_offset,
- data->strip_zone[j].size);
- }
+ for (j = 0; j < conf->nr_strip_zones; j++) {
+ sz += sprintf(page + sz, " z%d=[", j);
+ for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
+ sz += sprintf (page+sz, "%s/", partition_name(
+ conf->strip_zone[j].dev[k]->dev));
+ sz--;
+ sz += sprintf (page+sz, "] zo=%d do=%d s=%d\n",
+ conf->strip_zone[j].zone_offset,
+ conf->strip_zone[j].dev_offset,
+ conf->strip_zone[j].size);
+ }
#endif
- sz+=sprintf (page+sz, " %dk chunks", 1<<FACTOR_SHIFT(FACTOR(mddev)));
- return sz;
+ sz += sprintf(page + sz, " %dk chunks", mddev->param.chunk_size/1024);
+ return sz;
}
-
-static struct md_personality raid0_personality=
+static mdk_personality_t raid0_personality=
{
- "raid0",
- raid0_map,
- NULL, /* no special make_request */
- NULL, /* no special end_request */
- raid0_run,
- raid0_stop,
- raid0_status,
- NULL, /* no ioctls */
- 0,
- NULL, /* no error_handler */
- NULL, /* hot_add_disk */
- NULL, /* hot_remove_disk */
- NULL /* mark_spare */
+ "raid0",
+ NULL, /* no special map */
+ raid0_make_request,
+ NULL, /* no special end_request */
+ raid0_run,
+ raid0_stop,
+ raid0_status,
+ NULL, /* no ioctls */
+ 0,
+ NULL, /* no error_handler */
+ NULL, /* no diskop */
+ NULL, /* no stop resync */
+ NULL /* no restart resync */
};
-
#ifndef MODULE
void raid0_init (void)
{
- register_md_personality (RAID0, &raid0_personality);
+ register_md_personality (RAID0, &raid0_personality);
}
#else
int init_module (void)
{
- return (register_md_personality (RAID0, &raid0_personality));
+ return (register_md_personality (RAID0, &raid0_personality));
}
void cleanup_module (void)
{
- unregister_md_personality (RAID0);
+ unregister_md_personality (RAID0);
}
#endif
+
if (result[1] == 'D')
{
- sprintf(msg, " mcd: Mitsumi Double Speed CD-ROM at port=0x%x,"
- " irq=%d\n", mcd_port, mcd_irq);
MCMD_DATA_READ = MCMD_2X_READ;
-
- mcd_info.speed = 2;
/* Added flag to drop to 1x speed if too many errors */
mcdDouble = 1;
- } else {
- sprintf(msg, " mcd: Mitsumi Single Speed CD-ROM at port=0x%x,"
- " irq=%d\n", mcd_port, mcd_irq);
- mcd_info.speed = 2;
- }
+ } else
+ mcd_info.speed = 1;
+ sprintf(msg, " mcd: Mitsumi %s Speed CD-ROM at port=0x%x,"
+ " irq=%d\n", mcd_info.speed == 1 ? "Single" : "Double", mcd_port, mcd_irq);
request_region(mcd_port, 4, "mcd");
* month ago...
*
* 14 Dec 1998, Andrea Arcangeli
+ *
+ * Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl)
*/
#include <linux/module.h>
/* if you have more than 3 printers, remember to increase LP_NO */
#define LP_NO 3
+/* ROUND_UP macro from fs/select.c */
+#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
+
struct lp_struct lp_table[LP_NO];
static unsigned int lp_count = 0;
ssize_t retv = 0;
ssize_t written;
size_t copy_size = count;
+ long old_to;
#ifdef LP_STATS
if (jiffies-lp_table[minor].lastcall > LP_TIME(minor))
/* Go to compatibility mode. */
parport_negotiate (port, IEEE1284_MODE_COMPAT);
+ old_to = parport_set_timeout (lp_table[minor].dev,
+ lp_table[minor].timeout);
+
do {
/* Write the data. */
written = parport_write (port, kbuf, copy_size);
}
} while (count > 0);
+ /* Not really necessary, but polite. */
+ parport_set_timeout (lp_table[minor].dev, old_to);
+
lp_parport_release (minor);
up (&lp_table[minor].port_mutex);
if ((LP_F(minor) & LP_EXIST) == 0)
return -ENODEV;
switch ( cmd ) {
+ struct timeval par_timeout;
+ long to_jiffies;
+
case LPTIME:
LP_TIME(minor) = arg * HZ/100;
break;
if (copy_to_user((int *) arg, &status, sizeof(int)))
return -EFAULT;
break;
+
+ case LPSETTIMEOUT:
+ if (copy_from_user (&par_timeout,
+ (struct timeval *) arg,
+ sizeof (struct timeval))) {
+ return -EFAULT;
+ }
+ /* Convert to jiffies, place in lp_table */
+ if ((par_timeout.tv_sec < 0) ||
+ (par_timeout.tv_usec < 0)) {
+ return -EINVAL;
+ }
+ to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
+ to_jiffies += par_timeout.tv_sec * (long) HZ;
+ if (to_jiffies <= 0) {
+ return -EINVAL;
+ }
+ lp_table[minor].timeout = to_jiffies;
+ break;
+
default:
retval = -EINVAL;
}
do {
/* Write the data, converting LF->CRLF as we go. */
ssize_t canwrite = count;
- char *line = strchr (s, '\n');
- if (line)
- canwrite = line - s;
+ char *lf = strchr (s, '\n');
+ if (lf)
+ canwrite = lf - s;
- written = parport_write (port, s, canwrite);
- if (written <= 0)
- continue;
+ if (canwrite > 0) {
+ written = parport_write (port, s, canwrite);
+
+ if (written <= 0)
+ continue;
+
+ s += written;
+ count -= written;
+ canwrite -= written;
+ }
- s += written;
- count -= written;
- if (line) {
+ if (lf && canwrite <= 0) {
const char *crlf = "\r\n";
int i = 2;
/* Dodge the original '\n', and put '\r\n' instead. */
s++;
count--;
- while (i) {
+ do {
written = parport_write (port, crlf, i);
if (written > 0)
i -= written, crlf += written;
- }
+ } while (i > 0 && (CONSOLE_LP_STRICT || written > 0));
}
} while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
}
static struct console lpcons = {
- "lp0",
+ "lp",
lp_console_write,
NULL,
lp_console_device,
NULL,
NULL,
CON_PRINTBUFFER,
- -1,
+ 0,
0,
NULL
};
#ifdef CONFIG_LP_CONSOLE
if (!nr) {
if (port->modes & PARPORT_MODE_SAFEININT) {
+ MOD_INC_USE_COUNT;
register_console (&lpcons);
printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
} else
init_waitqueue_head (&lp_table[i].waitq);
init_waitqueue_head (&lp_table[i].dataq);
init_MUTEX (&lp_table[i].port_mutex);
+ lp_table[i].timeout = 10 * HZ;
}
if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) {
* This is the code behind /dev/parport* -- it allows a user-space
* application to use the parport subsystem.
*
- * Copyright (C) 1998-9 Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Copyright (C) 1998-2000 Tim Waugh <tim@cyberelk.demon.co.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
if (minor >= PARPORT_MAX)
return -ENXIO;
+ MOD_INC_USE_COUNT;
+
pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
- if (!pp)
+ if (!pp) {
+ MOD_DEC_USE_COUNT;
return -ENOMEM;
+ }
pp->state.mode = IEEE1284_MODE_COMPAT;
pp->state.phase = init_phase (pp->state.mode);
pp->pdev = NULL;
file->private_data = pp;
- MOD_INC_USE_COUNT;
return 0;
}
#include <asm/dma.h>
#include <asm/byteorder.h>
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-#endif
+#include <linux/pm.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
static int nsc_ircc_net_close(struct net_device *dev);
static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
-#ifdef CONFIG_APM
-static int nsc_ircc_apmproc(apm_event_t event);
-#endif /* CONFIG_APM */
+static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
/*
* Function nsc_ircc_init ()
}
}
-#ifdef CONFIG_APM
- /* Make sure at least one chip was found before enabling APM */
- if (ret == 0)
- apm_register_callback(nsc_ircc_apmproc);
-#endif /* CONFIG_APM */
return ret;
}
{
int i;
-#ifdef CONFIG_APM
- apm_unregister_callback(nsc_ircc_apmproc);
-#endif /* CONFIG_APM */
+ pm_unregister_all(nsc_ircc_pmproc);
for (i=0; i < 4; i++) {
if (dev_self[i])
{
struct net_device *dev;
struct nsc_ircc_cb *self;
+ struct pm_dev *pmdev;
int ret;
int err;
self->io.dongle_id = dongle_id;
nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);
+ pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, nsc_ircc_pmproc);
+ if (pmdev)
+ pmdev->data = self;
+
return 0;
}
return &self->stats;
}
-#ifdef CONFIG_APM
static void nsc_ircc_suspend(struct nsc_ircc_cb *self)
{
MESSAGE("%s, Suspending\n", driver_name);
self->io.suspended = 0;
}
-static int nsc_ircc_apmproc(apm_event_t event)
+static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
{
- static int down = 0; /* Filter out double events */
- int i;
-
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- if (!down) {
- for (i=0; i<4; i++) {
- if (dev_self[i])
- nsc_ircc_suspend(dev_self[i]);
- }
- }
- down = 1;
- break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- if (down) {
- for (i=0; i<4; i++) {
- if (dev_self[i])
- nsc_ircc_wakeup(dev_self[i]);
- }
- }
- down = 0;
- break;
- }
+ struct nsc_ircc_cb *self = (struct nsc_ircc_cb*) dev->data;
+ if (self) {
+ switch (rqst) {
+ case PM_SUSPEND:
+ nsc_ircc_suspend(self);
+ break;
+ case PM_RESUME:
+ nsc_ircc_wakeup(self);
+ break;
+ }
+ }
return 0;
}
-#endif /* CONFIG_APM */
#ifdef MODULE
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
#include <asm/dma.h>
#include <asm/byteorder.h>
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-#endif
+#include <linux/pm.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
static int ircc_net_open(struct net_device *dev);
static int ircc_net_close(struct net_device *dev);
-#ifdef CONFIG_APM
-static int ircc_apmproc(apm_event_t event);
-#endif /* CONFIG_APM */
+static int ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
/* These are the currently known SMC chipsets */
static smc_chip_t chips[] =
irport_start(self->irport);
+ self->pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, ircc_pmproc);
+ if (self->pmdev)
+ self->pmdev->data = self;
+
return 0;
}
return 0;
}
-#ifdef CONFIG_APM
static void ircc_suspend(struct ircc_cb *self)
{
int i = 10;
MESSAGE("%s, Waking up\n", driver_name);
}
-static int ircc_apmproc(apm_event_t event)
+static int ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
{
- static int down = 0; /* Filter out double events */
- int i;
-
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- if (!down) {
- for (i=0; i<4; i++) {
- if (dev_self[i])
- ircc_suspend(dev_self[i]);
- }
- }
- down = 1;
- break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- if (down) {
- for (i=0; i<4; i++) {
- if (dev_self[i])
- ircc_wakeup(dev_self[i]);
- }
- }
- down = 0;
- break;
- }
+ struct ircc_cb *self = (struct ircc_cb*) dev->data;
+ if (self) {
+ switch (rqst) {
+ case PM_SUSPEND:
+ ircc_suspend(self);
+ break;
+ case PM_RESUME:
+ ircc_wakeup(self);
+ break;
+ }
+ }
return 0;
}
-#endif /* CONFIG_APM */
#ifdef MODULE
MODULE_AUTHOR("Thomas Davis <tadavis@jps.net>");
#include <net/irda/irlap_frame.h>
#include <net/irda/irda_device.h>
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-#endif
+#include <linux/pm.h>
+static int toshoboe_pmproc (struct pm_dev *dev, pm_request_t rqst, void *data);
#include <net/irda/toshoboe.h>
{
struct toshoboe_cb *self;
struct net_device *dev;
+ struct pm_dev *pmdev;
int i = 0;
int ok = 0;
int err;
return -1;
}
+ pmdev = pm_register (PM_PCI_DEV, PM_PCI_ID(pci_dev), toshoboe_pmproc);
+ if (pmdev)
+ pmdev->data = self;
+
printk (KERN_WARNING "ToshOboe: Using ");
#ifdef ONETASK
printk ("single");
return (0);
}
-#ifdef CONFIG_APM
static void
toshoboe_gotosleep (struct toshoboe_cb *self)
{
}
static int
-toshoboe_apmproc (apm_event_t event)
+toshoboe_pmproc (struct pm_dev *dev, pm_request_t rqst, void *data)
{
- static int down = 0; /*Filter out double events */
- int i;
-
- switch (event)
- {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- if (!down)
- {
-
- for (i = 0; i < 4; i++)
- {
- if (dev_self[i])
- toshoboe_gotosleep (dev_self[i]);
- }
-
- }
- down = 1;
- break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- if (down)
- {
-
-
-
- for (i = 0; i < 4; i++)
- {
- if (dev_self[i])
- toshoboe_wakeup (dev_self[i]);
- }
-
-
-
- }
- down = 0;
- break;
- }
+ struct toshoboe_cb *self = (struct toshoboe_cb *) dev->data;
+ if (self) {
+ switch (rqst) {
+ case PM_SUSPEND:
+ toshoboe_gotosleep (self);
+ break;
+ case PM_RESUME:
+ toshoboe_wakeup (self);
+ break;
+ }
+ }
return 0;
}
-#endif
-
int __init toshoboe_init (void)
{
struct pci_dev *pci_dev = NULL;
pci_dev->irq);
if (!toshoboe_open (pci_dev))
- found++;
+ found++;
}
}
if (found)
{
-#ifdef CONFIG_APM
- apm_register_callback (toshoboe_apmproc);
-#endif
return 0;
}
toshoboe_close (dev_self[i]);
}
-#ifdef CONFIG_APM
- apm_unregister_callback (toshoboe_apmproc);
-#endif
-
+ pm_unregister_all (toshoboe_pmproc);
}
#include "cs_internal.h"
#include "rsrc_mgr.h"
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-static int handle_apm_event(apm_event_t event);
-#endif
+#include <linux/pm.h>
+static int handle_pm_event(struct pm_dev *dev, pm_request_t rqst, void *data);
#ifdef PCMCIA_DEBUG
int pc_debug = PCMCIA_DEBUG;
#else
#define CB_OPT ""
#endif
-#ifdef CONFIG_APM
+#if defined(CONFIG_APM) || defined(CONFIG_ACPI)
#define APM_OPT " [apm]"
#else
#define APM_OPT ""
#endif
#if !defined(CONFIG_CARDBUS) && !defined(CONFIG_PCI) && \
- !defined(CONFIG_APM)
+ !defined(CONFIG_APM) && !defined(CONFIG_ACPI)
#define OPTIONS " none"
#else
#define OPTIONS PCI_OPT CB_OPT APM_OPT
static int io_speed = 0; /* ns */
/* Optional features */
-#ifdef CONFIG_APM
+#if defined(CONFIG_APM) || defined(CONFIG_ACPI)
static int do_apm = 1;
MODULE_PARM(do_apm, "i");
+#else
+static int do_apm = 0;
#endif
MODULE_PARM(setup_delay, "i");
======================================================================*/
-#ifdef CONFIG_APM
-static int handle_apm_event(apm_event_t event)
+static int handle_pm_event(struct pm_dev *dev, pm_request_t rqst, void *data)
{
int i, stat;
socket_info_t *s;
- static int down = 0;
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
+ switch (rqst) {
+ case PM_SUSPEND:
DEBUG(1, "cs: received suspend notification\n");
- if (down) {
- printk(KERN_DEBUG "cs: received extra suspend event\n");
- break;
- }
- down = 1;
for (i = 0; i < sockets; i++) {
s = socket_table[i];
if ((s->state & SOCKET_PRESENT) &&
}
}
break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
+ case PM_RESUME:
DEBUG(1, "cs: received resume notification\n");
- if (!down) {
- printk(KERN_DEBUG "cs: received bogus resume event\n");
- break;
- }
- down = 0;
for (i = 0; i < sockets; i++) {
s = socket_table[i];
/* Do this just to reinitialize the socket */
break;
}
return 0;
-} /* handle_apm_event */
-#endif
+} /* handle_pm_event */
/*======================================================================
#endif
printk(KERN_INFO " %s\n", options);
DEBUG(0, "%s\n", version);
-#ifdef CONFIG_APM
if (do_apm)
- apm_register_callback(&handle_apm_event);
-#endif
+ pm_register(PM_SYS_DEV, PM_SYS_PCMCIA, handle_pm_event);
#ifdef CONFIG_PROC_FS
proc_pccard = proc_mkdir("pccard", proc_bus);
#endif
remove_proc_entry("pccard", proc_bus);
}
#endif
-#ifdef CONFIG_APM
if (do_apm)
- apm_unregister_callback(&handle_apm_event);
-#endif
+ pm_unregister_all(handle_pm_event);
release_resource_db();
}
- Initial Beta Release.
*****************************************************************************/
-
-#include <linux/config.h> /* for CONFIG_PCI */
#ifdef MODULE
#include <linux/module.h>
#endif
#include <linux/soundcard.h>
#include <linux/pci.h>
#include <linux/bitops.h>
-#include <linux/apm_bios.h>
+#include <linux/pm.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
return 0;
}
-#ifdef CONFIG_APM
-
-static int solo1_apm_callback(apm_event_t event)
+static int solo1_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
{
- struct solo1_state *s;
-
- switch(event) {
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- case APM_STANDBY_RESUME:
- for(s = devs ; s ; s = s->next)
+ struct solo1_state *s = (struct solo1_state*) dev->data;
+ if (s) {
+ switch(rqst) {
+ case PM_RESUME:
setup_solo1(s);
- break;
+ break;
- default:
- for(s = devs ; s ; s = s->next) {
- outb(0, s->iobase+6);
- /* DMA master clear */
- outb(0, s->ddmabase+0xd);
- /* reset sequencer and FIFO */
- outb(3, s->sbbase+6);
- /* turn off DDMA controller address space */
- pci_write_config_word(s->dev, 0x60, 0);
- }
+ case PM_SUSPEND:
+ outb(0, s->iobase+6);
+ /* DMA master clear */
+ outb(0, s->ddmabase+0xd);
+ /* reset sequencer and FIFO */
+ outb(3, s->sbbase+6);
+ /* turn off DDMA controller address space */
+ pci_write_config_word(s->dev, 0x60, 0);
+ break;
+ }
}
return 0;
}
-#endif
-
#define RSRCISIOREGION(dev,num) ((dev)->resource[(num)].start != 0 && \
((dev)->resource[(num)].flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
static int solo1_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid)
{
struct solo1_state *s;
+ struct pm_dev *pmdev;
if (!RSRCISIOREGION(pcidev, 0) ||
!RSRCISIOREGION(pcidev, 1) ||
pcidev->dma_mask = 0xffffff; /* pessimistic; play can handle 32bit addrs */
/* put it into driver list */
list_add_tail(&s->devs, &devs);
+
+ pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), solo1_pm_callback);
+ if (pmdev)
+ pmdev->data = s;
+
return 0;
err:
printk(KERN_INFO "solo1: version v0.13 time " __TIME__ " " __DATE__ "\n");
if (!pci_register_driver(&solo1_driver))
return -ENODEV;
-#ifdef CONFIG_APM
- apm_register_callback(solo1_apm_callback);
-#endif
return 0;
}
{
printk(KERN_INFO "solo1: unloading\n");
pci_unregister_driver(&solo1_driver);
-#ifdef CONFIG_APM
- apm_unregister_callback(solo1_apm_callback);
-#endif
+ pm_unregister_all(solo1_pm_callback);
}
/* --------------------------------------------------------------------- */
#include <asm/uaccess.h>
#include <asm/hardirq.h>
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-static int maestro_apm_callback(apm_event_t ae);
+#include <linux/pm.h>
+static int maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *d);
static int in_suspend=0;
wait_queue_head_t suspend_queue;
static void check_suspend(void);
#define CHECK_SUSPEND check_suspend();
-#else
-#define CHECK_SUSPEND
-#endif
#include "maestro.h"
struct ess_state channels[MAX_DSPS];
u16 maestro_map[NR_IDRS]; /* Register map */
-#ifdef CONFIG_APM
/* we have to store this junk so that we can come back from a
suspend */
u16 apu_map[NR_APUS][NR_APU_REGS]; /* contents of apu regs */
-#endif
/* this locks around the physical registers on the card */
spinlock_t lock;
printk("BAD CHANNEL %d.\n",channel);
else
channel = s->apu[channel];
-#ifdef CONFIG_APM
/* store based on real hardware apu/reg */
s->card->apu_map[channel][reg]=data;
-#endif
}
reg|=(channel<<4);
goto rec_return_free;
}
if (!interruptible_sleep_on_timeout(&s->dma_adc.wait, HZ)) {
-#ifdef CONFIG_APM
if(! in_suspend)
-#endif
printk(KERN_DEBUG "maestro: read: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
s->dma_adc.dmasize, s->dma_adc.fragsize, s->dma_adc.count,
s->dma_adc.hwptr, s->dma_adc.swptr);
goto return_free;
}
if (!interruptible_sleep_on_timeout(&s->dma_dac.wait, HZ)) {
-#ifdef CONFIG_APM
if(! in_suspend)
-#endif
printk(KERN_DEBUG "maestro: write: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
s->dma_dac.dmasize, s->dma_dac.fragsize, s->dma_dac.count,
s->dma_dac.hwptr, s->dma_dac.swptr);
int i;
struct ess_card *card;
struct ess_state *ess;
+ struct pm_dev *pmdev;
int num = 0;
/* don't pick up weird modem maestros */
memset(card, 0, sizeof(*card));
memcpy(&card->pcidev,pcidev,sizeof(card->pcidev));
-#ifdef CONFIG_APM
- if (apm_register_callback(maestro_apm_callback)) {
- printk(KERN_WARNING "maestro: apm suspend might not work.\n");
- }
-#endif
+ pmdev = pm_register(PM_PCI_DEV,
+ PM_PCI_ID(pcidev),
+ maestro_pm_callback);
+ if (pmdev)
+ pmdev->data = card;
card->iobase = iobase;
card->card_type = card_type;
printk(KERN_WARNING "maestro: clipping dsps_order to %d\n",dsps_order);
}
-#ifdef CONFIG_APM
init_waitqueue_head(&suspend_queue);
-#endif
/*
* Find the ESS Maestro 2.
{
struct ess_card *s;
-#ifdef CONFIG_APM
- apm_unregister_callback(maestro_apm_callback);
-#endif
+ pm_unregister_all(maestro_pm_callback);
+
while ((s = devs)) {
int i;
devs = devs->next;
}
#endif /* MODULE */
-#ifdef CONFIG_APM
void
check_suspend(void)
}
static int
-maestro_suspend(void)
+maestro_suspend(struct ess_card *card)
{
- struct ess_card *card;
unsigned long flags;
+ int i,j;
save_flags(flags);
cli();
- for (card = devs; card ; card = card->next) {
- int i,j;
-
- M_printk("maestro: apm in dev %p\n",card);
+ M_printk("maestro: pm in dev %p\n",card);
- for(i=0;i<NR_DSPS;i++) {
- struct ess_state *s = &card->channels[i];
+ for(i=0;i<NR_DSPS;i++) {
+ struct ess_state *s = &card->channels[i];
- if(s->dev_audio == -1)
- continue;
+ if(s->dev_audio == -1)
+ continue;
+
+ M_printk("maestro: stopping apus for device %d\n",i);
+ stop_dac(s);
+ stop_adc(s);
+ for(j=0;j<6;j++)
+ card->apu_map[s->apu[j]][5]=apu_get_register(s,j,5);
+
+ }
- M_printk("maestro: stopping apus for device %d\n",i);
- stop_dac(s);
- stop_adc(s);
- for(j=0;j<6;j++)
- card->apu_map[s->apu[j]][5]=apu_get_register(s,j,5);
+ /* get rid of interrupts? */
+ if( card->dsps_open > 0)
+ stop_bob(&card->channels[0]);
- }
-
- /* get rid of interrupts? */
- if( card->dsps_open > 0)
- stop_bob(&card->channels[0]);
- }
- in_suspend=1;
+ in_suspend=1;
restore_flags(flags);
return 0;
}
static int
-maestro_resume(void)
+maestro_resume(struct ess_card *card)
{
- struct ess_card *card;
unsigned long flags;
+ int i;
save_flags(flags);
cli();
M_printk("maestro: resuming\n");
/* first lets just bring everything back. .*/
- for (card = devs; card ; card = card->next) {
- int i;
- M_printk("maestro: apm in dev %p\n",card);
-
- maestro_config(card);
- /* need to restore the base pointers.. */
- if(card->dmapages)
- set_base_registers(&card->channels[0],card->dmapages);
-
- mixer_push_state(card);
-
- for(i=0;i<NR_DSPS;i++) {
- struct ess_state *s = &card->channels[i];
- int chan,reg;
-
- if(s->dev_audio == -1)
- continue;
-
- for(chan = 0 ; chan < 6 ; chan++) {
- wave_set_register(s,s->apu[chan]<<3,s->apu_base[chan]);
- for(reg = 1 ; reg < NR_APU_REGS ; reg++)
- apu_set_register(s,chan,reg,s->card->apu_map[s->apu[chan]][reg]);
- }
- for(chan = 0 ; chan < 6 ; chan++)
- apu_set_register(s,chan,0,s->card->apu_map[s->apu[chan]][0] & 0xFF0F);
- }
- }
+ M_printk("maestro: pm in dev %p\n",card);
+
+ maestro_config(card);
+ /* need to restore the base pointers.. */
+ if(card->dmapages)
+ set_base_registers(&card->channels[0],card->dmapages);
+
+ mixer_push_state(card);
+
+ for(i=0;i<NR_DSPS;i++) {
+ struct ess_state *s = &card->channels[i];
+ int chan,reg;
+
+ if(s->dev_audio == -1)
+ continue;
+
+ for(chan = 0 ; chan < 6 ; chan++) {
+ wave_set_register(s,s->apu[chan]<<3,s->apu_base[chan]);
+ for(reg = 1 ; reg < NR_APU_REGS ; reg++)
+ apu_set_register(s,chan,reg,s->card->apu_map[s->apu[chan]][reg]);
+ }
+ for(chan = 0 ; chan < 6 ; chan++)
+ apu_set_register(s,chan,0,s->card->apu_map[s->apu[chan]][0] & 0xFF0F);
+ }
/* now we flip on the music */
- for (card = devs; card ; card = card->next) {
- int i;
-
- M_printk("maestro: apm in dev %p\n",card);
-
- for(i=0;i<NR_DSPS;i++) {
- struct ess_state *s = &card->channels[i];
-
- /* these use the apu_mode, and can handle
- spurious calls */
- start_dac(s);
- start_adc(s);
- }
- if( card->dsps_open > 0)
- start_bob(&card->channels[0]);
- }
+ M_printk("maestro: pm in dev %p\n",card);
+
+ for(i=0;i<NR_DSPS;i++) {
+ struct ess_state *s = &card->channels[i];
+
+ /* these use the apu_mode, and can handle
+ spurious calls */
+ start_dac(s);
+ start_adc(s);
+ }
+ if( card->dsps_open > 0)
+ start_bob(&card->channels[0]);
restore_flags(flags);
}
int
-maestro_apm_callback(apm_event_t ae) {
-
- M_printk("maestro: apm event received: 0x%x\n",ae);
-
- switch(ae) {
- case APM_SYS_SUSPEND:
- case APM_CRITICAL_SUSPEND:
- case APM_USER_SUSPEND:
- maestro_suspend();break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- case APM_STANDBY_RESUME:
- maestro_resume();break;
- default: break;
- }
+maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) {
+ struct ess_card *card = (struct ess_card*) dev->data;
+ if (card) {
+ M_printk("maestro: pm event received: 0x%x\n", rqst);
+
+ switch (rqst) {
+ case PM_SUSPEND:
+ maestro_suspend(card);
+ break;
+ case PM_RESUME:
+ maestro_resume(card);
+ break;
+ }
+ }
return 0;
}
-#endif
#define __NO_VERSION__
#include <linux/pci.h>
#include <linux/module.h>
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-#endif
+#include <linux/pm.h>
#include "sound_config.h"
#include "soundmodule.h"
#include "nm256.h"
static int nm256_releaseInterrupt (struct nm256_info *card);
static void nm256_interrupt (int irq, void *dev_id, struct pt_regs *dummy);
static void nm256_interrupt_zx (int irq, void *dev_id, struct pt_regs *dummy);
+static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data);
/* These belong in linux/pci.h. */
#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005
nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr)
{
struct nm256_info *card;
+ struct pm_dev *pmdev;
int x;
card = kmalloc (sizeof (struct nm256_info), GFP_KERNEL);
nm256_install_mixer (card);
+ pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), handle_pm_event);
+ if (pmdev)
+ pmdev->data = card;
+
return 1;
}
-#ifdef CONFIG_APM
/*
- * APM event handler, so the card is properly reinitialized after a power
+ * PM event handler, so the card is properly reinitialized after a power
* event.
*/
static int
-handle_apm_event (apm_event_t event)
+handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data)
{
- static int down = 0;
-
- switch (event)
- {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- down++;
+ struct nm256_info *crd = (struct nm256_info*) dev->data;
+ if (crd) {
+ switch (rqst) {
+ case PM_SUSPEND:
break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- if (down)
- {
- struct nm256_info *crd;
-
- down = 0;
- for (crd = nmcard_list; crd != NULL; crd = crd->next_card)
- {
- int playing = crd->playing;
- nm256_full_reset (crd);
- /*
- * A little ugly, but that's ok; pretend the
- * block we were playing is done.
- */
- if (playing)
- DMAbuf_outputintr (crd->dev_for_play, 1);
- }
- }
+ case PM_RESUME:
+ {
+ int playing = crd->playing;
+ nm256_full_reset (crd);
+ /*
+ * A little ugly, but that's ok; pretend the
+ * block we were playing is done.
+ */
+ if (playing)
+ DMAbuf_outputintr (crd->dev_for_play, 1);
+ }
break;
}
+ }
return 0;
}
-#endif
/*
* This loop walks the PCI configuration database and finds where
if (count == 0)
return -ENODEV;
-#ifdef CONFIG_APM
- apm_register_callback (&handle_apm_event);
-#endif
-
printk (KERN_INFO "Done installing NM256 audio driver.\n");
return 0;
}
}
nmcard_list = NULL;
}
-#ifdef CONFIG_APM
- apm_unregister_callback (&handle_apm_event);
-#endif
+ pm_unregister_all (&handle_pm_event);
}
#endif
\f
#include <asm/uaccess.h>
#include <asm/hardirq.h>
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-#endif
-
#include "trident.h"
#include "ac97_codec.h"
/*****************************************************************************/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/miscdevice.h>
#include "uhci.h"
#include "uhci-debug.h"
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-static int handle_apm_event(apm_event_t event);
-#endif
+#include <linux/pm.h>
+static int handle_pm_event(struct pm_dev *dev, pm_request_t rqst, void *data);
static int debug = 1;
MODULE_PARM(debug, "i");
* If we've successfully found a UHCI, now is the time to increment the
* module usage count, and return success..
*/
-static int setup_uhci(int irq, unsigned int io_addr, unsigned int io_size)
+static int setup_uhci(struct pci_dev *dev, int irq, unsigned int io_addr, unsigned int io_size)
{
int retval;
struct uhci *uhci;
if (request_irq(irq, uhci_interrupt, SA_SHIRQ, "usb-uhci", uhci) == 0) {
uhci->irq = irq;
- if (!uhci_start_root_hub(uhci))
+ if (!uhci_start_root_hub(uhci)) {
+ struct pm_dev *pmdev;
+
+ pmdev = pm_register(PM_PCI_DEV,
+ PM_PCI_ID(dev),
+ handle_pm_event);
+ if (pmdev)
+ pmdev->data = uhci;
return 0;
+ }
}
/* Couldn't allocate IRQ if we got here */
continue;
}
- return setup_uhci(dev->irq, io_addr, io_size);
+ return setup_uhci(dev, dev->irq, io_addr, io_size);
}
return -1;
}
-#ifdef CONFIG_APM
-static int handle_apm_event(apm_event_t event)
+static int handle_pm_event(struct pm_dev *dev, pm_request_t rqst, void *data)
{
- static int down = 0;
-
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- if (down) {
- dbg("received extra suspend event");
- break;
- }
- down = 1;
+ switch (rqst) {
+ case PM_SUSPEND:
break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- if (!down) {
- dbg("received bogus resume event");
- break;
- }
- down = 0;
+ case PM_RESUME:
break;
}
return 0;
}
-#endif
int uhci_init(void)
{
if (retval && uhci_list.next == &uhci_list)
goto init_failed;
-#ifdef CONFIG_APM
- apm_register_callback(&handle_apm_event);
-#endif
-
return 0;
init_failed:
void cleanup_module(void)
{
-#ifdef CONFIG_APM
- apm_unregister_callback(&handle_apm_event);
-#endif
+ pm_unregister_all(handle_pm_event);
uhci_cleanup();
}
#endif //MODULE
#include "usb.h"
#include "usb-ohci.h"
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-static int handle_apm_event (apm_event_t event);
-#endif
+#include <linux/pm.h>
+static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data);
#ifdef CONFIG_PMAC_PBOOK
#include <linux/adb.h>
/* Increment the module usage count, start the control thread and
* return success. */
-static int hc_found_ohci (int irq, void * mem_base)
+static int hc_found_ohci (struct pci_dev *dev, int irq, void * mem_base)
{
ohci_t * ohci;
dbg("USB HC found: irq= %d membase= %lx", irq, (unsigned long) mem_base);
usb_register_bus (ohci->bus);
if (request_irq (irq, hc_interrupt, SA_SHIRQ, "ohci-usb", ohci) == 0) {
+ struct pm_dev *pmdev;
+
ohci->irq = irq;
hc_start (ohci);
+
+ pmdev = pm_register (PM_PCI_DEV,
+ PM_PCI_ID(dev),
+ handle_pm_event);
+ if (pmdev)
+ pmdev->data = ohci;
+
return 0;
}
err("request interrupt %d failed", irq);
err("Error mapping OHCI memory");
return -EFAULT;
}
- return hc_found_ohci (dev->irq, (void *) mem_base);
+ return hc_found_ohci (dev, dev->irq, (void *) mem_base);
}
/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_APM
-static int handle_apm_event (apm_event_t event)
+static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data)
{
- static int down = 0;
- ohci_t * ohci;
- struct list_head * ohci_l;
-
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- if (down) {
- dbg("received extra suspend event");
- break;
- }
- for (ohci_l = ohci_hcd_list.next; ohci_l != &ohci_hcd_list; ohci_l = ohci_l->next) {
- ohci = list_entry (ohci_l, ohci_t, ohci_hcd_list);
+ ohci_t * ohci = (ohci_t*) dev->data;
+ if (ohci) {
+ switch (rqst) {
+ case PM_SUSPEND:
dbg("USB-Bus suspend: %p", ohci);
writel (ohci->hc_control = 0xFF, &ohci->regs->control);
- }
- wait_ms (10);
- down = 1;
- break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- if (!down) {
- dbg("received bogus resume event");
+ wait_ms (10);
break;
- }
- for (ohci_l = ohci_hcd_list.next; ohci_l != &ohci_hcd_list; ohci_l = ohci_l->next) {
- ohci = list_entry(ohci_l, ohci_t, ohci_hcd_list);
+ case PM_RESUME:
dbg("USB-Bus resume: %p", ohci);
writel (ohci->hc_control = 0x7F, &ohci->regs->control);
- }
- wait_ms (20);
- for (ohci_l = ohci_hcd_list.next; ohci_l != &ohci_hcd_list; ohci_l = ohci_l->next) {
- ohci = list_entry (ohci_l, ohci_t, ohci_hcd_list);
+ wait_ms (20);
writel (ohci->hc_control = 0xBF, &ohci->regs->control);
+ break;
}
- down = 0;
- break;
}
return 0;
}
-#endif
/*-------------------------------------------------------------------------*/
if (hc_start_ohci(dev) >= 0) ret = 0;
}
-#ifdef CONFIG_APM
- apm_register_callback (&handle_apm_event);
-#endif
-
#ifdef CONFIG_PMAC_PBOOK
pmu_register_sleep_notifier (&ohci_sleep_notifier);
#endif
{
ohci_t * ohci;
-#ifdef CONFIG_APM
- apm_unregister_callback (&handle_apm_event);
-#endif
+ pm_unregister_all (handle_pm_event);
#ifdef CONFIG_PMAC_PBOOK
pmu_unregister_sleep_notifier (&ohci_sleep_notifier);
#ifndef __LINUX_USB_SERIAL_H
#define __LINUX_USB_SERIAL_H
+#include <linux/config.h>
+
/* Module information */
MODULE_AUTHOR("Greg Kroah-Hartman, greg@kroah.com, http://www.kroah.com/linux-usb/");
MODULE_DESCRIPTION("USB Serial Driver");
#undef dbg
#define dbg(format, arg...) do {} while (0)
-#ifdef CONFIG_APM
- #include <linux/apm_bios.h>
- static int handle_apm_event (apm_event_t event);
-#endif
+#include <linux/pm.h>
+static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data);
#ifdef DEBUG_SYMBOLS
#define _static
return 0;
}
-_static int __init alloc_uhci (int irq, unsigned int io_addr, unsigned int io_size)
+_static int __init alloc_uhci (struct pci_dev *dev, int irq, unsigned int io_addr, unsigned int io_size)
{
uhci_t *s;
struct usb_bus *bus;
+ struct pm_dev *pmdev;
s = kmalloc (sizeof (uhci_t), GFP_KERNEL);
if (!s)
//chain new uhci device into global list
devs = s;
+
+ pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(dev), handle_pm_event);
+ if (pmdev)
+ pmdev->data = s;
+
return 0;
}
break;
/* disable legacy emulation */
pci_write_config_word (dev, USBLEGSUP, USBLEGSUP_DEFAULT);
- return alloc_uhci(dev->irq, io_addr, io_size);
+ return alloc_uhci(dev, dev->irq, io_addr, io_size);
}
return -1;
}
-#ifdef CONFIG_APM
-_static int handle_apm_event (apm_event_t event)
+_static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data)
{
- static int down = 0;
- uhci_t *s = devs;
- dbg("handle_apm_event(%d)", event);
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- if (down) {
- dbg("received extra suspend event");
- break;
- }
- while (s) {
- reset_hc (s);
- s = s->next;
- }
- down = 1;
+ uhci_t *s = (uhci_t*) dev->data;
+	dbg("handle_pm_event(%d)", rqst);
+ if (s) {
+ switch (rqst) {
+ case PM_SUSPEND:
+ reset_hc (s);
break;
- case APM_NORMAL_RESUME:
- case APM_CRITICAL_RESUME:
- if (!down) {
- dbg("received bogus resume event");
- break;
- }
- down = 0;
- while (s) {
- start_hc (s);
- s = s->next;
- }
+ case PM_RESUME:
+ start_hc (s);
break;
}
return 0;
}
-#endif
int __init uhci_init (void)
{
if (!retval)
i++;
-
}
-#ifdef CONFIG_APM
- if(i)
- apm_register_callback (&handle_apm_event);
-#endif
return retval;
}
void cleanup_module (void)
{
-#ifdef CONFIG_APM
- apm_unregister_callback (&handle_apm_event);
-#endif
+ pm_unregister_all (handle_pm_event);
uhci_cleanup ();
}
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
EXPORT_SYMBOL(register_framebuffer);
EXPORT_SYMBOL(unregister_framebuffer);
+EXPORT_SYMBOL(registered_fb);
+EXPORT_SYMBOL(num_registered_fb);
#if 1 /* to go away in 2.4.0 */
EXPORT_SYMBOL(GET_FB_IDX);
#endif
*gid = current->gid;
*pgrp = current->pgrp;
- *minproto = *maxproto = AUTOFS_MAX_PROTO_VERSION;
+ *minproto = AUTOFS_MIN_PROTO_VERSION;
+ *maxproto = AUTOFS_MAX_PROTO_VERSION;
*pipefd = -1;
if (!bh)
return -EIO;
new = getblk(dev, to, BFS_BSIZE);
- if (!buffer_uptodate(new))
- wait_on_buffer(new);
memcpy(new->b_data, bh->b_data, bh->b_size);
mark_buffer_dirty(new, 1);
bforget(bh);
dprintf("%08lx-%08lx->%08lx\n", start, end, where);
for (i = start; i <= end; i++)
- if(i && bfs_move_block(i, where + i, dev)) {
+ if(bfs_move_block(i, where + i, dev)) {
dprintf("failed to move block %08lx -> %08lx\n", i, where + i);
return -EIO;
}
/* Ok, we have to move this entire file to the next free block */
next_free_block = s->su_lf_eblk + 1;
- err = bfs_move_blocks(inode->i_dev, inode->iu_sblock, inode->iu_eblock, next_free_block);
- if (err) {
- dprintf("failed to move ino=%08lx -> possible fs corruption\n", inode->i_ino);
- goto out;
- }
+ if (inode->iu_sblock) { /* if data starts on block 0 then there is no data */
+ err = bfs_move_blocks(inode->i_dev, inode->iu_sblock, inode->iu_eblock, next_free_block);
+ if (err) {
+ dprintf("failed to move ino=%08lx -> possible fs corruption\n", inode->i_ino);
+ goto out;
+ }
+ } else
+ err = 0;
inode->iu_sblock = next_free_block;
s->su_lf_eblk = inode->iu_eblock = next_free_block + block;
#include <linux/major.h>
#include <linux/blk.h>
#include <linux/init.h>
+#include <linux/raid/md.h>
#include "check.h"
#endif
rd_load();
#endif
+#ifdef CONFIG_BLK_DEV_MD
+ autodetect_raid();
+#endif
#ifdef CONFIG_MD_BOOT
md_setup_drive();
#endif
#include <asm/uaccess.h>
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#ifndef __ALPHA_APECS__H__
#define __ALPHA_APECS__H__
-#include <linux/config.h>
#include <linux/types.h>
#include <asm/compiler.h>
* 28-Dec-1998 APH Made leds code optional
*/
-#include <linux/config.h>
#include <asm/leds.h>
#define MCLK_47_8
#define RTC_PORT(x) (rtc_base+(x))
#define RTC_ALWAYS_BCD 0
-#include <linux/config.h>
#include <linux/mc146818rtc.h>
#include <asm/dec21285.h>
*
*/
+#include <linux/config.h>
+
#ifdef CONFIG_BLK_DEV_IDE
#include <asm/irq.h>
* (C) 1999 Nicolas Pitre <nico@cam.org>
*/
+#include <linux/config.h>
#if defined(CONFIG_SA1100_EMPEG) || \
defined(CONFIG_SA1100_VICTOR) || \
+++ /dev/null
-/* $Id: md.h,v 1.1 1997/12/15 15:11:57 jj Exp $
- * md.h: High speed xor_block operation for RAID4/5
- *
- */
-
-#ifndef __ASM_MD_H
-#define __ASM_MD_H
-
-/* #define HAVE_ARCH_XORBLOCK */
-
-#define MD_XORBLOCK_ALIGNMENT sizeof(long)
-
-#endif /* __ASM_MD_H */
#ifndef __I386_MMU_CONTEXT_H
#define __I386_MMU_CONTEXT_H
+#include <linux/config.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#define __flush_tlb() \
do { \
- __asm__ __volatile__ \
- ("movl %0, %%cr3;" \
- : \
- : "r" __pa(current->active_mm->pgd) \
- : "memory" \
- ); \
+ unsigned int tmpreg; \
+ \
+ __asm__ __volatile__( \
+ "movl %%cr3, %0; # flush TLB \n" \
+ "movl %0, %%cr3; \n" \
+ : "=r" (tmpreg) \
+ :: "memory"); \
} while (0)
/*
*/
#define __flush_tlb_global() \
do { \
+ unsigned int tmpreg; \
+ \
__asm__ __volatile__( \
- "movl %0, %%cr4; # turn off PGE \n" \
- "mov %2, %%cr3; # flush TLB \n" \
- "mov %1, %%cr4; # turn PGE back on \n" \
- : \
- : "r" (mmu_cr4_features), \
- "r" (mmu_cr4_features & ~X86_CR4_PGE), \
- "r" (__pa(current->active_mm->pgd)) \
+ "movl %1, %%cr4; # turn off PGE \n" \
+ "movl %%cr3, %0; # flush TLB \n" \
+ "movl %0, %%cr3; \n" \
+ "movl %2, %%cr4; # turn PGE back on \n" \
+ : "=r" (tmpreg) \
+ : "r" (mmu_cr4_features & ~X86_CR4_PGE), \
+ "r" (mmu_cr4_features) \
: "memory"); \
} while (0)
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
*/
-#include <linux/config.h>
-
#include <linux/types.h>
#define ACPI_RSDP_SIG "RSD PTR " /* Trailing space required */
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <asm/system.h>
*/
#include <asm/io.h> /* need byte IO */
+#include <linux/config.h>
#include <linux/spinlock.h> /* And spinlocks */
#include <linux/delay.h>
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/config.h>
#include <linux/threads.h>
extern unsigned int local_irq_count[NR_CPUS];
#ifndef __ASM_IA64_IOSAPIC_H
#define __ASM_IA64_IOSAPIC_H
+#include <linux/config.h>
+
#define IO_SAPIC_DEFAULT_ADDR 0xFEC00000
#define IO_SAPIC_REG_SELECT 0x0
# ifdef __KERNEL__
+#include <linux/config.h>
+
#define KEYBOARD_IRQ 1
#define DISABLE_KBD_DURING_INTERRUPTS 0
#ifndef _ASM_IA64_SMP_H
#define _ASM_IA64_SMP_H
+#include <linux/config.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/kernel.h>
typedef int acpi_dstate_t;
-#ifdef __KERNEL__
-
-extern int acpi_active;
-
-extern void (*acpi_idle)(void);
-extern void (*acpi_power_off)(void);
-
-#endif /* __KERNEL__ */
-
/* RSDP location */
#define ACPI_BIOS_ROM_BASE (0x0e0000)
#define ACPI_BIOS_ROM_END (0x100000)
/* autofs v4 definitions */
#undef AUTOFS_PROTO_VERSION
-#define AUTOFS_PROTO_VERSION 4
-
+#undef AUTOFS_MIN_PROTO_VERSION
#undef AUTOFS_MAX_PROTO_VERSION
-#define AUTOFS_MAX_PROTO_VERSION AUTOFS_PROTO_VERSION
+
+#define AUTOFS_PROTO_VERSION 4
+#define AUTOFS_MIN_PROTO_VERSION 3
+#define AUTOFS_MAX_PROTO_VERSION 4
/* New message type */
#define autofs_ptype_expire_multi 2 /* Expire entry (umount request) */
};
typedef struct request_queue request_queue_t;
-typedef int (merge_request_fn) (request_queue_t *,
- struct request * req,
- struct buffer_head *);
-typedef int (merge_requests_fn) (request_queue_t *,
- struct request * req,
- struct request * req2);
-typedef void (request_fn_proc) (request_queue_t *);
+typedef int (merge_request_fn) (request_queue_t *q,
+ struct request *req,
+ struct buffer_head *bh);
+typedef int (merge_requests_fn) (request_queue_t *q,
+ struct request *req,
+ struct request *req2);
+typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
+typedef void (make_request_fn) (int rw, struct buffer_head *bh);
+typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
+typedef void (unplug_device_fn) (void *q);
struct request_queue
{
request_fn_proc * request_fn;
merge_request_fn * merge_fn;
merge_requests_fn * merge_requests_fn;
+ make_request_fn * make_request_fn;
+ plug_device_fn * plug_device_fn;
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
* not.
*/
char head_active;
-
- /*
- * Boolean that indicates whether we should use plugging on
- * this queue or not.
- */
- char use_plug;
};
struct blk_dev_struct {
extern wait_queue_head_t wait_for_request;
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
-extern void unplug_device(void * data);
-extern void make_request(int major,int rw, struct buffer_head * bh);
+extern void generic_unplug_device(void * data);
+extern void generic_plug_device (request_queue_t *q, kdev_t dev);
+extern void generic_make_request(int rw, struct buffer_head * bh);
+extern request_queue_t * blk_get_queue(kdev_t dev);
/*
* Access functions for manipulating queue properties
extern void blk_init_queue(request_queue_t *, request_fn_proc *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
-extern void blk_queue_pluggable(request_queue_t *, int);
-
-/* md needs this function to remap requests */
-extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
-extern int md_make_request (int minor, int rw, struct buffer_head * bh);
-extern int md_error (kdev_t mddev, kdev_t rdev);
+extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *);
+extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern int * blk_size[MAX_BLKDEV];
#define WIN98_EXTENDED_PARTITION 0x0f
#define LINUX_SWAP_PARTITION 0x82
+#define LINUX_RAID_PARTITION 0xfd /* autodetect RAID partition */
#ifdef CONFIG_SOLARIS_X86_PARTITION
#define SOLARIS_X86_PARTITION LINUX_SWAP_PARTITION
struct hd_struct {
long start_sect;
long nr_sects;
+ int type; /* currently RAID or normal */
};
struct gendisk {
#define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */
#endif
#define LPGETFLAGS 0x060e /* get status flags */
+#define LPSETTIMEOUT 0x060f /* set parport timeout */
/* timeout for printk'ing a timeout, in jiffies (100ths of a second).
This is also used for re-checking error conditions if LP_ABORT is
unsigned int last_error;
struct semaphore port_mutex;
wait_queue_head_t dataq;
+ long timeout;
};
/*
+++ /dev/null
-/*
- md.h : Multiple Devices driver for Linux
- Copyright (C) 1994-96 Marc ZYNGIER
- <zyngier@ufr-info-p7.ibp.fr> or
- <maz@gloups.fdn.fr>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef _MD_H
-#define _MD_H
-
-#include <linux/major.h>
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/*
- * Different major versions are not compatible.
- * Different minor versions are only downward compatible.
- * Different patchlevel versions are downward and upward compatible.
- */
-#define MD_MAJOR_VERSION 0
-#define MD_MINOR_VERSION 36
-#define MD_PATCHLEVEL_VERSION 6
-
-#define MD_DEFAULT_DISK_READAHEAD (256 * 1024)
-
-/* ioctls */
-#define REGISTER_DEV _IO (MD_MAJOR, 1)
-#define START_MD _IO (MD_MAJOR, 2)
-#define STOP_MD _IO (MD_MAJOR, 3)
-#define REGISTER_DEV_NEW _IO (MD_MAJOR, 4)
-
-/*
- personalities :
- Byte 0 : Chunk size factor
- Byte 1 : Fault tolerance count for each physical device
- ( 0 means no fault tolerance,
- 0xFF means always tolerate faults), not used by now.
- Byte 2 : Personality
- Byte 3 : Reserved.
- */
-
-#define FAULT_SHIFT 8
-#define PERSONALITY_SHIFT 16
-
-#define FACTOR_MASK 0x000000FFUL
-#define FAULT_MASK 0x0000FF00UL
-#define PERSONALITY_MASK 0x00FF0000UL
-
-#define MD_RESERVED 0 /* Not used by now */
-#define LINEAR (1UL << PERSONALITY_SHIFT)
-#define STRIPED (2UL << PERSONALITY_SHIFT)
-#define RAID0 STRIPED
-#define RAID1 (3UL << PERSONALITY_SHIFT)
-#define RAID5 (4UL << PERSONALITY_SHIFT)
-#define MAX_PERSONALITY 5
-
-/*
- * MD superblock.
- *
- * The MD superblock maintains some statistics on each MD configuration.
- * Each real device in the MD set contains it near the end of the device.
- * Some of the ideas are copied from the ext2fs implementation.
- *
- * We currently use 4096 bytes as follows:
- *
- * word offset function
- *
- * 0 - 31 Constant generic MD device information.
- * 32 - 63 Generic state information.
- * 64 - 127 Personality specific information.
- * 128 - 511 12 32-words descriptors of the disks in the raid set.
- * 512 - 911 Reserved.
- * 912 - 1023 Disk specific descriptor.
- */
-
-/*
- * If x is the real device size in bytes, we return an apparent size of:
- *
- * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
- *
- * and place the 4kB superblock at offset y.
- */
-#define MD_RESERVED_BYTES (64 * 1024)
-#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
-#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE)
-
-#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
-#define MD_NEW_SIZE_BLOCKS(x) ((x & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS)
-
-#define MD_SB_BYTES 4096
-#define MD_SB_WORDS (MD_SB_BYTES / 4)
-#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE)
-#define MD_SB_SECTORS (MD_SB_BYTES / 512)
-
-/*
- * The following are counted in 32-bit words
- */
-#define MD_SB_GENERIC_OFFSET 0
-#define MD_SB_PERSONALITY_OFFSET 64
-#define MD_SB_DISKS_OFFSET 128
-#define MD_SB_DESCRIPTOR_OFFSET 992
-
-#define MD_SB_GENERIC_CONSTANT_WORDS 32
-#define MD_SB_GENERIC_STATE_WORDS 32
-#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
-#define MD_SB_PERSONALITY_WORDS 64
-#define MD_SB_DISKS_WORDS 384
-#define MD_SB_DESCRIPTOR_WORDS 32
-#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
-#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
-#define MD_SB_DISKS (MD_SB_DISKS_WORDS / MD_SB_DESCRIPTOR_WORDS)
-
-/*
- * Device "operational" state bits
- */
-#define MD_FAULTY_DEVICE 0 /* Device is faulty / operational */
-#define MD_ACTIVE_DEVICE 1 /* Device is a part or the raid set / spare disk */
-#define MD_SYNC_DEVICE 2 /* Device is in sync with the raid set */
-
-typedef struct md_device_descriptor_s {
- __u32 number; /* 0 Device number in the entire set */
- __u32 major; /* 1 Device major number */
- __u32 minor; /* 2 Device minor number */
- __u32 raid_disk; /* 3 The role of the device in the raid set */
- __u32 state; /* 4 Operational state */
- __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
-} md_descriptor_t;
-
-#define MD_SB_MAGIC 0xa92b4efc
-
-/*
- * Superblock state bits
- */
-#define MD_SB_CLEAN 0
-#define MD_SB_ERRORS 1
-
-typedef struct md_superblock_s {
-
- /*
- * Constant generic information
- */
- __u32 md_magic; /* 0 MD identifier */
- __u32 major_version; /* 1 major version to which the set conforms */
- __u32 minor_version; /* 2 minor version to which the set conforms */
- __u32 patch_version; /* 3 patchlevel version to which the set conforms */
- __u32 gvalid_words; /* 4 Number of non-reserved words in this section */
- __u32 set_magic; /* 5 Raid set identifier */
- __u32 ctime; /* 6 Creation time */
- __u32 level; /* 7 Raid personality (mirroring, raid5, ...) */
- __u32 size; /* 8 Apparent size of each individual disk, in kB */
- __u32 nr_disks; /* 9 Number of total disks in the raid set */
- __u32 raid_disks; /* 10 Number of disks in a fully functional raid set */
- __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 11];
-
- /*
- * Generic state information
- */
- __u32 utime; /* 0 Superblock update time */
- __u32 state; /* 1 State bits (clean, ...) */
- __u32 active_disks; /* 2 Number of currently active disks (some non-faulty disks might not be in sync) */
- __u32 working_disks; /* 3 Number of working disks */
- __u32 failed_disks; /* 4 Number of failed disks */
- __u32 spare_disks; /* 5 Number of spare disks */
- __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 6];
-
- /*
- * Personality information
- */
- __u32 parity_algorithm;
- __u32 chunk_size;
- __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 2];
-
- /*
- * Disks information
- */
- md_descriptor_t disks[MD_SB_DISKS];
-
- /*
- * Reserved
- */
- __u32 reserved[MD_SB_RESERVED_WORDS];
-
- /*
- * Active descriptor
- */
- md_descriptor_t descriptor;
-} md_superblock_t;
-
-#ifdef __KERNEL__
-
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <asm/semaphore.h>
-
-/*
- * Kernel-based reconstruction is mostly working, but still requires
- * some additional work.
- */
-#define SUPPORT_RECONSTRUCTION 0
-
-#define MAX_REAL 8 /* Max number of physical dev per md dev */
-#define MAX_MD_DEV 4 /* Max number of md dev */
-
-#define FACTOR(a) ((a)->repartition & FACTOR_MASK)
-#define MAX_FAULT(a) (((a)->repartition & FAULT_MASK)>>8)
-#define PERSONALITY(a) ((a)->repartition & PERSONALITY_MASK)
-
-#define FACTOR_SHIFT(a) (PAGE_SHIFT + (a) - 10)
-
-struct real_dev
-{
- kdev_t dev; /* Device number */
- int size; /* Device size (in blocks) */
- int offset; /* Real device offset (in blocks) in md dev
- (only used in linear mode) */
- struct inode *inode; /* Lock inode */
- md_superblock_t *sb;
- u32 sb_offset;
-};
-
-struct md_dev;
-
-#define SPARE_INACTIVE 0
-#define SPARE_WRITE 1
-#define SPARE_ACTIVE 2
-
-struct md_personality
-{
- char *name;
- int (*map)(struct md_dev *mddev, kdev_t *rdev,
- unsigned long *rsector, unsigned long size);
- int (*make_request)(struct md_dev *mddev, int rw, struct buffer_head * bh);
- void (*end_request)(struct buffer_head * bh, int uptodate);
- int (*run)(int minor, struct md_dev *mddev);
- int (*stop)(int minor, struct md_dev *mddev);
- int (*status)(char *page, int minor, struct md_dev *mddev);
- int (*ioctl)(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
- int max_invalid_dev;
- int (*error_handler)(struct md_dev *mddev, kdev_t dev);
-
-/*
- * Some personalities (RAID-1, RAID-5) can get disks hot-added and
- * hot-removed. Hot removal is different from failure. (failure marks
- * a disk inactive, but the disk is still part of the array)
- */
- int (*hot_add_disk) (struct md_dev *mddev, kdev_t dev);
- int (*hot_remove_disk) (struct md_dev *mddev, kdev_t dev);
- int (*mark_spare) (struct md_dev *mddev, md_descriptor_t *descriptor, int state);
-};
-
-struct md_dev
-{
- struct real_dev devices[MAX_REAL];
- struct md_personality *pers;
- md_superblock_t *sb;
- int sb_dirty;
- int repartition;
- int busy;
- int nb_dev;
- void *private;
-};
-
-struct md_thread {
- void (*run) (void *data);
- void *data;
- wait_queue_head_t wqueue;
- unsigned long flags;
- struct semaphore *sem;
- struct task_struct *tsk;
-};
-
-#define THREAD_WAKEUP 0
-
-extern struct md_dev md_dev[MAX_MD_DEV];
-extern int md_size[MAX_MD_DEV];
-extern int md_maxreadahead[MAX_MD_DEV];
-
-extern char *partition_name (kdev_t dev);
-
-extern int register_md_personality (int p_num, struct md_personality *p);
-extern int unregister_md_personality (int p_num);
-extern struct md_thread *md_register_thread (void (*run) (void *data), void *data);
-extern void md_unregister_thread (struct md_thread *thread);
-extern void md_wakeup_thread(struct md_thread *thread);
-extern int md_update_sb (int minor);
-extern int md_do_sync(struct md_dev *mddev);
-
-#endif __KERNEL__
-#endif _MD_H
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#ifdef __KERNEL__
+
#include <linux/config.h>
#include <linux/list.h>
PM_SYS_UNKNOWN = 0x00000000, /* generic */
PM_SYS_KBC = 0x41d00303, /* keyboard controller */
PM_SYS_COM = 0x41d00500, /* serial port */
+ PM_SYS_IRDA = 0x41d00510, /* IRDA controller */
PM_SYS_FDC = 0x41d00700, /* floppy controller */
PM_SYS_VGA = 0x41d00900, /* VGA controller */
+ PM_SYS_PCMCIA = 0x41d00e00, /* PCMCIA controller */
};
/*
void *data;
unsigned long flags;
- unsigned long status;
+ int state;
struct list_head entry;
};
#if defined(CONFIG_ACPI) || defined(CONFIG_APM)
+extern int pm_active;
+
+#define PM_IS_ACTIVE() (pm_active != 0)
+
/*
* Register a device with power management
*/
*/
void pm_unregister(struct pm_dev *dev);
+/*
+ * Unregister all devices with matching callback
+ */
+void pm_unregister_all(pm_callback callback);
+
/*
* Send a request to all devices
*/
#else // CONFIG_ACPI || CONFIG_APM
+#define PM_IS_ACTIVE() 0
+
extern inline struct pm_dev *pm_register(pm_dev_t type,
unsigned long id,
pm_callback callback)
extern inline void pm_unregister(struct pm_dev *dev) {}
+extern inline void pm_unregister_all(pm_callback callback) {}
+
extern inline int pm_send_request(pm_request_t rqst, void *data)
{
return 0;
#endif // CONFIG_ACPI || CONFIG_APM
+extern void (*pm_idle)(void);
+extern void (*pm_power_off)(void);
+
+#endif // __KERNEL__
+
#endif /* _LINUX_PM_H */
--- /dev/null
+#ifndef _LINEAR_H
+#define _LINEAR_H
+
+#include <linux/raid/md.h>
+
+struct dev_info {
+ kdev_t dev;
+ int size;
+ unsigned int offset;
+};
+
+typedef struct dev_info dev_info_t;
+
+struct linear_hash
+{
+ dev_info_t *dev0, *dev1;
+};
+
+struct linear_private_data
+{
+ struct linear_hash *hash_table;
+ dev_info_t disks[MD_SB_DISKS];
+ dev_info_t *smallest;
+ int nr_zones;
+};
+
+
+typedef struct linear_private_data linear_conf_t;
+
+#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
+
+#endif
--- /dev/null
+/*
+ md.h : Multiple Devices driver for Linux
+ Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
+ Copyright (C) 1994-96 Marc ZYNGIER
+ <zyngier@ufr-info-p7.ibp.fr> or
+ <maz@gloups.fdn.fr>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_H
+#define _MD_H
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <asm/semaphore.h>
+#include <linux/major.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <asm/bitops.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/hdreg.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/delay.h>
+#include <net/checksum.h>
+#include <linux/random.h>
+#include <linux/locks.h>
+#include <asm/io.h>
+
+#include <linux/raid/md_compatible.h>
+/*
+ * 'md_p.h' holds the 'physical' layout of RAID devices
+ * 'md_u.h' holds the user <=> kernel API
+ *
+ * 'md_k.h' holds kernel internal definitions
+ */
+
+#include <linux/raid/md_p.h>
+#include <linux/raid/md_u.h>
+#include <linux/raid/md_k.h>
+
+/*
+ * Different major versions are not compatible.
+ * Different minor versions are only downward compatible.
+ * Different patchlevel versions are downward and upward compatible.
+ */
+#define MD_MAJOR_VERSION 0
+#define MD_MINOR_VERSION 90
+#define MD_PATCHLEVEL_VERSION 0
+
+extern int md_size[MAX_MD_DEVS];
+extern struct hd_struct md_hd_struct[MAX_MD_DEVS];
+
+extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data);
+extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev);
+extern char * partition_name (kdev_t dev);
+extern int register_md_personality (int p_num, mdk_personality_t *p);
+extern int unregister_md_personality (int p_num);
+extern mdk_thread_t * md_register_thread (void (*run) (void *data),
+ void *data, const char *name);
+extern void md_unregister_thread (mdk_thread_t *thread);
+extern void md_wakeup_thread(mdk_thread_t *thread);
+extern void md_interrupt_thread (mdk_thread_t *thread);
+extern int md_update_sb (mddev_t *mddev);
+extern int md_do_sync(mddev_t *mddev, mdp_disk_t *spare);
+extern void md_recover_arrays (void);
+extern int md_check_ordering (mddev_t *mddev);
+extern void autodetect_raid(void);
+extern struct gendisk * find_gendisk (kdev_t dev);
+extern int md_notify_reboot(struct notifier_block *this,
+ unsigned long code, void *x);
+extern int md_error (kdev_t mddev, kdev_t rdev);
+
+#ifdef CONFIG_BLK_DEV_MD
+extern void raid_setup(char *str,int *ints) md__init;
+#endif
+
+extern void md_print_devices (void);
+
+#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
+
+#endif /* _MD_H */
+
--- /dev/null
+
+/*
+ md.h : Multiple Devices driver compatibility layer for Linux 2.0/2.2
+ Copyright (C) 1998 Ingo Molnar
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/version.h>
+
+#ifndef _MD_COMPATIBLE_H
+#define _MD_COMPATIBLE_H
+
+/** 2.3/2.4 stuff: **/
+
+#include <linux/reboot.h>
+#include <linux/vmalloc.h>
+#include <linux/blkpg.h>
+
+/* 000 */
+#define md__get_free_pages(x,y) __get_free_pages(x,y)
+
+#ifdef __i386__
+/* 001 */
+extern __inline__ int md_cpu_has_mmx(void)
+{
+ return boot_cpu_data.x86_capability & X86_FEATURE_MMX;
+}
+#endif
+
+/* 002 */
+#define md_clear_page(page) clear_page(page)
+
+/* 003 */
+#define MD_EXPORT_SYMBOL(x) EXPORT_SYMBOL(x)
+
+/* 004 */
+#define md_copy_to_user(x,y,z) copy_to_user(x,y,z)
+
+/* 005 */
+#define md_copy_from_user(x,y,z) copy_from_user(x,y,z)
+
+/* 006 */
+#define md_put_user put_user
+
+/* 007 */
+extern inline int md_capable_admin(void)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+/* 008 */
+#define MD_FILE_TO_INODE(file) ((file)->f_dentry->d_inode)
+
+/* 009 */
+extern inline void md_flush_signals (void)
+{
+	spin_lock(&current->sigmask_lock);
+ flush_signals(current);
+	spin_unlock(&current->sigmask_lock);
+}
+
+/* 010 */
+extern inline void md_init_signals (void)
+{
+ current->exit_signal = SIGCHLD;
+	siginitsetinv(&current->blocked, sigmask(SIGKILL));
+}
+
+/* 011 */
+#define md_signal_pending signal_pending
+
+/* 012 */
+extern inline void md_set_global_readahead(int * table)
+{
+ max_readahead[MD_MAJOR] = table;
+}
+
+/* 013 */
+#define md_mdelay(x) mdelay(x)
+
+/* 014 */
+#define MD_SYS_DOWN SYS_DOWN
+#define MD_SYS_HALT SYS_HALT
+#define MD_SYS_POWER_OFF SYS_POWER_OFF
+
+/* 015 */
+#define md_register_reboot_notifier register_reboot_notifier
+
+/* 016 */
+#define md_test_and_set_bit test_and_set_bit
+
+/* 017 */
+#define md_test_and_clear_bit test_and_clear_bit
+
+/* 018 */
+#define md_atomic_read atomic_read
+#define md_atomic_set atomic_set
+
+/* 019 */
+#define md_lock_kernel lock_kernel
+#define md_unlock_kernel unlock_kernel
+
+/* 020 */
+
+#include <linux/init.h>
+
+#define md__init __init
+#define md__initdata __initdata
+#define md__initfunc(__arginit) __initfunc(__arginit)
+
+/* 021 */
+
+
+/* 022 */
+
+#define md_list_head list_head
+#define MD_LIST_HEAD(name) LIST_HEAD(name)
+#define MD_INIT_LIST_HEAD(ptr) INIT_LIST_HEAD(ptr)
+#define md_list_add list_add
+#define md_list_del list_del
+#define md_list_empty list_empty
+
+#define md_list_entry(ptr, type, member) list_entry(ptr, type, member)
+
+/* 023 */
+
+#define md_schedule_timeout schedule_timeout
+
+/* 024 */
+#define md_need_resched(tsk) ((tsk)->need_resched)
+
+/* 025 */
+#define md_spinlock_t spinlock_t
+#define MD_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+
+#define md_spin_lock spin_lock
+#define md_spin_unlock spin_unlock
+#define md_spin_lock_irq spin_lock_irq
+#define md_spin_unlock_irq spin_unlock_irq
+#define md_spin_unlock_irqrestore spin_unlock_irqrestore
+#define md_spin_lock_irqsave spin_lock_irqsave
+
+/* 026 */
+typedef wait_queue_head_t md_wait_queue_head_t;
+#define MD_DECLARE_WAITQUEUE(w,t) DECLARE_WAITQUEUE((w),(t))
+#define MD_DECLARE_WAIT_QUEUE_HEAD(x) DECLARE_WAIT_QUEUE_HEAD(x)
+#define md_init_waitqueue_head init_waitqueue_head
+
+/* END */
+
+#endif /* _MD_COMPATIBLE_H */
+
--- /dev/null
+/*
+ md_k.h : kernel internal structure of the Linux MD driver
+ Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_K_H
+#define _MD_K_H
+
+#define MD_RESERVED 0UL
+#define LINEAR 1UL
+#define STRIPED 2UL
+#define RAID0 STRIPED
+#define RAID1 3UL
+#define RAID5 4UL
+#define TRANSLUCENT 5UL
+#define HSM 6UL
+#define MAX_PERSONALITY 7UL
+
+extern inline int pers_to_level (int pers)
+{
+ switch (pers) {
+ case HSM: return -3;
+ case TRANSLUCENT: return -2;
+ case LINEAR: return -1;
+ case RAID0: return 0;
+ case RAID1: return 1;
+ case RAID5: return 5;
+ }
+ panic("pers_to_level()");
+}
+
+extern inline int level_to_pers (int level)
+{
+ switch (level) {
+ case -3: return HSM;
+ case -2: return TRANSLUCENT;
+ case -1: return LINEAR;
+ case 0: return RAID0;
+ case 1: return RAID1;
+ case 4:
+ case 5: return RAID5;
+ }
+ return MD_RESERVED;
+}
+
+typedef struct mddev_s mddev_t;
+typedef struct mdk_rdev_s mdk_rdev_t;
+
+#if (MINORBITS != 8)
+#error MD doesnt handle bigger kdev yet
+#endif
+
+#define MAX_REAL 12 /* Max number of disks per md dev */
+#define MAX_MD_DEVS (1<<MINORBITS) /* Max number of md dev */
+
+/*
+ * Maps a kdev to an mddev/subdev. How 'data' is handled is up to
+ * the personality. (eg. HSM uses this to identify individual LVs)
+ */
+typedef struct dev_mapping_s {
+ mddev_t *mddev;
+ void *data;
+} dev_mapping_t;
+
+extern dev_mapping_t mddev_map [MAX_MD_DEVS];
+
+extern inline mddev_t * kdev_to_mddev (kdev_t dev)
+{
+ return mddev_map[MINOR(dev)].mddev;
+}
+
+/*
+ * options passed in raidrun:
+ */
+
+#define MAX_CHUNK_SIZE (4096*1024)
+
+/*
+ * default readahead
+ */
+#define MD_READAHEAD (256 * 512)
+
+extern inline int disk_faulty(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_FAULTY);
+}
+
+extern inline int disk_active(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_ACTIVE);
+}
+
+extern inline int disk_sync(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_SYNC);
+}
+
+extern inline int disk_spare(mdp_disk_t * d)
+{
+ return !disk_sync(d) && !disk_active(d) && !disk_faulty(d);
+}
+
+extern inline int disk_removed(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_REMOVED);
+}
+
+extern inline void mark_disk_faulty(mdp_disk_t * d)
+{
+ d->state |= (1 << MD_DISK_FAULTY);
+}
+
+extern inline void mark_disk_active(mdp_disk_t * d)
+{
+ d->state |= (1 << MD_DISK_ACTIVE);
+}
+
+extern inline void mark_disk_sync(mdp_disk_t * d)
+{
+ d->state |= (1 << MD_DISK_SYNC);
+}
+
+extern inline void mark_disk_spare(mdp_disk_t * d)
+{
+ d->state = 0;
+}
+
+extern inline void mark_disk_removed(mdp_disk_t * d)
+{
+ d->state = (1 << MD_DISK_FAULTY) | (1 << MD_DISK_REMOVED);
+}
+
+extern inline void mark_disk_inactive(mdp_disk_t * d)
+{
+ d->state &= ~(1 << MD_DISK_ACTIVE);
+}
+
+extern inline void mark_disk_nonsync(mdp_disk_t * d)
+{
+ d->state &= ~(1 << MD_DISK_SYNC);
+}
+
+/*
+ * MD's 'extended' device
+ */
+struct mdk_rdev_s
+{
+ struct md_list_head same_set; /* RAID devices within the same set */
+ struct md_list_head all; /* all RAID devices */
+ struct md_list_head pending; /* undetected RAID devices */
+
+ kdev_t dev; /* Device number */
+ kdev_t old_dev; /* "" when it was last imported */
+ int size; /* Device size (in blocks) */
+ mddev_t *mddev; /* RAID array if running */
+ unsigned long last_events; /* IO event timestamp */
+
+ struct inode *inode; /* Lock inode */
+ struct file filp; /* Lock file */
+
+ mdp_super_t *sb;
+ int sb_offset;
+
+ int faulty; /* if faulty do not issue IO requests */
+ int desc_nr; /* descriptor index in the superblock */
+};
+
+
+/*
+ * disk operations in a working array:
+ */
+#define DISKOP_SPARE_INACTIVE 0
+#define DISKOP_SPARE_WRITE 1
+#define DISKOP_SPARE_ACTIVE 2
+#define DISKOP_HOT_REMOVE_DISK 3
+#define DISKOP_HOT_ADD_DISK 4
+
+typedef struct mdk_personality_s mdk_personality_t;
+
+struct mddev_s
+{
+ void *private;
+ mdk_personality_t *pers;
+ int __minor;
+ mdp_super_t *sb;
+ int nb_dev;
+ struct md_list_head disks;
+ int sb_dirty;
+ mdu_param_t param;
+ int ro;
+ unsigned int curr_resync;
+ unsigned long resync_start;
+ char *name;
+ int recovery_running;
+ struct semaphore reconfig_sem;
+ struct semaphore recovery_sem;
+ struct semaphore resync_sem;
+ struct md_list_head all_mddevs;
+ request_queue_t queue;
+};
+
+struct mdk_personality_s
+{
+ char *name;
+ int (*map)(mddev_t *mddev, kdev_t dev, kdev_t *rdev,
+ unsigned long *rsector, unsigned long size);
+ int (*make_request)(mddev_t *mddev, int rw, struct buffer_head * bh);
+ void (*end_request)(struct buffer_head * bh, int uptodate);
+ int (*run)(mddev_t *mddev);
+ int (*stop)(mddev_t *mddev);
+ int (*status)(char *page, mddev_t *mddev);
+ int (*ioctl)(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+ int max_invalid_dev;
+ int (*error_handler)(mddev_t *mddev, kdev_t dev);
+
+/*
+ * Some personalities (RAID-1, RAID-5) can have disks hot-added and
+ * hot-removed. Hot removal is different from failure. (failure marks
+ * a disk inactive, but the disk is still part of the array) The interface
+ * to such operations is the 'pers->diskop()' function, can be NULL.
+ *
+ * the diskop function can change the pointer pointing to the incoming
+ * descriptor, but must do so very carefully. (currently only
+ * SPARE_ACTIVE expects such a change)
+ */
+ int (*diskop) (mddev_t *mddev, mdp_disk_t **descriptor, int state);
+
+ int (*stop_resync)(mddev_t *mddev);
+ int (*restart_resync)(mddev_t *mddev);
+};
+
+
+/*
+ * Currently we index md_array directly, based on the minor
+ * number. This will have to change to dynamic allocation
+ * once we start supporting partitioning of md devices.
+ */
+extern inline int mdidx (mddev_t * mddev)
+{
+ return mddev->__minor;
+}
+
+extern inline kdev_t mddev_to_kdev(mddev_t * mddev)
+{
+ return MKDEV(MD_MAJOR, mdidx(mddev));
+}
+
+extern mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev);
+extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
+
+/*
+ * iterates through some rdev ringlist. It's safe to remove the
+ * current 'rdev'. Dont touch 'tmp' though.
+ */
+#define ITERATE_RDEV_GENERIC(head,field,rdev,tmp) \
+ \
+ for (tmp = head.next; \
+ rdev = md_list_entry(tmp, mdk_rdev_t, field), \
+ tmp = tmp->next, tmp->prev != &head \
+ ; )
+/*
+ * iterates through the 'same array disks' ringlist
+ */
+#define ITERATE_RDEV(mddev,rdev,tmp) \
+ ITERATE_RDEV_GENERIC((mddev)->disks,same_set,rdev,tmp)
+
+/*
+ * Same as above, but assumes that the device has rdev->desc_nr numbered
+ * from 0 to mddev->nb_dev, and iterates through rdevs in ascending order.
+ */
+#define ITERATE_RDEV_ORDERED(mddev,rdev,i) \
+ for (i = 0; rdev = find_rdev_nr(mddev, i), i < mddev->nb_dev; i++)
+
+
+/*
+ * Iterates through all 'RAID managed disks'
+ */
+#define ITERATE_RDEV_ALL(rdev,tmp) \
+ ITERATE_RDEV_GENERIC(all_raid_disks,all,rdev,tmp)
+
+/*
+ * Iterates through 'pending RAID disks'
+ */
+#define ITERATE_RDEV_PENDING(rdev,tmp) \
+ ITERATE_RDEV_GENERIC(pending_raid_disks,pending,rdev,tmp)
+
+/*
+ * iterates through all used mddevs in the system.
+ */
+#define ITERATE_MDDEV(mddev,tmp) \
+ \
+ for (tmp = all_mddevs.next; \
+ mddev = md_list_entry(tmp, mddev_t, all_mddevs), \
+ tmp = tmp->next, tmp->prev != &all_mddevs \
+ ; )
+
+extern inline int lock_mddev (mddev_t * mddev)
+{
+ return down_interruptible(&mddev->reconfig_sem);
+}
+
+extern inline void unlock_mddev (mddev_t * mddev)
+{
+ up(&mddev->reconfig_sem);
+}
+
+#define xchg_values(x,y) do { __typeof__(x) __tmp = x; \
+ x = y; y = __tmp; } while (0)
+
+typedef struct mdk_thread_s {
+ void (*run) (void *data);
+ void *data;
+ md_wait_queue_head_t wqueue;
+ unsigned long flags;
+ struct semaphore *sem;
+ struct task_struct *tsk;
+ const char *name;
+} mdk_thread_t;
+
+#define THREAD_WAKEUP 0
+
+#define MAX_DISKNAME_LEN 32
+
+typedef struct dev_name_s {
+ struct md_list_head list;
+ kdev_t dev;
+ char name [MAX_DISKNAME_LEN];
+} dev_name_t;
+
+#endif /* _MD_K_H */
+
--- /dev/null
+/*
+ md_p.h : physical layout of Linux RAID devices
+ Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_P_H
+#define _MD_P_H
+
+/*
+ * RAID superblock.
+ *
+ * The RAID superblock maintains some statistics on each RAID configuration.
+ * Each real device in the RAID set contains it near the end of the device.
+ * Some of the ideas are copied from the ext2fs implementation.
+ *
+ * We currently use 4096 bytes as follows:
+ *
+ * word offset function
+ *
+ * 0 - 31 Constant generic RAID device information.
+ * 32 - 63 Generic state information.
+ * 64 - 127 Personality specific information.
+ * 128 - 511 12 32-words descriptors of the disks in the raid set.
+ * 512 - 911 Reserved.
+ * 912 - 1023 Disk specific descriptor.
+ */
+
+/*
+ * If x is the real device size in bytes, we return an apparent size of:
+ *
+ * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
+ *
+ * and place the 4kB superblock at offset y.
+ */
+#define MD_RESERVED_BYTES (64 * 1024)
+#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
+#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE)
+
+#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
+#define MD_NEW_SIZE_BLOCKS(x) ((x & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS)
+
+#define MD_SB_BYTES 4096
+#define MD_SB_WORDS (MD_SB_BYTES / 4)
+#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE)
+#define MD_SB_SECTORS (MD_SB_BYTES / 512)
+
+/*
+ * The following are counted in 32-bit words
+ */
+#define MD_SB_GENERIC_OFFSET 0
+#define MD_SB_PERSONALITY_OFFSET 64
+#define MD_SB_DISKS_OFFSET 128
+#define MD_SB_DESCRIPTOR_OFFSET 992
+
+#define MD_SB_GENERIC_CONSTANT_WORDS 32
+#define MD_SB_GENERIC_STATE_WORDS 32
+#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
+#define MD_SB_PERSONALITY_WORDS 64
+#define MD_SB_DESCRIPTOR_WORDS 32
+#define MD_SB_DISKS 27
+#define MD_SB_DISKS_WORDS (MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS)
+#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
+#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
+
+/*
+ * Device "operational" state bits
+ */
+#define MD_DISK_FAULTY 0 /* disk is faulty / operational */
+#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */
+#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */
+#define MD_DISK_REMOVED 3 /* disk has been removed from the raid set */
+
+typedef struct mdp_device_descriptor_s {
+ __u32 number; /* 0 Device number in the entire set */
+ __u32 major; /* 1 Device major number */
+ __u32 minor; /* 2 Device minor number */
+ __u32 raid_disk; /* 3 The role of the device in the raid set */
+ __u32 state; /* 4 Operational state */
+ __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
+} mdp_disk_t;
+
+#define MD_SB_MAGIC 0xa92b4efc
+
+/*
+ * Superblock state bits
+ */
+#define MD_SB_CLEAN 0
+#define MD_SB_ERRORS 1
+
+typedef struct mdp_superblock_s {
+ /*
+ * Constant generic information
+ */
+ __u32 md_magic; /* 0 MD identifier */
+ __u32 major_version; /* 1 major version to which the set conforms */
+ __u32 minor_version; /* 2 minor version ... */
+ __u32 patch_version; /* 3 patchlevel version ... */
+ __u32 gvalid_words; /* 4 Number of used words in this section */
+ __u32 set_uuid0; /* 5 Raid set identifier */
+ __u32 ctime; /* 6 Creation time */
+ __u32 level; /* 7 Raid personality */
+ __u32 size; /* 8 Apparent size of each individual disk */
+ __u32 nr_disks; /* 9 total disks in the raid set */
+ __u32 raid_disks; /* 10 disks in a fully functional raid set */
+ __u32 md_minor; /* 11 preferred MD minor device number */
+ __u32 not_persistent; /* 12 does it have a persistent superblock */
+ __u32 set_uuid1; /* 13 Raid set identifier #2 */
+ __u32 set_uuid2; /* 14 Raid set identifier #3 */
+	__u32 set_uuid3;	/* 15 Raid set identifier #4		      */
+ __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16];
+
+ /*
+ * Generic state information
+ */
+ __u32 utime; /* 0 Superblock update time */
+ __u32 state; /* 1 State bits (clean, ...) */
+ __u32 active_disks; /* 2 Number of currently active disks */
+ __u32 working_disks; /* 3 Number of working disks */
+ __u32 failed_disks; /* 4 Number of failed disks */
+ __u32 spare_disks; /* 5 Number of spare disks */
+ __u32 sb_csum; /* 6 checksum of the whole superblock */
+ __u64 events; /* 7 number of superblock updates (64-bit!) */
+ __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 9];
+
+ /*
+ * Personality information
+ */
+ __u32 layout; /* 0 the array's physical layout */
+ __u32 chunk_size; /* 1 chunk size in bytes */
+ __u32 root_pv; /* 2 LV root PV */
+ __u32 root_block; /* 3 LV root block */
+ __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4];
+
+ /*
+ * Disks information
+ */
+ mdp_disk_t disks[MD_SB_DISKS];
+
+ /*
+ * Reserved
+ */
+ __u32 reserved[MD_SB_RESERVED_WORDS];
+
+ /*
+ * Active descriptor
+ */
+ mdp_disk_t this_disk;
+
+} mdp_super_t;
+
+#endif /* _MD_P_H */
+
--- /dev/null
+/*
+ md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
+ Copyright (C) 1998 Ingo Molnar
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_U_H
+#define _MD_U_H
+
+/* ioctls */
+
+/* status */
+#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
+#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
+#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
+#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
+
+/* configuration */
+#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20)
+#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t)
+#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22)
+#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t)
+#define SET_DISK_INFO _IO (MD_MAJOR, 0x24)
+#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25)
+#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26)
+#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27)
+#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28)
+#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29)
+
+/* usage */
+#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t)
+#define START_ARRAY _IO (MD_MAJOR, 0x31)
+#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
+#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
+#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
+
+typedef struct mdu_version_s {
+ int major;
+ int minor;
+ int patchlevel;
+} mdu_version_t;
+
+typedef struct mdu_array_info_s {
+ /*
+ * Generic constant information
+ */
+ int major_version;
+ int minor_version;
+ int patch_version;
+ int ctime;
+ int level;
+ int size;
+ int nr_disks;
+ int raid_disks;
+ int md_minor;
+ int not_persistent;
+
+ /*
+ * Generic state information
+ */
+ int utime; /* 0 Superblock update time */
+ int state; /* 1 State bits (clean, ...) */
+ int active_disks; /* 2 Number of currently active disks */
+ int working_disks; /* 3 Number of working disks */
+ int failed_disks; /* 4 Number of failed disks */
+ int spare_disks; /* 5 Number of spare disks */
+
+ /*
+ * Personality information
+ */
+ int layout; /* 0 the array's physical layout */
+ int chunk_size; /* 1 chunk size in bytes */
+
+} mdu_array_info_t;
+
+typedef struct mdu_disk_info_s {
+ /*
+ * configuration/status of one particular disk
+ */
+ int number;
+ int major;
+ int minor;
+ int raid_disk;
+ int state;
+
+} mdu_disk_info_t;
+
+typedef struct mdu_start_info_s {
+ /*
+ * configuration/status of one particular disk
+ */
+ int major;
+ int minor;
+ int raid_disk;
+ int state;
+
+} mdu_start_info_t;
+
+typedef struct mdu_param_s
+{
+ int personality; /* 1,2,3,4 */
+ int chunk_size; /* in bytes */
+ int max_fault; /* unused for now */
+} mdu_param_t;
+
+#endif /* _MD_U_H */
+
--- /dev/null
+#ifndef _RAID0_H
+#define _RAID0_H
+
+#include <linux/raid/md.h>
+
+struct strip_zone
+{
+ int zone_offset; /* Zone offset in md_dev */
+ int dev_offset; /* Zone offset in real dev */
+ int size; /* Zone size */
+ int nb_dev; /* # of devices attached to the zone */
+ mdk_rdev_t *dev[MAX_REAL]; /* Devices attached to the zone */
+};
+
+struct raid0_hash
+{
+ struct strip_zone *zone0, *zone1;
+};
+
+struct raid0_private_data
+{
+ struct raid0_hash *hash_table; /* Dynamically allocated */
+ struct strip_zone *strip_zone; /* This one too */
+ int nr_strip_zones;
+ struct strip_zone *smallest;
+ int nr_zones;
+};
+
+typedef struct raid0_private_data raid0_conf_t;
+
+#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
+
+#endif
+++ /dev/null
-#ifndef _RAID0_H
-#define _RAID0_H
-
-struct strip_zone
-{
- int zone_offset; /* Zone offset in md_dev */
- int dev_offset; /* Zone offset in real dev */
- int size; /* Zone size */
- int nb_dev; /* Number of devices attached to the zone */
- struct real_dev *dev[MAX_REAL]; /* Devices attached to the zone */
-};
-
-struct raid0_hash
-{
- struct strip_zone *zone0, *zone1;
-};
-
-struct raid0_data
-{
- struct raid0_hash *hash_table; /* Dynamically allocated */
- struct strip_zone *strip_zone; /* This one too */
- int nr_strip_zones;
- struct strip_zone *smallest;
- int nr_zones;
-};
-
-#endif
p = p -> next;
mb();
save_p -> sync = 0;
- (*f)(arg);
+ if (f)
+ (*f)(arg);
}
}
}
#include <linux/time.h>
#include <linux/spinlock.h>
+#include <linux/pm.h>
#include <asm/io.h>
/* DMA modes needed */
__u32 flags; /* Interface flags */
__u32 new_speed;
int index; /* Instance index */
+
+ struct pm_dev *dev;
};
static inline void switch_bank(int iobase, int bank)
#define SMC_IRCC_H
#include <linux/spinlock.h>
+#include <linux/pm.h>
#include <net/irda/irport.h>
int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
int tx_len; /* Number of frames in tx_buff */
+
+ struct pm_dev *pmdev;
};
#endif /* SMC_IRCC_H */
#include <linux/utsname.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/raid/md.h>
#include <linux/smp_lock.h>
#include <linux/blk.h>
#include <linux/hdreg.h>
# include <asm/mtrr.h>
#endif
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-#endif
-
#ifdef CONFIG_MAC
extern void nubus_init(void);
#endif
while (pid != wait(&i));
if (MAJOR(real_root_dev) != RAMDISK_MAJOR
|| MINOR(real_root_dev) != 0) {
+#ifdef CONFIG_BLK_DEV_MD
+ autodetect_raid();
+#endif
error = change_root(real_root_dev,"/initrd");
if (error)
printk(KERN_ERR "Change root to /initrd: "
EXPORT_SYMBOL(gendisk_head);
EXPORT_SYMBOL(grok_partitions);
EXPORT_SYMBOL(register_disk);
-EXPORT_SYMBOL(unplug_device);
-EXPORT_SYMBOL(make_request);
EXPORT_SYMBOL(tq_disk);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(refile_buffer);
EXPORT_SYMBOL(kdevname);
EXPORT_SYMBOL(bdevname);
EXPORT_SYMBOL(cdevname);
-EXPORT_SYMBOL(partition_name); /* md.c only */
EXPORT_SYMBOL(simple_strtoul);
EXPORT_SYMBOL(system_utsname); /* UTS data */
EXPORT_SYMBOL(uts_sem); /* UTS semaphore */