extern void openpic_init_IRQ(void);
extern void find_and_init_phbs(void);
+extern void pSeries_final_fixup(void);
extern void pSeries_get_boot_time(struct rtc_time *rtc_time);
extern void pSeries_get_rtc_time(struct rtc_time *rtc_time);
ppc_md.init = chrp_init2;
+ ppc_md.pcibios_fixup = pSeries_final_fixup;
+
ppc_md.restart = rtas_restart;
ppc_md.power_off = rtas_power_off;
ppc_md.halt = rtas_halt;
}
/*
- * pcibios_final_fixup(void)
+ * iSeries_pci_final_fixup(void)
*/
-void __init pcibios_final_fixup(void)
+void __init iSeries_pci_final_fixup(void)
{
struct pci_dev *pdev = NULL;
struct iSeries_Device_Node *node;
static void build_iSeries_Memory_Map(void);
static void setup_iSeries_cache_sizes(void);
static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
-void build_valid_hpte(unsigned long vsid, unsigned long ea, unsigned long pa,
- pte_t *ptep, unsigned hpteflags, unsigned bolted);
+extern void build_valid_hpte(unsigned long vsid, unsigned long ea, unsigned long pa,
+ pte_t *ptep, unsigned hpteflags, unsigned bolted);
static void iSeries_setup_dprofile(void);
-void iSeries_setup_arch(void);
+extern void iSeries_setup_arch(void);
+extern void iSeries_pci_final_fixup(void);
/* Global Variables */
static unsigned long procFreqHz;
ppc_md.get_irq = iSeries_get_irq;
ppc_md.init = NULL;
+ ppc_md.pcibios_fixup = iSeries_pci_final_fixup;
+
ppc_md.restart = iSeries_restart;
ppc_md.power_off = iSeries_power_off;
ppc_md.halt = iSeries_halt;
extern void chrp_request_regions(void);
-void __init pcibios_final_fixup(void)
+void __init pSeries_final_fixup(void)
{
struct pci_dev *dev = NULL;
#include <asm/ppcdebug.h>
#include <asm/naca.h>
#include <asm/pci_dma.h>
+#include <asm/machdep.h>
#include "pci.h"
void pcibios_final_fixup(void);
static void fixup_broken_pcnet32(struct pci_dev* dev);
static void fixup_windbond_82c105(struct pci_dev* dev);
+extern void fixup_k2_sata(struct pci_dev* dev);
void iSeries_pcibios_init(void);
struct pci_controller *hose_head;
struct pci_controller **hose_tail = &hose_head;
+struct pci_dma_ops pci_dma_ops;
+EXPORT_SYMBOL(pci_dma_ops);
+
int global_phb_number; /* Global phb counter */
/* Cached ISA bridge dev. */
struct pci_dev *ppc64_isabridge_dev = NULL;
struct pci_fixup pcibios_fixups[] = {
- { PCI_FIXUP_HEADER, PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32 },
- { PCI_FIXUP_HEADER, PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, fixup_windbond_82c105 },
- { PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID,
+ fixup_broken_pcnet32 },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
+ fixup_windbond_82c105 },
+ { PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID,
+ pcibios_name_device },
+#ifdef CONFIG_PPC_PMAC
+ /* NOTE(review): device 0x0240 is presumably the ServerWorks/Broadcom
+  * K2 SATA cell handled by fixup_k2_sata() — confirm against the PMAC
+  * platform code that defines that fixup. */
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_SERVERWORKS, 0x0240,
+ fixup_k2_sata },
+#endif
{ 0 }
};
case phb_type_winnipeg:
model = "PHB WP";
break;
+ case phb_type_apple:
+ model = "PHB APPLE";
+ break;
default:
model = "PHB UK";
break;
pci_assign_unassigned_resources();
#endif
- /* Call machine dependent fixup */
- pcibios_final_fixup();
+ /* Call machine dependent final fixup */
+ if (ppc_md.pcibios_fixup)
+ ppc_md.pcibios_fixup();
/* Cache the location of the ISA bridge (if we have one) */
ppc64_isabridge_dev = pci_find_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (tce) of the first page.
*/
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+static void *tce_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
struct TceTable * tbl;
return ret;
}
-void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+static void tce_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
struct TceTable * tbl;
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
*/
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *vaddr,
+static dma_addr_t tce_map_single(struct pci_dev *hwdev, void *vaddr,
size_t size, int direction )
{
struct TceTable * tbl;
return dma_handle;
}
-void pci_unmap_single( struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction )
+static void tce_unmap_single( struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction )
{
struct TceTable * tbl;
unsigned order, nPages;
return dmaAddr;
}
-int pci_map_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction )
+static int tce_map_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction )
{
struct TceTable * tbl;
unsigned numTces;
return num_dma;
}
-void pci_unmap_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nelms, int direction )
+static void tce_unmap_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nelms, int direction )
{
struct TceTable * tbl;
unsigned order, numTces, i;
}
#else
-int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
+static int tce_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
int direction)
{
int i;
return nelems;
}
-void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
+static void tce_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
int direction)
{
while (nelems--) {
{
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free_one = tce_free_one_pSeries;
+
+ pci_dma_ops.pci_alloc_consistent = tce_alloc_consistent;
+ pci_dma_ops.pci_free_consistent = tce_free_consistent;
+ pci_dma_ops.pci_map_single = tce_map_single;
+ pci_dma_ops.pci_unmap_single = tce_unmap_single;
+ pci_dma_ops.pci_map_sg = tce_map_sg;
+ pci_dma_ops.pci_unmap_sg = tce_unmap_sg;
}
+
#endif
#ifdef CONFIG_PPC_ISERIES
{
ppc_md.tce_build = tce_build_iSeries;
ppc_md.tce_free_one = tce_free_one_iSeries;
+
+ pci_dma_ops.pci_alloc_consistent = tce_alloc_consistent;
+ pci_dma_ops.pci_free_consistent = tce_free_consistent;
+ pci_dma_ops.pci_map_single = tce_map_single;
+ pci_dma_ops.pci_unmap_single = tce_unmap_single;
+ pci_dma_ops.pci_map_sg = tce_map_sg;
+ pci_dma_ops.pci_unmap_sg = tce_unmap_sg;
}
#endif
}
return dn;
}
+EXPORT_SYMBOL(fetch_dev_dn);
/******************************************************************
void (*init_IRQ)(void);
int (*get_irq)(struct pt_regs *);
+ /* PCI stuff */
+ void (*pcibios_fixup)(void);
+
/* Optional, may be NULL. */
void (*init)(void);
phb_type_hypervisor = 0x1,
phb_type_python = 0x10,
phb_type_speedwagon = 0x11,
- phb_type_winnipeg = 0x12
+ phb_type_winnipeg = 0x12,
+ phb_type_apple = 0xff
};
/*
unsigned long pci_io_offset;
struct pci_ops *ops;
+ volatile unsigned int *cfg_addr;
+ volatile unsigned char *cfg_data;
/* Currently, we limit ourselves to 1 IO range and 3 mem
* ranges since the common pci_bus structure can't handle more
extern unsigned int pcibios_assign_all_busses(void);
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle);
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction);
-extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction);
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction);
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction);
+/*
+ * PCI DMA operations are abstracted for G5 vs. i/pSeries
+ *
+ * Each platform fills in this table at init time (the TCE-based
+ * implementations are installed where ppc_md.tce_build is set up);
+ * the pci_* inline wrappers below dispatch through it.  Every hook
+ * must be non-NULL before any PCI DMA mapping call is made.
+ */
+struct pci_dma_ops {
+	/* Allocate/free a consistent (coherent) buffer; *dma_handle
+	 * receives the bus address of the first page. */
+ void * (*pci_alloc_consistent)(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+ void (*pci_free_consistent)(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+	/* Map/unmap a single virtually-contiguous region for streaming
+	 * DMA in the given direction. */
+ dma_addr_t (*pci_map_single)(struct pci_dev *hwdev, void *ptr,
+ size_t size, int direction);
+ void (*pci_unmap_single)(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction);
+	/* Map/unmap a scatterlist; pci_map_sg returns the number of
+	 * DMA segments actually used. */
+ int (*pci_map_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+ void (*pci_unmap_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+};
+
+extern struct pci_dma_ops pci_dma_ops;
+
+/*
+ * Generic PCI DMA API entry points: thin inline dispatchers through
+ * the platform-installed pci_dma_ops table.  The hooks are assumed to
+ * be populated during platform init before any caller reaches here
+ * (no NULL checks on this hot path).
+ */
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ return pci_dma_ops.pci_alloc_consistent(hwdev, size, dma_handle);
+}
+
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ pci_dma_ops.pci_free_consistent(hwdev, size, vaddr, dma_handle);
+}
+
+static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
+ size_t size, int direction)
+{
+ return pci_dma_ops.pci_map_single(hwdev, ptr, size, direction);
+}
+
+static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ pci_dma_ops.pci_unmap_single(hwdev, dma_addr, size, direction);
+}
+
+static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ return pci_dma_ops.pci_map_sg(hwdev, sg, nents, direction);
+}
+
+static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ pci_dma_ops.pci_unmap_sg(hwdev, sg, nents, direction);
+}
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
dma_addr_t dma_handle,
extern void create_tce_tables(void);
extern void create_pci_bus_tce_table(unsigned long);
-void tce_init_pSeries(void);
-void tce_init_iSeries(void);
+extern void tce_init_pSeries(void);
+extern void tce_init_iSeries(void);
+
+extern void pci_dma_init_direct(void);
#endif