From: Stephen Rothwell

This patch "inverts" the PPC64 dma mapping routines so that the pci_ and
vio_ ... routines are implemented in terms of the dma_ ... routines (the
vio_ routines disappear anyway, as no one uses them directly any more).

The most noticeable change after this patch is applied will be that the
flags passed to dma_alloc_coherent will now be honoured (whereas they
were previously silently ignored, since we used to just call
pci_alloc_consistent).

This has been compiled for iSeries, pSeries and g5 (default configs) and
booted on iSeries.

Signed-off-by: Stephen Rothwell
Signed-off-by: Andrew Morton
---

 25-akpm/arch/ppc64/kernel/dma.c              |  100 ++++++++++-----
 25-akpm/arch/ppc64/kernel/iommu.c            |    8 -
 25-akpm/arch/ppc64/kernel/pci.c              |    2 
 25-akpm/arch/ppc64/kernel/pci_direct_iommu.c |   34 ++++---
 25-akpm/arch/ppc64/kernel/pci_iommu.c        |   55 ++++++-----
 25-akpm/arch/ppc64/kernel/vio.c              |   55 +++++++----
 25-akpm/include/asm-ppc64/dma-mapping.h      |   20 ++++
 25-akpm/include/asm-ppc64/iommu.h            |    6 -
 25-akpm/include/asm-ppc64/pci.h              |  126 +--------------------------
 25-akpm/include/asm-ppc64/vio.h              |   27 -----
 10 files changed, 166 insertions(+), 267 deletions(-)
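For driver writers, the visible difference is confined to the allocation
path. A minimal sketch of what the honoured flag means in practice,
assuming a hypothetical driver helper (my_alloc_ring and its parameters
are illustrative, not part of this patch):

    #include <linux/dma-mapping.h>

    /* Hypothetical driver helper: allocate a descriptor ring.
     * Before this patch the flag argument was silently dropped and the
     * allocation was effectively always GFP_ATOMIC (via
     * pci_alloc_consistent); with it applied, GFP_KERNEL reaches
     * __get_free_pages() and the allocator is allowed to sleep.
     */
    static void *my_alloc_ring(struct device *dev, size_t size,
    		dma_addr_t *dma)
    {
    	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
    }

Callers in atomic context would pass GFP_ATOMIC instead, which matches
the old behaviour.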
diff -puN arch/ppc64/kernel/dma.c~ppc64-invert-dma-mapping-routines arch/ppc64/kernel/dma.c
--- 25/arch/ppc64/kernel/dma.c~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/dma.c	2005-03-07 23:32:43.000000000 -0800
@@ -13,14 +13,23 @@
 #include
 #include
 
-int dma_supported(struct device *dev, u64 mask)
+static struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
 	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), mask);
+		return &pci_dma_ops;
 #ifdef CONFIG_IBMVIO
 	if (dev->bus == &vio_bus_type)
-		return vio_dma_supported(to_vio_dev(dev), mask);
-#endif /* CONFIG_IBMVIO */
+		return &vio_dma_ops;
+#endif
+	return NULL;
+}
+
+int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		return dma_ops->dma_supported(dev, mask);
 	BUG();
 	return 0;
 }
@@ -32,7 +41,7 @@ int dma_set_mask(struct device *dev, u64
 		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
 #ifdef CONFIG_IBMVIO
 	if (dev->bus == &vio_bus_type)
-		return vio_set_dma_mask(to_vio_dev(dev), dma_mask);
+		return -EIO;
 #endif /* CONFIG_IBMVIO */
 	BUG();
 	return 0;
@@ -42,12 +51,10 @@ EXPORT_SYMBOL(dma_set_mask);
 void *dma_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, int flag)
 {
-	if (dev->bus == &pci_bus_type)
-		return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return vio_alloc_consistent(to_vio_dev(dev), size, dma_handle);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 	BUG();
 	return NULL;
 }
@@ -56,12 +63,10 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_handle)
 {
-	if (dev->bus == &pci_bus_type)
-		pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
-#ifdef CONFIG_IBMVIO
-	else if (dev->bus == &vio_bus_type)
-		vio_free_consistent(to_vio_dev(dev), size, cpu_addr, dma_handle);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 	else
 		BUG();
 }
@@ -70,12 +75,10 @@ EXPORT_SYMBOL(dma_free_coherent);
 dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction direction)
 {
-	if (dev->bus == &pci_bus_type)
-		return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return vio_map_single(to_vio_dev(dev), cpu_addr, size, direction);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		return dma_ops->map_single(dev, cpu_addr, size, direction);
 	BUG();
 	return (dma_addr_t)0;
 }
@@ -84,12 +87,10 @@ EXPORT_SYMBOL(dma_map_single);
 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction direction)
 {
-	if (dev->bus == &pci_bus_type)
-		pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
-#ifdef CONFIG_IBMVIO
-	else if (dev->bus == &vio_bus_type)
-		vio_unmap_single(to_vio_dev(dev), dma_addr, size, direction);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		dma_ops->unmap_single(dev, dma_addr, size, direction);
 	else
 		BUG();
 }
@@ -99,12 +100,11 @@ dma_addr_t dma_map_page(struct device *d
 		unsigned long offset, size_t size,
 		enum dma_data_direction direction)
 {
-	if (dev->bus == &pci_bus_type)
-		return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return vio_map_page(to_vio_dev(dev), page, offset, size, direction);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		return dma_ops->map_single(dev,
+				(page_address(page) + offset), size, direction);
 	BUG();
 	return (dma_addr_t)0;
 }
@@ -113,12 +113,10 @@ EXPORT_SYMBOL(dma_map_page);
 void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 		enum dma_data_direction direction)
 {
-	if (dev->bus == &pci_bus_type)
-		pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
-#ifdef CONFIG_IBMVIO
-	else if (dev->bus == &vio_bus_type)
-		vio_unmap_page(to_vio_dev(dev), dma_address, size, direction);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		dma_ops->unmap_single(dev, dma_address, size, direction);
 	else
 		BUG();
 }
@@ -127,12 +125,10 @@ EXPORT_SYMBOL(dma_unmap_page);
 int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction direction)
 {
-	if (dev->bus == &pci_bus_type)
-		return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return vio_map_sg(to_vio_dev(dev), sg, nents, direction);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		return dma_ops->map_sg(dev, sg, nents, direction);
 	BUG();
 	return 0;
 }
@@ -141,12 +137,10 @@ EXPORT_SYMBOL(dma_map_sg);
 void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 		enum dma_data_direction direction)
 {
-	if (dev->bus == &pci_bus_type)
-		pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
-#ifdef CONFIG_IBMVIO
-	else if (dev->bus == &vio_bus_type)
-		vio_unmap_sg(to_vio_dev(dev), sg, nhwentries, direction);
-#endif /* CONFIG_IBMVIO */
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (dma_ops)
+		dma_ops->unmap_sg(dev, sg, nhwentries, direction);
 	else
 		BUG();
 }
diff -puN arch/ppc64/kernel/iommu.c~ppc64-invert-dma-mapping-routines arch/ppc64/kernel/iommu.c
--- 25/arch/ppc64/kernel/iommu.c~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/iommu.c	2005-03-07 23:32:43.000000000 -0800
@@ -513,8 +513,8 @@ void iommu_unmap_single(struct iommu_tab
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
-void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle)
+void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
+		dma_addr_t *dma_handle, int flag)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -538,7 +538,7 @@ void *iommu_alloc_consistent(struct iomm
 		return NULL;
 
 	/* Alloc enough pages (and possibly more) */
-	ret = (void *)__get_free_pages(GFP_ATOMIC, order);
+	ret = (void *)__get_free_pages(flag, order);
 	if (!ret)
 		return NULL;
 	memset(ret, 0, size);
@@ -553,7 +553,7 @@ void *iommu_alloc_consistent(struct iomm
 	return ret;
 }
 
-void iommu_free_consistent(struct iommu_table *tbl, size_t size,
+void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
 	unsigned int npages;
diff -puN arch/ppc64/kernel/pci.c~ppc64-invert-dma-mapping-routines arch/ppc64/kernel/pci.c
--- 25/arch/ppc64/kernel/pci.c~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/pci.c	2005-03-07 23:32:43.000000000 -0800
@@ -71,7 +71,7 @@ void iSeries_pcibios_init(void);
 
 LIST_HEAD(hose_list);
 
-struct pci_dma_ops pci_dma_ops;
+struct dma_mapping_ops pci_dma_ops;
 EXPORT_SYMBOL(pci_dma_ops);
 
 int global_phb_number;		/* Global phb counter */
diff -puN arch/ppc64/kernel/pci_direct_iommu.c~ppc64-invert-dma-mapping-routines arch/ppc64/kernel/pci_direct_iommu.c
--- 25/arch/ppc64/kernel/pci_direct_iommu.c~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/pci_direct_iommu.c	2005-03-07 23:32:43.000000000 -0800
@@ -30,12 +30,12 @@
 
 #include "pci.h"
 
-static void *pci_direct_alloc_consistent(struct pci_dev *hwdev, size_t size,
-		dma_addr_t *dma_handle)
+static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, int flag)
 {
 	void *ret;
 
-	ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
+	ret = (void *)__get_free_pages(flag, get_order(size));
 	if (ret != NULL) {
 		memset(ret, 0, size);
 		*dma_handle = virt_to_abs(ret);
@@ -43,24 +43,24 @@ static void *pci_direct_alloc_consistent
 	return ret;
 }
 
-static void pci_direct_free_consistent(struct pci_dev *hwdev, size_t size,
+static void pci_direct_free_coherent(struct device *hwdev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
-static dma_addr_t pci_direct_map_single(struct pci_dev *hwdev, void *ptr,
+static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
 		size_t size, enum dma_data_direction direction)
 {
 	return virt_to_abs(ptr);
 }
 
-static void pci_direct_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction direction)
 {
 }
 
-static int pci_direct_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
 		int nents, enum dma_data_direction direction)
 {
 	int i;
@@ -73,17 +73,23 @@ static int pci_direct_map_sg(struct pci_
 	return nents;
 }
 
-static void pci_direct_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 		int nents, enum dma_data_direction direction)
 {
 }
 
+static int pci_direct_dma_supported(struct device *dev, u64 mask)
+{
+	return mask < 0x100000000ull;
+}
+
 void __init pci_direct_iommu_init(void)
 {
-	pci_dma_ops.pci_alloc_consistent = pci_direct_alloc_consistent;
-	pci_dma_ops.pci_free_consistent = pci_direct_free_consistent;
-	pci_dma_ops.pci_map_single = pci_direct_map_single;
-	pci_dma_ops.pci_unmap_single = pci_direct_unmap_single;
-	pci_dma_ops.pci_map_sg = pci_direct_map_sg;
-	pci_dma_ops.pci_unmap_sg = pci_direct_unmap_sg;
+	pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
+	pci_dma_ops.free_coherent = pci_direct_free_coherent;
+	pci_dma_ops.map_single = pci_direct_map_single;
+	pci_dma_ops.unmap_single = pci_direct_unmap_single;
+	pci_dma_ops.map_sg = pci_direct_map_sg;
+	pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
+	pci_dma_ops.dma_supported = pci_direct_dma_supported;
 }
diff -puN arch/ppc64/kernel/pci_iommu.c~ppc64-invert-dma-mapping-routines arch/ppc64/kernel/pci_iommu.c
--- 25/arch/ppc64/kernel/pci_iommu.c~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/pci_iommu.c	2005-03-07 23:32:43.000000000 -0800
@@ -50,19 +50,23 @@
  */
 #define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
 
-static inline struct iommu_table *devnode_table(struct pci_dev *dev)
+static inline struct iommu_table *devnode_table(struct device *dev)
 {
-	if (!dev)
-		dev = ppc64_isabridge_dev;
-	if (!dev)
-		return NULL;
+	struct pci_dev *pdev;
+
+	if (!dev) {
+		pdev = ppc64_isabridge_dev;
+		if (!pdev)
+			return NULL;
+	} else
+		pdev = to_pci_dev(dev);
 #ifdef CONFIG_PPC_ISERIES
-	return ISERIES_DEVNODE(dev)->iommu_table;
+	return ISERIES_DEVNODE(pdev)->iommu_table;
 #endif /* CONFIG_PPC_ISERIES */
 #ifdef CONFIG_PPC_MULTIPLATFORM
-	return PCI_GET_DN(dev)->iommu_table;
+	return PCI_GET_DN(pdev)->iommu_table;
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 }
 
@@ -71,16 +75,17 @@ static inline struct iommu_table *devnod
  * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
-static void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
-		dma_addr_t *dma_handle)
+static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, int flag)
 {
-	return iommu_alloc_consistent(devnode_table(hwdev), size, dma_handle);
+	return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
+			flag);
 }
 
-static void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
+static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
-	iommu_free_consistent(devnode_table(hwdev), size, vaddr, dma_handle);
+	iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
@@ -89,46 +94,46 @@ static void pci_iommu_free_consistent(st
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
-static dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
+static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
 		size_t size, enum dma_data_direction direction)
 {
 	return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
 }
 
-static void pci_iommu_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
+static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction)
 {
 	iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
 }
 
-static int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist,
+static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction)
 {
-	return iommu_map_sg(&pdev->dev, devnode_table(pdev), sglist,
+	return iommu_map_sg(pdev, devnode_table(pdev), sglist,
 			nelems, direction);
 }
 
-static void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist,
+static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction)
 {
 	iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
 }
 
 /* We support DMA to/from any memory page via the iommu */
-static int pci_iommu_dma_supported(struct pci_dev *pdev, u64 mask)
+static int pci_iommu_dma_supported(struct device *dev, u64 mask)
 {
 	return 1;
 }
 
 void pci_iommu_init(void)
 {
-	pci_dma_ops.pci_alloc_consistent = pci_iommu_alloc_consistent;
-	pci_dma_ops.pci_free_consistent = pci_iommu_free_consistent;
-	pci_dma_ops.pci_map_single = pci_iommu_map_single;
-	pci_dma_ops.pci_unmap_single = pci_iommu_unmap_single;
-	pci_dma_ops.pci_map_sg = pci_iommu_map_sg;
-	pci_dma_ops.pci_unmap_sg = pci_iommu_unmap_sg;
-	pci_dma_ops.pci_dma_supported = pci_iommu_dma_supported;
+	pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
+	pci_dma_ops.free_coherent = pci_iommu_free_coherent;
+	pci_dma_ops.map_single = pci_iommu_map_single;
+	pci_dma_ops.unmap_single = pci_iommu_unmap_single;
+	pci_dma_ops.map_sg = pci_iommu_map_sg;
+	pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
+	pci_dma_ops.dma_supported = pci_iommu_dma_supported;
 }
diff -puN arch/ppc64/kernel/vio.c~ppc64-invert-dma-mapping-routines arch/ppc64/kernel/vio.c
--- 25/arch/ppc64/kernel/vio.c~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/vio.c	2005-03-07 23:32:43.000000000 -0800
@@ -557,48 +557,61 @@ int vio_disable_interrupts(struct vio_de
 EXPORT_SYMBOL(vio_disable_interrupts);
 #endif
 
-dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
+static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
 		size_t size, enum dma_data_direction direction)
 {
-	return iommu_map_single(dev->iommu_table, vaddr, size, direction);
+	return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
+			direction);
 }
-EXPORT_SYMBOL(vio_map_single);
 
-void vio_unmap_single(struct vio_dev *dev, dma_addr_t dma_handle,
+static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction)
 {
-	iommu_unmap_single(dev->iommu_table, dma_handle, size, direction);
+	iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
+			direction);
 }
-EXPORT_SYMBOL(vio_unmap_single);
 
-int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
-		enum dma_data_direction direction)
+static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
+		int nelems, enum dma_data_direction direction)
 {
-	return iommu_map_sg(&vdev->dev, vdev->iommu_table, sglist,
+	return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
 			nelems, direction);
 }
-EXPORT_SYMBOL(vio_map_sg);
 
-void vio_unmap_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
-		enum dma_data_direction direction)
+static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
+		int nelems, enum dma_data_direction direction)
 {
-	iommu_unmap_sg(vdev->iommu_table, sglist, nelems, direction);
+	iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
 }
-EXPORT_SYMBOL(vio_unmap_sg);
 
-void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
-		dma_addr_t *dma_handle)
+static void *vio_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, int flag)
 {
-	return iommu_alloc_consistent(dev->iommu_table, size, dma_handle);
+	return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
+			dma_handle, flag);
 }
-EXPORT_SYMBOL(vio_alloc_consistent);
 
-void vio_free_consistent(struct vio_dev *dev, size_t size,
+static void vio_free_coherent(struct device *dev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
-	iommu_free_consistent(dev->iommu_table, size, vaddr, dma_handle);
+	iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
+			dma_handle);
 }
-EXPORT_SYMBOL(vio_free_consistent);
+
+static int vio_dma_supported(struct device *dev, u64 mask)
+{
+	return 1;
+}
+
+struct dma_mapping_ops vio_dma_ops = {
+	.alloc_coherent = vio_alloc_coherent,
+	.free_coherent = vio_free_coherent,
+	.map_single = vio_map_single,
+	.unmap_single = vio_unmap_single,
+	.map_sg = vio_map_sg,
+	.unmap_sg = vio_unmap_sg,
+	.dma_supported = vio_dma_supported,
+};
 
 static int vio_bus_match(struct device *dev, struct device_driver *drv)
 {
diff -puN include/asm-ppc64/dma-mapping.h~ppc64-invert-dma-mapping-routines include/asm-ppc64/dma-mapping.h
--- 25/include/asm-ppc64/dma-mapping.h~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/include/asm-ppc64/dma-mapping.h	2005-03-07 23:32:43.000000000 -0800
@@ -113,4 +113,24 @@ dma_cache_sync(void *vaddr, size_t size,
 	/* nothing to do */
 }
 
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void * (*alloc_coherent)(struct device *dev, size_t size,
+			dma_addr_t *dma_handle, int flag);
+	void (*free_coherent)(struct device *dev, size_t size,
+			void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t (*map_single)(struct device *dev, void *ptr,
+			size_t size, enum dma_data_direction direction);
+	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+			size_t size, enum dma_data_direction direction);
+	int (*map_sg)(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction direction);
+	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction direction);
+	int (*dma_supported)(struct device *dev, u64 mask);
+	int (*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
 #endif	/* _ASM_DMA_MAPPING_H */
diff -puN include/asm-ppc64/iommu.h~ppc64-invert-dma-mapping-routines include/asm-ppc64/iommu.h
--- 25/include/asm-ppc64/iommu.h~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/include/asm-ppc64/iommu.h	2005-03-07 23:32:43.000000000 -0800
@@ -145,9 +145,9 @@ extern int iommu_map_sg(struct device *d
 extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction);
 
-extern void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle);
-extern void iommu_free_consistent(struct iommu_table *tbl, size_t size,
+extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
+		dma_addr_t *dma_handle, int flag);
+extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		void *vaddr, dma_addr_t dma_handle);
 
 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 		size_t size, enum dma_data_direction direction);
diff -puN include/asm-ppc64/pci.h~ppc64-invert-dma-mapping-routines include/asm-ppc64/pci.h
--- 25/include/asm-ppc64/pci.h~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/include/asm-ppc64/pci.h	2005-03-07 23:32:43.000000000 -0800
@@ -13,11 +13,14 @@
 #include
 #include
 #include
+
 #include
 #include
 #include
 #include
+#include
+
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
 
@@ -63,131 +66,18 @@ static inline int pcibios_prep_mwi(struc
 
 extern unsigned int pcibios_assign_all_busses(void);
 
-/*
- * PCI DMA operations are abstracted for G5 vs. i/pSeries
- */
-struct pci_dma_ops {
-	void * (*pci_alloc_consistent)(struct pci_dev *hwdev, size_t size,
-			dma_addr_t *dma_handle);
-	void (*pci_free_consistent)(struct pci_dev *hwdev, size_t size,
-			void *vaddr, dma_addr_t dma_handle);
-
-	dma_addr_t (*pci_map_single)(struct pci_dev *hwdev, void *ptr,
-			size_t size, enum dma_data_direction direction);
-	void (*pci_unmap_single)(struct pci_dev *hwdev, dma_addr_t dma_addr,
-			size_t size, enum dma_data_direction direction);
-	int (*pci_map_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
-			int nents, enum dma_data_direction direction);
-	void (*pci_unmap_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
-			int nents, enum dma_data_direction direction);
-	int (*pci_dma_supported)(struct pci_dev *hwdev, u64 mask);
-	int (*pci_dac_dma_supported)(struct pci_dev *hwdev, u64 mask);
-};
-
-extern struct pci_dma_ops pci_dma_ops;
-
-static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-		dma_addr_t *dma_handle)
-{
-	return pci_dma_ops.pci_alloc_consistent(hwdev, size, dma_handle);
-}
-
-static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
-		void *vaddr, dma_addr_t dma_handle)
-{
-	pci_dma_ops.pci_free_consistent(hwdev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
-		size_t size, int direction)
-{
-	return pci_dma_ops.pci_map_single(hwdev, ptr, size,
-			(enum dma_data_direction)direction);
-}
-
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-		size_t size, int direction)
-{
-	pci_dma_ops.pci_unmap_single(hwdev, dma_addr, size,
-			(enum dma_data_direction)direction);
-}
-
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-		int nents, int direction)
-{
-	return pci_dma_ops.pci_map_sg(hwdev, sg, nents,
-			(enum dma_data_direction)direction);
-}
-
-static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-		int nents, int direction)
-{
-	pci_dma_ops.pci_unmap_sg(hwdev, sg, nents,
-			(enum dma_data_direction)direction);
-}
-
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev,
-		dma_addr_t dma_handle,
-		size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev,
-		dma_addr_t dma_handle,
-		size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev,
-		struct scatterlist *sg,
-		int nelems, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev,
-		struct scatterlist *sg,
-		int nelems, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* nothing to do */
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- * We default to supporting only 32 bits DMA unless we have
- * an explicit override of this function in pci_dma_ops for
- * the platform
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
-	if (pci_dma_ops.pci_dma_supported)
-		return pci_dma_ops.pci_dma_supported(hwdev, mask);
-	return (mask < 0x100000000ull);
-}
+extern struct dma_mapping_ops pci_dma_ops;
 
 /* For DAC DMA, we currently don't support it by default, but
  * we let the platform override this
  */
 static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
 {
-	if (pci_dma_ops.pci_dac_dma_supported)
-		return pci_dma_ops.pci_dac_dma_supported(hwdev, mask);
+	if (pci_dma_ops.dac_dma_supported)
+		return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask);
 	return 0;
 }
 
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
-{
-	return dma_mapping_error(dma_addr);
-}
-
 extern int pci_domain_nr(struct pci_bus *bus);
 
 /* Decide whether to display the domain number in /proc */
@@ -201,10 +91,6 @@ int pci_mmap_page_range(struct pci_dev *
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP		1
 
-#define pci_map_page(dev, page, off, size, dir) \
-	pci_map_single(dev, (page_address(page) + (off)), size, dir)
-#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
diff -puN include/asm-ppc64/vio.h~ppc64-invert-dma-mapping-routines include/asm-ppc64/vio.h
--- 25/include/asm-ppc64/vio.h~ppc64-invert-dma-mapping-routines	2005-03-07 23:32:43.000000000 -0800
+++ 25-akpm/include/asm-ppc64/vio.h	2005-03-07 23:32:43.000000000 -0800
@@ -57,32 +57,7 @@ int vio_get_irq(struct vio_dev *dev);
 int vio_enable_interrupts(struct vio_dev *dev);
 int vio_disable_interrupts(struct vio_dev *dev);
 
-dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
-		size_t size, enum dma_data_direction direction);
-void vio_unmap_single(struct vio_dev *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction direction);
-int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist,
-		int nelems, enum dma_data_direction direction);
-void vio_unmap_sg(struct vio_dev *vdev, struct scatterlist *sglist,
-		int nelems, enum dma_data_direction direction);
-void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
-		dma_addr_t *dma_handle);
-void vio_free_consistent(struct vio_dev *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle);
-
-static inline int vio_dma_supported(struct vio_dev *hwdev, u64 mask)
-{
-	return 1;
-}
-
-#define vio_map_page(dev, page, off, size, dir) \
-	vio_map_single(dev, (page_address(page) + (off)), size, dir)
-#define vio_unmap_page(dev,addr,sz,dir) vio_unmap_single(dev,addr,sz,dir)
-
-static inline int vio_set_dma_mask(struct vio_dev *dev, u64 mask)
-{
-	return -EIO;
-}
+extern struct dma_mapping_ops vio_dma_ops;
 
 extern struct bus_type vio_bus_type;
_
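Stripped of kernel specifics, the dispatch that replaces the per-bus
if/else chains reduces to the standalone sketch below. It is illustrative
only: the stub ops tables, the toy struct device and the bus flag are
invented for the example; only the shape of get_dma_ops() mirrors the
patch.

    #include <stdio.h>

    /* One table of function pointers per bus, as in dma_mapping_ops. */
    struct dma_mapping_ops {
    	int (*dma_supported)(void *dev, unsigned long long mask);
    };

    static int pci_supported(void *dev, unsigned long long mask)
    {
    	(void)dev; (void)mask;
    	return 1;
    }

    static int vio_supported(void *dev, unsigned long long mask)
    {
    	(void)dev; (void)mask;
    	return 1;
    }

    static struct dma_mapping_ops pci_dma_ops = { pci_supported };
    static struct dma_mapping_ops vio_dma_ops = { vio_supported };

    struct device { int is_vio; };	/* stand-in for dev->bus */

    /* Map a device's bus to its ops table, like get_dma_ops() above. */
    static struct dma_mapping_ops *get_dma_ops(struct device *dev)
    {
    	return dev->is_vio ? &vio_dma_ops : &pci_dma_ops;
    }

    int main(void)
    {
    	struct device d = { 0 };
    	struct dma_mapping_ops *ops = get_dma_ops(&d);

    	/* Generic code calls through the table; the BUG() path in the
    	 * patch corresponds to ops being NULL for an unknown bus. */
    	if (ops)
    		printf("dma_supported: %d\n", ops->dma_supported(&d, ~0ULL));
    	return 0;
    }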