author     Catalin Marinas <catalin.marinas@arm.com>   2012-08-28 13:38:13 -0700
committer  Catalin Marinas <catalin.marinas@arm.com>   2012-09-04 22:41:16 +0100
commit     0df52a49f307adb182ac46cafa58798e078487e0 (patch)
tree       2d75765eb3754aa1880aac4c00dcb43671c508b7
parent     5752d2ef0c49f396a83045a9d27cff857d88a4f6 (diff)
download   linux-aarch64-0df52a49f307adb182ac46cafa58798e078487e0.tar.gz
arm64: Assume the DMA is coherent and simplify the API implementation
The initial code used swiotlb but assumed that the DMA is not coherent. Until we get a platform that needs this, just assume that the DMA is coherent and simplify the default DMA API implementation.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--  arch/arm64/mm/dma-mapping.c  150
1 file changed, 9 insertions(+), 141 deletions(-)
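For context (not part of the commit): a minimal, hypothetical sketch of the path this patch simplifies. A driver calling dma_alloc_coherent() on arm64 ends up in arm64_swiotlb_alloc_coherent(), which after this change just forwards to swiotlb_alloc_coherent(); no uncached vmap() alias of the buffer is created any more. The helper name example_dma_buffer_roundtrip and the buffer size below are illustrative only.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/*
 * Hypothetical helper, not from the patch: allocate, use and free a
 * coherent DMA buffer through the simplified arm64 swiotlb ops.
 */
static int example_dma_buffer_roundtrip(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* Backed by arm64_swiotlb_alloc_coherent() via arm64_swiotlb_dma_ops. */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/*
	 * ... program dma_handle into the device; the CPU can use cpu_addr
	 * directly, since the DMA is assumed to be coherent ...
	 */

	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}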
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4e5871dd8b4..5b197ec57cf 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -33,159 +33,27 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
- struct page *page, **map;
- void *ptr;
- int order = get_order(size);
- int i;
-
- if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
- flags |= GFP_DMA;
-
- ptr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
- if (!ptr)
- goto no_mem;
- map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
- if (!map)
- goto no_map;
-
- /* remove any dirty cache lines on the kernel alias */
- dmac_flush_range(ptr, ptr + size);
-
- /* create a coherent mapping */
- page = virt_to_page(ptr);
- for (i = 0; i < (size >> PAGE_SHIFT); i++)
- map[i] = page + i;
- ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
- pgprot_dmacoherent(pgprot_default));
- kfree(map);
- if (!ptr)
- goto no_map;
-
- return ptr;
-
-no_map:
- swiotlb_free_coherent(dev, size, ptr, *dma_handle);
-no_mem:
- *dma_handle = ~0;
- return NULL;
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
- vunmap(vaddr);
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
-static dma_addr_t arm64_swiotlb_map_page(struct device *dev,
- struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- dma_addr_t dev_addr;
-
- dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
- dmac_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
-
- return dev_addr;
-}
-
-
-static void arm64_swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- dmac_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
-}
-
-static int arm64_swiotlb_map_sg_attrs(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct scatterlist *sg;
- int i, ret;
-
- ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
- for_each_sg(sgl, sg, ret, i)
- dmac_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
-
- return ret;
-}
-
-static void arm64_swiotlb_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nelems, i)
- dmac_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
-}
-
-static void arm64_swiotlb_sync_single_for_cpu(struct device *dev,
- dma_addr_t dev_addr,
- size_t size,
- enum dma_data_direction dir)
-{
- dmac_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
-}
-
-static void arm64_swiotlb_sync_single_for_device(struct device *dev,
- dma_addr_t dev_addr,
- size_t size,
- enum dma_data_direction dir)
-{
- swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
- dmac_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
-}
-
-static void arm64_swiotlb_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nelems, i)
- dmac_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
-}
-
-static void arm64_swiotlb_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl,
- int nelems,
- enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
- for_each_sg(sgl, sg, nelems, i)
- dmac_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
-}
-
static struct dma_map_ops arm64_swiotlb_dma_ops = {
.alloc = arm64_swiotlb_alloc_coherent,
.free = arm64_swiotlb_free_coherent,
- .map_page = arm64_swiotlb_map_page,
- .unmap_page = arm64_swiotlb_unmap_page,
- .map_sg = arm64_swiotlb_map_sg_attrs,
- .unmap_sg = arm64_swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = arm64_swiotlb_sync_single_for_cpu,
- .sync_single_for_device = arm64_swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = arm64_swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = arm64_swiotlb_sync_sg_for_device,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};