Diffstat (limited to 'gxp-dma-iommu.c')
-rw-r--r-- | gxp-dma-iommu.c | 858
1 file changed, 343 insertions(+), 515 deletions(-)
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 97322f5..1480761 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2021 Google LLC
  */
 
+#include <linux/bits.h>
 #include <linux/dma-iommu.h>
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
@@ -14,16 +15,16 @@
 
 #include "gxp-config.h"
 #include "gxp-dma.h"
-#include "gxp-iova.h"
+#include "gxp-mailbox.h"
 #include "gxp-mapping.h"
 #include "gxp-pm.h"
-#include "gxp-vd.h"
+#include "gxp-ssmt.h"
+#include "gxp.h"
 
 struct gxp_dma_iommu_manager {
 	struct gxp_dma_manager dma_mgr;
-	struct iommu_domain *default_domain;
-	void __iomem *idma_ssmt_base;
-	void __iomem *inst_data_ssmt_base;
+	struct gxp_iommu_domain *default_domain;
+	struct gxp_ssmt ssmt;
 };
 
 /**
@@ -40,7 +41,13 @@ struct gxp_dma_iommu_manager {
 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 			    unsigned long attrs)
 {
-	int prot = coherent ? IOMMU_CACHE : 0;
+	int prot = 0;
+
+	if (coherent) {
+#ifdef GXP_IS_DMA_COHERENT
+		prot = IOMMU_CACHE;
+#endif
+	}
 
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
@@ -56,81 +63,29 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 	}
 }
 
-/* SSMT handling */
-
-#define INST_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (0 << 3))
-#define DATA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (1 << 3))
-#define IDMA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4))
-
-static inline void ssmt_set_vid_for_sid(void __iomem *ssmt, int vid, u8 sid)
+static int map_flags_to_iommu_prot(enum dma_data_direction dir,
+				   unsigned long attrs, u32 gxp_dma_flags)
 {
-	/* NS_READ_STREAM_VID_<sid> */
-	writel(vid, (ssmt) + 0x1000u + (0x4u * (sid)));
-	/* NS_WRITE_STREAM_VID_<sid> */
-	writel(vid, (ssmt) + 0x1200u + (0x4u * (sid)));
+	bool coherent = gxp_dma_flags & GXP_MAP_COHERENT ? 1 : 0;
+
+	return dma_info_to_prot(dir, coherent, attrs);
 }
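
For reference, the switch body of dma_info_to_prot() is elided by the hunk above. A minimal, self-contained sketch of the conventional direction-to-permission mapping it performs — the enum and flag values below are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's definitions (assumed values). */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };
#define IOMMU_READ  (1 << 0)
#define IOMMU_WRITE (1 << 1)

/* The device reads a buffer the CPU wrote (DMA_TO_DEVICE) and writes a
 * buffer the CPU will read (DMA_FROM_DEVICE). */
static int dir_to_prot(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return IOMMU_READ;
	case DMA_FROM_DEVICE:
		return IOMMU_WRITE;
	default:
		return 0;
	}
}

int main(void)
{
	printf("DMA_TO_DEVICE -> prot 0x%x\n", dir_to_prot(DMA_TO_DEVICE));
	return 0;
}
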
 
 static int gxp_dma_ssmt_program(struct gxp_dev *gxp,
-				struct gxp_virtual_device *vd, uint virt_core,
-				uint core)
+				struct iommu_domain *domain, uint core_list)
 {
-/* SSMT is not supported in unittests */
-#ifndef CONFIG_GXP_TEST
 	struct gxp_dma_iommu_manager *mgr = container_of(
 		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	int core_vid;
-
-	core_vid = iommu_aux_get_pasid(vd->core_domains[virt_core], gxp->dev);
-	dev_dbg(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
-		core_vid);
-	ssmt_set_vid_for_sid(mgr->idma_ssmt_base, core_vid,
-			     IDMA_SID_FOR_CORE(core));
-	ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
-			     INST_SID_FOR_CORE(core));
-	ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
-			     DATA_SID_FOR_CORE(core));
-#endif
-	return 0;
-}
-
-static inline int ssmt_init(struct gxp_dev *gxp,
-			    struct gxp_dma_iommu_manager *mgr)
-{
-	struct platform_device *pdev =
-		container_of(gxp->dev, struct platform_device, dev);
-	struct resource *r;
-
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ssmt_idma");
-	if (!r) {
-		dev_err(gxp->dev, "Failed to find IDMA SSMT register base\n");
-		return -EINVAL;
-	}
-
-	mgr->idma_ssmt_base = devm_ioremap_resource(gxp->dev, r);
-	if (IS_ERR(mgr->idma_ssmt_base)) {
-		dev_err(gxp->dev,
-			"Failed to map IDMA SSMT register base (%ld)\n",
-			PTR_ERR(mgr->idma_ssmt_base));
-		return PTR_ERR(mgr->idma_ssmt_base);
-	}
-
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					 "ssmt_inst_data");
-	if (!r) {
-		dev_err(gxp->dev,
-			"Failed to find instruction/data SSMT register base\n");
-		return -EINVAL;
-	}
-
-	mgr->inst_data_ssmt_base = devm_ioremap_resource(gxp->dev, r);
-	if (IS_ERR(mgr->inst_data_ssmt_base)) {
-		dev_err(gxp->dev,
-			"Failed to map instruction/data SSMT register base (%ld)\n",
-			PTR_ERR(mgr->inst_data_ssmt_base));
-		return PTR_ERR(mgr->inst_data_ssmt_base);
-	}
-
+	int pasid;
+	uint core;
+
+	pasid = iommu_aux_get_pasid(domain, gxp->dev);
+	for (core = 0; core < GXP_NUM_CORES; core++)
+		if (BIT(core) & core_list) {
+			dev_dbg(gxp->dev, "Assign core%u to PASID %d\n", core,
+				pasid);
+			gxp_ssmt_set_core_vid(&mgr->ssmt, core, pasid);
+		}
 	return 0;
 }
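
The rewritten gxp_dma_ssmt_program() above takes a physical-core bitmask instead of a (virtual device, virtual core) pair: every core whose bit is set in core_list has its SSMT streams routed to the domain's PASID. A runnable sketch of the same BIT()-mask walk, assuming a four-core configuration:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define GXP_NUM_CORES 4 /* assumed core count for this sketch */

int main(void)
{
	unsigned int core_list = BIT(0) | BIT(2); /* cores 0 and 2 */
	unsigned int core;

	for (core = 0; core < GXP_NUM_CORES; core++)
		if (BIT(core) & core_list)
			printf("program SSMT VID for core %u\n", core);
	return 0;
}
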
@@ -170,8 +125,100 @@ static int sysmmu_fault_handler(struct iommu_fault *fault, void *token)
 	return -EAGAIN;
 }
 
+#if GXP_HAS_LAP
+
+/* No need to map CSRs when local access path exists. */
+
+#define gxp_map_csrs(...) 0
+#define gxp_unmap_csrs(...)
+
+#else /* !GXP_HAS_LAP */
+
+#define SYNC_BARRIERS_SIZE 0x100000
+
+static int gxp_map_csrs(struct gxp_dev *gxp, struct iommu_domain *domain,
+			struct gxp_mapped_resource *regs)
+{
+	int ret = iommu_map(domain, GXP_IOVA_AURORA_TOP, gxp->regs.paddr,
+			    gxp->regs.size, IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		return ret;
+	/*
+	 * Firmware expects to access the sync barriers at a separate
+	 * address, lower than the rest of the AURORA_TOP registers.
+	 */
+	ret = iommu_map(domain, GXP_IOVA_SYNC_BARRIERS,
+			gxp->regs.paddr + GXP_IOVA_SYNC_BARRIERS,
+			SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
+	if (ret) {
+		iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void gxp_unmap_csrs(struct gxp_dev *gxp, struct iommu_domain *domain,
+			   struct gxp_mapped_resource *regs)
+{
+	iommu_unmap(domain, GXP_IOVA_SYNC_BARRIERS, SYNC_BARRIERS_SIZE);
+	iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
+}
+
+#endif /* GXP_HAS_LAP */
+
+/* Maps the shared buffer region to @domain. */
+static int gxp_map_core_shared_buffer(struct gxp_dev *gxp,
+				      struct iommu_domain *domain,
+				      u8 slice_index)
+{
+	size_t shared_size = gxp->shared_slice_size;
+
+	if (!gxp->shared_buf.paddr)
+		return 0;
+	return iommu_map(domain, gxp->shared_buf.daddr,
+			 gxp->shared_buf.paddr + shared_size * slice_index,
+			 shared_size, IOMMU_READ | IOMMU_WRITE);
+}
+
+/* Reverts gxp_map_core_shared_buffer. */
+static void gxp_unmap_core_shared_buffer(struct gxp_dev *gxp,
+					 struct iommu_domain *domain)
+{
+	size_t shared_size = gxp->shared_slice_size;
+
+	if (!gxp->shared_buf.paddr)
+		return;
+	iommu_unmap(domain, gxp->shared_buf.daddr, shared_size);
+}
+
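
gxp_map_core_shared_buffer() above is plain slice arithmetic: slice N of the shared buffer starts shared_slice_size * N bytes past its physical base, so each virtual device sees only its own window. A worked example with assumed sizes:

#include <stdio.h>

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned long paddr = 0x90000000UL;   /* shared_buf.paddr */
	unsigned long slice_size = 0x10000UL; /* gxp->shared_slice_size */
	unsigned int slice_index = 3;

	printf("slice %u maps phys 0x%lx..0x%lx\n", slice_index,
	       paddr + slice_size * slice_index,
	       paddr + slice_size * (slice_index + 1) - 1);
	return 0;
}
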
 /* gxp-dma.h Interface */
 
+uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
+			     struct gxp_iommu_domain *gdomain)
+{
+	return iommu_aux_get_pasid(gdomain->domain, gxp->dev);
+}
+
+struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp)
+{
+	struct gxp_iommu_domain *gdomain = gxp->default_domain;
+
+	if (IS_ERR_OR_NULL(gdomain)) {
+		gdomain = devm_kzalloc(gxp->dev, sizeof(*gdomain), GFP_KERNEL);
+		if (!gdomain)
+			return ERR_PTR(-ENOMEM);
+		gdomain->domain = iommu_get_domain_for_dev(gxp->dev);
+		if (!gdomain->domain) {
+			devm_kfree(gxp->dev, gdomain);
+			return ERR_PTR(-ENOMEM);
+		}
+		gxp->default_domain = gdomain;
+	}
+
+	return gdomain;
+}
+
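
A hedged usage sketch of the two helpers just added: gxp_iommu_get_domain_for_dev() lazily allocates the wrapper for the device's default domain, and gxp_iommu_aux_get_pasid() reports the PASID the IOMMU assigned to an aux domain. The function below is illustrative only and not part of this change:

/* Sketch: look up the default-domain wrapper, then log an aux domain's
 * PASID. Error handling trimmed; compiles only in this driver's context. */
static void example_query_pasid(struct gxp_dev *gxp,
				struct gxp_iommu_domain *aux_domain)
{
	struct gxp_iommu_domain *def = gxp_iommu_get_domain_for_dev(gxp);

	if (IS_ERR(def))
		return;
	dev_dbg(gxp->dev, "aux domain PASID=%u\n",
		gxp_iommu_aux_get_pasid(gxp, aux_domain));
}
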
 int gxp_dma_init(struct gxp_dev *gxp)
 {
 	struct gxp_dma_iommu_manager *mgr;
@@ -188,20 +235,16 @@ int gxp_dma_init(struct gxp_dev *gxp)
 	if (!mgr)
 		return -ENOMEM;
 
-/* TODO(b/201505925): remove this and prepare a of_node in unittests */
-/* SSMT is not supported in unittests */
-#ifndef CONFIG_GXP_TEST
-	ret = ssmt_init(gxp, mgr);
+	ret = gxp_ssmt_init(gxp, &mgr->ssmt);
 	if (ret) {
 		dev_err(gxp->dev, "Failed to find SSMT\n");
 		return ret;
 	}
-#endif
 
-	mgr->default_domain = iommu_get_domain_for_dev(gxp->dev);
-	if (!mgr->default_domain) {
+	mgr->default_domain = gxp_iommu_get_domain_for_dev(gxp);
+	if (IS_ERR(mgr->default_domain)) {
 		dev_err(gxp->dev, "Failed to find default IOMMU domain\n");
-		return -EIO;
+		return PTR_ERR(mgr->default_domain);
 	}
 
 	if (iommu_register_device_fault_handler(gxp->dev, sysmmu_fault_handler,
@@ -216,8 +259,14 @@ int gxp_dma_init(struct gxp_dev *gxp)
 		goto err_unreg_fault_handler;
 	}
 
+#if IS_ENABLED(CONFIG_ANDROID)
 	/* Enable best fit algorithm to minimize fragmentation */
-	iommu_dma_enable_best_fit_algo(gxp->dev);
+	ret = iommu_dma_enable_best_fit_algo(gxp->dev);
+	if (ret)
+		dev_warn(gxp->dev,
+			 "Failed to enable best-fit IOVA allocator (%d)\n",
+			 ret);
+#endif
 
 	gxp->dma_mgr = &(mgr->dma_mgr);
 
@@ -238,95 +287,91 @@ void gxp_dma_exit(struct gxp_dev *gxp)
 			"Failed to unregister SysMMU fault handler\n");
 }
 
-#define SYNC_BARRIERS_SIZE 0x100000
-#define SYNC_BARRIERS_TOP_OFFSET 0x100000
-#define EXT_TPU_MBX_SIZE 0x2000
-
-/* Offset from mailbox base to the device interface that needs to be mapped */
-#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
+#define EXT_TPU_MBX_SIZE 0x2000
 
 void gxp_dma_init_default_resources(struct gxp_dev *gxp)
 {
 	unsigned int core;
+	int i;
 
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
+	for (i = 0; i < GXP_NUM_MAILBOXES; i++)
+		gxp->mbx[i].daddr = GXP_IOVA_MAILBOX(i);
+	for (core = 0; core < GXP_NUM_CORES; core++)
 		gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
-	}
-	gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
 	gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
 }
 
 int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
-				 struct gxp_virtual_device *vd, uint virt_core,
-				 uint core)
+				 struct gxp_iommu_domain *gdomain,
+				 uint core_list)
 {
 	int ret;
 
-	ret = iommu_aux_attach_device(vd->core_domains[virt_core], gxp->dev);
+	ret = iommu_aux_attach_device(gdomain->domain, gxp->dev);
 	if (ret)
 		goto out;
 
-	gxp_dma_ssmt_program(gxp, vd, virt_core, core);
+	gxp_dma_ssmt_program(gxp, gdomain->domain, core_list);
 out:
 	return ret;
 }
 
 void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
-				  struct gxp_virtual_device *vd, uint virt_core)
+				  struct gxp_iommu_domain *gdomain)
 {
-	iommu_aux_detach_device(vd->core_domains[virt_core], gxp->dev);
+	iommu_aux_detach_device(gdomain->domain, gxp->dev);
 }
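
Attach and detach now pair per domain rather than per virtual core. An illustrative bring-up/tear-down pairing — not in-tree code — assuming @gdomain was allocated elsewhere and @core_list names the physical cores backing it:

/* Sketch: aux-attach the domain (which assigns a PASID), let
 * gxp_dma_domain_attach_device() program the SSMT, run work, detach. */
static int example_vd_session(struct gxp_dev *gxp,
			      struct gxp_iommu_domain *gdomain, uint core_list)
{
	int ret = gxp_dma_domain_attach_device(gxp, gdomain, core_list);

	if (ret)
		return ret;
	/* ... map resources and run workloads here ... */
	gxp_dma_domain_detach_device(gxp, gdomain);
	return 0;
}
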
 
 int gxp_dma_map_core_resources(struct gxp_dev *gxp,
-			       struct gxp_virtual_device *vd, uint virt_core,
-			       uint core)
+			       struct gxp_iommu_domain *gdomain, uint core_list,
+			       u8 slice_index)
 {
 	int ret;
+	uint i;
+	struct iommu_domain *domain = gdomain->domain;
 
-	ret = iommu_map(vd->core_domains[virt_core], gxp->regs.daddr,
-			gxp->regs.paddr, gxp->regs.size,
-			IOMMU_READ | IOMMU_WRITE);
-	if (ret)
-		goto err;
-	/*
-	 * Firmware expects to access the sync barriers at a separate
-	 * address, lower than the rest of the AURORA_TOP registers.
-	 */
-	ret = iommu_map(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
-			gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
-			SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
-	if (ret)
-		goto err;
-	ret = iommu_map(vd->core_domains[virt_core], gxp->mbx[core].daddr,
-			gxp->mbx[core].paddr + MAILBOX_DEVICE_INTERFACE_OFFSET,
-			gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
+	ret = gxp_map_csrs(gxp, domain, &gxp->regs);
 	if (ret)
 		goto err;
+
+	for (i = 0; i < GXP_NUM_CORES; i++) {
+		if (!(BIT(i) & core_list))
+			continue;
+		ret = iommu_map(domain, gxp->mbx[i].daddr,
+				gxp->mbx[i].paddr +
+					MAILBOX_DEVICE_INTERFACE_OFFSET,
+				gxp->mbx[i].size, IOMMU_READ | IOMMU_WRITE);
+		if (ret)
+			goto err;
+	}
 	/*
 	 * TODO(b/202213606): Map FW regions of all cores in a VD for
 	 * each other at VD creation.
 	 */
-	ret = iommu_map(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
-			gxp->fwbufs[0].paddr,
+	ret = iommu_map(domain, gxp->fwbufs[0].daddr, gxp->fwbufs[0].paddr,
 			gxp->fwbufs[0].size * GXP_NUM_CORES,
 			IOMMU_READ | IOMMU_WRITE);
 	if (ret)
 		goto err;
-	ret = iommu_map(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
-			gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
-			IOMMU_READ | IOMMU_WRITE);
+	ret = iommu_map(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.paddr,
+			gxp->fwdatabuf.size, IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		goto err;
+	ret = gxp_map_core_shared_buffer(gxp, domain, slice_index);
 	if (ret)
 		goto err;
 	/* Only map the TPU mailboxes if they were found on probe */
 	if (gxp->tpu_dev.mbx_paddr) {
-		ret = iommu_map(
-			vd->core_domains[virt_core],
-			GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
-			gxp->tpu_dev.mbx_paddr +
-				core * EXT_TPU_MBX_SIZE,
-			EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
-		if (ret)
-			goto err;
+		for (i = 0; i < GXP_NUM_CORES; i++) {
+			if (!(BIT(i) & core_list))
+				continue;
+			ret = iommu_map(
+				domain,
+				GXP_IOVA_EXT_TPU_MBX + i * EXT_TPU_MBX_SIZE,
+				gxp->tpu_dev.mbx_paddr + i * EXT_TPU_MBX_SIZE,
+				EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
			if (ret)
				goto err;
+		}
 	}
 
 	return ret;
 
@@ -336,42 +381,47 @@ err:
 	 * Any resource that hadn't been mapped yet will cause `iommu_unmap()`
 	 * to return immediately, so its safe to try to unmap everything.
 	 */
-	gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
+	gxp_dma_unmap_core_resources(gxp, gdomain, core_list);
 
 	return ret;
 }
 
 void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
-				  struct gxp_virtual_device *vd, uint virt_core,
-				  uint core)
+				  struct gxp_iommu_domain *gdomain,
+				  uint core_list)
 {
+	uint i;
+	struct iommu_domain *domain = gdomain->domain;
+
 	/* Only unmap the TPU mailboxes if they were found on probe */
 	if (gxp->tpu_dev.mbx_paddr) {
-		iommu_unmap(vd->core_domains[virt_core],
-			    GXP_IOVA_EXT_TPU_MBX +
-				    core * EXT_TPU_MBX_SIZE,
-			    EXT_TPU_MBX_SIZE);
+		for (i = 0; i < GXP_NUM_CORES; i++) {
+			if (!(BIT(i) & core_list))
+				continue;
+			iommu_unmap(domain,
+				    GXP_IOVA_EXT_TPU_MBX + i * EXT_TPU_MBX_SIZE,
+				    EXT_TPU_MBX_SIZE);
+		}
 	}
-	iommu_unmap(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
-		    gxp->fwdatabuf.size);
+	gxp_unmap_core_shared_buffer(gxp, domain);
+	iommu_unmap(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.size);
 	/*
 	 * TODO(b/202213606): A core should only have access to the FW
 	 * of other cores if they're in the same VD, and have the FW
 	 * region unmapped on VD destruction.
 	 */
-	iommu_unmap(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
+	iommu_unmap(domain, gxp->fwbufs[0].daddr,
 		    gxp->fwbufs[0].size * GXP_NUM_CORES);
-	iommu_unmap(vd->core_domains[virt_core], gxp->mbx[core].daddr,
-		    gxp->mbx[core].size);
-	iommu_unmap(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
-		    SYNC_BARRIERS_SIZE);
-	iommu_unmap(vd->core_domains[virt_core], gxp->regs.daddr,
-		    gxp->regs.size);
+	for (i = 0; i < GXP_NUM_CORES; i++) {
+		if (!(BIT(i) & core_list))
+			continue;
+		iommu_unmap(domain, gxp->mbx[i].daddr, gxp->mbx[i].size);
+	}
+	gxp_unmap_csrs(gxp, domain, &gxp->regs);
 }
 
-static inline struct sg_table *
-alloc_sgt_for_buffer(void *ptr, size_t size,
-		     struct iommu_domain *domain,
-		     dma_addr_t daddr)
+static inline struct sg_table *alloc_sgt_for_buffer(void *ptr, size_t size,
+						    struct iommu_domain *domain,
+						    dma_addr_t daddr)
 {
 	struct sg_table *sgt;
 	ulong offset;
@@ -409,7 +459,7 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
 	 */
 	size_in_page = size > (PAGE_SIZE - offset_in_page(ptr)) ?
 			       PAGE_SIZE - offset_in_page(ptr) :
-			size;
+			       size;
 	page = phys_to_page(iommu_iova_to_phys(domain, daddr));
 	sg_set_page(next, page, size_in_page, offset_in_page(ptr));
 	size -= size_in_page;
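
The reflowed ternary above is first-chunk arithmetic: a buffer may begin mid-page, so the first scatterlist entry can cover at most the remainder of that page. A runnable example assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed page size */
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
	char *ptr = (char *)0x1300;  /* starts 0x300 bytes into a page */
	unsigned long size = 10000;  /* total buffer size */
	unsigned long size_in_page = size > (PAGE_SIZE - offset_in_page(ptr)) ?
					     PAGE_SIZE - offset_in_page(ptr) :
					     size;

	/* Prints 3328, i.e. 4096 - 0x300: the rest of the first page. */
	printf("first chunk: %lu bytes\n", size_in_page);
	return 0;
}
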
@@ -437,136 +487,114 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
 	return sgt;
 }
 
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-			   uint virt_core_list, uint core_list,
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+	!IS_ENABLED(CONFIG_GXP_GEM5)
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp,
+			   struct gxp_iommu_domain *gdomain, uint core_list,
 			   struct edgetpu_ext_mailbox_info *mbx_info)
 {
-	uint orig_virt_core_list = virt_core_list;
+	uint orig_core_list = core_list;
 	u64 queue_iova;
-	uint virt_core;
 	int core;
 	int ret;
 	int i = 0;
+	struct iommu_domain *domain = gdomain->domain;
 
-	while (virt_core_list) {
+	while (core_list) {
 		phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
 		phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;
 
-		virt_core = ffs(virt_core_list) - 1;
-		virt_core_list &= ~BIT(virt_core);
 		core = ffs(core_list) - 1;
-		core_list &= ~BIT(core);
 		queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
-		ret = iommu_map(vd->core_domains[virt_core], queue_iova,
-				cmdq_pa, mbx_info->cmdq_size, IOMMU_WRITE);
+		ret = iommu_map(domain, queue_iova, cmdq_pa,
+				mbx_info->cmdq_size, IOMMU_WRITE);
 		if (ret)
 			goto error;
-		ret = iommu_map(vd->core_domains[virt_core],
-				queue_iova + mbx_info->cmdq_size, respq_pa,
-				mbx_info->respq_size, IOMMU_READ);
+		ret = iommu_map(domain, queue_iova + mbx_info->cmdq_size,
+				respq_pa, mbx_info->respq_size, IOMMU_READ);
 		if (ret) {
-			iommu_unmap(vd->core_domains[virt_core], queue_iova,
-				    mbx_info->cmdq_size);
+			iommu_unmap(domain, queue_iova, mbx_info->cmdq_size);
 			goto error;
 		}
+		core_list &= ~BIT(core);
 	}
 	return 0;
 
 error:
-	virt_core_list ^= orig_virt_core_list;
-	while (virt_core_list) {
-		virt_core = ffs(virt_core_list) - 1;
-		virt_core_list &= ~BIT(virt_core);
+	core_list ^= orig_core_list;
+	while (core_list) {
 		core = ffs(core_list) - 1;
 		core_list &= ~BIT(core);
 		queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
-		iommu_unmap(vd->core_domains[virt_core], queue_iova,
-			    mbx_info->cmdq_size);
-		iommu_unmap(vd->core_domains[virt_core], queue_iova +
-			    mbx_info->cmdq_size, mbx_info->respq_size);
+		iommu_unmap(domain, queue_iova, mbx_info->cmdq_size);
+		iommu_unmap(domain, queue_iova + mbx_info->cmdq_size,
+			    mbx_info->respq_size);
 	}
 	return ret;
 }
 
 void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
-			      struct gxp_virtual_device *vd,
+			      struct gxp_iommu_domain *gdomain,
 			      struct gxp_tpu_mbx_desc mbx_desc)
 {
-	uint virt_core_list = mbx_desc.virt_core_list;
 	uint core_list = mbx_desc.phys_core_list;
 	u64 queue_iova;
 	int core;
-	uint virt_core;
+	struct iommu_domain *domain = gdomain->domain;
 
-	while (virt_core_list) {
-		virt_core = ffs(virt_core_list) - 1;
-		virt_core_list &= ~BIT(virt_core);
+	while (core_list) {
 		core = ffs(core_list) - 1;
 		core_list &= ~BIT(core);
 		queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
-		iommu_unmap(vd->core_domains[virt_core], queue_iova,
-			    mbx_desc.cmdq_size);
-		iommu_unmap(vd->core_domains[virt_core], queue_iova +
-			    mbx_desc.cmdq_size, mbx_desc.respq_size);
+		iommu_unmap(domain, queue_iova, mbx_desc.cmdq_size);
+		iommu_unmap(domain, queue_iova + mbx_desc.cmdq_size,
+			    mbx_desc.respq_size);
 	}
 }
-#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
+#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
 
-int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
-					  struct gxp_virtual_device *vd,
-					  uint virt_core_list, size_t size,
-					  dma_addr_t dma_handle,
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp,
+					  struct gxp_coherent_buf *buf,
+					  struct gxp_iommu_domain *gdomain,
 					  uint gxp_dma_flags)
 {
 	struct gxp_dma_iommu_manager *mgr = container_of(
 		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
 	struct sg_table *sgt;
-	int virt_core;
 	ssize_t size_mapped;
+	int ret = 0;
+	size_t size;
+	struct iommu_domain *domain = gdomain->domain;
 
-	size = size < PAGE_SIZE ? PAGE_SIZE : size;
-	sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, dma_handle);
+	size = buf->size;
+	sgt = alloc_sgt_for_buffer(buf->vaddr, buf->size,
+				   mgr->default_domain->domain, buf->dma_addr);
 	if (IS_ERR(sgt)) {
 		dev_err(gxp->dev,
 			"Failed to allocate sgt for coherent buffer\n");
-		return -ENOMEM;
-	}
-
-	/* Create identical mappings in the specified cores' domains */
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		/*
-		 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
-		 * `ssize_t` to encode errors that earlier versions throw out.
-		 * Explicitly cast here for backwards compatibility.
-		 */
-		size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
-						    dma_handle, sgt->sgl,
-						    sgt->orig_nents,
-						    IOMMU_READ | IOMMU_WRITE);
-		if (size_mapped != size)
-			goto err;
+		return PTR_ERR(sgt);
 	}
 
-	sg_free_table(sgt);
-	kfree(sgt);
-	return 0;
-
-err:
-	for (virt_core -= 1; virt_core >= 0; virt_core--)
-		iommu_unmap(vd->core_domains[virt_core], dma_handle, size);
+	/*
+	 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
	 * `ssize_t` to encode errors that earlier versions throw out.
	 * Explicitly cast here for backwards compatibility.
	 */
+	size_mapped = (ssize_t)iommu_map_sg(domain, buf->dma_addr, sgt->sgl,
+					    sgt->orig_nents,
+					    IOMMU_READ | IOMMU_WRITE);
+	if (size_mapped != size)
+		ret = size_mapped < 0 ? -EINVAL : (int)size_mapped;
 
 	sg_free_table(sgt);
 	kfree(sgt);
-	return -EINVAL;
+	return ret;
 }
 
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-			     uint virt_core_list, size_t size,
-			     dma_addr_t *dma_handle, gfp_t flag,
-			     uint gxp_dma_flags)
+int gxp_dma_alloc_coherent_buf(struct gxp_dev *gxp,
+			       struct gxp_iommu_domain *gdomain, size_t size,
+			       gfp_t flag, uint gxp_dma_flags,
+			       struct gxp_coherent_buf *buffer)
 {
 	void *buf;
 	dma_addr_t daddr;
@@ -578,226 +606,55 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
 	buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
 	if (!buf) {
 		dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
-		return NULL;
+		return -ENOMEM;
 	}
-	if (vd != NULL) {
-		ret = gxp_dma_map_allocated_coherent_buffer(gxp, buf, vd,
-							    virt_core_list,
-							    size, daddr,
-							    gxp_dma_flags);
+
+	buffer->vaddr = buf;
+	buffer->size = size;
+	buffer->dma_addr = daddr;
+
+	if (gdomain != NULL) {
+		ret = gxp_dma_map_allocated_coherent_buffer(
+			gxp, buffer, gdomain, gxp_dma_flags);
 		if (ret) {
+			buffer->vaddr = NULL;
+			buffer->size = 0;
 			dma_free_coherent(gxp->dev, size, buf, daddr);
-			return NULL;
+			return ret;
 		}
 	}
 
-	if (dma_handle)
-		*dma_handle = daddr;
+	buffer->dsp_addr = daddr;
 
-	return buf;
+	return 0;
 }
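
With this change, callers own a struct gxp_coherent_buf and receive an int error code instead of a raw kernel vaddr. An illustrative call sequence — not part of this change, error handling abbreviated:

/* Sketch: allocate a one-page coherent buffer mapped into @gdomain,
 * use it, then release it. buf.vaddr is the CPU view and buf.dsp_addr
 * the device-visible address filled in by the allocator. */
static int example_coherent_alloc(struct gxp_dev *gxp,
				  struct gxp_iommu_domain *gdomain)
{
	struct gxp_coherent_buf buf;
	int ret;

	ret = gxp_dma_alloc_coherent_buf(gxp, gdomain, PAGE_SIZE, GFP_KERNEL,
					 0, &buf);
	if (ret)
		return ret;
	/* ... use buf.vaddr / buf.dsp_addr ... */
	gxp_dma_free_coherent_buf(gxp, gdomain, &buf);
	return 0;
}
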
 
 void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
-					     struct gxp_virtual_device *vd,
-					     uint virt_core_list, size_t size,
-					     dma_addr_t dma_handle)
-{
-	int virt_core;
-
-	size = size < PAGE_SIZE ? PAGE_SIZE : size;
-
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (size !=
-		    iommu_unmap(vd->core_domains[virt_core], dma_handle, size))
-			dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
-	}
-}
-
-void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-			   uint virt_core_list, size_t size, void *cpu_addr,
-			   dma_addr_t dma_handle)
-{
-	if (vd != NULL)
-		gxp_dma_unmap_allocated_coherent_buffer(gxp, vd, virt_core_list,
-							size, dma_handle);
-	dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
-}
-
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
-			      struct gxp_virtual_device *vd,
-			      uint virt_core_list, void *cpu_addr, size_t size,
-			      enum dma_data_direction direction,
-			      unsigned long attrs, uint gxp_dma_flags)
-{
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	dma_addr_t daddr;
-	phys_addr_t paddr;
-	int prot = dma_info_to_prot(direction, 0, attrs);
-	int virt_core;
-
-	daddr = dma_map_single_attrs(gxp->dev, cpu_addr, size, direction,
-				     attrs);
-	if (dma_mapping_error(gxp->dev, daddr))
-		return DMA_MAPPING_ERROR;
-
-	paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
-			      prot))
-			goto err;
-	}
-
-	return daddr;
-
-err:
-	for (virt_core -= 1; virt_core >= 0; virt_core--)
-		iommu_unmap(vd->core_domains[virt_core], daddr, size);
-	dma_unmap_single_attrs(gxp->dev, daddr, size, direction,
-			       DMA_ATTR_SKIP_CPU_SYNC);
-	return DMA_MAPPING_ERROR;
-}
-
-void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-			  uint virt_core_list, dma_addr_t dma_addr, size_t size,
-			  enum dma_data_direction direction,
-			  unsigned long attrs)
-{
-	int virt_core;
-
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (size !=
-		    iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
-			dev_warn(gxp->dev, "Failed to unmap single\n");
-	}
-
-	dma_unmap_single_attrs(gxp->dev, dma_addr, size, direction, attrs);
-}
-
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-			    uint virt_core_list, struct page *page,
-			    unsigned long offset, size_t size,
-			    enum dma_data_direction direction,
-			    unsigned long attrs, uint gxp_dma_flags)
-{
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	dma_addr_t daddr;
-	phys_addr_t paddr;
-	int prot = dma_info_to_prot(direction, 0, attrs);
-	int virt_core;
-
-	daddr = dma_map_page_attrs(gxp->dev, page, offset, size, direction,
-				   attrs);
-	if (dma_mapping_error(gxp->dev, daddr))
-		return DMA_MAPPING_ERROR;
-
-	paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
-			      prot))
-			goto err;
-	}
-
-	return daddr;
-
-err:
-	for (virt_core -= 1; virt_core >= 0; virt_core--)
-		iommu_unmap(vd->core_domains[virt_core], daddr, size);
-	dma_unmap_page_attrs(gxp->dev, daddr, size, direction,
-			     DMA_ATTR_SKIP_CPU_SYNC);
-	return DMA_MAPPING_ERROR;
-}
-
-void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-			uint virt_core_list, dma_addr_t dma_addr, size_t size,
-			enum dma_data_direction direction, unsigned long attrs)
-{
-	int virt_core;
-
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (size !=
-		    iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
-			dev_warn(gxp->dev, "Failed to unmap page\n");
-	}
-
-	dma_unmap_page_attrs(gxp->dev, dma_addr, size, direction, attrs);
-}
-
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
-				struct gxp_virtual_device *vd,
-				uint virt_core_list, phys_addr_t phys_addr,
-				size_t size, enum dma_data_direction direction,
-				unsigned long attrs, uint gxp_dma_flags)
+					     struct gxp_iommu_domain *gdomain,
+					     struct gxp_coherent_buf *buf)
 {
-	dma_addr_t daddr;
-	int prot = dma_info_to_prot(direction, 0, attrs);
-	int virt_core;
-
-	daddr = dma_map_resource(gxp->dev, phys_addr, size, direction, attrs);
-	if (dma_mapping_error(gxp->dev, daddr))
-		return DMA_MAPPING_ERROR;
-
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (iommu_map(vd->core_domains[virt_core], daddr, phys_addr,
-			      size, prot))
-			goto err;
-	}
-
-	return daddr;
-
-err:
-	for (virt_core -= 1; virt_core >= 0; virt_core--)
-		iommu_unmap(vd->core_domains[virt_core], daddr, size);
-	dma_unmap_resource(gxp->dev, daddr, size, direction,
-			   DMA_ATTR_SKIP_CPU_SYNC);
-	return DMA_MAPPING_ERROR;
+	if (buf->size != iommu_unmap(gdomain->domain, buf->dma_addr, buf->size))
+		dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
 }
 
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-			    uint virt_core_list, dma_addr_t dma_addr,
-			    size_t size, enum dma_data_direction direction,
-			    unsigned long attrs)
+void gxp_dma_free_coherent_buf(struct gxp_dev *gxp,
+			       struct gxp_iommu_domain *gdomain,
+			       struct gxp_coherent_buf *buf)
 {
-	int virt_core;
-
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (size !=
-		    iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
-			dev_warn(gxp->dev, "Failed to unmap resource\n");
-	}
-
-	dma_unmap_resource(gxp->dev, dma_addr, size, direction, attrs);
+	if (gdomain != NULL)
+		gxp_dma_unmap_allocated_coherent_buffer(gxp, gdomain, buf);
+	dma_free_coherent(gxp->dev, buf->size, buf->vaddr, buf->dma_addr);
 }
 
-int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-		   int virt_core_list, struct scatterlist *sg, int nents,
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+		   struct scatterlist *sg, int nents,
 		   enum dma_data_direction direction, unsigned long attrs,
 		   uint gxp_dma_flags)
 {
 	int nents_mapped;
 	dma_addr_t daddr;
-	int prot = dma_info_to_prot(direction, 0, attrs);
-	int virt_core;
+	int prot = map_flags_to_iommu_prot(direction, attrs, gxp_dma_flags);
 	ssize_t size_mapped;
-	/* Variables needed to cleanup if an error occurs */
-	struct scatterlist *s;
-	int i;
-	size_t size = 0;
 
 	nents_mapped = dma_map_sg_attrs(gxp->dev, sg, nents, direction, attrs);
 	if (!nents_mapped)
@@ -805,71 +662,71 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
 	daddr = sg_dma_address(sg);
 
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		/*
-		 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
-		 * `ssize_t` to encode errors that earlier versions throw out.
-		 * Explicitly cast here for backwards compatibility.
-		 */
-		size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
-						    daddr, sg, nents, prot);
-		if (size_mapped <= 0)
-			goto err;
-	}
+	/*
+	 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+	 * `ssize_t` to encode errors that earlier versions throw out.
+	 * Explicitly cast here for backwards compatibility.
+	 */
+	size_mapped =
+		(ssize_t)iommu_map_sg(gdomain->domain, daddr, sg, nents, prot);
+	if (size_mapped <= 0)
+		goto err;
 
 	return nents_mapped;
 
 err:
-	for_each_sg(sg, s, nents, i) {
-		size += sg_dma_len(s);
-	}
-
-	for (virt_core -= 1; virt_core >= 0; virt_core--)
-		iommu_unmap(vd->core_domains[virt_core], daddr, size);
 	dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
 	return 0;
 }
 
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
-		      uint virt_core_list, struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+		      struct scatterlist *sg, int nents,
 		      enum dma_data_direction direction, unsigned long attrs)
 {
 	struct scatterlist *s;
 	int i;
 	size_t size = 0;
-	int virt_core;
 
-	for_each_sg(sg, s, nents, i) {
+	for_each_sg (sg, s, nents, i)
 		size += sg_dma_len(s);
-	}
 
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (!iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sg),
-				 size))
-			dev_warn(gxp->dev, "Failed to unmap sg\n");
-	}
+	if (!iommu_unmap(gdomain->domain, sg_dma_address(sg), size))
+		dev_warn(gxp->dev, "Failed to unmap sg\n");
 
 	dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
 }
 
-void gxp_dma_sync_single_for_cpu(struct gxp_dev *gxp, dma_addr_t dma_handle,
-				 size_t size,
-				 enum dma_data_direction direction)
+int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+			 dma_addr_t iova, struct sg_table *sgt, int prot)
 {
-	/* Syncing is not domain specific. Just call through to DMA API */
-	dma_sync_single_for_cpu(gxp->dev, dma_handle, size, direction);
+	ssize_t size_mapped;
+
+	size_mapped = (ssize_t)iommu_map_sg(gdomain->domain, iova, sgt->sgl,
+					    sgt->orig_nents, prot);
+	if (size_mapped <= 0) {
+		dev_err(gxp->dev, "map IOVA %pad to SG table failed: %d", &iova,
+			(int)size_mapped);
+		if (size_mapped == 0)
+			return -EINVAL;
+		return size_mapped;
+	}
+
+	return 0;
 }
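
The cast-and-check pattern around iommu_map_sg() recurs in gxp_dma_map_sg(), gxp_dma_map_iova_sgt(), and the dma-buf path further down. The version rule the comments describe could be factored into a single helper; a sketch, not something this change adds:

/* Sketch: normalize iommu_map_sg()'s cross-version return values to
 * 0 on success or a negative errno. Pre-5.15 kernels return 0 on any
 * failure; 5.15+ may return a negative errno encoded in a ssize_t. */
static int example_map_sg_checked(struct iommu_domain *domain, dma_addr_t iova,
				  struct scatterlist *sgl, int nents, int prot)
{
	ssize_t mapped = (ssize_t)iommu_map_sg(domain, iova, sgl, nents, prot);

	if (mapped < 0)
		return (int)mapped; /* 5.15+: negative errno */
	if (mapped == 0)
		return -EIO;        /* pre-5.15: zero means failure */
	return 0;
}
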
 
-void gxp_dma_sync_single_for_device(struct gxp_dev *gxp, dma_addr_t dma_handle,
-				    size_t size,
-				    enum dma_data_direction direction)
+void gxp_dma_unmap_iova_sgt(struct gxp_dev *gxp,
+			    struct gxp_iommu_domain *gdomain, dma_addr_t iova,
+			    struct sg_table *sgt)
 {
-	/* Syncing is not domain specific. Just call through to DMA API */
-	dma_sync_single_for_device(gxp->dev, dma_handle, size, direction);
+	struct scatterlist *s;
+	int i;
+	size_t size = 0;
+
+	for_each_sg (sgt->sgl, s, sgt->orig_nents, i)
+		size += s->length;
+
+	if (!iommu_unmap(gdomain->domain, iova, size))
+		dev_warn(gxp->dev, "Failed to unmap sgt");
 }
 
 void gxp_dma_sync_sg_for_cpu(struct gxp_dev *gxp, struct scatterlist *sg,
@@ -886,20 +743,16 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
 	dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
 }
 
-struct sg_table *gxp_dma_map_dmabuf_attachment(
-	struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
-	struct dma_buf_attachment *attachment,
-	enum dma_data_direction direction)
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp,
+			      struct gxp_iommu_domain *gdomain,
+			      struct dma_buf_attachment *attachment,
+			      enum dma_data_direction direction)
 {
 	struct sg_table *sgt;
 	int prot = dma_info_to_prot(direction, /*coherent=*/0, /*attrs=*/0);
 	ssize_t size_mapped;
-	int virt_core;
 	int ret;
-	/* Variables needed to cleanup if an error occurs */
-	struct scatterlist *s;
-	int i;
-	size_t size = 0;
 
 	/* Map the attachment into the default domain */
 	sgt = dma_buf_map_attachment(attachment, direction);
@@ -910,49 +763,34 @@ struct sg_table *gxp_dma_map_dmabuf_attachment(
 		return sgt;
 	}
 
-	/* Map the sgt into the aux domain of all specified cores */
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
+	/*
+	 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+	 * `ssize_t` to encode errors that earlier versions throw out.
+	 * Explicitly cast here for backwards compatibility.
+	 */
+	size_mapped =
+		(ssize_t)iommu_map_sg(gdomain->domain, sg_dma_address(sgt->sgl),
+				      sgt->sgl, sgt->orig_nents, prot);
+	if (size_mapped <= 0) {
+		dev_err(gxp->dev, "Failed to map dma-buf: %ld\n", size_mapped);
 		/*
-		 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
-		 * `ssize_t` to encode errors that earlier versions throw out.
-		 * Explicitly cast here for backwards compatibility.
+		 * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
+		 * any failure. Return a generic IO error in this case.
 		 */
-		size_mapped =
-			(ssize_t)iommu_map_sg(vd->core_domains[virt_core],
-					      sg_dma_address(sgt->sgl),
-					      sgt->sgl, sgt->orig_nents, prot);
-		if (size_mapped <= 0) {
-			dev_err(gxp->dev,
-				"Failed to map dma-buf to virtual core %d (ret=%ld)\n",
-				virt_core, size_mapped);
-			/*
-			 * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
-			 * any failure. Return a generic IO error in this case.
-			 */
-			ret = size_mapped == 0 ? -EIO : (int)size_mapped;
-			goto err;
-		}
+		ret = size_mapped == 0 ? -EIO : (int)size_mapped;
+		goto err;
 	}
 
 	return sgt;
 
 err:
-	for_each_sg(sgt->sgl, s, sgt->nents, i)
-		size += sg_dma_len(s);
-
-	for (virt_core -= 1; virt_core >= 0; virt_core--)
-		iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sgt->sgl), size);
 	dma_buf_unmap_attachment(attachment, sgt, direction);
 	return ERR_PTR(ret);
-
 }
 
 void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
-				     struct gxp_virtual_device *vd,
-				     uint virt_core_list,
+				     struct gxp_iommu_domain *gdomain,
 				     struct dma_buf_attachment *attachment,
 				     struct sg_table *sgt,
 				     enum dma_data_direction direction)
@@ -960,23 +798,13 @@ void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
 	struct scatterlist *s;
 	int i;
 	size_t size = 0;
-	int virt_core;
 
 	/* Find the size of the mapping in IOVA-space */
-	for_each_sg(sgt->sgl, s, sgt->nents, i)
+	for_each_sg (sgt->sgl, s, sgt->nents, i)
 		size += sg_dma_len(s);
 
-	/* Unmap the dma-buf from the aux domain of all specified cores */
-	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
-		if (!(virt_core_list & BIT(virt_core)))
-			continue;
-		if (!iommu_unmap(vd->core_domains[virt_core],
-				 sg_dma_address(sgt->sgl), size))
-			dev_warn(
-				gxp->dev,
-				"Failed to unmap dma-buf from virtual core %d\n",
-				virt_core);
-	}
+	if (!iommu_unmap(gdomain->domain, sg_dma_address(sgt->sgl), size))
+		dev_warn(gxp->dev, "Failed to unmap dma-buf\n");
 
 	/* Unmap the attachment from the default domain */
 	dma_buf_unmap_attachment(attachment, sgt, direction);