author     Aurora pro automerger <aurora-pro-automerger@google.com>  2022-04-28 17:59:17 +0800
committer  John Scheible <johnscheible@google.com>                   2022-05-02 22:33:52 +0000
commit     27bed782f3a828674c0f1584cf355bf592c382be (patch)
tree       79f6f3410eece25889b3cfc80dcd156ec5b543c6 /gxp-dma-iommu.c
parent     a96a198c9328b06866df39ebd420b0f3fd58ce51 (diff)
download   gs201-27bed782f3a828674c0f1584cf355bf592c382be.tar.gz
[Copybara Auto Merge] Merge branch 'gs201-release' into 'android13-gs-pixel-5.10'
gxp: check BLK is on during power state transition
gxp: prepare more worker structures for async jobs
gxp: Cleanup virt<->phys core translation APIs
gxp: switch mux to make sure LPM works
gxp: init has_vd_lock field of gxp_client
gxp: Clean up variable names and update variable type
gxp: remove gxp-tmp.h
gxp: move scratchpad macros from tmp to firmware.h
gxp: remove no-iommu support
gxp: remove SYNC_ macros from tmp.h
gxp: remove DOORBELL macros
gxp: move PSM macros to lpm.h
gxp: Check for valid VD in mb_eventfd IOCTLs
gxp: Firmware startup and Core-On optimizations
gxp: Move ownership of user response queues
gxp: move macros from tmp.h to bpm.c
gxp: remove legacy software mailbox support
gxp: Add gxp-eventfd interface
gxp: remove unused macros from gxp-tmp.h
gxp: bind page tables per virtual device
Bug: 176979630
Bug: 207037425
Bug: 207038856
Bug: 209083969
Bug: 225059930
Bug: 226211187
Bug: 227145352
Bug: 227693917
Bug: 227694164
Bug: 228233514
Bug: 228921329
Bug: 229095276
Bug: 229584236
GitOrigin-RevId: d2c00e3ee2d71e551d41adfa5bcc6bec79379db3
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Change-Id: Ia92e12a2ab46eadc2876bcdb7ed3c04e223b3901
Diffstat (limited to 'gxp-dma-iommu.c')
-rw-r--r--  gxp-dma-iommu.c  637
1 file changed, 298 insertions(+), 339 deletions(-)
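
Reviewer note: the central change below is that the per-core aux IOMMU domains move out of the driver-global gxp_dma_iommu_manager and into each struct gxp_virtual_device (vd->core_domains[], indexed by *virtual* core), so page tables are bound when a virtual device is brought up rather than once at probe. A minimal sketch of the new attach-time sequence, using only functions introduced in this diff; the caller function itself is hypothetical and error handling is simplified:

    /*
     * Hypothetical caller, for illustration only (not part of this change).
     * Assumes the gxp-dma.h / gxp-vd.h declarations from this driver tree.
     */
    static int example_bring_up_vd_core(struct gxp_dev *gxp,
                                        struct gxp_virtual_device *vd,
                                        uint virt_core, uint core)
    {
            int ret;

            /* Attach vd->core_domains[virt_core] as an aux domain; on
             * success this also programs the SSMT with the PASID the
             * IOMMU driver assigned to that domain. */
            ret = gxp_dma_domain_attach_device(gxp, vd, virt_core, core);
            if (ret)
                    return ret;

            /* Map the core's fixed resources (registers, sync barriers,
             * mailbox, firmware, core dump, FW data) into that domain. */
            ret = gxp_dma_map_core_resources(gxp, vd, virt_core, core);
            if (ret)
                    gxp_dma_domain_detach_device(gxp, vd, virt_core);
            return ret;
    }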
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 77b9d31..caedac3 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -13,16 +13,14 @@
 #include "gxp-config.h"
 #include "gxp-dma.h"
-#include "gxp-dma-iommu.h"
 #include "gxp-iova.h"
 #include "gxp-mapping.h"
 #include "gxp-pm.h"
+#include "gxp-vd.h"

 struct gxp_dma_iommu_manager {
 	struct gxp_dma_manager dma_mgr;
 	struct iommu_domain *default_domain;
-	struct iommu_domain *core_domains[GXP_NUM_CORES];
-	int core_vids[GXP_NUM_CORES];
 	void __iomem *idma_ssmt_base;
 	void __iomem *inst_data_ssmt_base;
 };
@@ -71,26 +69,26 @@ static inline void ssmt_set_vid_for_sid(void __iomem *ssmt, int vid, u8 sid)
 	writel(vid, (ssmt) + 0x1200u + (0x4u * (sid)));
 }

-int gxp_dma_ssmt_program(struct gxp_dev *gxp)
+static int gxp_dma_ssmt_program(struct gxp_dev *gxp,
+				struct gxp_virtual_device *vd, uint virt_core,
+				uint core)
 {
 /* SSMT is not supported in unittests */
 #ifndef CONFIG_GXP_TEST
 	struct gxp_dma_iommu_manager *mgr = container_of(
 		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	unsigned int core;
-
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		ssmt_set_vid_for_sid(mgr->idma_ssmt_base, mgr->core_vids[core],
-				     IDMA_SID_FOR_CORE(core));
-		ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
-				     mgr->core_vids[core],
-				     INST_SID_FOR_CORE(core));
-		ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
-				     mgr->core_vids[core],
-				     DATA_SID_FOR_CORE(core));
-	}
+	int core_vid;
+
+	core_vid = iommu_aux_get_pasid(vd->core_domains[virt_core], gxp->dev);
+	dev_dbg(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
+		core_vid);
+	ssmt_set_vid_for_sid(mgr->idma_ssmt_base, core_vid,
+			     IDMA_SID_FOR_CORE(core));
+	ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
+			     INST_SID_FOR_CORE(core));
+	ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
+			     DATA_SID_FOR_CORE(core));
 #endif
-
 	return 0;
 }
@@ -176,7 +174,6 @@ static int sysmmu_fault_handler(struct iommu_fault *fault, void *token)
 int gxp_dma_init(struct gxp_dev *gxp)
 {
 	struct gxp_dma_iommu_manager *mgr;
-	unsigned int core;
 	int ret;

 	/* GXP can only address 32-bit IOVAs */
@@ -218,30 +215,10 @@ int gxp_dma_init(struct gxp_dev *gxp)
 		goto err_unreg_fault_handler;
 	}

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		mgr->core_domains[core] = iommu_domain_alloc(gxp->dev->bus);
-		if (iommu_aux_attach_device(mgr->core_domains[core],
-					    gxp->dev)) {
-			iommu_domain_free(mgr->core_domains[core]);
-			goto err_detach_aux_domains;
-		}
-		mgr->core_vids[core] =
-			iommu_aux_get_pasid(mgr->core_domains[core], gxp->dev);
-		dev_notice(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
-			   mgr->core_vids[core]);
-	}
-
 	gxp->dma_mgr = &(mgr->dma_mgr);

 	return 0;

-err_detach_aux_domains:
-	/* Detach and free any aux domains successfully setup */
-	for (core -= 1; core >= 0; core--) {
-		iommu_aux_detach_device(mgr->core_domains[core], gxp->dev);
-		iommu_domain_free(mgr->core_domains[core]);
-	}
-
 err_unreg_fault_handler:
 	if (iommu_unregister_device_fault_handler(gxp->dev))
 		dev_err(gxp->dev,
@@ -252,24 +229,6 @@ err_unreg_fault_handler:

 void gxp_dma_exit(struct gxp_dev *gxp)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	unsigned int core;
-
-	/*
-	 * The SysMMU driver writes registers in the SysMMU during
-	 * `iommu_aux_detach_device()`, to disable that domain's VID and flush
-	 * its TLB. BLK_AUR must be powered on for these writes to succeed.
-	 */
-	gxp_pm_blk_on(gxp);
-
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		iommu_aux_detach_device(mgr->core_domains[core], gxp->dev);
-		iommu_domain_free(mgr->core_domains[core]);
-	}
-
-	gxp_pm_blk_off(gxp);
-
 	if (iommu_unregister_device_fault_handler(gxp->dev))
 		dev_err(gxp->dev,
 			"Failed to unregister SysMMU fault handler\n");
@@ -282,72 +241,95 @@ void gxp_dma_exit(struct gxp_dev *gxp)

 /* Offset from mailbox base to the device interface that needs to be mapped */
 #define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000

-int gxp_dma_map_resources(struct gxp_dev *gxp)
+void gxp_dma_init_default_resources(struct gxp_dev *gxp)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
 	unsigned int core;
-	int ret = 0;

 	for (core = 0; core < GXP_NUM_CORES; core++) {
-		ret = iommu_map(mgr->core_domains[core], GXP_IOVA_AURORA_TOP,
-				gxp->regs.paddr, gxp->regs.size,
-				IOMMU_READ | IOMMU_WRITE);
-		if (ret)
-			goto err;
-		/*
-		 * Firmware expects to access the sync barriers at a separate
-		 * address, lower than the rest of the AURORA_TOP registers.
-		 */
-		ret = iommu_map(mgr->core_domains[core], GXP_IOVA_SYNC_BARRIERS,
-				gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
-				SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
-		if (ret)
-			goto err;
-		ret = iommu_map(mgr->core_domains[core], GXP_IOVA_MAILBOX(core),
-				gxp->mbx[core].paddr +
-					MAILBOX_DEVICE_INTERFACE_OFFSET,
-				gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
-		if (ret)
-			goto err;
-		/*
-		 * TODO(b/202213606): Map FW regions of all cores in a VD for
-		 * each other at VD creation.
-		 */
-		ret = iommu_map(mgr->core_domains[core], GXP_IOVA_FIRMWARE(0),
-				gxp->fwbufs[0].paddr,
-				gxp->fwbufs[0].size * GXP_NUM_CORES,
-				IOMMU_READ | IOMMU_WRITE);
-		if (ret)
-			goto err;
-		ret = iommu_map(mgr->core_domains[core], GXP_IOVA_CORE_DUMP,
-				gxp->coredumpbuf.paddr, gxp->coredumpbuf.size,
-				IOMMU_READ | IOMMU_WRITE);
-		if (ret)
-			goto err;
-		ret = iommu_map(mgr->core_domains[core], GXP_IOVA_FW_DATA,
-				gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
-				IOMMU_READ | IOMMU_WRITE);
-		if (ret)
-			goto err;
-		/* Only map the TPU mailboxes if they were found on probe */
-		if (gxp->tpu_dev.mbx_paddr) {
-			ret = iommu_map(
-				mgr->core_domains[core],
-				GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
-				gxp->tpu_dev.mbx_paddr +
-					core * EXT_TPU_MBX_SIZE,
-				EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
-			if (ret)
-				goto err;
-		}
 		gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
 		gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
 	}
 	gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
 	gxp->coredumpbuf.daddr = GXP_IOVA_CORE_DUMP;
 	gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
+}
+
+int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
+				 struct gxp_virtual_device *vd, uint virt_core,
+				 uint core)
+{
+	int ret;
+
+	ret = iommu_aux_attach_device(vd->core_domains[virt_core], gxp->dev);
+	if (ret)
+		goto out;
+	gxp_dma_ssmt_program(gxp, vd, virt_core, core);
+out:
+	return ret;
+}
+
+void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
+				  struct gxp_virtual_device *vd, uint virt_core)
+{
+	iommu_aux_detach_device(vd->core_domains[virt_core], gxp->dev);
+}
+
+int gxp_dma_map_core_resources(struct gxp_dev *gxp,
+			       struct gxp_virtual_device *vd, uint virt_core,
+			       uint core)
+{
+	int ret;
+
+	ret = iommu_map(vd->core_domains[virt_core], gxp->regs.daddr,
+			gxp->regs.paddr, gxp->regs.size,
+			IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		goto err;
+	/*
+	 * Firmware expects to access the sync barriers at a separate
+	 * address, lower than the rest of the AURORA_TOP registers.
+	 */
+	ret = iommu_map(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
+			gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
+			SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		goto err;
+	ret = iommu_map(vd->core_domains[virt_core], gxp->mbx[core].daddr,
+			gxp->mbx[core].paddr + MAILBOX_DEVICE_INTERFACE_OFFSET,
+			gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		goto err;
+	/*
+	 * TODO(b/202213606): Map FW regions of all cores in a VD for
+	 * each other at VD creation.
+	 */
+	ret = iommu_map(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
+			gxp->fwbufs[0].paddr,
+			gxp->fwbufs[0].size * GXP_NUM_CORES,
+			IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		goto err;
+	ret = iommu_map(vd->core_domains[virt_core], gxp->coredumpbuf.daddr,
+			gxp->coredumpbuf.paddr, gxp->coredumpbuf.size,
+			IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		goto err;
+	ret = iommu_map(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
+			gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
+			IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		goto err;
+	/* Only map the TPU mailboxes if they were found on probe */
+	if (gxp->tpu_dev.mbx_paddr) {
+		ret = iommu_map(
+			vd->core_domains[virt_core],
+			GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
+			gxp->tpu_dev.mbx_paddr +
+				core * EXT_TPU_MBX_SIZE,
+			EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
+		if (ret)
+			goto err;
+	}

 	return ret;

 err:
@@ -356,42 +338,38 @@ err:
	 * Any resource that hadn't been mapped yet will cause `iommu_unmap()`
	 * to return immediately, so its safe to try to unmap everything.
	 */
-	gxp_dma_unmap_resources(gxp);
+	gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
 	return ret;
 }

-void gxp_dma_unmap_resources(struct gxp_dev *gxp)
+void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
+				  struct gxp_virtual_device *vd, uint virt_core,
+				  uint core)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	unsigned int core;
-
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		iommu_unmap(mgr->core_domains[core], GXP_IOVA_AURORA_TOP,
-			    gxp->regs.size);
-		iommu_unmap(mgr->core_domains[core], GXP_IOVA_SYNC_BARRIERS,
-			    SYNC_BARRIERS_SIZE);
-		iommu_unmap(mgr->core_domains[core], GXP_IOVA_MAILBOX(core),
-			    gxp->mbx[core].size);
-		/*
-		 * TODO(b/202213606): A core should only have access to the FW
-		 * of other cores if they're in the same VD, and have the FW
-		 * region unmapped on VD destruction.
-		 */
-		iommu_unmap(mgr->core_domains[core], GXP_IOVA_FIRMWARE(0),
-			    gxp->fwbufs[0].size * GXP_NUM_CORES);
-		iommu_unmap(mgr->core_domains[core], GXP_IOVA_CORE_DUMP,
-			    gxp->coredumpbuf.size);
-		iommu_unmap(mgr->core_domains[core], GXP_IOVA_FW_DATA,
-			    gxp->fwdatabuf.size);
-		/* Only unmap the TPU mailboxes if they were found on probe */
-		if (gxp->tpu_dev.mbx_paddr) {
-			iommu_unmap(mgr->core_domains[core],
-				    GXP_IOVA_EXT_TPU_MBX +
-					    core * EXT_TPU_MBX_SIZE,
-				    EXT_TPU_MBX_SIZE);
-		}
-	}
+	/* Only unmap the TPU mailboxes if they were found on probe */
+	if (gxp->tpu_dev.mbx_paddr) {
+		iommu_unmap(vd->core_domains[virt_core],
+			    GXP_IOVA_EXT_TPU_MBX +
+				    core * EXT_TPU_MBX_SIZE,
+			    EXT_TPU_MBX_SIZE);
+	}
+	iommu_unmap(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
+		    gxp->fwdatabuf.size);
+	iommu_unmap(vd->core_domains[virt_core], gxp->coredumpbuf.daddr,
+		    gxp->coredumpbuf.size);
+	/*
+	 * TODO(b/202213606): A core should only have access to the FW
+	 * of other cores if they're in the same VD, and have the FW
+	 * region unmapped on VD destruction.
+	 */
+	iommu_unmap(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
+		    gxp->fwbufs[0].size * GXP_NUM_CORES);
+	iommu_unmap(vd->core_domains[virt_core], gxp->mbx[core].daddr,
+		    gxp->mbx[core].size);
+	iommu_unmap(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
+		    SYNC_BARRIERS_SIZE);
+	iommu_unmap(vd->core_domains[virt_core], gxp->regs.daddr,
+		    gxp->regs.size);
 }

 static inline struct sg_table *
@@ -464,33 +442,35 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
 }

 #if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+			   uint virt_core_list, uint core_list,
 			   struct edgetpu_ext_mailbox_info *mbx_info)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	uint orig_core_list = core_list;
+	uint orig_virt_core_list = virt_core_list;
 	u64 queue_iova;
+	uint virt_core;
 	int core;
 	int ret;
 	int i = 0;

-	while (core_list) {
+	while (virt_core_list) {
 		phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
 		phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;

+		virt_core = ffs(virt_core_list) - 1;
+		virt_core_list &= ~BIT(virt_core);
 		core = ffs(core_list) - 1;
 		core_list &= ~BIT(core);
 		queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
-		ret = iommu_map(mgr->core_domains[core], queue_iova,
+		ret = iommu_map(vd->core_domains[virt_core], queue_iova,
 				cmdq_pa, mbx_info->cmdq_size, IOMMU_WRITE);
 		if (ret)
 			goto error;
-		ret = iommu_map(mgr->core_domains[core],
+		ret = iommu_map(vd->core_domains[virt_core],
 				queue_iova + mbx_info->cmdq_size, respq_pa,
 				mbx_info->respq_size, IOMMU_READ);
 		if (ret) {
-			iommu_unmap(mgr->core_domains[core], queue_iova,
+			iommu_unmap(vd->core_domains[virt_core], queue_iova,
 				    mbx_info->cmdq_size);
 			goto error;
 		}
@@ -498,129 +478,160 @@ int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
 	return 0;

 error:
-	core_list ^= orig_core_list;
-	while (core_list) {
+	virt_core_list ^= orig_virt_core_list;
+	while (virt_core_list) {
+		virt_core = ffs(virt_core_list) - 1;
+		virt_core_list &= ~BIT(virt_core);
 		core = ffs(core_list) - 1;
 		core_list &= ~BIT(core);
 		queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
-		iommu_unmap(mgr->core_domains[core], queue_iova,
+		iommu_unmap(vd->core_domains[virt_core], queue_iova,
 			    mbx_info->cmdq_size);
-		iommu_unmap(mgr->core_domains[core], queue_iova +
+		iommu_unmap(vd->core_domains[virt_core], queue_iova +
 			    mbx_info->cmdq_size, mbx_info->respq_size);
 	}
 	return ret;
 }

 void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+			      struct gxp_virtual_device *vd,
 			      struct gxp_tpu_mbx_desc mbx_desc)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+	uint virt_core_list = mbx_desc.virt_core_list;
 	uint core_list = mbx_desc.phys_core_list;
 	u64 queue_iova;
 	int core;
+	uint virt_core;

-	while (core_list) {
+	while (virt_core_list) {
+		virt_core = ffs(virt_core_list) - 1;
+		virt_core_list &= ~BIT(virt_core);
 		core = ffs(core_list) - 1;
 		core_list &= ~BIT(core);
 		queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
-		iommu_unmap(mgr->core_domains[core], queue_iova,
+		iommu_unmap(vd->core_domains[virt_core], queue_iova,
 			    mbx_desc.cmdq_size);
-		iommu_unmap(mgr->core_domains[core], queue_iova +
+		iommu_unmap(vd->core_domains[virt_core], queue_iova +
 			    mbx_desc.cmdq_size, mbx_desc.respq_size);
 	}
 }
 #endif  // CONFIG_ANDROID && !CONFIG_GXP_GEM5

-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
-			     dma_addr_t *dma_handle, gfp_t flag,
-			     uint gxp_dma_flags)
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
+					  struct gxp_virtual_device *vd,
+					  uint virt_core_list, size_t size,
+					  dma_addr_t dma_handle,
+					  uint gxp_dma_flags)
 {
 	struct gxp_dma_iommu_manager *mgr = container_of(
 		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	void *buf;
 	struct sg_table *sgt;
-	dma_addr_t daddr;
-	int core;
+	int virt_core;
 	ssize_t size_mapped;

 	size = size < PAGE_SIZE ? PAGE_SIZE : size;
-
-	/* Allocate a coherent buffer in the default domain */
-	buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
-	if (!buf) {
-		dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
-		return NULL;
-	}
-
-	sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, daddr);
+	sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, dma_handle);
 	if (IS_ERR(sgt)) {
 		dev_err(gxp->dev,
 			"Failed to allocate sgt for coherent buffer\n");
-		dma_free_coherent(gxp->dev, size, buf, daddr);
-		return NULL;
+		return -ENOMEM;
 	}

 	/* Create identical mappings in the specified cores' domains */
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-
 		/*
 		 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
 		 * `ssize_t` to encode errors that earlier versions throw out.
 		 * Explicitly cast here for backwards compatibility.
 		 */
-		size_mapped =
-			(ssize_t)iommu_map_sg(mgr->core_domains[core], daddr,
-					      sgt->sgl, sgt->orig_nents,
-					      IOMMU_READ | IOMMU_WRITE);
+		size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
+						    dma_handle, sgt->sgl,
+						    sgt->orig_nents,
+						    IOMMU_READ | IOMMU_WRITE);
 		if (size_mapped != size)
 			goto err;
 	}

-	if (dma_handle)
-		*dma_handle = daddr;
-
 	sg_free_table(sgt);
 	kfree(sgt);
-
-	return buf;
+	return 0;

 err:
-	for (core -= 1; core >= 0; core--)
-		iommu_unmap(mgr->core_domains[core], daddr, size);
-	dma_free_coherent(gxp->dev, size, buf, daddr);
+	for (virt_core -= 1; virt_core >= 0; virt_core--)
+		iommu_unmap(vd->core_domains[virt_core], dma_handle, size);
 	sg_free_table(sgt);
 	kfree(sgt);
+	return -EINVAL;
+}

-	return NULL;
+void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+			     uint virt_core_list, size_t size,
+			     dma_addr_t *dma_handle, gfp_t flag,
+			     uint gxp_dma_flags)
+{
+	void *buf;
+	dma_addr_t daddr;
+	int ret;
+
+	size = size < PAGE_SIZE ? PAGE_SIZE : size;
+
+	/* Allocate a coherent buffer in the default domain */
+	buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
+	if (!buf) {
+		dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
+		return NULL;
+	}
+	if (vd != NULL) {
+		ret = gxp_dma_map_allocated_coherent_buffer(gxp, buf, vd,
+							    virt_core_list,
+							    size, daddr,
+							    gxp_dma_flags);
+		if (ret) {
+			dma_free_coherent(gxp->dev, size, buf, daddr);
+			return NULL;
+		}
+	}
+
+	if (dma_handle)
+		*dma_handle = daddr;
+
+	return buf;
 }

-void gxp_dma_free_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
-			   void *cpu_addr, dma_addr_t dma_handle)
+void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
+					     struct gxp_virtual_device *vd,
+					     uint virt_core_list, size_t size,
+					     dma_addr_t dma_handle)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	int core;
+	int virt_core;

 	size = size < PAGE_SIZE ? PAGE_SIZE : size;

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-		if (size !=
-		    iommu_unmap(mgr->core_domains[core], dma_handle, size))
+		if (size !=
+		    iommu_unmap(vd->core_domains[virt_core], dma_handle, size))
 			dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
 	}
+}

+void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+			   uint virt_core_list, size_t size, void *cpu_addr,
+			   dma_addr_t dma_handle)
+{
+	if (vd != NULL)
+		gxp_dma_unmap_allocated_coherent_buffer(gxp, vd, virt_core_list,
+							size, dma_handle);
 	dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
 }

-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
-			      void *cpu_addr, size_t size,
+dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
+			      struct gxp_virtual_device *vd,
+			      uint virt_core_list, void *cpu_addr, size_t size,
 			      enum dma_data_direction direction,
 			      unsigned long attrs, uint gxp_dma_flags)
 {
@@ -629,7 +640,7 @@ dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
 	dma_addr_t daddr;
 	phys_addr_t paddr;
 	int prot = dma_info_to_prot(direction, 0, attrs);
-	int core;
+	int virt_core;

 	daddr = dma_map_single_attrs(gxp->dev, cpu_addr, size, direction,
 				     attrs);
@@ -637,11 +648,10 @@
 		return DMA_MAPPING_ERROR;

 	paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-
-		if (iommu_map(mgr->core_domains[core], daddr, paddr, size,
+		if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
 			      prot))
 			goto err;
 	}
@@ -649,36 +659,35 @@
 	return daddr;

 err:
-	for (core -= 1; core >= 0; core--)
-		iommu_unmap(mgr->core_domains[core], daddr, size);
+	for (virt_core -= 1; virt_core >= 0; virt_core--)
+		iommu_unmap(vd->core_domains[virt_core], daddr, size);
 	dma_unmap_single_attrs(gxp->dev, daddr, size, direction,
 			       DMA_ATTR_SKIP_CPU_SYNC);
 	return DMA_MAPPING_ERROR;
 }

-void gxp_dma_unmap_single(struct gxp_dev *gxp, uint core_list,
-			  dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+			  uint virt_core_list, dma_addr_t dma_addr, size_t size,
 			  enum dma_data_direction direction,
 			  unsigned long attrs)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	int core;
+	int virt_core;

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
 		if (size !=
-		    iommu_unmap(mgr->core_domains[core], dma_addr, size))
+		    iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
 			dev_warn(gxp->dev, "Failed to unmap single\n");
 	}
 	dma_unmap_single_attrs(gxp->dev, dma_addr, size, direction, attrs);
 }

-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
-			    struct page *page, unsigned long offset,
-			    size_t size, enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+			    uint virt_core_list, struct page *page,
+			    unsigned long offset, size_t size,
+			    enum dma_data_direction direction,
 			    unsigned long attrs, uint gxp_dma_flags)
 {
 	struct gxp_dma_iommu_manager *mgr = container_of(
@@ -686,7 +695,7 @@ dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
 	dma_addr_t daddr;
 	phys_addr_t paddr;
 	int prot = dma_info_to_prot(direction, 0, attrs);
-	int core;
+	int virt_core;

 	daddr = dma_map_page_attrs(gxp->dev, page, offset, size, direction,
 				   attrs);
@@ -694,11 +703,10 @@
 		return DMA_MAPPING_ERROR;

 	paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-
-		if (iommu_map(mgr->core_domains[core], daddr, paddr, size,
+		if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
 			      prot))
 			goto err;
 	}
@@ -706,96 +714,89 @@
 	return daddr;

 err:
-	for (core -= 1; core >= 0; core--)
-		iommu_unmap(mgr->core_domains[core], daddr, size);
+	for (virt_core -= 1; virt_core >= 0; virt_core--)
+		iommu_unmap(vd->core_domains[virt_core], daddr, size);
 	dma_unmap_page_attrs(gxp->dev, daddr, size, direction,
 			     DMA_ATTR_SKIP_CPU_SYNC);
 	return DMA_MAPPING_ERROR;
 }

-void gxp_dma_unmap_page(struct gxp_dev *gxp, uint core_list,
-			dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+			uint virt_core_list, dma_addr_t dma_addr, size_t size,
 			enum dma_data_direction direction, unsigned long attrs)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	int core;
+	int virt_core;

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
 		if (size !=
-		    iommu_unmap(mgr->core_domains[core], dma_addr, size))
+		    iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
 			dev_warn(gxp->dev, "Failed to unmap page\n");
 	}
 	dma_unmap_page_attrs(gxp->dev, dma_addr, size, direction, attrs);
 }

-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp, uint core_list,
-				phys_addr_t phys_addr, size_t size,
-				enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
+				struct gxp_virtual_device *vd,
+				uint virt_core_list, phys_addr_t phys_addr,
+				size_t size, enum dma_data_direction direction,
 				unsigned long attrs, uint gxp_dma_flags)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
 	dma_addr_t daddr;
 	int prot = dma_info_to_prot(direction, 0, attrs);
-	int core;
+	int virt_core;

 	daddr = dma_map_resource(gxp->dev, phys_addr, size, direction, attrs);
 	if (dma_mapping_error(gxp->dev, daddr))
 		return DMA_MAPPING_ERROR;

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-
-		if (iommu_map(mgr->core_domains[core], daddr, phys_addr, size,
-			      prot))
+		if (iommu_map(vd->core_domains[virt_core], daddr, phys_addr,
+			      size, prot))
 			goto err;
 	}

 	return daddr;

 err:
-	for (core -= 1; core >= 0; core--)
-		iommu_unmap(mgr->core_domains[core], daddr, size);
+	for (virt_core -= 1; virt_core >= 0; virt_core--)
+		iommu_unmap(vd->core_domains[virt_core], daddr, size);
 	dma_unmap_resource(gxp->dev, daddr, size, direction,
 			   DMA_ATTR_SKIP_CPU_SYNC);
 	return DMA_MAPPING_ERROR;
 }

-void gxp_dma_unmap_resource(struct gxp_dev *gxp, uint core_list,
-			    dma_addr_t dma_addr, size_t size,
-			    enum dma_data_direction direction,
+void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+			    uint virt_core_list, dma_addr_t dma_addr,
+			    size_t size, enum dma_data_direction direction,
 			    unsigned long attrs)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-	int core;
+	int virt_core;

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
 		if (size !=
-		    iommu_unmap(mgr->core_domains[core], dma_addr, size))
+		    iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
 			dev_warn(gxp->dev, "Failed to unmap resource\n");
 	}
 	dma_unmap_resource(gxp->dev, dma_addr, size, direction, attrs);
 }

-int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
-		   int nents, enum dma_data_direction direction,
-		   unsigned long attrs, uint gxp_dma_flags)
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+		   int virt_core_list, struct scatterlist *sg, int nents,
+		   enum dma_data_direction direction, unsigned long attrs,
+		   uint gxp_dma_flags)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
 	int nents_mapped;
 	dma_addr_t daddr;
 	int prot = dma_info_to_prot(direction, 0, attrs);
-	int core;
+	int virt_core;
 	ssize_t size_mapped;
 	/* Variables needed to cleanup if an error occurs */
 	struct scatterlist *s;
@@ -808,16 +809,15 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,

 	daddr = sg_dma_address(sg);

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-
 		/*
 		 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
 		 * `ssize_t` to encode errors that earlier versions throw out.
 		 * Explicitly cast here for backwards compatibility.
 		 */
-		size_mapped = (ssize_t)iommu_map_sg(mgr->core_domains[core],
+		size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
 						    daddr, sg, nents, prot);
 		if (size_mapped <= 0)
 			goto err;
@@ -830,31 +830,29 @@ err:
 		size += sg_dma_len(s);
 	}

-	for (core -= 1; core >= 0; core--)
-		iommu_unmap(mgr->core_domains[core], daddr, size);
+	for (virt_core -= 1; virt_core >= 0; virt_core--)
+		iommu_unmap(vd->core_domains[virt_core], daddr, size);
 	dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
 	return 0;
 }

-void gxp_dma_unmap_sg(struct gxp_dev *gxp, uint core_list,
-		      struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+		      uint virt_core_list, struct scatterlist *sg, int nents,
 		      enum dma_data_direction direction, unsigned long attrs)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
 	struct scatterlist *s;
 	int i;
 	size_t size = 0;
-	int core;
+	int virt_core;

 	for_each_sg(sg, s, nents, i) {
 		size += sg_dma_len(s);
 	}

-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-		if (!iommu_unmap(mgr->core_domains[core], sg_dma_address(sg),
+		if (!iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sg),
 				 size))
 			dev_warn(gxp->dev, "Failed to unmap sg\n");
 	}
@@ -892,51 +890,15 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
 	dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
 }

-#ifdef CONFIG_GXP_TEST
-/*
- * gxp-dma-iommu.h interface
- * These APIs expose gxp-dma-iommu implementation details for unit testing.
- * They are not meant to be used by other components fo the driver.
- */
-
-struct iommu_domain *gxp_dma_iommu_get_default_domain(struct gxp_dev *gxp)
-{
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-
-	if (!mgr)
-		return ERR_PTR(-ENODEV);
-
-	return mgr->default_domain;
-}
-
-struct iommu_domain *gxp_dma_iommu_get_core_domain(struct gxp_dev *gxp,
-						   uint core)
+struct sg_table *gxp_dma_map_dmabuf_attachment(
+	struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
+	struct dma_buf_attachment *attachment,
+	enum dma_data_direction direction)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-
-	if (!mgr)
-		return ERR_PTR(-ENODEV);
-
-	if (core >= GXP_NUM_CORES)
-		return ERR_PTR(-EINVAL);
-
-	return mgr->core_domains[core];
-}
-#endif // CONFIG_GXP_TEST
-
-struct sg_table *
-gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
-			      struct dma_buf_attachment *attachment,
-			      enum dma_data_direction direction)
-{
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
 	struct sg_table *sgt;
-	int core;
 	int prot = dma_info_to_prot(direction, /*coherent=*/0, /*attrs=*/0);
 	ssize_t size_mapped;
+	int virt_core;
 	int ret;
 	/* Variables needed to cleanup if an error occurs */
 	struct scatterlist *s;
@@ -953,23 +915,22 @@ gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
 	}

 	/* Map the sgt into the aux domain of all specified cores */
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-
 		/*
 		 * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
 		 * `ssize_t` to encode errors that earlier versions throw out.
 		 * Explicitly cast here for backwards compatibility.
 		 */
 		size_mapped =
-			(ssize_t)iommu_map_sg(mgr->core_domains[core],
+			(ssize_t)iommu_map_sg(vd->core_domains[virt_core],
 					      sg_dma_address(sgt->sgl),
 					      sgt->sgl, sgt->orig_nents, prot);
 		if (size_mapped <= 0) {
 			dev_err(gxp->dev,
-				"Failed to map dma-buf to core %d (ret=%ld)\n",
-				core, size_mapped);
+				"Failed to map dma-buf to virtual core %d (ret=%ld)\n",
+				virt_core, size_mapped);
 			/*
 			 * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
 			 * any failure. Return a generic IO error in this case.
@@ -985,42 +946,40 @@ err:
 	for_each_sg(sgt->sgl, s, sgt->nents, i)
 		size += sg_dma_len(s);

-	for (core -= 1; core >= 0; core--)
-		iommu_unmap(mgr->core_domains[core], sg_dma_address(sgt->sgl),
-			    size);
+	for (virt_core -= 1; virt_core >= 0; virt_core--)
+		iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sgt->sgl), size);
 	dma_buf_unmap_attachment(attachment, sgt, direction);

 	return ERR_PTR(ret);
 }

-void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
+				     struct gxp_virtual_device *vd,
+				     uint virt_core_list,
 				     struct dma_buf_attachment *attachment,
 				     struct sg_table *sgt,
 				     enum dma_data_direction direction)
 {
-	struct gxp_dma_iommu_manager *mgr = container_of(
-		gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
 	struct scatterlist *s;
 	int i;
 	size_t size = 0;
-	int core;
+	int virt_core;

 	/* Find the size of the mapping in IOVA-space */
 	for_each_sg(sgt->sgl, s, sgt->nents, i)
 		size += sg_dma_len(s);

 	/* Unmap the dma-buf from the aux domain of all specified cores */
-	for (core = 0; core < GXP_NUM_CORES; core++) {
-		if (!(core_list & BIT(core)))
+	for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+		if (!(virt_core_list & BIT(virt_core)))
 			continue;
-
-		if (!iommu_unmap(mgr->core_domains[core],
+		if (!iommu_unmap(vd->core_domains[virt_core],
 				 sg_dma_address(sgt->sgl), size))
 			dev_warn(
 				gxp->dev,
-				"Failed to unmap dma-buf from core %d\n",
-				core);
+				"Failed to unmap dma-buf from virtual core %d\n",
+				virt_core);
 	}

 	/* Unmap the attachment from the default domain */
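
Note on the lockstep bitmask walk used by the TPU mailbox paths above: each iteration pops the lowest set bit of virt_core_list to pick a domain in vd->core_domains[], and the matching bit popped from core_list names the physical core whose fixed IOVA (GXP_IOVA_TPU_MBX_BUFFER(core)) is reused. A standalone userspace illustration — the bitmask values are examples and __builtin_ffs stands in for the kernel's ffs():

    #include <stdio.h>

    int main(void)
    {
            unsigned int virt_core_list = 0x5;      /* virtual cores 0 and 2 */
            unsigned int core_list = 0x6;           /* physical cores 1 and 2 */

            while (virt_core_list) {
                    /* ffs()-style: index of the lowest set bit */
                    int virt_core = __builtin_ffs(virt_core_list) - 1;
                    int core = __builtin_ffs(core_list) - 1;

                    /* clear the bits just consumed, as the driver does */
                    virt_core_list &= ~(1u << virt_core);
                    core_list &= ~(1u << core);
                    printf("virtual core %d -> physical core %d\n",
                           virt_core, core);
            }
            return 0;
    }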