From 34333c35377d6625f6019644c2bbb31fa704ea52 Mon Sep 17 00:00:00 2001 From: Aurora zuma automerger Date: Sat, 4 Feb 2023 12:03:14 +0800 Subject: gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15' gcip: Add a new KCI code for thermal control Bug: 266837631 GCIP_HEADERS_REV_ID: f4bfe07c37ad5c61abca721151d5ab5bfc4d3ad2 gxp: temporarily ignore img cfg dup mapping Bug: 268151565 gxp: define and use common interface for iommu_map Bug: 248436918 gxp: Merge identical parts in [callisto|europa]-platform.c. Bug: 249918544 GitOrigin-RevId: d31f54ad547e6020bef3af0b555219fec4bb7ef8 Change-Id: I426400c5abec897e18b80b5aed09aae3935191e9 --- callisto-platform.c | 278 +--------------------------- gcip-kernel-driver/include/gcip/gcip-kci.h | 1 + gxp-common-platform.c | 3 +- gxp-config.h | 7 + gxp-dma-iommu.c | 12 ++ gxp-dma.h | 19 +- gxp-mcu-firmware.c | 43 ++++- gxp-mcu-platform.c | 279 +++++++++++++++++++++++++++++ gxp-mcu.c | 10 +- gxp-vd.c | 42 +++-- 10 files changed, 378 insertions(+), 316 deletions(-) diff --git a/callisto-platform.c b/callisto-platform.c index e9335b0..0d006c2 100644 --- a/callisto-platform.c +++ b/callisto-platform.c @@ -76,294 +76,18 @@ err: return 0; } -static int callisto_request_power_states(struct gxp_client *client, - struct gxp_power_states power_states) -{ - struct gxp_dev *gxp = client->gxp; - struct gxp_mcu *mcu = gxp_mcu_of(gxp); - struct gxp_uci_command cmd; - int ret; - - if (gxp_is_direct_mode(gxp)) - return -EOPNOTSUPP; - - /* Plus 1 to align with power states in MCU firmware. */ - cmd.wakelock_command_params.dsp_operating_point = power_states.power + 1; - cmd.wakelock_command_params.memory_operating_point = power_states.memory; - cmd.type = WAKELOCK_COMMAND; - cmd.client_id = client->vd->client_id; - - ret = gxp_uci_send_command( - &mcu->uci, client->vd, &cmd, - &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue, - &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].dest_queue, - &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock, - &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq, - client->mb_eventfds[UCI_RESOURCE_ID]); - return ret; -} - -static int allocate_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd) -{ - struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); - int client_id, ret; - - if (vd->is_secure) - client_id = SECURE_CLIENT_ID; - else - client_id = gxp_iommu_aux_get_pasid(gxp, vd->domain); - - ret = gxp_kci_allocate_vmbox(kci, client_id, vd->num_cores, - vd->slice_index, vd->first_open); - if (ret) { - if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) { - dev_err(gxp->dev, - "Failed to allocate VMBox for client %d, TPU client %d: %d", - client_id, vd->tpu_client_id, ret); - return ret; - } - - /* - * TODO(241057541): Remove this conditional branch after the firmware side - * implements handling allocate_vmbox command. - */ - dev_info( - gxp->dev, - "Allocating VMBox is not implemented from the firmware side"); - } - - vd->client_id = client_id; - vd->first_open = false; - - return 0; -} - -static void release_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd) -{ - struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); - int ret; - - if (vd->client_id < 0) - return; - - ret = gxp_kci_release_vmbox(kci, vd->client_id); - if (ret) { - /* - * TODO(241057541): Remove this conditional branch after the firmware side - * implements handling allocate_vmbox command. 
- */ - if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED) - dev_info( - gxp->dev, - "Releasing VMBox is not implemented from the firmware side"); - else - dev_err(gxp->dev, - "Failed to release VMBox for client %d: %d", - vd->client_id, ret); - } - - vd->client_id = -1; -} - -static int link_offload_vmbox(struct gxp_dev *gxp, - struct gxp_virtual_device *vd, - u32 offload_client_id, u8 offload_chip_type) -{ - struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); - int ret; - - ret = gxp_kci_link_unlink_offload_vmbox( - kci, vd->client_id, offload_client_id, offload_chip_type, true); - if (ret) { - if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) { - dev_err(gxp->dev, - "Failed to link offload VMBox for client %d, offload client %u, offload chip type %d: %d", - vd->client_id, offload_client_id, - offload_chip_type, ret); - return ret; - } - - /* - * TODO(241057541): Remove this conditional branch after the firmware side - * implements handling link_offload_vmbox command. - */ - dev_info( - gxp->dev, - "Linking offload VMBox is not implemented from the firmware side"); - } - - return 0; -} - -static void unlink_offload_vmbox(struct gxp_dev *gxp, - struct gxp_virtual_device *vd, - u32 offload_client_id, u8 offload_chip_type) -{ - struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); - int ret; - - ret = gxp_kci_link_unlink_offload_vmbox(kci, vd->client_id, - offload_client_id, - offload_chip_type, false); - if (ret) { - /* - * TODO(241057541): Remove this conditional branch after the firmware side - * implements handling allocate_vmbox command. - */ - if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED) - dev_info( - gxp->dev, - "Unlinking offload VMBox is not implemented from the firmware side"); - else - dev_err(gxp->dev, - "Failed to unlink offload VMBox for client %d, offload client %u, offload chip type %d: %d", - vd->client_id, offload_client_id, - offload_chip_type, ret); - } -} - -static int callisto_platform_after_vd_block_ready(struct gxp_dev *gxp, - struct gxp_virtual_device *vd) -{ - int ret; - - if (gxp_is_direct_mode(gxp)) - return 0; - - ret = allocate_vmbox(gxp, vd); - if (ret) - return ret; - - if (vd->tpu_client_id >= 0) { - ret = link_offload_vmbox(gxp, vd, vd->tpu_client_id, - GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); - if (ret) - goto err_release_vmbox; - } - - return 0; - -err_release_vmbox: - release_vmbox(gxp, vd); - return ret; -} - -static void -callisto_platform_before_vd_block_unready(struct gxp_dev *gxp, - struct gxp_virtual_device *vd) -{ - if (gxp_is_direct_mode(gxp)) - return; - if (vd->client_id < 0 || vd->state == GXP_VD_UNAVAILABLE) - return; - if (vd->tpu_client_id >= 0) - unlink_offload_vmbox(gxp, vd, vd->tpu_client_id, - GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); - release_vmbox(gxp, vd); -} - -static int callisto_wakelock_after_blk_on(struct gxp_dev *gxp) -{ - struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp); - - if (gxp_is_direct_mode(gxp)) - return 0; - return gxp_mcu_firmware_run(mcu_fw); -} - -static void callisto_wakelock_before_blk_off(struct gxp_dev *gxp) -{ - struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp); - - if (gxp_is_direct_mode(gxp)) - return; - gxp_mcu_firmware_stop(mcu_fw); -} - -#ifdef HAS_TPU_EXT - -static int get_tpu_client_id(struct gxp_client *client) -{ - struct gxp_dev *gxp = client->gxp; - struct edgetpu_ext_offload_info offload_info; - struct edgetpu_ext_client_info tpu_info; - int ret; - - tpu_info.tpu_file = client->tpu_file; - ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev, - EDGETPU_EXTERNAL_CLIENT_TYPE_DSP, - START_OFFLOAD, &tpu_info, &offload_info); - if (ret) - 
return ret; - - return offload_info.client_id; -} - -static int callisto_after_map_tpu_mbx_queue(struct gxp_dev *gxp, - struct gxp_client *client) -{ - struct gxp_virtual_device *vd = client->vd; - int tpu_client_id = -1, ret; - - if (gxp_is_direct_mode(gxp)) - return 0; - - tpu_client_id = get_tpu_client_id(client); - if (tpu_client_id < 0) { - dev_err(gxp->dev, "Failed to get a TPU client ID: %d", - tpu_client_id); - return tpu_client_id; - } - - if (vd->client_id >= 0) { - ret = link_offload_vmbox(gxp, vd, tpu_client_id, - GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); - if (ret) - return ret; - } - - vd->tpu_client_id = tpu_client_id; - - return 0; -} - -static void callisto_before_unmap_tpu_mbx_queue(struct gxp_dev *gxp, - struct gxp_client *client) -{ - struct gxp_virtual_device *vd = client->vd; - - if (vd->client_id >= 0 && vd->tpu_client_id >= 0) - unlink_offload_vmbox(gxp, vd, vd->tpu_client_id, - GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); - vd->tpu_client_id = -1; -} - -#endif /* HAS_TPU_EXT */ - static int gxp_platform_probe(struct platform_device *pdev) { struct callisto_dev *callisto = devm_kzalloc(&pdev->dev, sizeof(*callisto), GFP_KERNEL); struct gxp_mcu_dev *mcu_dev = &callisto->mcu_dev; - struct gxp_dev *gxp; + struct gxp_dev *gxp = &mcu_dev->gxp; if (!callisto) return -ENOMEM; gxp_mcu_dev_init(mcu_dev); - - gxp = &mcu_dev->gxp; gxp->parse_dt = callisto_platform_parse_dt; - gxp->after_vd_block_ready = callisto_platform_after_vd_block_ready; - gxp->before_vd_block_unready = - callisto_platform_before_vd_block_unready; - gxp->request_power_states = callisto_request_power_states; - gxp->wakelock_after_blk_on = callisto_wakelock_after_blk_on; - gxp->wakelock_before_blk_off = callisto_wakelock_before_blk_off; -#ifdef HAS_TPU_EXT - gxp->after_map_tpu_mbx_queue = callisto_after_map_tpu_mbx_queue; - gxp->before_unmap_tpu_mbx_queue = callisto_before_unmap_tpu_mbx_queue; -#endif return gxp_common_platform_probe(pdev, gxp); } diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h index 282ba88..03cc078 100644 --- a/gcip-kernel-driver/include/gcip/gcip-kci.h +++ b/gcip-kernel-driver/include/gcip/gcip-kci.h @@ -98,6 +98,7 @@ enum gcip_kci_code { GCIP_KCI_CODE_LINK_OFFLOAD_VMBOX = 17, GCIP_KCI_CODE_UNLINK_OFFLOAD_VMBOX = 18, GCIP_KCI_CODE_FIRMWARE_TRACING_LEVEL = 19, + GCIP_KCI_CODE_THERMAL_CONTROL = 20, GCIP_KCI_CODE_RKCI_ACK = 256, }; diff --git a/gxp-common-platform.c b/gxp-common-platform.c index 3319924..8d56391 100644 --- a/gxp-common-platform.c +++ b/gxp-common-platform.c @@ -47,8 +47,7 @@ #include "gxp-wakelock.h" #include "gxp.h" -#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5) -#define HAS_TPU_EXT +#if HAS_TPU_EXT #include #endif diff --git a/gxp-config.h b/gxp-config.h index 3b11205..6a35d90 100644 --- a/gxp-config.h +++ b/gxp-config.h @@ -40,4 +40,11 @@ #define GXP_HAS_MCU 1 #endif +#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \ + !IS_ENABLED(CONFIG_GXP_GEM5) +#define HAS_TPU_EXT 1 +#else +#define HAS_TPU_EXT 0 +#endif + #endif /* __GXP_CONFIG_H__ */ diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c index 3dfd70b..94a78b3 100644 --- a/gxp-dma-iommu.c +++ b/gxp-dma-iommu.c @@ -201,6 +201,18 @@ struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp) return gdomain; } +int gxp_iommu_map(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain, + unsigned long iova, phys_addr_t paddr, size_t size, int prot) +{ + return iommu_map(gdomain->domain, iova, paddr, size, prot); +} 
+ +void gxp_iommu_unmap(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain, + unsigned long iova, size_t size) +{ + iommu_unmap(gdomain->domain, iova, size); +} + int gxp_dma_init(struct gxp_dev *gxp) { struct gxp_dma_iommu_manager *mgr; diff --git a/gxp-dma.h b/gxp-dma.h index da7d433..eb131fd 100644 --- a/gxp-dma.h +++ b/gxp-dma.h @@ -52,6 +52,24 @@ struct gxp_dma_manager { #define DMA_MAPPING_ERROR (~(dma_addr_t)0) #endif +/** + * gxp_iommu_map() - Create mappings in iommu + * @gxp: The GXP device + * @gdomain: The IOMMU domain to create mappings in. + * + * Return: 0 on success or negative value indicating error + */ +int gxp_iommu_map(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain, + unsigned long iova, phys_addr_t paddr, size_t size, int prot); + +/** + * gxp_iommu_unmap() - Reverts mappings created by gxp_iommu_map() + * @gxp: The GXP device + * @gdomain: The IOMMU domain to revert mappings in. + */ +void gxp_iommu_unmap(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain, + unsigned long iova, size_t size); + /** * gxp_dma_init() - Initialize the GXP DMA subsystem * @gxp: The GXP device to initialize DMA for @@ -356,5 +374,4 @@ uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp, */ void gxp_iommu_setup_shareability(struct gxp_dev *gxp); - #endif /* __GXP_DMA_H__ */ diff --git a/gxp-mcu-firmware.c b/gxp-mcu-firmware.c index 30e0a64..956ccce 100644 --- a/gxp-mcu-firmware.c +++ b/gxp-mcu-firmware.c @@ -18,6 +18,7 @@ #include "gxp-bpm.h" #include "gxp-config.h" +#include "gxp-dma.h" #include "gxp-doorbell.h" #include "gxp-internal.h" #include "gxp-kci.h" @@ -111,10 +112,10 @@ static int gxp_mcu_firmware_load_locked(struct gxp_mcu_firmware *mcu_fw, if (ret) dev_err(dev, "image config parsing failed: %d", ret); } else - ret = iommu_map(iommu_get_domain_for_dev(gxp->dev), - mcu_fw->image_buf.daddr, - mcu_fw->image_buf.paddr, mcu_fw->image_buf.size, - IOMMU_READ | IOMMU_WRITE); + ret = gxp_iommu_map(gxp, gxp_iommu_get_domain_for_dev(gxp), + mcu_fw->image_buf.daddr, + mcu_fw->image_buf.paddr, mcu_fw->image_buf.size, + IOMMU_READ | IOMMU_WRITE); if (ret) goto out_release_firmware; @@ -136,8 +137,8 @@ static void gxp_mcu_firmware_unload_locked(struct gxp_mcu_firmware *mcu_fw) if (mcu_fw->is_signed) gcip_image_config_clear(&mcu_fw->cfg_parser); else - iommu_unmap(iommu_get_domain_for_dev(mcu_fw->gxp->dev), - mcu_fw->image_buf.daddr, mcu_fw->image_buf.size); + gxp_iommu_unmap(mcu_fw->gxp, gxp_iommu_get_domain_for_dev(mcu_fw->gxp), + mcu_fw->image_buf.daddr, mcu_fw->image_buf.size); } static int gxp_mcu_firmware_handshake(struct gxp_mcu_firmware *mcu_fw) @@ -421,8 +422,23 @@ static int image_config_map(void *data, dma_addr_t daddr, phys_addr_t paddr, dev_err(gxp->dev, "image config NS mappings are not supported"); return -EINVAL; } - return iommu_map(iommu_get_domain_for_dev(gxp->dev), daddr, paddr, size, - IOMMU_READ | IOMMU_WRITE); + + /* TODO(b/268150335): remove this block once MCU FW changes land */ + { + int i; + + for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) { + if (daddr == gxp->mbx[i].daddr) { + dev_warn( + gxp->dev, + "Skip mapping in MCU image config: %pad", + &daddr); + return 0; + } + } + } + return gxp_iommu_map(gxp, gxp_iommu_get_domain_for_dev(gxp), daddr, + paddr, size, IOMMU_READ | IOMMU_WRITE); } static void image_config_unmap(void *data, dma_addr_t daddr, size_t size, @@ -430,7 +446,16 @@ static void image_config_unmap(void *data, dma_addr_t daddr, size_t size, { struct gxp_dev *gxp = data; - iommu_unmap(iommu_get_domain_for_dev(gxp->dev), daddr, size); + /* 
TODO(b/268150335): remove this block once MCU FW changes land */ + { + int i; + + for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) { + if (daddr == gxp->mbx[i].daddr) + return; + } + } + gxp_iommu_unmap(gxp, gxp_iommu_get_domain_for_dev(gxp), daddr, size); } int gxp_mcu_firmware_init(struct gxp_dev *gxp, struct gxp_mcu_firmware *mcu_fw) diff --git a/gxp-mcu-platform.c b/gxp-mcu-platform.c index 852596b..de16dc4 100644 --- a/gxp-mcu-platform.c +++ b/gxp-mcu-platform.c @@ -7,6 +7,7 @@ #include +#include "gxp-config.h" #include "gxp-internal.h" #include "gxp-mcu-fs.h" #include "gxp-mcu-platform.h" @@ -24,6 +25,275 @@ module_param_named(work_mode, gxp_work_mode_name, charp, 0660); static char *chip_rev = "a0"; module_param(chip_rev, charp, 0660); +static int gxp_mcu_request_power_states(struct gxp_client *client, + struct gxp_power_states power_states) +{ + struct gxp_dev *gxp = client->gxp; + struct gxp_mcu *mcu = gxp_mcu_of(gxp); + struct gxp_uci_command cmd; + int ret; + + if (gxp_is_direct_mode(gxp)) + return -EOPNOTSUPP; + + /* Plus 1 to align with power states in MCU firmware. */ + cmd.wakelock_command_params.dsp_operating_point = + power_states.power + 1; + cmd.wakelock_command_params.memory_operating_point = + power_states.memory; + cmd.type = WAKELOCK_COMMAND; + cmd.client_id = client->vd->client_id; + + ret = gxp_uci_send_command( + &mcu->uci, client->vd, &cmd, + &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue, + &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].dest_queue, + &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock, + &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq, + client->mb_eventfds[UCI_RESOURCE_ID]); + return ret; +} + +static int allocate_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd) +{ + struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); + int client_id, ret; + + if (vd->is_secure) + client_id = SECURE_CLIENT_ID; + else + client_id = gxp_iommu_aux_get_pasid(gxp, vd->domain); + + ret = gxp_kci_allocate_vmbox(kci, client_id, vd->num_cores, + vd->slice_index, vd->first_open); + if (ret) { + if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) { + dev_err(gxp->dev, + "Failed to allocate VMBox for client %d, TPU client %d: %d", + client_id, vd->tpu_client_id, ret); + return ret; + } + + /* + * TODO(241057541): Remove this conditional branch after the firmware side + * implements handling allocate_vmbox command. + */ + dev_info( + gxp->dev, + "Allocating VMBox is not implemented from the firmware side"); + } + + vd->client_id = client_id; + vd->first_open = false; + + return 0; +} + +static void release_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd) +{ + struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); + int ret; + + if (vd->client_id < 0) + return; + + ret = gxp_kci_release_vmbox(kci, vd->client_id); + if (ret) { + /* + * TODO(241057541): Remove this conditional branch after the firmware side + * implements handling allocate_vmbox command. 
+ */ + if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED) + dev_info( + gxp->dev, + "Releasing VMBox is not implemented from the firmware side"); + else + dev_err(gxp->dev, + "Failed to release VMBox for client %d: %d", + vd->client_id, ret); + } + + vd->client_id = -1; +} + +static int gxp_mcu_link_offload_vmbox(struct gxp_dev *gxp, + struct gxp_virtual_device *vd, + u32 offload_client_id, + u8 offload_chip_type) +{ + struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); + int ret; + + ret = gxp_kci_link_unlink_offload_vmbox( + kci, vd->client_id, offload_client_id, offload_chip_type, true); + if (ret) { + if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) { + dev_err(gxp->dev, + "Failed to link offload VMBox for client %d, offload client %u, offload chip type %d: %d", + vd->client_id, offload_client_id, + offload_chip_type, ret); + return ret; + } + + /* + * TODO(241057541): Remove this conditional branch after the firmware side + * implements handling link_offload_vmbox command. + */ + dev_info( + gxp->dev, + "Linking offload VMBox is not implemented from the firmware side"); + } + + return 0; +} + +static void gxp_mcu_unlink_offload_vmbox(struct gxp_dev *gxp, + struct gxp_virtual_device *vd, + u32 offload_client_id, + u8 offload_chip_type) +{ + struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci); + int ret; + + ret = gxp_kci_link_unlink_offload_vmbox(kci, vd->client_id, + offload_client_id, + offload_chip_type, false); + if (ret) { + /* + * TODO(241057541): Remove this conditional branch after the firmware side + * implements handling allocate_vmbox command. + */ + if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED) + dev_info( + gxp->dev, + "Unlinking offload VMBox is not implemented from the firmware side"); + else + dev_err(gxp->dev, + "Failed to unlink offload VMBox for client %d, offload client %u, offload chip type %d: %d", + vd->client_id, offload_client_id, + offload_chip_type, ret); + } +} + +static int gxp_mcu_platform_after_vd_block_ready(struct gxp_dev *gxp, + struct gxp_virtual_device *vd) +{ + int ret; + + if (gxp_is_direct_mode(gxp)) + return 0; + + ret = allocate_vmbox(gxp, vd); + if (ret) + return ret; + + if (vd->tpu_client_id >= 0) { + ret = gxp_mcu_link_offload_vmbox( + gxp, vd, vd->tpu_client_id, + GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); + if (ret) + goto err_release_vmbox; + } + + return 0; + +err_release_vmbox: + release_vmbox(gxp, vd); + return ret; +} + +static void +gxp_mcu_platform_before_vd_block_unready(struct gxp_dev *gxp, + struct gxp_virtual_device *vd) +{ + if (gxp_is_direct_mode(gxp)) + return; + if (vd->client_id < 0 || vd->state == GXP_VD_UNAVAILABLE) + return; + if (vd->tpu_client_id >= 0) + gxp_mcu_unlink_offload_vmbox(gxp, vd, vd->tpu_client_id, + GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); + release_vmbox(gxp, vd); +} + +static int gxp_mcu_wakelock_after_blk_on(struct gxp_dev *gxp) +{ + struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp); + + if (gxp_is_direct_mode(gxp)) + return 0; + return gxp_mcu_firmware_run(mcu_fw); +} + +static void gxp_mcu_wakelock_before_blk_off(struct gxp_dev *gxp) +{ + struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp); + + if (gxp_is_direct_mode(gxp)) + return; + gxp_mcu_firmware_stop(mcu_fw); +} + +#if HAS_TPU_EXT + +static int get_tpu_client_id(struct gxp_client *client) +{ + struct gxp_dev *gxp = client->gxp; + struct edgetpu_ext_offload_info offload_info; + struct edgetpu_ext_client_info tpu_info; + int ret; + + tpu_info.tpu_file = client->tpu_file; + ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev, + EDGETPU_EXTERNAL_CLIENT_TYPE_DSP, + START_OFFLOAD, &tpu_info, 
&offload_info); + if (ret) + return ret; + + return offload_info.client_id; +} + +static int gxp_mcu_after_map_tpu_mbx_queue(struct gxp_dev *gxp, + struct gxp_client *client) +{ + struct gxp_virtual_device *vd = client->vd; + int tpu_client_id = -1, ret; + + if (gxp_is_direct_mode(gxp)) + return 0; + + tpu_client_id = get_tpu_client_id(client); + if (tpu_client_id < 0) { + dev_err(gxp->dev, "Failed to get a TPU client ID: %d", + tpu_client_id); + return tpu_client_id; + } + + if (vd->client_id >= 0) { + ret = gxp_mcu_link_offload_vmbox( + gxp, vd, tpu_client_id, GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); + if (ret) + return ret; + } + + vd->tpu_client_id = tpu_client_id; + + return 0; +} + +static void gxp_mcu_before_unmap_tpu_mbx_queue(struct gxp_dev *gxp, + struct gxp_client *client) +{ + struct gxp_virtual_device *vd = client->vd; + + if (vd->client_id >= 0 && vd->tpu_client_id >= 0) + gxp_mcu_unlink_offload_vmbox(gxp, vd, vd->tpu_client_id, + GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU); + vd->tpu_client_id = -1; +} + +#endif /* HAS_TPU_EXT */ + struct gxp_mcu *gxp_mcu_of(struct gxp_dev *gxp) { return &(to_mcu_dev(gxp)->mcu); @@ -77,6 +347,15 @@ void gxp_mcu_dev_init(struct gxp_mcu_dev *mcu_dev) gxp->before_remove = gxp_mcu_platform_before_remove; gxp->handle_ioctl = gxp_mcu_ioctl; gxp->handle_mmap = gxp_mcu_mmap; + gxp->after_vd_block_ready = gxp_mcu_platform_after_vd_block_ready; + gxp->before_vd_block_unready = gxp_mcu_platform_before_vd_block_unready; + gxp->request_power_states = gxp_mcu_request_power_states; + gxp->wakelock_after_blk_on = gxp_mcu_wakelock_after_blk_on; + gxp->wakelock_before_blk_off = gxp_mcu_wakelock_before_blk_off; +#if HAS_TPU_EXT + gxp->after_map_tpu_mbx_queue = gxp_mcu_after_map_tpu_mbx_queue; + gxp->before_unmap_tpu_mbx_queue = gxp_mcu_before_unmap_tpu_mbx_queue; +#endif } enum gxp_work_mode gxp_dev_parse_work_mode(const char *work_mode) diff --git a/gxp-mcu.c b/gxp-mcu.c index aae9519..0aab0a2 100644 --- a/gxp-mcu.c +++ b/gxp-mcu.c @@ -81,9 +81,10 @@ static void gxp_mcu_unmap_resources(struct gxp_mcu *mcu) int i; for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) - iommu_unmap(gdomain->domain, gxp->mbx[i].daddr, gxp->mbx[i].size); + gxp_iommu_unmap(gxp, gdomain, gxp->mbx[i].daddr, gxp->mbx[i].size); } +/* TODO(b/268150335): remove this function once MCU FW change lands */ static int gxp_mcu_map_resources(struct gxp_dev *gxp, struct gxp_mcu *mcu) { struct gxp_iommu_domain *gdomain = gxp_iommu_get_domain_for_dev(gxp); @@ -91,10 +92,9 @@ static int gxp_mcu_map_resources(struct gxp_dev *gxp, struct gxp_mcu *mcu) for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) { gxp->mbx[i].daddr = GXP_MCU_NS_MAILBOX(i - GXP_NUM_CORES); - ret = iommu_map(gdomain->domain, gxp->mbx[i].daddr, - gxp->mbx[i].paddr + - MAILBOX_DEVICE_INTERFACE_OFFSET, - gxp->mbx[i].size, IOMMU_READ | IOMMU_WRITE); + ret = gxp_iommu_map(gxp, gdomain, gxp->mbx[i].daddr, + gxp->mbx[i].paddr + MAILBOX_DEVICE_INTERFACE_OFFSET, + gxp->mbx[i].size, IOMMU_READ | IOMMU_WRITE); if (ret) goto err; } diff --git a/gxp-vd.c b/gxp-vd.c index 483321e..2468ca9 100644 --- a/gxp-vd.c +++ b/gxp-vd.c @@ -120,26 +120,25 @@ static void unmap_ns_region(struct gxp_virtual_device *vd, dma_addr_t daddr) static int map_core_shared_buffer(struct gxp_virtual_device *vd) { struct gxp_dev *gxp = vd->gxp; - struct iommu_domain *domain = vd->domain->domain; const size_t shared_size = GXP_SHARED_SLICE_SIZE; if (!gxp->shared_buf.paddr) return 0; - return iommu_map(domain, gxp->shared_buf.daddr, - gxp->shared_buf.paddr + shared_size * vd->slice_index, - 
shared_size, IOMMU_READ | IOMMU_WRITE); + return gxp_iommu_map(gxp, vd->domain, gxp->shared_buf.daddr, + gxp->shared_buf.paddr + + shared_size * vd->slice_index, + shared_size, IOMMU_READ | IOMMU_WRITE); } /* Reverts map_core_shared_buffer. */ static void unmap_core_shared_buffer(struct gxp_virtual_device *vd) { struct gxp_dev *gxp = vd->gxp; - struct iommu_domain *domain = vd->domain->domain; const size_t shared_size = GXP_SHARED_SLICE_SIZE; if (!gxp->shared_buf.paddr) return; - iommu_unmap(domain, gxp->shared_buf.daddr, shared_size); + gxp_iommu_unmap(gxp, vd->domain, gxp->shared_buf.daddr, shared_size); } /* Maps @res->daddr to @res->paddr to @vd->domain. */ @@ -148,8 +147,8 @@ static int map_resource(struct gxp_virtual_device *vd, { if (res->daddr == 0) return 0; - return iommu_map(vd->domain->domain, res->daddr, res->paddr, res->size, - IOMMU_READ | IOMMU_WRITE); + return gxp_iommu_map(vd->gxp, vd->domain, res->daddr, res->paddr, res->size, + IOMMU_READ | IOMMU_WRITE); } /* Reverts map_resource. */ @@ -158,7 +157,7 @@ static void unmap_resource(struct gxp_virtual_device *vd, { if (res->daddr == 0) return; - iommu_unmap(vd->domain->domain, res->daddr, res->size); + gxp_iommu_unmap(vd->gxp, vd->domain, res->daddr, res->size); } /* @@ -377,14 +376,14 @@ static int alloc_and_map_fw_image(struct gxp_dev *gxp, struct gxp_virtual_device *vd) { size_t ro_size = vd->fw_ro_size, rw_size; - struct iommu_domain *domain = vd->domain->domain; + struct gxp_iommu_domain *gdomain = vd->domain; int i, ret; /* Maps all FW regions together and no rwdata_sgt in this case. */ if (ro_size == gxp->fwbufs[0].size) - return iommu_map(domain, gxp->fwbufs[0].daddr, - gxp->fwbufs[0].paddr, ro_size * GXP_NUM_CORES, - IOMMU_READ | IOMMU_WRITE); + return gxp_iommu_map(gxp, gdomain, gxp->fwbufs[0].daddr, + gxp->fwbufs[0].paddr, ro_size * GXP_NUM_CORES, + IOMMU_READ | IOMMU_WRITE); dev_info(gxp->dev, "mapping firmware RO size %#zx", ro_size); rw_size = gxp->fwbufs[0].size - ro_size; @@ -399,9 +398,9 @@ static int alloc_and_map_fw_image(struct gxp_dev *gxp, } } for (i = 0; i < GXP_NUM_CORES; i++) { - ret = iommu_map(domain, gxp->fwbufs[i].daddr, - gxp->fwbufs[i].paddr, ro_size, - IOMMU_READ | IOMMU_WRITE); + ret = gxp_iommu_map(gxp, gdomain, gxp->fwbufs[i].daddr, + gxp->fwbufs[i].paddr, ro_size, + IOMMU_READ | IOMMU_WRITE); if (ret) { dev_err(gxp->dev, "map firmware RO for core %d failed", i); @@ -414,7 +413,7 @@ static int alloc_and_map_fw_image(struct gxp_dev *gxp, if (ret) { dev_err(gxp->dev, "map firmware RW for core %d failed", i); - iommu_unmap(domain, gxp->fwbufs[i].daddr, ro_size); + gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[i].daddr, ro_size); goto err_unmap; } } @@ -422,7 +421,7 @@ static int alloc_and_map_fw_image(struct gxp_dev *gxp, err_unmap: while (i--) { - iommu_unmap(domain, gxp->fwbufs[i].daddr, ro_size); + gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[i].daddr, ro_size); gxp_dma_unmap_iova_sgt(gxp, vd->domain, gxp->fwbufs[i].daddr + ro_size, vd->rwdata_sgt[i]); @@ -440,17 +439,16 @@ static void unmap_and_free_fw_image(struct gxp_dev *gxp, struct gxp_virtual_device *vd) { size_t ro_size = vd->fw_ro_size; - struct iommu_domain *domain = vd->domain->domain; + struct gxp_iommu_domain *gdomain = vd->domain; int i; if (ro_size == gxp->fwbufs[0].size) { - iommu_unmap(domain, gxp->fwbufs[0].daddr, - ro_size * GXP_NUM_CORES); + gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[0].daddr, ro_size * GXP_NUM_CORES); return; } for (i = 0; i < GXP_NUM_CORES; i++) { - iommu_unmap(domain, gxp->fwbufs[i].daddr, ro_size); + 
gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[i].daddr, ro_size);
 		gxp_dma_unmap_iova_sgt(gxp, vd->domain,
 				       gxp->fwbufs[i].daddr + ro_size,
 				       vd->rwdata_sgt[i]);
--
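
Note (Bug: 248436918, "define and use common interface for iommu_map"): gxp_iommu_map() and gxp_iommu_unmap(), declared in gxp-dma.h and implemented in gxp-dma-iommu.c above, are thin wrappers over iommu_map()/iommu_unmap() on gdomain->domain, so callers pass the driver's struct gxp_iommu_domain instead of dereferencing ->domain themselves; gxp-mcu-firmware.c, gxp-mcu.c and gxp-vd.c are converted accordingly. The sketch below shows the intended call pattern, modeled on map_resource()/unmap_resource() in gxp-vd.c from this patch; the example function names and the struct gxp_mapped_resource parameter type are assumptions made for illustration, not part of this change.

/* Illustrative sketch only -- not part of this patch. */
static int map_example_resource(struct gxp_virtual_device *vd,
				struct gxp_mapped_resource *res)
{
	/* Nothing to do when no device address has been assigned. */
	if (res->daddr == 0)
		return 0;
	/*
	 * Pass the gxp_iommu_domain wrapper; the helper returns 0 or the
	 * negative errno from iommu_map(), which callers can propagate.
	 */
	return gxp_iommu_map(vd->gxp, vd->domain, res->daddr, res->paddr,
			     res->size, IOMMU_READ | IOMMU_WRITE);
}

/* Reverts map_example_resource(). */
static void unmap_example_resource(struct gxp_virtual_device *vd,
				   struct gxp_mapped_resource *res)
{
	if (res->daddr == 0)
		return;
	gxp_iommu_unmap(vd->gxp, vd->domain, res->daddr, res->size);
}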
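
Note (Bug: 268151565, "temporarily ignore img cfg dup mapping"): the skip loops added to image_config_map()/image_config_unmap() in gxp-mcu-firmware.c cover device addresses that gxp_mcu_map_resources() in gxp-mcu.c already maps (the non-secure MCU mailboxes), so entries for the same addresses in the MCU image config would otherwise be mapped twice; both sites carry the same TODO(b/268150335) and are meant to go away once the MCU firmware change lands. Below is a minimal sketch of that check pulled out as a standalone helper; is_mcu_ns_mailbox_daddr() is a hypothetical name used only for illustration and assumes the driver's usual headers are in scope.

/*
 * Hypothetical helper, equivalent to the temporary skip in
 * image_config_map()/image_config_unmap(); not part of this patch.
 */
static bool is_mcu_ns_mailbox_daddr(struct gxp_dev *gxp, dma_addr_t daddr)
{
	int i;

	/* These mailboxes are already mapped by gxp_mcu_map_resources(). */
	for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) {
		if (daddr == gxp->mbx[i].daddr)
			return true;
	}
	return false;
}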