author     Victor Hsu <hsuvictor@google.com>    2021-11-10 14:01:16 +0800
committer  Victor Hsu <hsuvictor@google.com>    2021-12-13 13:34:17 +0800
commit     c972edc5cac0526943caaa390ddd0faf2ecb18db (patch)
tree       653bbcbe5021b5cac251e2da2ac41583c6b0868d
parent     11b8045bbc3a20844fd6abeb460df31dcd70bbb4 (diff)
download   cnss2-c972edc5cac0526943caaa390ddd0faf2ecb18db.tar.gz
wcn6740: Build mhi/qmi/qrtr out of kernel tree
Bug: 203725513
Signed-off-by: Victor Hsu <hsuvictor@google.com>
Change-Id: Ie484ffaf1bde124d141e8e87adf9e3beb5cf7174
-rw-r--r--   Kbuild                   20
-rw-r--r--   Makefile                 15
-rw-r--r--   inc/ipc_logging.h       287
-rw-r--r--   inc/mhi.h               843
-rw-r--r--   inc/mhi_misc.h          562
-rw-r--r--   inc/soc/qcom/qmi.h      272
-rw-r--r--   mhi/Kconfig              31
-rw-r--r--   mhi/Makefile              3
-rw-r--r--   mhi/core/Makefile         7
-rw-r--r--   mhi/core/boot.c         570
-rw-r--r--   mhi/core/debugfs.c      413
-rw-r--r--   mhi/core/init.c        1447
-rw-r--r--   mhi/core/internal.h     720
-rw-r--r--   mhi/core/main.c        1858
-rw-r--r--   mhi/core/misc.c        1674
-rw-r--r--   mhi/core/misc.h         303
-rw-r--r--   mhi/core/pm.c          1255
-rw-r--r--   qmi/Makefile              3
-rw-r--r--   qmi/qmi_encdec.c        816
-rw-r--r--   qmi/qmi_interface.c     849
-rw-r--r--   qrtr/Kconfig             70
-rw-r--r--   qrtr/Makefile            12
-rw-r--r--   qrtr/mhi.c              186
-rw-r--r--   qrtr/ns.c               809
-rw-r--r--   qrtr/qrtr.c            2069
-rw-r--r--   qrtr/qrtr.h              45
26 files changed, 15139 insertions, 0 deletions
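The first two hunks below extend the existing out-of-tree build: Kbuild descends into the new mhi/, qmi/ and qrtr/ sub-directories and converts the boolean options into -D preprocessor flags (an external-module build has no generated autoconf entries for them), while the top-level Makefile accumulates the module selections into KBUILD_OPTIONS. As a rough, hypothetical sketch of how such KBUILD_OPTIONS are normally handed to the kernel's module build (the KERNEL_SRC/M variables and targets here are illustrative assumptions, not part of this patch):

```make
# Hypothetical external-module entry point (not from this patch).
# The options accumulated in KBUILD_OPTIONS are appended to the
# "make modules" command line, so Kbuild sees CONFIG_MHI_BUS etc.
# as ordinary make variables when it parses the fragment below.
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
M ?= $(shell pwd)

all:
	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)

modules_install:
	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules_install

clean:
	$(MAKE) -C $(KERNEL_SRC) M=$(M) clean
```

Passing the options on the sub-make command line is what lets the Kbuild fragment in this patch test $(CONFIG_MHI_BUS_MISC) and friends without any in-tree Kconfig support.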
diff --git a/Kbuild b/Kbuild
@@ -20,3 +20,23 @@ obj-$(CONFIG_CNSS2) += cnss2/
 obj-$(CONFIG_CNSS_GENL) += cnss_genl/
 obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
 obj-y += cnss_utils/
+
+
+# MHI
+ifeq ($(CONFIG_MHI_BUS_MISC),y)
+KBUILD_CPPFLAGS += -DCONFIG_MHI_BUS_MISC
+endif
+
+ifeq ($(CONFIG_MHI_DEBUG),y)
+KBUILD_CPPFLAGS += -DCONFIG_MHI_DEBUG
+endif
+
+obj-$(CONFIG_MHI_BUS) += mhi/
+
+# QMI
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi/
+
+# QRTR
+KBUILD_CPPFLAGS += -DCONFIG_QRTR_NODE_ID=$(CONFIG_QRTR_NODE_ID)
+KBUILD_CPPFLAGS += -DCONFIG_QRTR_WAKEUP_MS=$(CONFIG_QRTR_WAKEUP_MS)
+obj-$(CONFIG_QRTR) += qrtr/
diff --git a/Makefile b/Makefile
@@ -18,6 +18,21 @@ KBUILD_OPTIONS += CONFIG_CNSS_PLAT_IPC_QMI_SVC=m
 KBUILD_OPTIONS += CONFIG_CNSS_GENL=m
 KBUILD_OPTIONS += CONFIG_WCNSS_MEM_PRE_ALLOC=m
 KBUILD_OPTIONS += CONFIG_CNSS_UTILS=m
+
+# MHI
+KBUILD_OPTIONS += CONFIG_MHI_BUS=m
+KBUILD_OPTIONS += CONFIG_MHI_BUS_MISC=y
+KBUILD_OPTIONS += CONFIG_MHI_DEBUG=y
+
+# QMI
+KBUILD_OPTIONS += CONFIG_QCOM_QMI_HELPERS=m
+
+# QRTR
+KBUILD_OPTIONS += CONFIG_QRTR=m
+KBUILD_OPTIONS += CONFIG_QRTR_MHI=m
+KBUILD_OPTIONS += CONFIG_QRTR_NODE_ID=1
+KBUILD_OPTIONS += CONFIG_QRTR_WAKEUP_MS=0
+
 endif

 all:
diff --git a/inc/ipc_logging.h b/inc/ipc_logging.h
new file mode 100644
index 0000000..e60492f
--- /dev/null
+++ b/inc/ipc_logging.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2015,2017,2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+        TSV_TYPE_MSG_START = 1,
+        TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+        TSV_TYPE_STRING,
+        TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+        unsigned char type;
+        unsigned char size; /* size of data field */
+};
+
+struct encode_context {
+        struct tsv_header hdr;
+        char buff[MAX_MSG_SIZE];
+        int offset;
+};
+
+struct decode_context {
+        int output_format;      /* 0 = debugfs */
+        char *buff;             /* output buffer */
+        int size;               /* size of output buffer */
+};
+
+#if IS_ENABLED(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ *                         Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max.
10) + * @mod_name : Name of the directory entry under DEBUGFS + * @feature_version : First 16 bit for version number of user-defined message + * formats and next 16 bit for enabling minidump + * + * returns context id on success, NULL on failure + */ +void *ipc_log_context_create(int max_num_pages, const char *modname, + uint32_t feature_version); + +/* + * msg_encode_start: Start encoding a log message + * + * @ectxt: Temporary storage to hold the encoded message + * @type: Root event type defined by the module which is logging + */ +void msg_encode_start(struct encode_context *ectxt, uint32_t type); + +/* + * tsv_timestamp_write: Writes the current timestamp count + * + * @ectxt: Context initialized by calling msg_encode_start() + */ +int tsv_timestamp_write(struct encode_context *ectxt); + +/* + * tsv_qtimer_write: Writes the current QTimer timestamp count + * + * @ectxt: Context initialized by calling msg_encode_start() + */ +int tsv_qtimer_write(struct encode_context *ectxt); + +/* + * tsv_pointer_write: Writes a data pointer + * + * @ectxt: Context initialized by calling msg_encode_start() + * @pointer: Pointer value to write + */ +int tsv_pointer_write(struct encode_context *ectxt, void *pointer); + +/* + * tsv_int32_write: Writes a 32-bit integer value + * + * @ectxt: Context initialized by calling msg_encode_start() + * @n: Integer to write + */ +int tsv_int32_write(struct encode_context *ectxt, int32_t n); + +/* + * tsv_byte_array_write: Writes a byte array + * + * @ectxt: Context initialized by calling msg_encode_start() + * @data: Pointer to byte array + * @data_size: Size of byte array + */ +int tsv_byte_array_write(struct encode_context *ectxt, + void *data, int data_size); + +/* + * msg_encode_end: Complete the message encode process + * + * @ectxt: Temporary storage which holds the encoded message + */ +void msg_encode_end(struct encode_context *ectxt); + +/* + * ipc_log_write: Commits message to logging ring buffer + * + * @ctxt: Logging context + * @ectxt: Temporary storage which holds the encoded message + */ +void ipc_log_write(void *ctxt, struct encode_context *ectxt); + +/* + * ipc_log_string: Helper function to log a string + * + * @ilctxt: Debug Log Context created using ipc_log_context_create() + * @fmt: Data specified using format specifiers + */ +int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3); + +/** + * ipc_log_extract - Reads and deserializes log + * + * @ilctxt: logging context + * @buff: buffer to receive the data + * @size: size of the buffer + * @returns: 0 if no data read; >0 number of bytes read; < 0 error + * + * If no data is available to be read, then the ilctxt::read_avail + * completion is reinitialized. This allows clients to block + * until new log data is save. + */ +int ipc_log_extract(void *ilctxt, char *buff, int size); + +/* + * Print a string to decode context. + * @dctxt Decode context + * @args printf args + */ +#define IPC_SPRINTF_DECODE(dctxt, args...) 
\ +do { \ + int i; \ + i = scnprintf(dctxt->buff, dctxt->size, args); \ + dctxt->buff += i; \ + dctxt->size -= i; \ +} while (0) + +/* + * tsv_timestamp_read: Reads a timestamp + * + * @ectxt: Context retrieved by reading from log space + * @dctxt: Temporary storage to hold the decoded message + * @format: Output format while dumping through DEBUGFS + */ +void tsv_timestamp_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format); + +/* + * tsv_qtimer_read: Reads a QTimer timestamp + * + * @ectxt: Context retrieved by reading from log space + * @dctxt: Temporary storage to hold the decoded message + * @format: Output format while dumping through DEBUGFS + */ +void tsv_qtimer_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format); + +/* + * tsv_pointer_read: Reads a data pointer + * + * @ectxt: Context retrieved by reading from log space + * @dctxt: Temporary storage to hold the decoded message + * @format: Output format while dumping through DEBUGFS + */ +void tsv_pointer_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format); + +/* + * tsv_int32_read: Reads a 32-bit integer value + * + * @ectxt: Context retrieved by reading from log space + * @dctxt: Temporary storage to hold the decoded message + * @format: Output format while dumping through DEBUGFS + */ +int32_t tsv_int32_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format); + +/* + * tsv_byte_array_read: Reads a byte array + * + * @ectxt: Context retrieved by reading from log space + * @dctxt: Temporary storage to hold the decoded message + * @format: Output format while dumping through DEBUGFS + */ +void tsv_byte_array_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format); + +/* + * add_deserialization_func: Register a deserialization function to + * to unpack the subevents of a main event + * + * @ctxt: Debug log context to which the deserialization function has + * to be registered + * @type: Main/Root event, defined by the module which is logging, to + * which this deserialization function has to be registered. + * @dfune: Deserialization function to be registered + * + * return 0 on success, -ve value on FAILURE + */ +int add_deserialization_func(void *ctxt, int type, + void (*dfunc)(struct encode_context *, + struct decode_context *)); + +/* + * ipc_log_context_destroy: Destroy debug log context + * + * @ctxt: debug log context created by calling ipc_log_context_create API. + */ +int ipc_log_context_destroy(void *ctxt); + +#else + +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint32_t feature_version) +{ return NULL; } + +static inline void msg_encode_start(struct encode_context *ectxt, + uint32_t type) { } + +static inline int tsv_timestamp_write(struct encode_context *ectxt) +{ return -EINVAL; } + +static inline int tsv_qtimer_write(struct encode_context *ectxt) +{ return -EINVAL; } + +static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer) +{ return -EINVAL; } + +static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n) +{ return -EINVAL; } + +static inline int tsv_byte_array_write(struct encode_context *ectxt, + void *data, int data_size) +{ return -EINVAL; } + +static inline void msg_encode_end(struct encode_context *ectxt) { } + +static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { } + +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) 
+{ return -EINVAL; } + +static inline int ipc_log_extract(void *ilctxt, char *buff, int size) +{ return -EINVAL; } + +#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0) + +static inline void tsv_timestamp_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline void tsv_qtimer_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline void tsv_pointer_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline int32_t tsv_int32_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) +{ return 0; } + +static inline void tsv_byte_array_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline int add_deserialization_func(void *ctxt, int type, + void (*dfunc)(struct encode_context *, + struct decode_context *)) +{ return 0; } + +static inline int ipc_log_context_destroy(void *ctxt) +{ return 0; } + +#endif + +#endif diff --git a/inc/mhi.h b/inc/mhi.h new file mode 100644 index 0000000..50e4935 --- /dev/null +++ b/inc/mhi.h @@ -0,0 +1,843 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * + */ +#ifndef _MHI_H_ +#define _MHI_H_ + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/mutex.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +/* MHI client drivers to set this upper bound for tx buffer */ +#define MHI_MAX_MTU 0xffff + +#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16 + +struct mhi_chan; +struct mhi_event; +struct mhi_ctxt; +struct mhi_cmd; +struct mhi_buf_info; + +/** + * enum mhi_callback - MHI callback + * @MHI_CB_IDLE: MHI entered idle state + * @MHI_CB_PENDING_DATA: New data available for client to process + * @MHI_CB_LPM_ENTER: MHI host entered low power mode + * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode + * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env + * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env + * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover) + * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state + * @MHI_CB_BW_REQ: Received a bandwidth switch request from device + * @MHI_CB_FALLBACK_IMG: MHI device was loaded with the provided fallback image + * @MHI_CB_DTR_SIGNAL: DTR signaling update + */ +enum mhi_callback { + MHI_CB_IDLE, + MHI_CB_PENDING_DATA, + MHI_CB_LPM_ENTER, + MHI_CB_LPM_EXIT, + MHI_CB_EE_RDDM, + MHI_CB_EE_MISSION_MODE, + MHI_CB_SYS_ERROR, + MHI_CB_FATAL_ERROR, + MHI_CB_BW_REQ, + MHI_CB_FALLBACK_IMG, + MHI_CB_DTR_SIGNAL, +}; + +/** + * enum mhi_flags - Transfer flags + * @MHI_EOB: End of buffer for bulk transfer + * @MHI_EOT: End of transfer + * @MHI_CHAIN: Linked transfer + */ +enum mhi_flags { + MHI_EOB = BIT(0), + MHI_EOT = BIT(1), + MHI_CHAIN = BIT(2), +}; + +/** + * enum mhi_device_type - Device types + * @MHI_DEVICE_XFER: Handles data transfer + * @MHI_DEVICE_CONTROLLER: Control device + */ +enum mhi_device_type { + MHI_DEVICE_XFER, + MHI_DEVICE_CONTROLLER, +}; + +/** + * enum mhi_ch_type - Channel types + * @MHI_CH_TYPE_INVALID: Invalid channel type + * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device + * @MHI_CH_TYPE_INBOUND: Inbound channel from the device + * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine + * multiple packets and send them as a 
single + * large packet to reduce CPU consumption + */ +enum mhi_ch_type { + MHI_CH_TYPE_INVALID = 0, + MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, + MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, + MHI_CH_TYPE_INBOUND_COALESCED = 3, +}; + +/** + * struct image_info - Firmware and RDDM table + * @mhi_buf: Buffer for firmware and RDDM table + * @entries: # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + /* private: from internal.h */ + struct bhi_vec_entry *bhi_vec; + /* public: */ + u32 entries; +}; + +/** + * struct mhi_link_info - BW requirement + * target_link_speed - Link speed as defined by TLS bits in LinkControl reg + * target_link_width - Link width as defined by NLW bits in LinkStatus reg + * sequence_num - used by device to track bw requests sent to host + */ +struct mhi_link_info { + unsigned int target_link_speed; + unsigned int target_link_width; + int sequence_num; +}; + +/** + * enum mhi_ee_type - Execution environment types + * @MHI_EE_PBL: Primary Bootloader + * @MHI_EE_SBL: Secondary Bootloader + * @MHI_EE_AMSS: Modem, aka the primary runtime EE + * @MHI_EE_RDDM: Ram dump download mode + * @MHI_EE_WFW: WLAN firmware mode + * @MHI_EE_PTHRU: Passthrough + * @MHI_EE_EDL: Embedded downloader + */ +enum mhi_ee_type { + MHI_EE_PBL, + MHI_EE_SBL, + MHI_EE_AMSS, + MHI_EE_RDDM, + MHI_EE_WFW, + MHI_EE_PTHRU, + MHI_EE_EDL, + MHI_EE_MAX_SUPPORTED = MHI_EE_EDL, + MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ + MHI_EE_NOT_SUPPORTED, + MHI_EE_MAX, +}; + +/** + * enum mhi_state - MHI states + * @MHI_STATE_RESET: Reset state + * @MHI_STATE_READY: Ready state + * @MHI_STATE_M0: M0 state + * @MHI_STATE_M1: M1 state + * @MHI_STATE_M2: M2 state + * @MHI_STATE_M3: M3 state + * @MHI_STATE_M3_FAST: M3 Fast state + * @MHI_STATE_BHI: BHI state + * @MHI_STATE_SYS_ERR: System Error state + */ +enum mhi_state { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_M3_FAST = 0x6, + MHI_STATE_BHI = 0x7, + MHI_STATE_SYS_ERR = 0xFF, + MHI_STATE_MAX, +}; + +/** + * enum mhi_ch_ee_mask - Execution environment mask for channel + * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE + * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE + * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE + * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE + * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE + * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE + * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE + */ +enum mhi_ch_ee_mask { + MHI_CH_EE_PBL = BIT(MHI_EE_PBL), + MHI_CH_EE_SBL = BIT(MHI_EE_SBL), + MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS), + MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM), + MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU), + MHI_CH_EE_WFW = BIT(MHI_EE_WFW), + MHI_CH_EE_EDL = BIT(MHI_EE_EDL), +}; + +/** + * enum mhi_er_data_type - Event ring data types + * @MHI_ER_DATA: Only client data over this ring + * @MHI_ER_CTRL: MHI control data and client data + * @MHI_ER_BW_SCALE: MHI controller bandwidth scale functionality + * @MHI_ER_TIMESYNC: MHI controller time synchronization DB mode functionality + */ +enum mhi_er_data_type { + MHI_ER_DATA, + MHI_ER_CTRL, + MHI_ER_BW_SCALE, + MHI_ER_TIMESYNC, +}; + +/** + * enum mhi_er_priority - Event ring processing priority + * @MHI_ER_PRIORITY_DEFAULT_NOSLEEP: processed by tasklet + * @MHI_ER_PRIORITY_HI_NOSLEEP: processed by hi-priority tasklet + * @MHI_ER_PRIORITY_HI_SLEEP: processed by hi-priority wq + */ +enum mhi_er_priority { + 
MHI_ER_PRIORITY_DEFAULT_NOSLEEP, + MHI_ER_PRIORITY_HI_NOSLEEP, + MHI_ER_PRIORITY_HI_SLEEP, +}; + +/** + * enum mhi_db_brst_mode - Doorbell mode + * @MHI_DB_BRST_DISABLE: Burst mode disable + * @MHI_DB_BRST_ENABLE: Burst mode enable + */ +enum mhi_db_brst_mode { + MHI_DB_BRST_DISABLE = 0x2, + MHI_DB_BRST_ENABLE = 0x3, +}; + +/** + * struct mhi_channel_config - Channel configuration structure for controller + * @name: The name of this channel + * @num: The number assigned to this channel + * @num_elements: The number of elements that can be queued to this channel + * @local_elements: The local ring length of the channel + * @event_ring: The event rung index that services this channel + * @dir: Direction that data may flow on this channel + * @type: Channel type + * @ee_mask: Execution Environment mask for this channel + * @pollcfg: Polling configuration for burst mode. 0 is default. milliseconds + for UL channels, multiple of 8 ring elements for DL channels + * @doorbell: Doorbell mode + * @lpm_notify: The channel master requires low power mode notifications + * @offload_channel: The client manages the channel completely + * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition + * @auto_queue: Framework will automatically queue buffers for DL traffic + * @wake-capable: Channel capable of waking up the system + */ +struct mhi_channel_config { + char *name; + u32 num; + u32 num_elements; + u32 local_elements; + u32 event_ring; + enum dma_data_direction dir; + enum mhi_ch_type type; + u32 ee_mask; + u32 pollcfg; + enum mhi_db_brst_mode doorbell; + bool lpm_notify; + bool offload_channel; + bool doorbell_mode_switch; + bool auto_queue; + bool wake_capable; +}; + +/** + * struct mhi_event_config - Event ring configuration structure for controller + * @num_elements: The number of elements that can be queued to this ring + * @irq_moderation_ms: Delay irq for additional events to be aggregated + * @irq: IRQ associated with this ring + * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring + * @priority: Processing priority of this ring. + * @mode: Doorbell mode + * @data_type: Type of data this ring will process + * @hardware_event: This ring is associated with hardware channels + * @client_managed: This ring is client managed + * @offload_channel: This ring is associated with an offloaded channel + */ +struct mhi_event_config { + u32 num_elements; + u32 irq_moderation_ms; + u32 irq; + u32 channel; + u32 priority; + enum mhi_db_brst_mode mode; + enum mhi_er_data_type data_type; + bool hardware_event; + bool client_managed; + bool offload_channel; +}; + +/** + * struct mhi_controller_config - Root MHI controller configuration + * @max_channels: Maximum number of channels supported + * @timeout_ms: Timeout value for operations. 0 means use default + * @buf_len: Size of automatically allocated buffers. 
0 means use default + * @num_channels: Number of channels defined in @ch_cfg + * @ch_cfg: Array of defined channels + * @num_events: Number of event rings defined in @event_cfg + * @event_cfg: Array of defined event rings + * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access + * @m2_no_db: Host is not allowed to ring DB in M2 state + */ +struct mhi_controller_config { + u32 max_channels; + u32 timeout_ms; + u32 buf_len; + u32 num_channels; + const struct mhi_channel_config *ch_cfg; + u32 num_events; + const struct mhi_event_config *event_cfg; + bool use_bounce_buf; + bool m2_no_db; +}; + +/** + * struct mhi_controller - Master MHI controller structure + * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI + * controller (required) + * @mhi_dev: MHI device instance for the controller + * @debugfs_dentry: MHI controller debugfs directory + * @regs: Base address of MHI MMIO register space (required) + * @reg_len: Length of the MHI MMIO region (required) + * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space + * @wake_db: MHI WAKE doorbell register address + * @iova_start: IOMMU starting address for data (required) + * @iova_stop: IOMMU stop address for data (required) + * @fw_image: Firmware image name for normal booting (required) + * @fallback_fw_image: Fallback firmware image name for backup boot (optional) + * @edl_image: Firmware image name for emergency download mode (optional) + * @rddm_size: RAM dump size that host should allocate for debugging purpose + * @sbl_size: SBL image size downloaded through BHIe (optional) + * @seg_len: BHIe vector size (optional) + * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer + * @mhi_chan: Points to the channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @irq: base irq # to request (required) + * @max_chan: Maximum number of channels the controller supports + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @nr_irqs: Number of IRQ allocated by bus master (required) + * @family_number: MHI controller family number + * @device_number: MHI controller device number + * @major_version: MHI controller major revision number + * @minor_version: MHI controller minor revision number + * @serial_number: MHI controller serial number obtained from BHI + * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @pm_mutex: Mutex for suspend/resume operation + * @pm_lock: Lock for protecting MHI power management state + * @timeout_ms: Timeout in ms for state transitions + * @pm_state: MHI power management state + * @db_access: DB access states + * @ee: MHI device execution environment + * @dev_state: MHI device state + * @dev_wake: Device wakeup count + * @pending_pkts: Pending packets for the controller + * @M0, M2, M3, M3_fast: Counters to track number of device MHI state changes + * @transition_list: List of MHI state transitions + * @transition_lock: Lock for protecting MHI state transition list + * @wlock: Lock for protecting device wakeup + * @mhi_link_info: Device bandwidth info + * @st_worker: State transition worker + * @hiprio_wq: High priority workqueue for MHI work such as state transitions + * 
@state_event: State change event + * @status_cb: CB function to notify power states of the device (required) + * @wake_get: CB function to assert device wake (optional) + * @wake_put: CB function to de-assert device wake (optional) + * @wake_toggle: CB function to assert and de-assert device wake (optional) + * @runtime_get: CB function to controller runtime resume (required) + * @runtime_put: CB function to decrement pm usage (required) + * @map_single: CB function to create TRE buffer + * @unmap_single: CB function to destroy TRE buffer + * @read_reg: Read a MHI register via the physical link (required) + * @write_reg: Write a MHI register via the physical link (required) + * @reset: Controller specific reset function (optional) + * @buffer_len: Bounce buffer length + * @index: Index of the MHI controller instance + * @img_pre_alloc: allocate rddm and fbc image buffers one time + * @bounce_buf: Use of bounce buffer + * @fbc_download: MHI host needs to do complete image transfer (optional) + * @wake_set: Device wakeup set flag + * + * Fields marked as (required) need to be populated by the controller driver + * before calling mhi_register_controller(). For the fields marked as (optional) + * they can be populated depending on the usecase. + * + * The following fields are present for the purpose of implementing any device + * specific quirks or customizations for specific MHI revisions used in device + * by the controller drivers. The MHI stack will just populate these fields + * during mhi_register_controller(): + * family_number + * device_number + * major_version + * minor_version + */ +struct mhi_controller { + struct device *cntrl_dev; + struct mhi_device *mhi_dev; + struct dentry *debugfs_dentry; + void __iomem *regs; + size_t reg_len; + void __iomem *bhi; + void __iomem *bhie; + void __iomem *wake_db; + + dma_addr_t iova_start; + dma_addr_t iova_stop; + const char *fw_image; + const char *fallback_fw_image; + const char *edl_image; + size_t rddm_size; + size_t sbl_size; + size_t seg_len; + struct image_info *fbc_image; + struct image_info *rddm_image; + struct mhi_chan *mhi_chan; + struct list_head lpm_chans; + int *irq; + u32 max_chan; + u32 total_ev_rings; + u32 hw_ev_rings; + u32 sw_ev_rings; + u32 nr_irqs; + u32 family_number; + u32 device_number; + u32 major_version; + u32 minor_version; + u32 serial_number; + u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS]; + u32 session_id; + + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + struct mhi_ctxt *mhi_ctxt; + + struct mutex pm_mutex; + rwlock_t pm_lock; + u32 timeout_ms; + u32 pm_state; + u32 db_access; + enum mhi_ee_type ee; + enum mhi_state dev_state; + atomic_t dev_wake; + atomic_t pending_pkts; + u32 M0, M2, M3, M3_fast; + struct list_head transition_list; + spinlock_t transition_lock; + spinlock_t wlock; + struct mhi_link_info mhi_link_info; + struct work_struct st_worker; + struct workqueue_struct *hiprio_wq; + wait_queue_head_t state_event; + + void (*status_cb)(struct mhi_controller *mhi_cntrl, + enum mhi_callback cb); + void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_toggle)(struct mhi_controller *mhi_cntrl); + int (*runtime_get)(struct mhi_controller *mhi_cntrl); + void (*runtime_put)(struct mhi_controller *mhi_cntrl); + int (*map_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + void (*unmap_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + int (*read_reg)(struct 
mhi_controller *mhi_cntrl, void __iomem *addr, + u32 *out); + void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, + u32 val); + void (*reset)(struct mhi_controller *mhi_cntrl); + + size_t buffer_len; + int index; + bool img_pre_alloc; + bool bounce_buf; + bool fbc_download; + bool wake_set; +}; + +/** + * struct mhi_device - Structure representing an MHI device which binds + * to channels or is associated with controllers + * @id: Pointer to MHI device ID struct + * @name: Name of the associated MHI device + * @mhi_cntrl: Controller the device belongs to + * @ul_chan: UL channel for the device + * @dl_chan: DL channel for the device + * @dev: Driver model device node for the MHI device + * @dev_type: MHI device type + * @ul_chan_id: MHI channel id for UL transfer + * @ul_event_id: MHI event ring id for UL transfer + * @dl_chan_id: MHI channel id for DL transfer + * @ul_event_id: MHI event ring id for DL transfer + * @dev_wake: Device wakeup counter + * @tiocm: Device current terminal settings + */ +struct mhi_device { + const struct mhi_device_id *id; + const char *name; + struct mhi_controller *mhi_cntrl; + struct mhi_chan *ul_chan; + struct mhi_chan *dl_chan; + struct device dev; + enum mhi_device_type dev_type; + int ul_chan_id; + int ul_event_id; + int dl_chan_id; + int dl_event_id; + u32 dev_wake; + u32 tiocm; +}; + +/** + * struct mhi_result - Completed buffer information + * @buf_addr: Address of data buffer + * @bytes_xferd: # of bytes transferred + * @dir: Channel direction + * @transaction_status: Status of last transaction + */ +struct mhi_result { + void *buf_addr; + size_t bytes_xferd; + enum dma_data_direction dir; + int transaction_status; +}; + +/** + * struct mhi_buf - MHI Buffer description + * @buf: Virtual address of the buffer + * @name: Buffer label. 
For offload channel, configurations name must be: + * ECA - Event context array data + * CCA - Channel context array data + * @dma_addr: IOMMU address of the buffer + * @len: # of bytes + */ +struct mhi_buf { + void *buf; + const char *name; + dma_addr_t dma_addr; + size_t len; +}; + +/** + * struct mhi_driver - Structure representing a MHI client driver + * @probe: CB function for client driver probe function + * @remove: CB function for client driver remove function + * @ul_xfer_cb: CB function for UL data transfer + * @dl_xfer_cb: CB function for DL data transfer + * @status_cb: CB functions for asynchronous status + * @driver: Device driver model driver + */ +struct mhi_driver { + const struct mhi_device_id *id_table; + int (*probe)(struct mhi_device *mhi_dev, + const struct mhi_device_id *id); + void (*remove)(struct mhi_device *mhi_dev); + void (*ul_xfer_cb)(struct mhi_device *mhi_dev, + struct mhi_result *result); + void (*dl_xfer_cb)(struct mhi_device *mhi_dev, + struct mhi_result *result); + void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb); + struct device_driver driver; +}; + +#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver) +#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev) + +/** + * mhi_alloc_controller - Allocate the MHI Controller structure + * Allocate the mhi_controller structure using zero initialized memory + */ +struct mhi_controller *mhi_alloc_controller(void); + +/** + * mhi_free_controller - Free the MHI Controller structure + * Free the mhi_controller structure which was previously allocated + */ +void mhi_free_controller(struct mhi_controller *mhi_cntrl); + +/** + * mhi_register_controller - Register MHI controller + * @mhi_cntrl: MHI controller to register + * @config: Configuration to use for the controller + */ +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config); + +/** + * mhi_unregister_controller - Unregister MHI controller + * @mhi_cntrl: MHI controller to unregister + */ +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl); + +/* + * module_mhi_driver() - Helper macro for drivers that don't do + * anything special other than using default mhi_driver_register() and + * mhi_driver_unregister(). This eliminates a lot of boilerplate. + * Each module may only use this macro once. 
+ */ +#define module_mhi_driver(mhi_drv) \ + module_driver(mhi_drv, mhi_driver_register, \ + mhi_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define mhi_driver_register(mhi_drv) \ + __mhi_driver_register(mhi_drv, THIS_MODULE) + +/** + * __mhi_driver_register - Register driver with MHI framework + * @mhi_drv: Driver associated with the device + * @owner: The module owner + */ +int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner); + +/** + * mhi_driver_unregister - Unregister a driver for mhi_devices + * @mhi_drv: Driver associated with the device + */ +void mhi_driver_unregister(struct mhi_driver *mhi_drv); + +/** + * mhi_set_mhi_state - Set MHI device state + * @mhi_cntrl: MHI controller + * @state: State to set + */ +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_state state); + +/** + * mhi_notify - Notify the MHI client driver about client device status + * @mhi_dev: MHI device instance + * @cb_reason: MHI callback reason + */ +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason); + +/** + * mhi_get_free_desc_count - Get transfer ring length + * Get # of TD available to queue buffers + * @mhi_dev: Device associated with the channels + * @dir: Direction of the channel + */ +int mhi_get_free_desc_count(struct mhi_device *mhi_dev, + enum dma_data_direction dir); + +/** + * mhi_prepare_for_power_up - Do pre-initialization before power up. + * This is optional, call this before power up if + * the controller does not want bus framework to + * automatically free any allocated memory during + * shutdown process. + * @mhi_cntrl: MHI controller + */ +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_async_power_up - Start MHI power up sequence + * @mhi_cntrl: MHI controller + */ +int mhi_async_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_sync_power_up - Start MHI power up sequence and wait till the device + * enters valid EE state + * @mhi_cntrl: MHI controller + */ +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_power_down - Start MHI power down sequence + * @mhi_cntrl: MHI controller + * @graceful: Link is still accessible, so do a graceful shutdown process + */ +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); + +/** + * mhi_unprepare_after_power_down - Free any allocated memory after power down + * @mhi_cntrl: MHI controller + */ +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_suspend - Move MHI into a suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_resume - Resume MHI from suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_resume(struct mhi_controller *mhi_cntrl); + +/** + * mhi_download_rddm_image - Download ramdump image from device for + * debugging purpose. 
+ * @mhi_cntrl: MHI controller + * @in_panic: Download rddm image during kernel panic + */ +int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic); + +/** + * mhi_force_rddm_mode - Force device into rddm mode + * @mhi_cntrl: MHI controller + */ +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); + +/** + * mhi_get_exec_env - Get BHI execution environment of the device + * @mhi_cntrl: MHI controller + */ +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); + +/** + * mhi_get_mhi_state - Get MHI state of the device + * @mhi_cntrl: MHI controller + */ +enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); + +/** + * mhi_soc_reset - Trigger a device reset. This can be used as a last resort + * to reset and recover a device. + * @mhi_cntrl: MHI controller + */ +void mhi_soc_reset(struct mhi_controller *mhi_cntrl); + +/** + * mhi_device_get - Disable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_get(struct mhi_device *mhi_dev); + +/** + * mhi_device_get_sync - Disable device low power mode. Synchronously + * take the controller out of suspended state + * @mhi_dev: Device associated with the channel + */ +int mhi_device_get_sync(struct mhi_device *mhi_dev); + +/** + * mhi_device_put - Re-enable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_put(struct mhi_device *mhi_dev); + +/** + * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer. + * Allocate and initialize the channel context and + * also issue the START channel command to both + * channels. Channels can be started only if both + * host and device execution environments match and + * channels are in a DISABLED state. Calling the + * mhi_start_transfer() function is not required + * afterwards as channels are already started. This + * function also initializes the channel context + * whereas mhi_start_transfer() can only be used to + * issue the start channel command once the context + * is setup. + * @mhi_dev: Device associated with the channels + */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. + * Issue the RESET channel command and let the + * device clean-up the context so no incoming + * transfers are seen on the host. Free memory + * associated with the context on host. If device + * is unresponsive, only perform a host side + * clean-up. Channels can be reset only if both + * host and device execution environments match + * and channels are in an ENABLED, STOPPED or + * SUSPENDED state. Calling mhi_stop_transfer() is + * not required before calling this function as it + * will only stop transfers, not reset channels. + * @mhi_dev: Device associated with the channels + */ +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_stop_transfer - Pauses ongoing channel activity by issuing the STOP + * channel command to both UL and DL channels. This command + * does not reset the channel context and the client drivers + * can issue mhi_start_transfer to resume activity. + * @mhi_dev: Device associated with the channels + */ +int mhi_stop_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_start_transfer - Resumes channel activity by issuing the START channel + * command to both UL and DL channels. This command assumes + * the channel context is already setup and the client + * drivers can issue mhi_stop_transfer to pause activity if + * required. 
+ * @mhi_dev: Device associated with the channels + */ +int mhi_start_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_poll - Poll for any available data in DL direction + * @mhi_dev: Device associated with the channels + * @budget: # of events to process + */ +int mhi_poll(struct mhi_device *mhi_dev, u32 budget); + +/** + * mhi_queue_dma - Send or receive DMA mapped buffers from client device + * over MHI channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @mhi_buf: Buffer for holding the DMA mapped data + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags); + +/** + * mhi_queue_buf - Send or receive raw buffers from client device over MHI + * channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @buf: Buffer for holding the data + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags); + +/** + * mhi_queue_skb - Send or receive SKBs from client device over MHI channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @skb: Buffer for holding SKBs + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags); + +#endif /* _MHI_H_ */ diff --git a/inc/mhi_misc.h b/inc/mhi_misc.h new file mode 100644 index 0000000..b8ff0ff --- /dev/null +++ b/inc/mhi_misc.h @@ -0,0 +1,562 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef _MHI_MISC_H_ +#define _MHI_MISC_H_ + +#include <linux/mhi.h> +#include <linux/ipc_logging.h> + +/** + * enum MHI_DEBUG_LEVEL - various debugging levels + */ +enum MHI_DEBUG_LEVEL { + MHI_MSG_LVL_VERBOSE, + MHI_MSG_LVL_INFO, + MHI_MSG_LVL_ERROR, + MHI_MSG_LVL_CRITICAL, + MHI_MSG_LVL_MASK_ALL, + MHI_MSG_LVL_MAX, +}; + +#ifdef CONFIG_MHI_BUS_MISC + +/** + * mhi_report_error - Can be used by controller to signal error condition to the + * MHI core driver in case of any need to halt processing or incoming sideband + * signal detects an error on endpoint + * @mhi_cntrl: MHI controller + * + * Returns: + * 0 if success in reporting the error condition to MHI core + * error code on failure + */ +int mhi_report_error(struct mhi_controller *mhi_cntrl); + +/** + * mhi_controller_set_privdata - Set private data for MHI controller + * @mhi_cntrl: MHI controller + * @priv: pointer to data + */ +void mhi_controller_set_privdata(struct mhi_controller *mhi_cntrl, void *priv); + +/** + * mhi_controller_get_privdata - Get private data from MHI controller + * @mhi_cntrl: MHI controller + */ +void *mhi_controller_get_privdata(struct mhi_controller *mhi_cntrl); + +/** + * mhi_bdf_to_controller - Get controller associated with given BDF values + * @domain: Domain or root complex of PCIe port + * @bus: Bus number + * @slot: PCI slot or function number + * @dev_id: Device ID of the endpoint + * + * Returns: + * MHI controller structure pointer if BDF match is found + * NULL if cookie is not found + */ +struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot, u32 dev_id); + +/** + * mhi_set_m2_timeout_ms - Set M2 timeout in milliseconds to wait before a + * fast/silent suspend + * @mhi_cntrl: MHI controller + * @timeout: timeout in ms + */ +void mhi_set_m2_timeout_ms(struct mhi_controller *mhi_cntrl, u32 timeout); + +/** + * mhi_pm_fast_resume - Resume MHI from a fast/silent suspended state + * @mhi_cntrl: MHI controller + * @notify_clients: if true, clients will be notified of the resume transition + */ +int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_clients); + +/** + * mhi_pm_fast_suspend - Move MHI into a fast/silent suspended state + * @mhi_cntrl: MHI controller + * @notify_clients: if true, clients will be notified of the suspend transition + */ +int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_clients); + +/** + * mhi_debug_reg_dump - dump MHI registers for debug purpose + * @mhi_cntrl: MHI controller + */ +void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl); + +/** + * mhi_dump_sfr - Print SFR string from RDDM table. + * @mhi_cntrl: MHI controller + */ +void mhi_dump_sfr(struct mhi_controller *mhi_cntrl); + +/** + * mhi_device_configure - Allow devices with offload channels to setup their own + * channel and event ring context. + * @mhi_dev: MHI device + * @dir: direction associated with the channel needed to configure + * @cfg_tbl: Buffer with ECA/CCA information and data needed to setup context + * @elements: Number of items to iterate over from the configuration table + */ +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements); + +/** + * mhi_scan_rddm_cookie - Look for supplied cookie value in the BHI debug + * registers set by device to indicate rddm readiness for debugging purposes. 
+ * @mhi_cntrl: MHI controller + * @cookie: cookie/pattern value to match + * + * Returns: + * true if cookie is found + * false if cookie is not found + */ +bool mhi_scan_rddm_cookie(struct mhi_controller *mhi_cntrl, u32 cookie); + +/** + * mhi_device_get_sync_atomic - Asserts device_wait and moves device to M0 + * @mhi_dev: Device associated with the channels + * @timeout_us: timeout, in micro-seconds + * @in_panic: If requested while kernel is in panic state and no ISRs expected + * + * The device_wake is asserted to keep device in M0 or bring it to M0. + * If device is not in M0 state, then this function will wait for device to + * move to M0, until @timeout_us elapses. + * However, if device's M1 state-change event races with this function + * then there is a possiblity of device moving from M0 to M2 and back + * to M0. That can't be avoided as host must transition device from M1 to M2 + * as per the spec. + * Clients can ignore that transition after this function returns as the device + * is expected to immediately move from M2 to M0 as wake is asserted and + * wouldn't enter low power state. + * If in_panic boolean is set, no ISRs are expected, hence this API will have to + * resort to reading the MHI status register and poll on M0 state change. + * + * Returns: + * 0 if operation was successful (however, M0 -> M2 -> M0 is possible later) as + * mentioned above. + * -ETIMEDOUT is device faled to move to M0 before @timeout_us elapsed + * -EIO if the MHI state is one of the ERROR states. + */ +int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us, + bool in_panic); + +/** + * mhi_controller_set_bw_scale_cb - Set the BW scale callback for MHI controller + * @mhi_cntrl: MHI controller + * @cb_func: Callback to set for the MHI controller to receive BW scale requests + */ +void mhi_controller_set_bw_scale_cb(struct mhi_controller *mhi_cntrl, + int (*cb_func)(struct mhi_controller *mhi_cntrl, + struct mhi_link_info *link_info)); +/** + * mhi_controller_set_base - Set the controller base / resource start address + * @mhi_cntrl: MHI controller + * @base: Physical address to be set for future reference + */ +void mhi_controller_set_base(struct mhi_controller *mhi_cntrl, + phys_addr_t base); + +/** + * mhi_get_channel_db_base - retrieve the channel doorbell base address + * @mhi_dev: Device associated with the channels + * @value: Pointer to an address value which will be populated + */ +int mhi_get_channel_db_base(struct mhi_device *mhi_dev, phys_addr_t *value); + +/** + * mhi_get_event_ring_db_base - retrieve the event ring doorbell base address + * @mhi_dev: Device associated with the channels + * @value: Pointer to an address value which will be populated + */ +int mhi_get_event_ring_db_base(struct mhi_device *mhi_dev, phys_addr_t *value); + +/** + * mhi_get_device_for_channel - get the MHI device for a specific channel number + * @mhi_cntrl: MHI controller + * @channel - channel number + * + * Returns: + * Pointer to the MHI device associated with the channel + */ +struct mhi_device *mhi_get_device_for_channel(struct mhi_controller *mhi_cntrl, + u32 channel); + +/** + * mhi_device_ioctl - user space IOCTL support for MHI channels + * Native support for setting TIOCM + * @mhi_dev: Device associated with the channels + * @cmd: IOCTL cmd + * @arg: Optional parameter, iotcl cmd specific + */ +long mhi_device_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, + unsigned long arg); + +/** + * mhi_controller_set_sfr_support - Set support for subsystem failure reason + * 
@mhi_cntrl: MHI controller + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_controller_set_sfr_support(struct mhi_controller *mhi_cntrl, + size_t len); + +/** + * mhi_controller_setup_timesync - Set support for time synchronization feature + * @mhi_cntrl: MHI controller + * @time_get: Callback to set for the MHI controller to receive host time + * @lpm_disable: Callback to set for the MHI controller to disable link LPM + * @lpm_enable: Callback to set for the MHI controller to enable link LPM + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_controller_setup_timesync(struct mhi_controller *mhi_cntrl, + u64 (*time_get)(struct mhi_controller *c), + int (*lpm_disable)(struct mhi_controller *c), + int (*lpm_enable)(struct mhi_controller *c)); + +/** + * mhi_get_remote_time_sync - Get external soc time relative to local soc time + * using MMIO method. + * @mhi_dev: Device associated with the channels + * @t_host: Pointer to output local soc time + * @t_dev: Pointer to output remote soc time + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_get_remote_time_sync(struct mhi_device *mhi_dev, + u64 *t_host, + u64 *t_dev); + +/** + * mhi_get_remote_time - Get external modem time relative to host time + * Trigger event to capture modem time, also capture host time so client + * can do a relative drift comparision. + * Recommended only tsync device calls this method and do not call this + * from atomic context + * @mhi_dev: Device associated with the channels + * @sequence:unique sequence id track event + * @cb_func: callback function to call back + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_get_remote_time(struct mhi_device *mhi_dev, + u32 sequence, + void (*cb_func)(struct mhi_device *mhi_dev, + u32 sequence, + u64 local_time, + u64 remote_time)); + +#else + +/** + * mhi_report_error - Can be used by controller to signal error condition to the + * MHI core driver in case of any need to halt processing or incoming sideband + * signal detects an error on endpoint + * @mhi_cntrl: MHI controller + * + * Returns: + * 0 if success in reporting the error condition to MHI core + * error code on failure + */ +static inline int mhi_report_error(struct mhi_controller *mhi_cntrl) +{ + return -EPERM; +} + +/** + * mhi_controller_set_privdata - Set private data for MHI controller + * @mhi_cntrl: MHI controller + * @priv: pointer to data + */ +void mhi_controller_set_privdata(struct mhi_controller *mhi_cntrl, void *priv) +{ +} + +/** + * mhi_controller_get_privdata - Get private data from MHI controller + * @mhi_cntrl: MHI controller + */ +void *mhi_controller_get_privdata(struct mhi_controller *mhi_cntrl) +{ + return ERR_PTR(-EINVAL); +} + +/** + * mhi_bdf_to_controller - Get controller associated with given BDF values + * @domain: Domain or root complex of PCIe port + * @bus: Bus number + * @slot: PCI slot or function number + * @dev_id: Device ID of the endpoint + * + * Returns: + * MHI controller structure pointer if BDF match is found + * NULL if cookie is not found + */ +struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot, u32 dev_id) +{ + return ERR_PTR(-EINVAL); +} + +/** + * mhi_set_m2_timeout_ms - Set M2 timeout in milliseconds to wait before a + * fast/silent suspend + * @mhi_cntrl: MHI controller + * @timeout: timeout in ms + */ +void mhi_set_m2_timeout_ms(struct mhi_controller *mhi_cntrl, u32 timeout) +{ +} + +/** + * mhi_pm_fast_resume - Resume MHI from a fast/silent suspended state 
+ * @mhi_cntrl: MHI controller + * @notify_clients: if true, clients will be notified of the resume transition + */ +int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_clients) +{ + return -EPERM; +} + +/** + * mhi_pm_fast_suspend - Move MHI into a fast/silent suspended state + * @mhi_cntrl: MHI controller + * @notify_clients: if true, clients will be notified of the suspend transition + */ +int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_clients) +{ + return -EPERM; +} + +/** + * mhi_debug_reg_dump - dump MHI registers for debug purpose + * @mhi_cntrl: MHI controller + */ +void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl) +{ +} + +/** + * mhi_dump_sfr - Print SFR string from RDDM table. + * @mhi_cntrl: MHI controller + */ +void mhi_dump_sfr(struct mhi_controller *mhi_cntrl) +{ +} + +/** + * mhi_device_configure - Allow devices with offload channels to setup their own + * channel and event ring context. + * @mhi_dev: MHI device + * @dir: direction associated with the channel needed to configure + * @cfg_tbl: Buffer with ECA/CCA information and data needed to setup context + * @elements: Number of items to iterate over from the configuration table + */ +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements) +{ + return -EPERM; +} + +/** + * mhi_scan_rddm_cookie - Look for supplied cookie value in the BHI debug + * registers set by device to indicate rddm readiness for debugging purposes. + * @mhi_cntrl: MHI controller + * @cookie: cookie/pattern value to match + * + * Returns: + * true if cookie is found + * false if cookie is not found + */ +bool mhi_scan_rddm_cookie(struct mhi_controller *mhi_cntrl, u32 cookie) +{ + return false; +} + +/** + * mhi_device_get_sync_atomic - Asserts device_wait and moves device to M0 + * @mhi_dev: Device associated with the channels + * @timeout_us: timeout, in micro-seconds + * @in_panic: If requested while kernel is in panic state and no ISRs expected + * + * The device_wake is asserted to keep device in M0 or bring it to M0. + * If device is not in M0 state, then this function will wait for device to + * move to M0, until @timeout_us elapses. + * However, if device's M1 state-change event races with this function + * then there is a possiblity of device moving from M0 to M2 and back + * to M0. That can't be avoided as host must transition device from M1 to M2 + * as per the spec. + * Clients can ignore that transition after this function returns as the device + * is expected to immediately move from M2 to M0 as wake is asserted and + * wouldn't enter low power state. + * If in_panic boolean is set, no ISRs are expected, hence this API will have to + * resort to reading the MHI status register and poll on M0 state change. + * + * Returns: + * 0 if operation was successful (however, M0 -> M2 -> M0 is possible later) as + * mentioned above. + * -ETIMEDOUT is device faled to move to M0 before @timeout_us elapsed + * -EIO if the MHI state is one of the ERROR states. 
+ */ +int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us, + bool in_panic) +{ + return -EPERM; +} + +/** + * mhi_controller_set_bw_scale_cb - Set the BW scale callback for MHI controller + * @mhi_cntrl: MHI controller + * @cb_func: Callback to set for the MHI controller to receive BW scale requests + */ +void mhi_controller_set_bw_scale_cb(struct mhi_controller *mhi_cntrl, + int (*cb_func)(struct mhi_controller *mhi_cntrl, + struct mhi_link_info *link_info)) +{ +} + +/** + * mhi_controller_set_base - Set the controller base / resource start address + * @mhi_cntrl: MHI controller + * @base: Physical address to be set for future reference + */ +void mhi_controller_set_base(struct mhi_controller *mhi_cntrl, + phys_addr_t base) +{ +} + +/** + * mhi_get_channel_db_base - retrieve the channel doorbell base address + * @mhi_dev: Device associated with the channels + * @value: Pointer to an address value which will be populated + */ +int mhi_get_channel_db_base(struct mhi_device *mhi_dev, phys_addr_t *value) +{ + return -EPERM; +} + +/** + * mhi_get_event_ring_db_base - retrieve the event ring doorbell base address + * @mhi_dev: Device associated with the channels + * @value: Pointer to an address value which will be populated + */ +int mhi_get_event_ring_db_base(struct mhi_device *mhi_dev, phys_addr_t *value) +{ + return -EPERM; +} + +/** + * mhi_get_device_for_channel - get the MHI device for a specific channel number + * @mhi_cntrl: MHI controller + * @channel - channel number + * + * Returns: + * Pointer to the MHI device associated with the channel + */ +struct mhi_device *mhi_get_device_for_channel(struct mhi_controller *mhi_cntrl, + u32 channel) +{ + return ERR_PTR(-EINVAL); +} + +/** + * mhi_device_ioctl - user space IOCTL support for MHI channels + * Native support for setting TIOCM + * @mhi_dev: Device associated with the channels + * @cmd: IOCTL cmd + * @arg: Optional parameter, iotcl cmd specific + */ +long mhi_device_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, + unsigned long arg) +{ + return -EPERM; +} + +/** + * mhi_controller_set_sfr_support - Set support for subsystem failure reason + * @mhi_cntrl: MHI controller + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_controller_set_sfr_support(struct mhi_controller *mhi_cntrl, + size_t len) +{ + return -EPERM; +} + +/** + * mhi_controller_setup_timesync - Set support for time synchronization feature + * @mhi_cntrl: MHI controller + * @time_get: Callback to set for the MHI controller to receive host time + * @lpm_disable: Callback to set for the MHI controller to disable link LPM + * @lpm_enable: Callback to set for the MHI controller to enable link LPM + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_controller_setup_timesync(struct mhi_controller *mhi_cntrl, + u64 (*time_get)(struct mhi_controller *c), + int (*lpm_disable)(struct mhi_controller *c), + int (*lpm_enable)(struct mhi_controller *c)) +{ + return -EPERM; +} + +/** + * mhi_get_remote_time_sync - Get external soc time relative to local soc time + * using MMIO method. 
+ * @mhi_dev: Device associated with the channels + * @t_host: Pointer to output local soc time + * @t_dev: Pointer to output remote soc time + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_get_remote_time_sync(struct mhi_device *mhi_dev, + u64 *t_host, + u64 *t_dev) +{ + return -EPERM; +} + +/** + * mhi_get_remote_time - Get external modem time relative to host time + * Trigger event to capture modem time, also capture host time so client + * can do a relative drift comparision. + * Recommended only tsync device calls this method and do not call this + * from atomic context + * @mhi_dev: Device associated with the channels + * @sequence:unique sequence id track event + * @cb_func: callback function to call back + * + * Returns: + * 0 for success, error code for failure + */ +int mhi_get_remote_time(struct mhi_device *mhi_dev, + u32 sequence, + void (*cb_func)(struct mhi_device *mhi_dev, + u32 sequence, + u64 local_time, + u64 remote_time)) +{ + return -EPERM; +} + +#endif /* CONFIG_MHI_BUS_MISC */ + +#endif /* _MHI_MISC_H_ */ diff --git a/inc/soc/qcom/qmi.h b/inc/soc/qcom/qmi.h new file mode 100644 index 0000000..e712f94 --- /dev/null +++ b/inc/soc/qcom/qmi.h @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, Linaro Ltd. + */ +#ifndef __QMI_HELPERS_H__ +#define __QMI_HELPERS_H__ + +#include <linux/completion.h> +#include <linux/idr.h> +#include <linux/list.h> +#include <linux/qrtr.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct socket; + +/** + * qmi_header - wireformat header of QMI messages + * @type: type of message + * @txn_id: transaction id + * @msg_id: message id + * @msg_len: length of message payload following header + */ +struct qmi_header { + u8 type; + u16 txn_id; + u16 msg_id; + u16 msg_len; +} __packed; + +#define QMI_REQUEST 0 +#define QMI_RESPONSE 2 +#define QMI_INDICATION 4 + +#define QMI_COMMON_TLV_TYPE 0 + +enum qmi_elem_type { + QMI_EOTI, + QMI_OPT_FLAG, + QMI_DATA_LEN, + QMI_UNSIGNED_1_BYTE, + QMI_UNSIGNED_2_BYTE, + QMI_UNSIGNED_4_BYTE, + QMI_UNSIGNED_8_BYTE, + QMI_SIGNED_2_BYTE_ENUM, + QMI_SIGNED_4_BYTE_ENUM, + QMI_STRUCT, + QMI_STRING, +}; + +enum qmi_array_type { + NO_ARRAY, + STATIC_ARRAY, + VAR_LEN_ARRAY, +}; + +/** + * struct qmi_elem_info - describes how to encode a single QMI element + * @data_type: Data type of this element. + * @elem_len: Array length of this element, if an array. + * @elem_size: Size of a single instance of this data type. + * @array_type: Array type of this element. + * @tlv_type: QMI message specific type to identify which element + * is present in an incoming message. + * @offset: Specifies the offset of the first instance of this + * element in the data structure. + * @ei_array: Null-terminated array of @qmi_elem_info to describe nested + * structures. 
+ */ +struct qmi_elem_info { + enum qmi_elem_type data_type; + u32 elem_len; + u32 elem_size; + enum qmi_array_type array_type; + u8 tlv_type; + u32 offset; + struct qmi_elem_info *ei_array; +}; + +#define QMI_RESULT_SUCCESS_V01 0 +#define QMI_RESULT_FAILURE_V01 1 + +#define QMI_ERR_NONE_V01 0 +#define QMI_ERR_MALFORMED_MSG_V01 1 +#define QMI_ERR_NO_MEMORY_V01 2 +#define QMI_ERR_INTERNAL_V01 3 +#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 +#define QMI_ERR_INVALID_ID_V01 41 +#define QMI_ERR_ENCODING_V01 58 +#define QMI_ERR_DISABLED_V01 69 +#define QMI_ERR_INCOMPATIBLE_STATE_V01 90 +#define QMI_ERR_NOT_SUPPORTED_V01 94 + +/** + * qmi_response_type_v01 - common response header (decoded) + * @result: result of the transaction + * @error: error value, when @result is QMI_RESULT_FAILURE_V01 + */ +struct qmi_response_type_v01 { + u16 result; + u16 error; +}; + +extern struct qmi_elem_info qmi_response_type_v01_ei[]; + +/** + * struct qmi_service - context to track lookup-results + * @service: service type + * @version: version of the @service + * @instance: instance id of the @service + * @node: node of the service + * @port: port of the service + * @priv: handle for client's use + * @list_node: list_head for house keeping + */ +struct qmi_service { + unsigned int service; + unsigned int version; + unsigned int instance; + + unsigned int node; + unsigned int port; + + void *priv; + struct list_head list_node; +}; + +struct qmi_handle; + +/** + * struct qmi_ops - callbacks for qmi_handle + * @new_server: inform client of a new_server lookup-result, returning + * successfully from this call causes the library to call + * @del_server as the service is removed from the + * lookup-result. @priv of the qmi_service can be used by + * the client + * @del_server: inform client of a del_server lookup-result + * @net_reset: inform client that the name service was restarted and + * that and any state needs to be released + * @msg_handler: invoked for incoming messages, allows a client to + * override the usual QMI message handler + * @bye: inform a client that all clients from a node are gone + * @del_client: inform a client that a particular client is gone + */ +struct qmi_ops { + int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*net_reset)(struct qmi_handle *qmi); + void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + const void *data, size_t count); + void (*bye)(struct qmi_handle *qmi, unsigned int node); + void (*del_client)(struct qmi_handle *qmi, + unsigned int node, unsigned int port); +}; + +/** + * struct qmi_txn - transaction context + * @qmi: QMI handle this transaction is associated with + * @id: transaction id + * @lock: for synchronization between handler and waiter of messages + * @completion: completion object as the transaction receives a response + * @result: result code for the completed transaction + * @ei: description of the QMI encoded response (optional) + * @dest: destination buffer to decode message into (optional) + */ +struct qmi_txn { + struct qmi_handle *qmi; + + u16 id; + + struct mutex lock; + struct completion completion; + int result; + + struct qmi_elem_info *ei; + void *dest; +}; + +/** + * struct qmi_msg_handler - description of QMI message handler + * @type: type of message + * @msg_id: message id + * @ei: description of the QMI encoded message + * @decoded_size: size of the decoded object + * @fn: function to invoke as the message is decoded + */ +struct 
qmi_msg_handler { + unsigned int type; + unsigned int msg_id; + + struct qmi_elem_info *ei; + + size_t decoded_size; + void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *decoded); +}; + +/** + * struct qmi_handle - QMI context + * @sock: socket handle + * @sock_lock: synchronization of @sock modifications + * @sq: sockaddr of @sock + * @work: work for handling incoming messages + * @wq: workqueue to post @work on + * @recv_buf: scratch buffer for handling incoming messages + * @recv_buf_size: size of @recv_buf + * @lookups: list of registered lookup requests + * @lookup_results: list of lookup-results advertised to the client + * @services: list of registered services (by this client) + * @ops: reference to callbacks + * @txns: outstanding transactions + * @txn_lock: lock for modifications of @txns + * @handlers: list of handlers for incoming messages + */ +struct qmi_handle { + struct socket *sock; + struct mutex sock_lock; + + struct sockaddr_qrtr sq; + + struct work_struct work; + struct workqueue_struct *wq; + + void *recv_buf; + size_t recv_buf_size; + + struct list_head lookups; + struct list_head lookup_results; + struct list_head services; + + struct qmi_ops ops; + + struct idr txns; + struct mutex txn_lock; + + const struct qmi_msg_handler *handlers; +}; + +int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); +int qmi_add_server(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); + +int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len, + const struct qmi_ops *ops, + const struct qmi_msg_handler *handlers); +void qmi_handle_release(struct qmi_handle *qmi); + +ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + int msg_id, size_t len, struct qmi_elem_info *ei, + const void *c_struct); + +void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, + unsigned int txn_id, struct qmi_elem_info *ei, + const void *c_struct); + +int qmi_decode_message(const void *buf, size_t len, + struct qmi_elem_info *ei, void *c_struct); + +int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, + struct qmi_elem_info *ei, void *c_struct); +int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout); +void qmi_txn_cancel(struct qmi_txn *txn); + +#endif diff --git a/mhi/Kconfig b/mhi/Kconfig new file mode 100644 index 0000000..7eb139c --- /dev/null +++ b/mhi/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# + +config MHI_BUS + tristate "Modem Host Interface (MHI) bus" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by the host processors to control + and communicate with modem devices over a high speed peripheral + bus or shared memory. 
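
Stepping back to the QMI helpers declared in inc/soc/qcom/qmi.h above: they follow the upstream soc/qcom pattern, where a client opens a qmi_handle, learns the service's sockaddr_qrtr from qmi_add_lookup() and the new_server() callback, and then drives each request/response exchange through a qmi_txn. The sketch below is only an illustration of that flow; every EXAMPLE_* name, message ID, structure layout and TLV number is invented for this note (none of them come from the patch), and the sockaddr_qrtr is assumed to have been saved earlier from a new_server() callback.

/*
 * Illustrative QMI client sketch. EXAMPLE_* identifiers and TLV numbers are
 * made up for this example; they are not part of the patch.
 */
#include <linux/jiffies.h>
#include <linux/stddef.h>
#include "soc/qcom/qmi.h"	/* header above; path assumed per the tree's -I .../inc flags */

#define EXAMPLE_PING_REQ_MSG	0x0020
#define EXAMPLE_MAX_MSG_LEN	64

struct example_ping_req {
	u8 ping;
};

struct example_ping_resp {
	struct qmi_response_type_v01 resp;
};

/* Wire description of the request: a single mandatory 1-byte TLV */
static struct qmi_elem_info example_ping_req_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct example_ping_req, ping),
	},
	{ .data_type = QMI_EOTI },
};

/* Response carries only the common result TLV, decoded via the shared table */
static struct qmi_elem_info example_ping_resp_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct example_ping_resp, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{ .data_type = QMI_EOTI },
};

static int example_send_ping(struct qmi_handle *qmi, struct sockaddr_qrtr *sq)
{
	struct example_ping_req req = { .ping = 1 };
	struct example_ping_resp rsp = {};
	struct qmi_txn txn;
	int ret;

	/* Bind the transaction to the buffer the response will be decoded into */
	ret = qmi_txn_init(qmi, &txn, example_ping_resp_ei, &rsp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, sq, &txn, EXAMPLE_PING_REQ_MSG,
			       EXAMPLE_MAX_MSG_LEN, example_ping_req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* Sleep until the service answers or the timeout expires */
	ret = qmi_txn_wait(&txn, msecs_to_jiffies(1000));
	if (ret < 0)
		return ret;

	return rsp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EIO;
}

For a service provider the mirror-image calls are qmi_add_server() and qmi_send_response()/qmi_send_indication(), invoked from the qmi_msg_handler callbacks registered through qmi_handle_init().
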
+ +config MHI_BUS_MISC + bool "Support for miscellaneous MHI features" + depends on MHI_BUS + help + Miscellaneous features support for MHI Bus driver includes IPC logs, + introduction of a list of controllers for debug using ramdumps and + other features not present upstream such as Dynamic Resource Vote, + SFR parsing using RDDM dumps, scanning for the RDDM cookie and more. + +config MHI_BUS_DEBUG + bool "Debugfs support for the MHI bus" + depends on MHI_BUS && DEBUG_FS + help + Enable debugfs support for use with the MHI transport. Allows + reading and/or modifying some values within the MHI controller + for debug and test purposes. diff --git a/mhi/Makefile b/mhi/Makefile new file mode 100644 index 0000000..edd806f --- /dev/null +++ b/mhi/Makefile @@ -0,0 +1,3 @@ +ccflags-y += -I$(WLAN_PLATFORM_ROOT)/inc +# core layer +obj-y += core/ diff --git a/mhi/core/Makefile b/mhi/core/Makefile new file mode 100644 index 0000000..17be4ee --- /dev/null +++ b/mhi/core/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_MHI_BUS) += mhi.o + +mhi-y := init.o main.o pm.o boot.o +mhi-$(CONFIG_MHI_BUS_MISC) += misc.o +mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o + +ccflags-y += -DDEBUG -DCONFIG_WCN_GOOGLE diff --git a/mhi/core/boot.c b/mhi/core/boot.c new file mode 100644 index 0000000..39961b8 --- /dev/null +++ b/mhi/core/boot.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include "internal.h" + +/* Setup RDDM vector table for RDDM transfer and program RXVEC */ +void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) +{ + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 sequence_id; + unsigned int i; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = mhi_buf->len; + } + + MHI_VERB("BHIe programming for RDDM\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); + + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + + MHI_VERB("Address: %p and len: 0x%zx sequence: %u\n", + &mhi_buf->dma_addr, mhi_buf->len, sequence_id); +} + +/* Collect RDDM buffer during kernel panic */ +static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 rx_status; + enum mhi_ee_type ee; + const u32 delayus = 2000; + u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + const u32 rddm_timeout_us = 200000; + int rddm_retry = rddm_timeout_us / delayus; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + MHI_VERB("Entered with pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + 
TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* + * This should only be executing during a kernel panic, we expect all + * other cores to shutdown while we're collecting RDDM buffer. After + * returning from this function, we expect the device to reset. + * + * Normaly, we read/write pm_state only after grabbing the + * pm_lock, since we're in a panic, skipping it. Also there is no + * gurantee that this state change would take effect since + * we're setting it w/o grabbing pm_lock + */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + /* update should take the effect immediately */ + smp_wmb(); + + /* + * Make sure device is not already in RDDM. In case the device asserts + * and a kernel panic follows, device will already be in RDDM. + * Do not trigger SYS ERR again and proceed with waiting for + * image download completion. + */ + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_MAX) + goto error_exit_rddm; + + if (ee != MHI_EE_RDDM) { + MHI_VERB("Trigger device into RDDM mode using SYS ERR\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + MHI_VERB("Waiting for device to enter RDDM\n"); + while (rddm_retry--) { + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_RDDM) + break; + + udelay(delayus); + } + + if (rddm_retry <= 0) { + /* Hardware reset so force device to enter RDDM */ + MHI_VERB( + "Did not enter RDDM, do a host req reset\n"); + mhi_soc_reset(mhi_cntrl); + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + } + + MHI_VERB( + "Waiting for RDDM image download via BHIe, current EE:%s\n", + TO_MHI_EXEC_STR(ee)); + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status); + if (ret) + return -EIO; + + if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) + return 0; + + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); + + MHI_ERR("RXVEC_STATUS: 0x%x\n", rx_status); + +error_exit_rddm: + MHI_ERR("RDDM transfer failed. Current EE: %s\n", + TO_MHI_EXEC_STR(ee)); + + return -EIO; +} + +/* Download RDDM image from device */ +int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) +{ + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 rx_status; + + if (in_panic) + return __mhi_download_rddm_in_panic(mhi_cntrl); + + MHI_VERB("Waiting for RDDM image download via BHIe\n"); + + /* Wait for the image download to complete */ + wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL_GPL(mhi_download_rddm_image); + +static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status, sequence_id, val; + int ret, rd; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); + MHI_VERB("Starting image download via BHIe. 
Sequence ID: %u\n", + sequence_id); + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); + + mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL) { + rd = mhi_read_reg(mhi_cntrl, base, BHIE_TXVECSTATUS_OFFS, &val); + MHI_ERR("BHIE_TXVECSTATUS: 0x%x, reg read: %d, tx_status: %u\n", + val, rd, tx_status); + return -EIO; + } + + return (!ret) ? -ETIMEDOUT : 0; +} + +static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, + dma_addr_t dma_addr, + size_t size) +{ + u32 tx_status, val; + int i, ret; + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + mhi_cntrl->session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); + MHI_VERB("Starting image download via BHI. Session ID: %u\n", + mhi_cntrl->session_id); + mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, + BHI_STATUS_MASK, BHI_STATUS_SHIFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + { + MHI_ERR("Image transfer failed: MHI_PM_IN_ERROR_STATE\n"); + goto invalid_pm_state; + } + + if (tx_status == BHI_STATUS_ERROR) { + MHI_ERR("Image transfer failed\n"); + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, + error_reg[i].offset, &val); + if (ret) + break; + MHI_ERR("Reg: %s value: 0x%x\n", + error_reg[i].name, val); + } + } + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + return (!ret) ? 
-ETIMEDOUT : 0; + +invalid_pm_state: + + return -EIO; +} + +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info) +{ + int i; + struct mhi_buf *mhi_buf = (*image_info)->mhi_buf; + + if (mhi_cntrl->img_pre_alloc) + return; + + for (i = 0; i < (*image_info)->entries; i++, mhi_buf++) + dma_free_attrs(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr, DMA_ATTR_FORCE_CONTIGUOUS); + + kfree((*image_info)->mhi_buf); + kfree(*image_info); + + *image_info = NULL; +} + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + size_t seg_size = mhi_cntrl->seg_len; + int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; + int i; + struct image_info *img_info; + struct mhi_buf *mhi_buf; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (mhi_cntrl->img_pre_alloc) + return 0; + + MHI_LOG("Allocating bytes: %zu seg_size: %zu total_seg: %u\n", + alloc_size, seg_size, segments); + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* Allocate memory for entries */ + img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), + GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* Allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + for (i = 0; i < segments; i++, mhi_buf++) { + size_t vec_size = seg_size; + + /* Vector table is the last entry */ + if (i == segments - 1) + vec_size = sizeof(struct bhi_vec_entry) * i; + + mhi_buf->len = vec_size; + mhi_buf->buf = dma_alloc_attrs(mhi_cntrl->cntrl_dev, vec_size, + &mhi_buf->dma_addr, GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); + if (!mhi_buf->buf) + goto error_alloc_segment; + + MHI_LOG("Entry: %d Address: 0x%llx size: %lu\n", i, + mhi_buf->dma_addr, mhi_buf->len); + } + + img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; + img_info->entries = segments; + *image_info = img_info; + + MHI_LOG("Successfully allocated BHIe vector table\n"); + + return 0; + +error_alloc_segment: + for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) + dma_free_attrs(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr, DMA_ATTR_FORCE_CONTIGUOUS); + +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} + +static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, + const struct firmware *firmware, + struct image_info *img_info) +{ + size_t remainder = firmware->size; + size_t to_cpy; + const u8 *buf = firmware->data; + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + while (remainder) { + to_cpy = min(remainder, mhi_buf->len); + memcpy(mhi_buf->buf, buf, to_cpy); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = to_cpy; + + MHI_VERB("Setting Vector: 0x%llx size: %llu\n", + bhi_vec->dma_addr, bhi_vec->size); + + buf += to_cpy; + remainder -= to_cpy; + bhi_vec++; + mhi_buf++; + } +} + +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) +{ + const struct firmware *firmware = NULL; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + const char *fw_name; + void *buf; + dma_addr_t dma_addr; + size_t size; + int i, ret; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Device MHI is not in valid state\n"); + return; + } + + /* save hardware info from BHI */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU, + &mhi_cntrl->serial_number); + if (ret) + MHI_ERR("Could not capture serial number via BHI\n"); + + for (i 
= 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), + &mhi_cntrl->oem_pk_hash[i]); + if (ret) { + MHI_ERR("Could not capture OEM PK HASH via BHI\n"); + break; + } + } + + /* If device is in pass through, do reset to ready state transition */ + if (mhi_cntrl->ee == MHI_EE_PTHRU) + goto fw_load_ee_pthru; + + fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? + mhi_cntrl->edl_image : mhi_cntrl->fw_image; + + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || + !mhi_cntrl->seg_len))) { + MHI_ERR( + "No firmware image defined or !sbl_size || !seg_len\n"); + goto error_fw_load; + } + + ret = request_firmware(&firmware, fw_name, dev->parent); + if (ret) { + if (!mhi_cntrl->fallback_fw_image) { + MHI_ERR("Error loading firmware: %d\n", ret); + goto error_fw_load; + } + + ret = request_firmware(&firmware, + mhi_cntrl->fallback_fw_image, + dev->parent); + if (ret) { + MHI_ERR("Error loading fallback firmware: %d\n", + ret); + goto error_fw_load; + } + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FALLBACK_IMG); + } + + size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; + + /* SBL size provided is maximum size, not necessarily the image size */ + if (size > firmware->size) + size = firmware->size; + + buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, GFP_KERNEL); + if (!buf) { + release_firmware(firmware); + goto error_fw_load; + } + + /* Download image using BHI */ + memcpy(buf, firmware->data, size); + ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); + dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); + + /* Error or in EDL mode, we're done */ + if (ret) { + MHI_ERR("MHI did not load image over BHI, ret: %d\n", ret); + release_firmware(firmware); + goto error_fw_load; + } + + if (mhi_cntrl->ee == MHI_EE_EDL) { + release_firmware(firmware); + return; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* + * If we're doing fbc, populate vector tables while + * device transitioning into MHI READY state + */ + if (mhi_cntrl->fbc_download) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, + firmware->size); + if (ret) { + release_firmware(firmware); + goto error_fw_load; + } + + /* Load the firmware into BHIE vec table */ + mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + } + + release_firmware(firmware); + +fw_load_ee_pthru: + /* Transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + + if (!mhi_cntrl->fbc_download) + return; + + if (ret) { + MHI_ERR("MHI did not enter READY state\n"); + goto error_ready_state; + } + + return; + +error_ready_state: + mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image); + +error_fw_load: + mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; + wake_up_all(&mhi_cntrl->state_event); +} + +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) +{ + struct image_info *image_info = mhi_cntrl->fbc_image; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + if (!image_info) + return -EIO; + + ret = mhi_fw_load_bhie(mhi_cntrl, + /* Vector table is the last entry */ + &image_info->mhi_buf[image_info->entries - 1]); + if (ret) { + MHI_ERR("MHI did not load AMSS, ret:%d\n", ret); + mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; + wake_up_all(&mhi_cntrl->state_event); + } + + return ret; +} diff --git a/mhi/core/debugfs.c b/mhi/core/debugfs.c new file mode 100644 index 0000000..704fcf5 --- /dev/null +++ 
b/mhi/core/debugfs.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include "internal.h" + +static int mhi_debugfs_states_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + /* states */ + seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), + mhi_cntrl->wake_set ? "true" : "false"); + + /* counters */ + seq_printf(m, "M0: %u M2: %u M3: %u, M3_fast: %u", mhi_cntrl->M0, + mhi_cntrl->M2, mhi_cntrl->M3, mhi_cntrl->M3_fast); + + seq_printf(m, " device wake: %u pending packets: %u\n", + atomic_read(&mhi_cntrl->dev_wake), + atomic_read(&mhi_cntrl->pending_pkts)); + + return 0; +} + +static int mhi_debugfs_events_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int i; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; + i++, er_ctxt++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) { + seq_printf(m, "Index: %d is an offload event ring\n", + i); + continue; + } + + seq_printf(m, "Index: %d intmod count: %lu time: %lu", + i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >> + EV_CTX_INTMODC_SHIFT, + (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >> + EV_CTX_INTMODT_SHIFT); + + seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase, + er_ctxt->rlen); + + seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp, + er_ctxt->wp); + + seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, + &mhi_event->db_cfg.db_val); + } + + return 0; +} + +static int mhi_debugfs_channels_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int i; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + struct mhi_ring *ring = &mhi_chan->tre_ring; + + if (mhi_chan->offload_ch) { + seq_printf(m, "%s(%u) is an offload channel\n", + mhi_chan->name, mhi_chan->chan); + continue; + } + + if (!mhi_chan->mhi_dev) + continue; + + seq_printf(m, + "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", + mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg & + CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, + (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >> + CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg & + CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT); + + seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype, + chan_ctxt->erindex); + + seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", + chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp, + chan_ctxt->wp); + + seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n", + ring->rp, ring->wp, + &mhi_chan->db_cfg.db_val); + } + + return 0; +} + +static int mhi_device_info_show(struct device *dev, void 
*data) +{ + struct mhi_device *mhi_dev; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + + seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u", + mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer", + mhi_dev->dev_wake); + + /* for transfer device types only */ + if (mhi_dev->dev_type == MHI_DEVICE_XFER) + seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)", + mhi_dev->ul_chan_id, mhi_dev->dl_chan_id); + + seq_puts((struct seq_file *)data, "\n"); + + return 0; +} + +static int mhi_debugfs_devices_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + /* Show controller and client(s) info */ + mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show); + + return 0; +} + +static int mhi_debugfs_regdump_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + enum mhi_state state; + enum mhi_ee_type ee; + int i, ret = -EIO; + u32 val; + void __iomem *mhi_base = mhi_cntrl->regs; + void __iomem *bhi_base = mhi_cntrl->bhi; + void __iomem *bhie_base = mhi_cntrl->bhie; + void __iomem *wake_db = mhi_cntrl->wake_db; + struct { + const char *name; + int offset; + void __iomem *base; + } regs[] = { + { "MHI_REGLEN", MHIREGLEN, mhi_base}, + { "MHI_VER", MHIVER, mhi_base}, + { "MHI_CFG", MHICFG, mhi_base}, + { "MHI_CTRL", MHICTRL, mhi_base}, + { "MHI_STATUS", MHISTATUS, mhi_base}, + { "MHI_WAKE_DB", 0, wake_db}, + { "BHI_EXECENV", BHI_EXECENV, bhi_base}, + { "BHI_STATUS", BHI_STATUS, bhi_base}, + { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, + { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, + { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, + { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, + { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, + { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, + { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, + { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, + { NULL }, + }; + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return ret; + + seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state)); + + for (i = 0; regs[i].name; i++) { + if (!regs[i].base) + continue; + ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset, + &val); + if (ret) + continue; + + seq_printf(m, "%s: 0x%x\n", regs[i].name, val); + } + + return 0; +} + +static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + seq_printf(m, + "Wake count: %d\n%s\n", mhi_dev->dev_wake, + "Usage: echo get/put > device_wake to vote/unvote for M0"); + + return 0; +} + +static ssize_t mhi_debugfs_device_wake_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + char buf[16]; + int ret = -EINVAL; + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) 
- 1, count))) + return -EFAULT; + + if (!strncmp(buf, "get", 3)) { + ret = mhi_device_get_sync(mhi_dev); + } else if (!strncmp(buf, "put", 3)) { + mhi_device_put(mhi_dev); + ret = 0; + } + + return ret ? ret : count; +} + +static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms); + + return 0; +} + +static ssize_t mhi_debugfs_timeout_ms_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct mhi_controller *mhi_cntrl = m->private; + u32 timeout_ms; + + if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms)) + return -EINVAL; + + mhi_cntrl->timeout_ms = timeout_ms; + + return count; +} + +static int mhi_debugfs_states_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_states_show, inode->i_private); +} + +static int mhi_debugfs_events_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_events_show, inode->i_private); +} + +static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_channels_show, inode->i_private); +} + +static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_devices_show, inode->i_private); +} + +static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_regdump_show, inode->i_private); +} + +static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private); +} + +static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private); +} + +static const struct file_operations debugfs_states_fops = { + .open = mhi_debugfs_states_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_events_fops = { + .open = mhi_debugfs_events_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_channels_fops = { + .open = mhi_debugfs_channels_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_devices_fops = { + .open = mhi_debugfs_devices_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_regdump_fops = { + .open = mhi_debugfs_regdump_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_device_wake_fops = { + .open = mhi_debugfs_device_wake_open, + .write = mhi_debugfs_device_wake_write, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_timeout_ms_fops = { + .open = mhi_debugfs_timeout_ms_open, + .write = mhi_debugfs_timeout_ms_write, + .release = single_release, + .read = seq_read, +}; + +static struct dentry *mhi_debugfs_root; + +void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->debugfs_dentry = + debugfs_create_dir(dev_name(mhi_cntrl->cntrl_dev), + mhi_debugfs_root); + + debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_states_fops); + debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_events_fops); + debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_channels_fops); 
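	/*
	 * Note: together with mhi_debugfs_init() below, these nodes end up
	 * under <debugfs root>/<mhi bus name>/<controller device name>/,
	 * e.g. .../states, .../events, .../channels. The writable nodes
	 * accept "get"/"put" on device_wake to vote/unvote for M0, and a
	 * decimal millisecond value on timeout_ms.
	 */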
+ debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_devices_fops); + debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_regdump_fops); + debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_device_wake_fops); + debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_timeout_ms_fops); +} + +void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) +{ + debugfs_remove_recursive(mhi_cntrl->debugfs_dentry); + mhi_cntrl->debugfs_dentry = NULL; +} + +void mhi_debugfs_init(void) +{ + mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL); +} + +void mhi_debugfs_exit(void) +{ + debugfs_remove_recursive(mhi_debugfs_root); +} diff --git a/mhi/core/init.c b/mhi/core/init.c new file mode 100644 index 0000000..94d1c6b --- /dev/null +++ b/mhi/core/init.c @@ -0,0 +1,1447 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> +#include "internal.h" + +static DEFINE_IDA(mhi_controller_ida); + +const char * const mhi_ee_str[MHI_EE_MAX] = { + [MHI_EE_PBL] = "PBL", + [MHI_EE_SBL] = "SBL", + [MHI_EE_AMSS] = "AMSS", + [MHI_EE_RDDM] = "RDDM", + [MHI_EE_WFW] = "WFW", + [MHI_EE_PTHRU] = "PASS THRU", + [MHI_EE_EDL] = "EDL", + [MHI_EE_DISABLE_TRANSITION] = "DISABLE", + [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED", +}; + +const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { + [DEV_ST_TRANSITION_PBL] = "PBL", + [DEV_ST_TRANSITION_READY] = "READY", + [DEV_ST_TRANSITION_SBL] = "SBL", + [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE", + [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR", + [DEV_ST_TRANSITION_DISABLE] = "DISABLE", +}; + +const char * const mhi_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_M3_FAST] = "M3_FAST", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS_ERR", +}; + +const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { + [MHI_CH_STATE_TYPE_RESET] = "RESET", + [MHI_CH_STATE_TYPE_STOP] = "STOP", + [MHI_CH_STATE_TYPE_START] = "START", +}; + +static const char * const mhi_pm_state_str[] = { + [MHI_PM_STATE_DISABLE] = "DISABLE", + [MHI_PM_STATE_POR] = "POR", + [MHI_PM_STATE_M0] = "M0", + [MHI_PM_STATE_M2] = "M2", + [MHI_PM_STATE_M3_ENTER] = "M?->M3", + [MHI_PM_STATE_M3] = "M3", + [MHI_PM_STATE_M3_EXIT] = "M3->M0", + [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error", + [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect", + [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process", + [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", + [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect", +}; + +const char *to_mhi_pm_state_str(enum mhi_pm_state state) +{ + int index; + + if (state) + index = __fls(state); + + if (!state || index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + +static ssize_t serial_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = 
to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", + mhi_cntrl->serial_number); +} +static DEVICE_ATTR_RO(serial_number); + +static ssize_t oem_pk_hash_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int i, cnt = 0; + + for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) + cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, + "OEMPKHASH[%d]: 0x%x\n", i, + mhi_cntrl->oem_pk_hash[i]); + + return cnt; +} +static DEVICE_ATTR_RO(oem_pk_hash); + +static struct attribute *mhi_dev_attrs[] = { + &dev_attr_serial_number.attr, + &dev_attr_oem_pk_hash.attr, + NULL, +}; +ATTRIBUTE_GROUPS(mhi_dev); + +/* MHI protocol requires the transfer ring to be aligned with ring length */ +static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring, + u64 len) +{ + ring->alloc_size = len + (len - 1); + ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + ring->alloc_size, + &ring->dma_handle, + GFP_KERNEL); + if (!ring->pre_aligned) + return -ENOMEM; + + ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); + + return 0; +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i, ret; + + /* Setup BHI_INTVEC IRQ */ + ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, + mhi_intvec_threaded_handler, + IRQF_SHARED | IRQF_NO_SUSPEND, + "bhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + if (mhi_event->irq >= mhi_cntrl->nr_irqs) { + MHI_ERR("irq %d not available for event ring\n", + mhi_event->irq); + ret = -EINVAL; + goto error_request; + } + + ret = request_irq(mhi_cntrl->irq[mhi_event->irq], + mhi_irq_handler, + IRQF_SHARED | IRQF_NO_SUSPEND, + "mhi", mhi_event); + if (ret) { + MHI_ERR("Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->irq], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + dma_free_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < 
mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + u32 tmp; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + atomic_set(&mhi_cntrl->pending_pkts, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + /* Setup channel ctxt */ + mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* Skip if it is an offload channel */ + if (mhi_chan->offload_ch) + continue; + + tmp = chan_ctxt->chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); + tmp &= ~CHAN_CTX_BRSTMODE_MASK; + tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); + tmp &= ~CHAN_CTX_POLLCFG_MASK; + tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); + chan_ctxt->chcfg = tmp; + + chan_ctxt->chtype = mhi_chan->type; + chan_ctxt->erindex = mhi_chan->er_index; + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; + } + + /* Setup event context */ + mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if it is an offload event */ + if (mhi_event->offload_ev) + continue; + + tmp = er_ctxt->intmod; + tmp &= ~EV_CTX_INTMODC_MASK; + tmp &= ~EV_CTX_INTMODT_MASK; + tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); + er_ctxt->intmod = tmp; + + er_ctxt->ertype = MHI_ER_TYPE_VALID; + er_ctxt->msivec = mhi_event->irq; + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_tre); + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; + + /* + * If the read pointer equals to the write pointer, then the + * ring is empty + */ + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = ring->iommu_base; + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = ring->len; + ring->ctxt_wp = &er_ctxt->wp; + } + + /* Setup cmd context */ + ret = -ENOMEM; + mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * + NR_OF_CMD_RINGS, + 
&mhi_ctxt->cmd_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_tre); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = ring->len; + ring->ctxt_wp = &cmd_ctxt->wp; + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + struct mhi_ring *ring = &mhi_cmd->ring; + + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + dma_free_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + +int mhi_init_mmio(struct mhi_controller *mhi_cntrl) +{ + u32 val; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, + mhi_cntrl->total_ev_rings, + }, + { + MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, + mhi_cntrl->hw_ev_rings, + }, + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHICTRLLIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0 } + }; + + MHI_VERB("Initializing MHI registers\n"); + + 
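	/*
	 * The reg_info[] table above is applied in the loop further below via
	 * mhi_write_reg_field(), which conceptually performs a
	 * read-modify-write of the named field:
	 *
	 *   read reg; reg &= ~mask; reg |= (val << shift) & mask; write reg
	 *
	 * With mask == U32_MAX and shift == 0 this degenerates to a plain
	 * 32-bit write of the context and address-range registers.
	 */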
/* Read channel db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) { + MHI_ERR("Unable to read CHDBOFF register\n"); + return -EIO; + } + + /* Setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0); + mhi_cntrl->wake_set = false; + + /* Setup channel db address for each channel in tre_ring */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* Read event ring db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) { + MHI_ERR("Unable to read ERDBOFF register\n"); + return -EIO; + } + + /* Setup event db address for each ev_ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* Setup DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + /* Write to MMIO registers */ + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + mhi_misc_init_mmio(mhi_cntrl); + + return 0; +} + +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + u32 tmp; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + vfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + tre_ring->ctxt_wp = NULL; + chan_ctxt->rbase = 0; + chan_ctxt->rlen = 0; + chan_ctxt->rp = 0; + chan_ctxt->wp = 0; + + tmp = chan_ctxt->chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); + chan_ctxt->chcfg = tmp; + + /* Update to all cores */ + smp_wmb(); +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + u32 tmp; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = vzalloc(buf_ring->len); + + if (!buf_ring->base) { + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + return -ENOMEM; + } + + tmp = chan_ctxt->chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); + chan_ctxt->chcfg = tmp; + + chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = tre_ring->len; + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = 
buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = 1; + + /* Update to all cores */ + smp_wmb(); + + return 0; +} + +static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + struct mhi_event *mhi_event; + const struct mhi_event_config *event_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + int i, num; + + num = config->num_events; + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Populate event ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < num; i++) { + event_cfg = &config->event_cfg[i]; + + mhi_event->er_index = i; + mhi_event->ring.elements = event_cfg->num_elements; + mhi_event->intmod = event_cfg->irq_moderation_ms; + mhi_event->irq = event_cfg->irq; + + if (event_cfg->channel != U32_MAX) { + /* This event ring has a dedicated channel */ + mhi_event->chan = event_cfg->channel; + if (mhi_event->chan >= mhi_cntrl->max_chan) { + MHI_ERR( + "Event Ring channel not available\n"); + goto error_ev_cfg; + } + + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + mhi_event->priority = event_cfg->priority; + + mhi_event->db_cfg.brstmode = event_cfg->mode; + if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_event->db_cfg.process_db = mhi_db_brstmode; + else + mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; + + mhi_event->data_type = event_cfg->data_type; + + switch (mhi_event->data_type) { + case MHI_ER_DATA: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_BW_SCALE: + mhi_event->process_event = mhi_process_misc_bw_ev_ring; + break; + case MHI_ER_TIMESYNC: + mhi_event->process_event = + mhi_process_misc_tsync_ev_ring; + break; + default: + MHI_ERR("Event Ring type not supported\n"); + goto error_ev_cfg; + } + + mhi_event->hw_ring = event_cfg->hardware_event; + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = event_cfg->client_managed; + mhi_event->offload_ev = event_cfg->offload_channel; + mhi_event++; + } + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} + +static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + const struct mhi_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + int i; + u32 chan; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * The allocation of MHI channels can exceed 32KB in some scenarios, + * so to avoid any memory possible allocation failures, vzalloc is + * used here + */ + mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * + sizeof(*mhi_cntrl->mhi_chan)); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* Populate channel configurations */ + for (i = 0; i < config->num_channels; i++) { + struct mhi_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + MHI_ERR("Channel %d not available\n", chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + + mhi_chan->tre_ring.elements = ch_cfg->num_elements; + if (!mhi_chan->tre_ring.elements) + goto error_chan_cfg; + + /* + * For some channels, 
local ring length should be bigger than + * the transfer ring length due to internal logical channels + * in device. So host can queue much more buffers than transfer + * ring length. Example, RSC channels should have a larger local + * channel length than transfer ring length. + */ + mhi_chan->buf_ring.elements = ch_cfg->local_elements; + if (!mhi_chan->buf_ring.elements) + mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; + mhi_chan->er_index = ch_cfg->event_ring; + mhi_chan->dir = ch_cfg->dir; + + /* + * For most channels, chtype is identical to channel directions. + * So, if it is not defined then assign channel direction to + * chtype + */ + mhi_chan->type = ch_cfg->type; + if (!mhi_chan->type) + mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; + + mhi_chan->ee_mask = ch_cfg->ee_mask; + mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; + mhi_chan->lpm_notify = ch_cfg->lpm_notify; + mhi_chan->offload_ch = ch_cfg->offload_channel; + mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; + mhi_chan->pre_alloc = ch_cfg->auto_queue; + + /* + * If MHI host allocates buffers, then the channel direction + * should be DMA_FROM_DEVICE + */ + if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { + MHI_ERR("Invalid channel configuration\n"); + goto error_chan_cfg; + } + + /* + * Bi-directional and direction less channel must be an + * offload channel + */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { + MHI_ERR("Invalid channel configuration\n"); + goto error_chan_cfg; + } + + if (!mhi_chan->offload_ch) { + mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { + MHI_ERR("Invalid Door bell mode\n"); + goto error_chan_cfg; + } + } + + if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_chan->db_cfg.process_db = mhi_db_brstmode; + else + mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + vfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} + +static int parse_config(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + int ret; + + /* Parse MHI channel configuration */ + ret = parse_ch_cfg(mhi_cntrl, config); + if (ret) + return ret; + + /* Parse MHI event configuration */ + ret = parse_ev_cfg(mhi_cntrl, config); + if (ret) + goto error_ev_cfg; + + mhi_cntrl->timeout_ms = config->timeout_ms; + if (!mhi_cntrl->timeout_ms) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = config->use_bounce_buf; + mhi_cntrl->buffer_len = config->buf_len; + if (!mhi_cntrl->buffer_len) + mhi_cntrl->buffer_len = MHI_MAX_MTU; + + /* By default, host is allowed to ring DB in both M0 and M2 states */ + mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; + if (config->m2_no_db) + mhi_cntrl->db_access &= ~MHI_PM_M2; + + return 0; + +error_ev_cfg: + vfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + u32 soc_info; + int ret, i; + + if (!mhi_cntrl) + return -EINVAL; + + if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || + !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || + !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs) + return -EINVAL; + + ret = parse_config(mhi_cntrl, 
config); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_event; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", + WQ_HIGHPRI); + if (!mhi_cntrl->hiprio_wq) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); + ret = -ENOMEM; + goto err_free_cmd; + } + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + /* Skip for offload events */ + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + + if (mhi_event->priority == MHI_ER_PRIORITY_HI_SLEEP) + INIT_WORK(&mhi_event->work, mhi_process_ev_work); + else + tasklet_init(&mhi_event->task, + (mhi_event->data_type == MHI_ER_CTRL) ? + mhi_ctrl_ev_task : mhi_ev_task, + (ulong)mhi_event); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + + /* used in setting bei field of TRE */ + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + mhi_chan->intmod = mhi_event->intmod; + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + + /* Read the MHI device info */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + SOC_HW_VERSION_OFFS, &soc_info); + if (ret) + goto err_destroy_wq; + + mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> + SOC_HW_VERSION_FAM_NUM_SHFT; + mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >> + SOC_HW_VERSION_DEV_NUM_SHFT; + mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >> + SOC_HW_VERSION_MAJOR_VER_SHFT; + mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> + SOC_HW_VERSION_MINOR_VER_SHFT; + + mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); + if (mhi_cntrl->index < 0) { + ret = mhi_cntrl->index; + goto err_destroy_wq; + } + + /* Register controller with MHI bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); + ret = PTR_ERR(mhi_dev); + goto err_ida_free; + } + + mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + + /* Init wakeup source */ + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_release_dev; + + mhi_cntrl->mhi_dev = mhi_dev; + + ret = mhi_misc_register_controller(mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, + "Could not enable miscellaneous features\n"); + mhi_cntrl->mhi_dev = NULL; + goto err_release_dev; + } + + mhi_create_debugfs(mhi_cntrl); + + return 0; + +err_release_dev: + 
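+ /* Note: mhi_dev was allocated via mhi_alloc_device(); dropping the
+  * initial reference here lets mhi_release_device() (defined below)
+  * free it and clear any channel back-pointers. */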
put_device(&mhi_dev->dev); +err_ida_free: + ida_free(&mhi_controller_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->hiprio_wq); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_event: + kfree(mhi_cntrl->mhi_event); + vfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_register_controller); + +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; + unsigned int i; + + mhi_misc_unregister_controller(mhi_cntrl); + + /* Free the memory controller wanted to preserve for BHIe images */ + if (mhi_cntrl->img_pre_alloc) { + mhi_cntrl->img_pre_alloc = false; + if (mhi_cntrl->fbc_image) + mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image); + if (mhi_cntrl->rddm_image) + mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image); + } + + mhi_destroy_debugfs(mhi_cntrl); + + destroy_workqueue(mhi_cntrl->hiprio_wq); + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + + /* Drop the references to MHI devices created for channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->mhi_dev) + continue; + + put_device(&mhi_chan->mhi_dev->dev); + } + vfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_controller_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_unregister_controller); + +struct mhi_controller *mhi_alloc_controller(void) +{ + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL); + + return mhi_cntrl; +} +EXPORT_SYMBOL_GPL(mhi_alloc_controller); + +void mhi_free_controller(struct mhi_controller *mhi_cntrl) +{ + kfree(mhi_cntrl); +} +EXPORT_SYMBOL_GPL(mhi_free_controller); + +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 bhi_off, bhie_off; + int ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) + goto error_dev_ctxt; + + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, + &bhi_off); + if (ret) { + MHI_ERR("Error getting BHI offset\n"); + goto error_reg_offset; + } + if (bhi_off >= mhi_cntrl->reg_len) { + MHI_ERR("BHI offset is out of range\n"); + ret = -EINVAL; + goto error_reg_offset; + } + mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; + + if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, + &bhie_off); + if (ret) { + MHI_ERR("Error getting BHIE offset\n"); + goto error_reg_offset; + } + if (bhie_off >= mhi_cntrl->reg_len) { + MHI_ERR("BHIe offset is out of range\n"); + ret = -EINVAL; + goto error_reg_offset; + } + mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; + } + + if (mhi_cntrl->rddm_size) { + /* + * This controller supports RDDM, so we need to manually clear + * BHIE RX registers since POR values are undefined. 
+ */ + memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, + 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + + 4); + /* + * Allocate RDDM table for debugging purpose if specified + */ + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + if (mhi_cntrl->rddm_image) + mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + } + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +error_reg_offset: + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->rddm_image) + mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image); + + mhi_cntrl->bhi = NULL; + mhi_cntrl->bhie = NULL; + + mhi_deinit_dev_ctxt(mhi_cntrl); +} +EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created if the mhi_dev + * associated with it is NULL. This scenario will happen during the + * controller suspend and resume. + */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + + if (mhi_cntrl->mhi_dev) { + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + } else { + /* for MHI controller device, parent is the bus device (e.g. 
pci device) */ + dev->parent = mhi_cntrl->cntrl_dev; + } + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_wake = 0; + + return mhi_dev; +} + +static int mhi_driver_probe(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_driver *drv = dev->driver; + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + struct mhi_event *mhi_event; + struct mhi_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_chan *dl_chan = mhi_dev->dl_chan; + int ret; + + /* Bring device out of LPM */ + ret = mhi_device_get_sync(mhi_dev); + if (ret) + return ret; + + ret = -EINVAL; + + if (ul_chan) { + /* + * If channel supports LPM notifications then status_cb should + * be provided + */ + if (ul_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + /* For non-offload channels then xfer_cb should be provided */ + if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) + goto exit_probe; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + } + + ret = -EINVAL; + if (dl_chan) { + /* + * If channel supports LPM notifications then status_cb should + * be provided + */ + if (dl_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + /* For non-offload channels then xfer_cb should be provided */ + if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) + goto exit_probe; + + mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; + + /* + * If the channel event ring is managed by client, then + * status_cb must be provided so that the framework can + * notify pending data + */ + if (mhi_event->cl_manage && !mhi_drv->status_cb) + goto exit_probe; + + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + } + + /* Call the user provided probe function */ + ret = mhi_drv->probe(mhi_dev, mhi_dev->id); + if (ret) + goto exit_probe; + + mhi_device_put(mhi_dev); + + return ret; + +exit_probe: + mhi_unprepare_from_transfer(mhi_dev); + + mhi_device_put(mhi_dev); + + return ret; +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + enum mhi_ch_state ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* Wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* Set the channel state to disabled */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; + write_unlock_irq(&mhi_chan->lock); + + /* Reset the non-offload channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + + mutex_unlock(&mhi_chan->mutex); + } + + mhi_drv->remove(mhi_dev); + + /* De-init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->mutex); + + if ((ch_state[dir] == MHI_CH_STATE_ENABLED || + ch_state[dir] == MHI_CH_STATE_STOP) && + mhi_chan->ch_state != MHI_CH_STATE_DISABLED && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + + mutex_unlock(&mhi_chan->mutex); + } + + while (mhi_dev->dev_wake) + mhi_device_put(mhi_dev); + + return 0; +} + +int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->owner = owner; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_driver_unregister); + +static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + +static int mhi_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +struct bus_type mhi_bus_type = { + .name = "mhi", + .dev_name = "mhi", + .match = mhi_match, + .uevent = mhi_uevent, + .dev_groups = mhi_dev_groups, +}; + +static int __init mhi_init(void) +{ + mhi_misc_init(); + mhi_debugfs_init(); + return bus_register(&mhi_bus_type); +} + +static void __exit mhi_exit(void) +{ + mhi_misc_exit(); + mhi_debugfs_exit(); + bus_unregister(&mhi_bus_type); +} + +postcore_initcall(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/mhi/core/internal.h b/mhi/core/internal.h new file mode 100644 index 0000000..1b14dc6 --- /dev/null +++ b/mhi/core/internal.h @@ -0,0 +1,720 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include <linux/mhi.h> +#include "misc.h" + +extern struct bus_type mhi_bus_type; + +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) +#define MHIREGLEN_MHIREGLEN_SHIFT (0) + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK (0xFFFFFFFF) +#define MHIVER_MHIVER_SHIFT (0) + +#define MHICFG (0x10) +#define MHICFG_NHWER_MASK (0xFF000000) +#define MHICFG_NHWER_SHIFT (24) +#define MHICFG_NER_MASK (0xFF0000) +#define MHICFG_NER_SHIFT (16) +#define MHICFG_NHWCH_MASK (0xFF00) +#define MHICFG_NHWCH_SHIFT (8) +#define MHICFG_NCH_MASK (0xFF) +#define MHICFG_NCH_SHIFT (0) + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) +#define CHDBOFF_CHDBOFF_SHIFT (0) + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) +#define ERDBOFF_ERDBOFF_SHIFT (0) + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) +#define BHIOFF_BHIOFF_SHIFT (0) + +#define BHIEOFF (0x2C) +#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) +#define BHIEOFF_BHIEOFF_SHIFT (0) + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) +#define DEBUGOFF_DEBUGOFF_SHIFT (0) + +#define MHICTRL (0x38) +#define MHICTRL_MHISTATE_MASK (0x0000FF00) +#define MHICTRL_MHISTATE_SHIFT (8) +#define MHICTRL_RESET_MASK (0x2) +#define MHICTRL_RESET_SHIFT (1) + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK (0x0000FF00) +#define MHISTATUS_MHISTATE_SHIFT (8) +#define MHISTATUS_SYSERR_MASK (0x4) +#define MHISTATUS_SYSERR_SHIFT (2) +#define MHISTATUS_READY_MASK (0x1) +#define MHISTATUS_READY_SHIFT (0) + +#define CCABAP_LOWER (0x58) +#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) + +#define CCABAP_HIGHER (0x5C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) + +#define ECABAP_LOWER (0x60) +#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) + +#define ECABAP_HIGHER (0x64) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) + +#define CRCBAP_LOWER (0x68) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) + +#define CRCBAP_HIGHER (0x6C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) + +#define CRDB_LOWER (0x70) +#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) +#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) + +#define CRDB_HIGHER (0x74) +#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) + +#define MHICTRLBASE_LOWER (0x80) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) + +#define MHICTRLBASE_HIGHER (0x84) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) + +#define MHICTRLLIMIT_LOWER (0x88) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) + +#define MHICTRLLIMIT_HIGHER (0x8C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) + +#define MHIDATABASE_LOWER (0x98) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) + +#define MHIDATABASE_HIGHER (0x9C) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) +#define 
MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) + +#define MHIDATALIMIT_LOWER (0xA0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) + +#define MHIDATALIMIT_HIGHER (0xA4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) + +/* Host request register */ +#define MHI_SOC_RESET_REQ_OFFSET (0xB0) +#define MHI_SOC_RESET_REQ BIT(0) + +/* MHI BHI offfsets */ +#define BHI_BHIVERSION_MINOR (0x00) +#define BHI_BHIVERSION_MAJOR (0x04) +#define BHI_IMGADDR_LOW (0x08) +#define BHI_IMGADDR_HIGH (0x0C) +#define BHI_IMGSIZE (0x10) +#define BHI_RSVD1 (0x14) +#define BHI_IMGTXDB (0x18) +#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHI_TXDB_SEQNUM_SHFT (0) +#define BHI_RSVD2 (0x1C) +#define BHI_INTVEC (0x20) +#define BHI_RSVD3 (0x24) +#define BHI_EXECENV (0x28) +#define BHI_STATUS (0x2C) +#define BHI_ERRCODE (0x30) +#define BHI_ERRDBG1 (0x34) +#define BHI_ERRDBG2 (0x38) +#define BHI_ERRDBG3 (0x3C) +#define BHI_SERIALNU (0x40) +#define BHI_SBLANTIROLLVER (0x44) +#define BHI_NUMSEG (0x48) +#define BHI_MSMHWID(n) (0x4C + (0x4 * (n))) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) +#define BHI_RSVD5 (0xC4) +#define BHI_STATUS_MASK (0xC0000000) +#define BHI_STATUS_SHIFT (30) +#define BHI_STATUS_ERROR (3) +#define BHI_STATUS_SUCCESS (2) +#define BHI_STATUS_RESET (0) + +/* MHI BHIE offsets */ +#define BHIE_MSMSOCID_OFFS (0x0000) +#define BHIE_TXVECADDR_LOW_OFFS (0x002C) +#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) +#define BHIE_TXVECSIZE_OFFS (0x0034) +#define BHIE_TXVECDB_OFFS (0x003C) +#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECDB_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_OFFS (0x0044) +#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_TXVECSTATUS_STATUS_SHFT (30) +#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) +#define BHIE_RXVECADDR_LOW_OFFS (0x0060) +#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) +#define BHIE_RXVECSIZE_OFFS (0x0068) +#define BHIE_RXVECDB_OFFS (0x0070) +#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECDB_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_OFFS (0x0078) +#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_RXVECSTATUS_STATUS_SHFT (30) +#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) + +#define SOC_HW_VERSION_OFFS (0x224) +#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000) +#define SOC_HW_VERSION_FAM_NUM_SHFT (28) +#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000) +#define SOC_HW_VERSION_DEV_NUM_SHFT (16) +#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00) +#define SOC_HW_VERSION_MAJOR_VER_SHFT (8) +#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF) +#define SOC_HW_VERSION_MINOR_VER_SHFT (0) + +#define EV_CTX_RESERVED_MASK GENMASK(7, 0) +#define EV_CTX_INTMODC_MASK GENMASK(15, 8) +#define EV_CTX_INTMODC_SHIFT 8 +#define EV_CTX_INTMODT_MASK GENMASK(31, 16) +#define EV_CTX_INTMODT_SHIFT 16 +struct mhi_event_ctxt { + __u32 intmod; + __u32 ertype; + __u32 msivec; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +#define 
CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) +#define CHAN_CTX_CHSTATE_SHIFT 0 +#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) +#define CHAN_CTX_BRSTMODE_SHIFT 8 +#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) +#define CHAN_CTX_POLLCFG_SHIFT 10 +#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) +struct mhi_chan_ctxt { + __u32 chcfg; + __u32 chtype; + __u32 erindex; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + __u32 reserved0; + __u32 reserved1; + __u32 reserved2; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; +}; + +struct mhi_tre { + u64 ptr; + u32 dword[2]; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_cmd_type { + MHI_CMD_NOP = 1, + MHI_CMD_RESET_CHAN = 16, + MHI_CMD_STOP_CHAN = 17, + MHI_CMD_START_CHAN = 18, + MHI_CMD_SFR_CFG = 73, +}; + +/* No operation command */ +#define MHI_TRE_CMD_NOOP_PTR (0) +#define MHI_TRE_CMD_NOOP_DWORD0 (0) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) + +/* Channel reset command */ +#define MHI_TRE_CMD_RESET_PTR (0) +#define MHI_TRE_CMD_RESET_DWORD0 (0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_RESET_CHAN << 16)) + +/* Channel stop command */ +#define MHI_TRE_CMD_STOP_PTR (0) +#define MHI_TRE_CMD_STOP_DWORD0 (0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_STOP_CHAN << 16)) + +/* Channel start command */ +#define MHI_TRE_CMD_START_PTR (0) +#define MHI_TRE_CMD_START_DWORD0 (0) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_START_CHAN << 16)) + +#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) + +/* Event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) (ptr) +#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) +#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) +#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) +#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) + +/* Transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) (ptr) +#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) + +/* RSC transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) +#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) + +enum mhi_pkt_type { + 
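+ /* Ring element (TRE/event) packet types; encoded in bits 16-23 of
+  * DWORD1, as consumed by the MHI_TRE_*_DWORD1 and
+  * MHI_TRE_GET_EV_TYPE macros above. */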
MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, /* End of transfer event */ + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, /* End of block event */ + MHI_EV_CC_OOB = 0x5, /* Out of block event */ + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum mhi_ch_state { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +enum mhi_ch_state_type { + MHI_CH_STATE_TYPE_RESET, + MHI_CH_STATE_TYPE_STOP, + MHI_CH_STATE_TYPE_START, + MHI_CH_STATE_TYPE_MAX, +}; + +extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; +#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ + "INVALID_STATE" : \ + mhi_ch_state_type_str[(state)]) + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ + mode != MHI_DB_BRST_ENABLE) + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ + "INVALID_EE" : mhi_ee_str[ee]) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) + +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW) + +enum dev_st_transition { + DEV_ST_TRANSITION_PBL, + DEV_ST_TRANSITION_READY, + DEV_ST_TRANSITION_SBL, + DEV_ST_TRANSITION_MISSION_MODE, + DEV_ST_TRANSITION_SYS_ERR, + DEV_ST_TRANSITION_DISABLE, + DEV_ST_TRANSITION_MAX, +}; + +extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; +#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : dev_state_tran_str[state]) + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_state_str[state]) + +/* internal power states */ +enum mhi_pm_state { + MHI_PM_STATE_DISABLE, + MHI_PM_STATE_POR, + MHI_PM_STATE_M0, + MHI_PM_STATE_M2, + MHI_PM_STATE_M3_ENTER, + MHI_PM_STATE_M3, + MHI_PM_STATE_M3_EXIT, + MHI_PM_STATE_FW_DL_ERR, + MHI_PM_STATE_SYS_ERR_DETECT, + MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SHUTDOWN_PROCESS, + MHI_PM_STATE_LD_ERR_FATAL_DETECT, + MHI_PM_STATE_MAX +}; + +#define MHI_PM_DISABLE BIT(0) +#define MHI_PM_POR BIT(1) +#define MHI_PM_M0 BIT(2) +#define MHI_PM_M2 BIT(3) +#define MHI_PM_M3_ENTER BIT(4) +#define MHI_PM_M3 BIT(5) +#define MHI_PM_M3_EXIT BIT(6) +/* firmware download failure state */ +#define MHI_PM_FW_DL_ERR BIT(7) +#define MHI_PM_SYS_ERR_DETECT BIT(8) +#define MHI_PM_SYS_ERR_PROCESS BIT(9) +#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +/* link not accessible */ +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \ + mhi_cntrl->db_access) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +#define NR_OF_CMD_RINGS 1 +#define CMD_EL_PER_RING 128 +#define PRIMARY_CMD_RING 0 +#define MHI_DEV_WAKE_DB 127 +#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) + +enum mhi_er_type { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum mhi_db_brst_mode brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum mhi_pm_state from_state; + u32 to_states; +}; + +struct state_transition { + struct list_head node; + enum dev_st_transition state; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + u64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + void *v_addr; + void *bb_addr; + void *wp; + void *cb_buf; + dma_addr_t p_addr; + size_t len; + enum dma_data_direction dir; + bool used; /* Indicates whether the buffer is used or not */ + bool pre_mapped; /* Already pre-mapped by client */ +}; + +struct mhi_event { + struct mhi_controller *mhi_cntrl; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + u32 er_index; + u32 intmod; + u32 irq; + int chan; /* this event ring is dedicated to a channel (optional) */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + struct tasklet_struct task; + struct work_struct work; + spinlock_t lock; + int (*process_event)(struct 
mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ +}; + +struct mhi_chan { + const char *name; + /* + * Important: When consuming, increment tre_ring first and when + * releasing, decrement buf_ring first. If tre_ring has space, buf_ring + * is guranteed to have space so we do not need to check both rings. + */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 chan; + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + enum mhi_ch_ee_mask ee_mask; + enum mhi_ch_state ch_state; + enum mhi_ev_ccs ccs; + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + struct list_head node; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool wake_capable; +}; + +/* Default MHI timeout */ +#define MHI_TIMEOUT_MS (1000) + +/* debugfs related functions */ +#ifdef CONFIG_MHI_BUS_DEBUG +void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_debugfs_init(void); +void mhi_debugfs_exit(void); +#else +static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_debugfs_init(void) +{ +} + +static inline void mhi_debugfs_exit(void) +{ +} +#endif + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); + +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info); + +/* Power management APIs */ +enum mhi_pm_state __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state); +const char *to_mhi_pm_state_str(enum mhi_pm_state state); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd); +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); +static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) +{ + return (mhi_cntrl->dev_state >= MHI_STATE_M0 && + mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); +} + +static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) +{ + pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); +} + +/* Register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t db_val); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t db_val); +int 
__must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 val, u32 delayus); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr); + +/* Initialization methods */ +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info); +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Event processing methods */ +void mhi_ctrl_ev_task(unsigned long data); +void mhi_ev_task(unsigned long data); +void mhi_process_ev_work(struct work_struct *work); +void mhi_process_sleeping_events(struct mhi_controller *mhi_cntrl); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); + +/* ISR handlers */ +irqreturn_t mhi_irq_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_handler(int irq_number, void *dev); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_buf_info *info, enum mhi_flags flags); +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + +#endif /* _MHI_INT_H */ diff --git a/mhi/core/main.c b/mhi/core/main.c new file mode 100644 index 0000000..925b468 --- /dev/null +++ b/mhi/core/main.c @@ -0,0 +1,1858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
+ * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include "internal.h" + +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out) +{ + return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 shift, u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> shift; + + return 0; +} + +int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 shift, u32 val, u32 delayus) +{ + int ret; + u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift, + &out); + if (ret) + return ret; + + if (out == val) + return 0; + + fsleep(delayus); + } + + return -ETIMEDOUT; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val) +{ + mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); +} + +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return; + + tmp &= ~mask; + tmp |= (val << shift); + mhi_write_reg(mhi_cntrl, base, offset, tmp); +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); + db_cfg->db_mode = 0; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, *ring->ctxt_wp); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, + ring->db_addr, db); +} + +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? 
MHI_EE_MAX : exec; +} +EXPORT_SYMBOL_GPL(mhi_get_exec_env); + +enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? MHI_STATE_MAX : state; +} +EXPORT_SYMBOL_GPL(mhi_get_mhi_state); + +void mhi_soc_reset(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->reset) { + mhi_cntrl->reset(mhi_cntrl); + return; + } + + /* Generic MHI SoC reset */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, + MHI_SOC_RESET_REQ); +} +EXPORT_SYMBOL_GPL(mhi_soc_reset); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, + buf_info->v_addr, buf_info->len, + buf_info->dir); + if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, buf_info->bb_addr, + buf_info->p_addr); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) { + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + } else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + + return nr_el; +} + +void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->wp += ring->el_size; + if (ring->wp >= (ring->base + ring->len)) + ring->wp = ring->base; + /* smp update */ + smp_wmb(); +} + +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + /* smp update */ + smp_wmb(); +} + +static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) +{ + return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; +} + +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_chan *ul_chan, *dl_chan; + struct mhi_device *mhi_dev; + struct mhi_controller *mhi_cntrl; + enum mhi_ee_type ee = MHI_EE_MAX; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy virtual devices thats attached to bus */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + /* + * If execution environment is specified, remove only those devices that + * started in them based on ee_mask for the 
channels as we move on to a + * different execution environment + */ + if (data) + ee = *(enum mhi_ee_type *)data; + + /* + * For the suspend and resume case, this function will get called + * without mhi_unregister_controller(). Hence, we need to drop the + * references to mhi_dev created for ul and dl channels. We can + * be sure that there will be no instances of mhi_dev left after + * this. + */ + if (ul_chan) { + if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) + return 0; + + put_device(&ul_chan->mhi_dev->dev); + } + + if (dl_chan) { + if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) + return 0; + + put_device(&dl_chan->mhi_dev->dev); + } + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +int mhi_get_free_desc_count(struct mhi_device *mhi_dev, + enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? + mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL(mhi_get_free_desc_count); + +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) +{ + struct mhi_driver *mhi_drv; + + if (!mhi_dev->dev.driver) + return; + + mhi_drv = to_mhi_driver(mhi_dev->dev.driver); + + if (mhi_drv->status_cb) + mhi_drv->status_cb(mhi_dev, cb_reason); +} +EXPORT_SYMBOL_GPL(mhi_notify); + +/* Bind MHI channels to MHI devices */ +void mhi_create_devices(struct mhi_controller *mhi_cntrl) +{ + struct mhi_chan *mhi_chan; + struct mhi_device *mhi_dev; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i, ret; + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->configured || mhi_chan->mhi_dev || + !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) + continue; + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) + return; + + mhi_dev->dev_type = MHI_DEVICE_XFER; + switch (mhi_chan->dir) { + case DMA_TO_DEVICE: + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_event_id = mhi_chan->er_index; + break; + case DMA_FROM_DEVICE: + /* We use dl_chan as offload channels */ + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_event_id = mhi_chan->er_index; + break; + default: + MHI_ERR("Direction not supported\n"); + put_device(&mhi_dev->dev); + return; + } + + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Check next channel if it matches */ + if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { + if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { + i++; + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_event_id = mhi_chan->er_index; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_event_id = mhi_chan->er_index; + } + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + } + } + + /* Channel name is same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%s_%s", dev_name(dev), + mhi_dev->name); + + /* Init wakeup source if available */ + if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + } 
+} + +void mhi_process_sleeping_events(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + struct mhi_ring *ev_ring; + int i; + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev || mhi_event->priority != + MHI_ER_PRIORITY_HI_SLEEP) + continue; + + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + ev_ring = &mhi_event->ring; + + /* Only proceed if event ring has pending events */ + if (ev_ring->rp == mhi_to_virtual(ev_ring, er_ctxt->rp)) + continue; + + queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work); + } +} + +irqreturn_t mhi_irq_handler(int irq_number, void *priv) +{ + struct mhi_event *mhi_event = priv; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_ring *ev_ring = &mhi_event->ring; + dma_addr_t ptr = er_ctxt->rp; + void *dev_rp; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + MHI_ERR("Event ring rp points outside of the event ring\n"); + return IRQ_HANDLED; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + + /* Only proceed if event ring has pending events */ + if (ev_ring->rp == dev_rp) + return IRQ_HANDLED; + + /* For client managed event ring, notify pending data */ + if (mhi_event->cl_manage) { + struct mhi_chan *mhi_chan = mhi_event->mhi_chan; + struct mhi_device *mhi_dev = mhi_chan->mhi_dev; + + if (mhi_dev) + mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); + + return IRQ_HANDLED; + } + + switch (mhi_event->priority) { + case MHI_ER_PRIORITY_HI_NOSLEEP: + tasklet_hi_schedule(&mhi_event->task); + break; + case MHI_ER_PRIORITY_DEFAULT_NOSLEEP: + tasklet_schedule(&mhi_event->task); + break; + case MHI_ER_PRIORITY_HI_SLEEP: + queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work); + break; + default: + MHI_VERB("skip unknown priority event\n"); + break; + } + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) +{ + struct mhi_controller *mhi_cntrl = priv; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state = MHI_STATE_MAX; + enum mhi_pm_state pm_state = 0; + enum mhi_ee_type ee = MHI_EE_MAX; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + write_unlock_irq(&mhi_cntrl->pm_lock); + goto exit_intvec; + } + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + MHI_VERB("local ee:%s state:%s device ee:%s state:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state)); + + if (state == MHI_STATE_SYS_ERR) { + MHI_VERB("System error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* If device supports RDDM don't bother processing SYS error */ + if (mhi_cntrl->rddm_image) { + /* host may be performing a device power down already */ + if (!mhi_is_active(mhi_cntrl)) + goto exit_intvec; + + if (ee == MHI_EE_RDDM && mhi_cntrl->ee != MHI_EE_RDDM) { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + mhi_cntrl->ee = ee; + wake_up_all(&mhi_cntrl->state_event); + } + goto exit_intvec; + } + + if (pm_state == MHI_PM_SYS_ERR_DETECT) { + wake_up_all(&mhi_cntrl->state_event); + + /* For fatal errors, we let controller decide next step */ + if (MHI_IN_PBL(ee)) { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); + 
mhi_cntrl->ee = ee; + } else { + mhi_pm_sys_err_handler(mhi_cntrl); + } + } + +exit_intvec: + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handler(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + + /* Wake up events waiting for state change */ + wake_up_all(&mhi_cntrl->state_event); + + return IRQ_WAKE_THREAD; +} + +static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + dma_addr_t ctxt_wp; + + /* Update the WP */ + ring->wp += ring->el_size; + ctxt_wp = *ring->ctxt_wp + ring->el_size; + + if (ring->wp >= (ring->base + ring->len)) { + ring->wp = ring->base; + ctxt_wp = ring->iommu_base; + } + + *ring->ctxt_wp = ctxt_wp; + + /* Update the RP */ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + + /* Update to all cores */ + smp_wmb(); +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result; + unsigned long flags = 0; + u32 ev_code; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * If it's a DB Event then we need to grab the lock + * with preemption disabled and as a write because we + * have to update db register and there are chances that + * another thread could be doing the same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + if (!is_valid_ring_ptr(tre_ring, ptr)) { + MHI_ERR("Event element points outside of the tre ring\n"); + break; + } + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + result.dir = mhi_chan->dir; + + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* If it's the last TRE, get length from the event */ + if (local_rp == ev_tre) + xfer_len = MHI_TRE_GET_EV_LEN(event); + else + xfer_len = buf_info->len; + + /* Unmap if it's not pre-mapped by client */ + if (likely(!buf_info->pre_mapped)) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + result.buf_addr = buf_info->cb_buf; + + /* truncate to buf len if xfer_len is larger */ + result.bytes_xferd = + min_t(u16, xfer_len, buf_info->len); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_dec(&mhi_cntrl->pending_pkts); + + /* + * Recycle the buffer if buffer is pre-allocated, + * if there is an error, not much we can do apart + * from dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, + mhi_chan->dir, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + MHI_ERR( + "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + } + break; + 
} /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long flags; + + mhi_chan->db_cfg.db_mode = 1; + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + break; + } + case MHI_EV_CC_BAD_TRE: + default: + MHI_ERR("Unknown event 0x%x\n", ev_code); + panic("Unknown event 0x%x\n", ev_code); + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static int parse_rsc_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_buf_info *buf_info; + struct mhi_result result; + int ev_code; + u32 cookie; /* offset to local descriptor */ + u16 xfer_len; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + ev_code = MHI_TRE_GET_EV_CODE(event); + cookie = MHI_TRE_GET_EV_COOKIE(event); + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* Received out of bound cookie */ + WARN_ON(cookie >= buf_ring->len); + + buf_info = buf_ring->base + cookie; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* truncate to buf len if xfer_len is larger */ + result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_rsc_event; + + WARN_ON(!buf_info->used); + + /* notify the client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* + * Note: We're arbitrarily incrementing RP even though, completion + * packet we processed might not be the same one, reason we can do this + * is because device guaranteed to cache descriptors in order it + * receive, so even though completion event is different we can re-use + * all descriptors in between. + * Example: + * Transfer Ring has descriptors: A, B, C, D + * Last descriptor host queue is D (WP) and first descriptor + * host queue is A (RP). + * The completion event we just serviced is descriptor C. + * Then we can safely queue descriptors to replace A, B, and C + * even though host did not receive any completions. + */ + mhi_del_ring_element(mhi_cntrl, tre_ring); + buf_info->used = false; + +end_process_rsc_event: + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + u32 chan; + + if (!is_valid_ring_ptr(mhi_ring, ptr)) { + MHI_ERR("Event element points outside of the cmd ring\n"); + return; + } + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + if (cmd_pkt != mhi_ring->rp) + panic("Out of order cmd completion: 0x%llx. 
Expected: 0x%llx\n", + cmd_pkt, mhi_ring->rp); + + if (MHI_TRE_GET_CMD_TYPE(cmd_pkt) == MHI_CMD_SFR_CFG) { + mhi_misc_cmd_completion(mhi_cntrl, MHI_CMD_SFR_CFG, + MHI_TRE_GET_EV_CODE(tre)); + goto exit_cmd_completion; + } + + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + if (chan >= mhi_cntrl->max_chan) { + MHI_ERR("Invalid channel id: %u\n", chan); + goto exit_cmd_completion; + } + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + +exit_cmd_completion: + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 chan; + int count = 0; + dma_addr_t ptr = er_ctxt->rp; + + /* + * This is a quick check to avoid unnecessary event processing + * in case MHI is already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + MHI_ERR("Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + switch (type) { + case MHI_PKT_TYPE_BW_REQ_EVENT: + { + struct mhi_link_info *link_info; + + link_info = &mhi_cntrl->mhi_link_info; + write_lock_irq(&mhi_cntrl->pm_lock); + link_info->target_link_speed = + MHI_TRE_GET_EV_LINKSPEED(local_rp); + link_info->target_link_width = + MHI_TRE_GET_EV_LINKWIDTH(local_rp); + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_VERB("Received BW_REQ event\n"); + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); + break; + } + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_state new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + MHI_VERB("State change event to state: %s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum mhi_pm_state new_state; + + MHI_VERB("System error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_SYS_ERR_DETECT) + mhi_pm_sys_err_handler(mhi_cntrl); + break; + } + default: + MHI_ERR("Invalid state: %s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum dev_st_transition st = DEV_ST_TRANSITION_MAX; + enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); + + MHI_VERB("Received EE event: %s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = DEV_ST_TRANSITION_SBL; + break; + case MHI_EE_WFW: + case MHI_EE_AMSS: + st = DEV_ST_TRANSITION_MISSION_MODE; + break; + 
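+ /*
+  * RDDM means the device has entered RAM dump mode after a crash:
+  * notify the controller immediately through the status callback and
+  * record the new execution environment, but do not queue a device
+  * state transition for it.
+  */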
case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + break; + default: + MHI_ERR( + "Unhandled EE event: 0x%x\n", type); + } + if (st != DEV_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + + break; + } + case MHI_PKT_TYPE_TX_EVENT: + chan = MHI_TRE_GET_EV_CHID(local_rp); + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. + */ + if (chan < mhi_cntrl->max_chan) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + if (!mhi_chan->configured) + break; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + break; + default: + MHI_ERR("Unhandled event type: %d\n", type); + break; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + + ptr = er_ctxt->rp; + if (!is_valid_ring_ptr(ev_ring, ptr)) { + MHI_ERR("Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan; + dma_addr_t ptr = er_ctxt->rp; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + MHI_ERR("Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + chan = MHI_TRE_GET_EV_CHID(local_rp); + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. 
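+ * Events for channels that have not been configured are
+ * skipped as well.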
+ */ + if (chan < mhi_cntrl->max_chan && + mhi_cntrl->mhi_chan[chan].configured) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + + ptr = er_ctxt->rp; + if (!is_valid_ring_ptr(ev_ring, ptr)) { + MHI_ERR("Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + count++; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +void mhi_ev_task(unsigned long data) +{ + unsigned long flags; + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + + /* process all pending events */ + spin_lock_irqsave(&mhi_event->lock, flags); + mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +void mhi_ctrl_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + enum mhi_pm_state pm_state = 0; + int ret; + + /* + * We can check PM state w/o a lock here because there is no way + * PM state can change from reg access valid to no access while this + * thread being executed. + */ + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + /* + * We may have a pending event but not allowed to + * process it since we are probably in a suspended state, + * so trigger a resume. + */ + mhi_trigger_resume(mhi_cntrl); + + return; + } + + /* Process ctrl events events */ + ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + + /* + * We received an IRQ but no events to process, maybe device went to + * SYS_ERR state? Check the state to confirm. + */ + if (!ret) { + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_get_mhi_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + MHI_VERB("System error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + mhi_pm_sys_err_handler(mhi_cntrl); + } +} + +void mhi_process_ev_work(struct work_struct *work) +{ + struct mhi_event *mhi_event = container_of(work, struct mhi_event, + work); + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct device *dev = mhi_cntrl->cntrl_dev; + + MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return; + + mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); +} + +static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *tmp = ring->wp + ring->el_size; + + if (tmp >= (ring->base + ring->len)) + tmp = ring->base; + + return (tmp == ring->rp); +} + +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_buf_info buf_info = { }; + int ret; + + /* If MHI host pre-allocates buffers then client drivers cannot queue */ + if (mhi_chan->pre_alloc) + return -EINVAL; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + + /* Toggle wake to exit out of M2 */ + mhi_cntrl->wake_toggle(mhi_cntrl); + + buf_info.v_addr = skb->data; + buf_info.cb_buf = skb; + buf_info.len = len; + + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags); + if (unlikely(ret)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return ret; + } + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) { + read_lock_bh(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_bh(&mhi_chan->lock); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_queue_skb); + +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_buf_info buf_info = { }; + int ret; + + /* If MHI host pre-allocates buffers then client drivers cannot queue */ + if (mhi_chan->pre_alloc) + return -EINVAL; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_ERR("MHI is not in activate state, PM state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + + /* Toggle wake to exit out of M2 */ + mhi_cntrl->wake_toggle(mhi_cntrl); + + buf_info.p_addr = mhi_buf->dma_addr; + buf_info.cb_buf = mhi_buf; + buf_info.pre_mapped = true; + buf_info.len = len; + + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags); + if (unlikely(ret)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return ret; + } + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) { + read_lock_bh(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_bh(&mhi_chan->lock); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_queue_dma); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_buf_info *info, enum mhi_flags flags) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + WARN_ON(buf_info->used); + buf_info->pre_mapped = info->pre_mapped; + if (info->pre_mapped) + buf_info->p_addr = info->p_addr; + else + buf_info->v_addr = info->v_addr; + 
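+ /*
+  * Mirror the caller's buffer description into the ring-resident
+  * buf_info so the completion path can unmap it and hand cb_buf
+  * back to the client later.
+  */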
buf_info->cb_buf = info->cb_buf; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = info->len; + + if (!info->pre_mapped) { + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + } + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + + MHI_VERB("Channel: %d WP: 0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", + mhi_chan->chan, mhi_tre, mhi_tre->ptr, mhi_tre->dword[0], + mhi_tre->dword[1]); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_ring *tre_ring; + struct mhi_buf_info buf_info = { }; + unsigned long flags; + int ret; + + /* + * this check here only as a guard, it's always + * possible mhi can enter error while executing rest of function, + * which is not fatal so we do not need to hold pm_lock + */ + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) + return -EIO; + + tre_ring = &mhi_chan->tre_ring; + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + buf_info.v_addr = buf; + buf_info.cb_buf = buf; + buf_info.len = len; + + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags); + if (unlikely(ret)) + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + + /* Toggle wake to exit out of M2 */ + mhi_cntrl->wake_toggle(mhi_cntrl); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) { + unsigned long flags; + + read_lock_irqsave(&mhi_chan->lock, flags); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irqrestore(&mhi_chan->lock, flags); + } + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_queue_buf); + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int chan = 0; + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_STOP_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + case MHI_CMD_SFR_CFG: + 
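+ /*
+  * The subsystem failure reason (SFR) configuration command is
+  * built by the misc layer helper rather than inline here.
+  */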
mhi_misc_cmd_configure(mhi_cntrl, MHI_CMD_SFR_CFG, + &cmd_tre->ptr, &cmd_tre->dword[0], + &cmd_tre->dword[1]); + break; + default: + MHI_ERR("Command not supported\n"); + break; + } + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_ch_state_type to_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_cmd_type cmd = MHI_CMD_NOP; + int ret = -EIO; + + MHI_VERB("Updating channel %s(%d) state to: %s\n", mhi_chan->name, + mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + + switch (to_state) { + case MHI_CH_STATE_TYPE_RESET: + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_STOP && + mhi_chan->ch_state != MHI_CH_STATE_ENABLED && + mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { + write_unlock_irq(&mhi_chan->lock); + goto exit_invalid_state; + } + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + cmd = MHI_CMD_RESET_CHAN; + break; + case MHI_CH_STATE_TYPE_STOP: + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto exit_invalid_state; + + cmd = MHI_CMD_STOP_CHAN; + break; + case MHI_CH_STATE_TYPE_START: + if (mhi_chan->ch_state != MHI_CH_STATE_STOP && + mhi_chan->ch_state != MHI_CH_STATE_DISABLED) + goto exit_invalid_state; + + cmd = MHI_CMD_START_CHAN; + break; + default: + goto exit_invalid_state; + } + + /* bring host and device out of suspended states */ + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) + return ret; + mhi_cntrl->runtime_get(mhi_cntrl); + + reinit_completion(&mhi_chan->completion); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); + if (ret) { + MHI_ERR("Failed to send %s(%d) %s command\n", + mhi_chan->name, mhi_chan->chan, + TO_CH_STATE_TYPE_STR(to_state)); + goto exit_command_failure; + } + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to receive %s(%d) %s command completion\n", + mhi_chan->name, mhi_chan->chan, + TO_CH_STATE_TYPE_STR(to_state)); + ret = -EIO; + goto exit_command_failure; + } + + ret = 0; + + if (to_state != MHI_CH_STATE_TYPE_RESET) { + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? 
+ MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP; + write_unlock_irq(&mhi_chan->lock); + } + + MHI_VERB("Channel %s(%d) state change to %s successful\n", + mhi_chan->name, mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + +exit_command_failure: + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); + + return ret; + +exit_invalid_state: + MHI_ERR("Channel %s(%d) update to %s not allowed\n", + mhi_chan->name, mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + + return -EINVAL; +} + +static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + /* no more processing events for this channel */ + mutex_lock(&mhi_chan->mutex); + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + MHI_ERR( + "Current EE: %s Required EE Mask: 0x%x for chan: %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask, + mhi_chan->name); + goto exit_unprepare_channel; + } + + ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, + MHI_CH_STATE_TYPE_RESET); + if (ret) + MHI_ERR("Failed to reset channel, still resetting\n"); + +exit_unprepare_channel: + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + MHI_VERB("chan:%d successfully reset\n", mhi_chan->chan); + + mutex_unlock(&mhi_chan->mutex); +} + +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret = 0; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + MHI_ERR( + "Current EE: %s Required EE Mask: 0x%x for chan: %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask, + mhi_chan->name); + return -ENOTCONN; + } + + mutex_lock(&mhi_chan->mutex); + + /* Check of client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) + goto error_init_chan; + } + + ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, + MHI_CH_STATE_TYPE_START); + if (ret) + goto error_pm_state; + + /* Pre-allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; + + while (nr_el--) { + void *buf; + struct mhi_buf_info info = { }; + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + /* Prepare transfer descriptors */ + info.v_addr = buf; + info.cb_buf = buf; + info.len = len; + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); + if (ret) { + kfree(buf); + goto error_pre_alloc; + } + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { + read_lock_irq(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + mutex_unlock(&mhi_chan->mutex); + + return 0; + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + mutex_unlock(&mhi_chan->mutex); + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + struct mhi_event_ctxt *er_ctxt, + int chan) + +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring 
*ev_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + unsigned long flags; + dma_addr_t ptr; + + MHI_VERB("Marking all events for chan: %d as stale\n", chan); + + ev_ring = &mhi_event->ring; + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + + ptr = er_ctxt->rp; + if (!is_valid_ring_ptr(ev_ring, ptr)) { + MHI_ERR("Event ring rp points outside of the event ring\n"); + dev_rp = ev_ring->rp; + } else { + dev_rp = mhi_to_virtual(ev_ring, ptr); + } + + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + + MHI_VERB("Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + + /* Reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_dec(&mhi_cntrl->pending_pkts); + + if (!buf_info->pre_mapped) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int chan = mhi_chan->chan; + + /* Nothing to reset, client doesn't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); + + mhi_reset_data_chan(mhi_cntrl, mhi_chan); + + read_unlock_bh(&mhi_cntrl->pm_lock); +} + +/* Move channel to start state */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); + if (ret) + goto error_open_chan; + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + if (!mhi_chan) + continue; + + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } +} +EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); + +static int mhi_update_transfer_state(struct mhi_device *mhi_dev, + enum mhi_ch_state_type to_state) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int dir, ret; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* + * Bail out if one of the channels fails as client will reset + * both upon failure + */ + mutex_lock(&mhi_chan->mutex); + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + if (!(chan_ctxt->chcfg & CHAN_CTX_CHSTATE_MASK)) { + mutex_unlock(&mhi_chan->mutex); + MHI_ERR("Channel %s(%u) context not initialized\n", + mhi_chan->name, mhi_chan->chan); + return -EINVAL; + } + ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, to_state); + if (ret) { + mutex_unlock(&mhi_chan->mutex); + return ret; + } + mutex_unlock(&mhi_chan->mutex); + } + + return 0; +} + +int mhi_stop_transfer(struct mhi_device *mhi_dev) +{ + return mhi_update_transfer_state(mhi_dev, MHI_CH_STATE_TYPE_STOP); +} +EXPORT_SYMBOL(mhi_stop_transfer); + +int mhi_start_transfer(struct mhi_device *mhi_dev) +{ + return mhi_update_transfer_state(mhi_dev, MHI_CH_STATE_TYPE_START); +} +EXPORT_SYMBOL(mhi_start_transfer); + +int mhi_poll(struct mhi_device *mhi_dev, u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_poll); diff --git a/mhi/core/misc.c b/mhi/core/misc.c new file mode 100644 index 0000000..03792de --- /dev/null +++ b/mhi/core/misc.c @@ -0,0 +1,1674 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> +#include "internal.h" + +#ifdef CONFIG_MHI_BUS_DEBUG +#define MHI_MISC_DEBUG_LEVEL MHI_MSG_LVL_VERBOSE +#else +#define MHI_MISC_DEBUG_LEVEL MHI_MSG_LVL_ERROR +#endif + +const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = { + [MHI_MSG_LVL_VERBOSE] = "Verbose", + [MHI_MSG_LVL_INFO] = "Info", + [MHI_MSG_LVL_ERROR] = "Error", + [MHI_MSG_LVL_CRITICAL] = "Critical", + [MHI_MSG_LVL_MASK_ALL] = "Mask all", +}; +#define TO_MHI_LOG_LEVEL_STR(level) ((level >= MHI_MSG_LVL_MAX || \ + !mhi_log_level_str[level]) ? 
\ + "Mask all" : mhi_log_level_str[level]) + +struct mhi_bus mhi_bus; + +void mhi_misc_init(void) +{ + mutex_init(&mhi_bus.lock); + INIT_LIST_HEAD(&mhi_bus.controller_list); +} + +void mhi_misc_exit(void) +{ + mutex_destroy(&mhi_bus.lock); +} + +static ssize_t time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u64 t_host, t_device; + int ret; + + ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device); + if (ret) { + MHI_ERR("Failed to obtain time, ret:%d\n", ret); + return scnprintf(buf, PAGE_SIZE, + "Request failed or feature unsupported\n"); + } + + return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (ticks)\n", + t_host, t_device); +} +static DEVICE_ATTR_RO(time); + +static void mhi_time_async_cb(struct mhi_device *mhi_dev, u32 sequence, + u64 local_time, u64 remote_time) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_dev->dev; + + MHI_LOG("Time response: seq:%llx local: %llu remote: %llu (ticks)\n", + sequence, local_time, remote_time); +} + +static ssize_t time_async_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u32 seq = prandom_u32(); + int ret; + + if (!seq) + seq = 1; + + ret = mhi_get_remote_time(mhi_dev, seq, &mhi_time_async_cb); + if (ret) { + MHI_ERR("Failed to request time, seq:%llx, ret:%d\n", seq, ret); + return scnprintf(buf, PAGE_SIZE, + "Request failed or feature unsupported\n"); + } + + return scnprintf(buf, PAGE_SIZE, + "Requested time asynchronously with seq:%llx\n", seq); +} +static DEVICE_ATTR_RO(time_async); + +static struct attribute *mhi_tsync_attrs[] = { + &dev_attr_time.attr, + &dev_attr_time_async.attr, + NULL, +}; + +static const struct attribute_group mhi_tsync_group = { + .attrs = mhi_tsync_attrs, +}; + +static ssize_t log_level_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); + + if (!mhi_priv) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "IPC log level begins from: %s\n", + TO_MHI_LOG_LEVEL_STR(mhi_priv->log_lvl)); +} + +static ssize_t log_level_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); + enum MHI_DEBUG_LEVEL log_level; + + if (kstrtou32(buf, 0, &log_level) < 0) + return -EINVAL; + + if (!mhi_priv) + return -EIO; + + mhi_priv->log_lvl = log_level; + + MHI_LOG("IPC log level changed to: %s\n", + TO_MHI_LOG_LEVEL_STR(log_level)); + + return count; +} +static DEVICE_ATTR_RW(log_level); + +static struct attribute *mhi_misc_attrs[] = { + &dev_attr_log_level.attr, + NULL, +}; + +static const struct attribute_group mhi_misc_group = { + .attrs = mhi_misc_attrs, +}; + +int mhi_misc_register_controller(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = kzalloc(sizeof(*mhi_priv), GFP_KERNEL); + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + struct pci_dev *parent = to_pci_dev(mhi_cntrl->cntrl_dev); + int ret; + + if 
(!mhi_priv) + return -ENOMEM; + + if (parent) { + dev_set_name(&mhi_dev->dev, "mhi_%04x_%02u.%02u.%02u", + parent->device, pci_domain_nr(parent->bus), + parent->bus->number, PCI_SLOT(parent->devfn)); + mhi_dev->name = dev_name(&mhi_dev->dev); + } + + mhi_priv->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES, + mhi_dev->name, 0); + mhi_priv->log_lvl = MHI_MISC_DEBUG_LEVEL; + mhi_priv->mhi_cntrl = mhi_cntrl; + + /* adding it to this list only for debug purpose */ + mutex_lock(&mhi_bus.lock); + list_add_tail(&mhi_priv->node, &mhi_bus.controller_list); + mutex_unlock(&mhi_bus.lock); + + dev_set_drvdata(dev, mhi_priv); + + ret = sysfs_create_group(&dev->kobj, &mhi_misc_group); + if (ret) + MHI_ERR("Failed to create misc sysfs group\n"); + + ret = sysfs_create_group(&dev->kobj, &mhi_tsync_group); + if (ret) + MHI_ERR("Failed to create time synchronization sysfs group\n"); + + return 0; +} + +void mhi_misc_unregister_controller(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); + + if (!mhi_priv) + return; + + mutex_lock(&mhi_bus.lock); + list_del(&mhi_priv->node); + mutex_unlock(&mhi_bus.lock); + + sysfs_remove_group(&dev->kobj, &mhi_tsync_group); + sysfs_remove_group(&dev->kobj, &mhi_misc_group); + + if (mhi_priv->sfr_info) + kfree(mhi_priv->sfr_info->str); + kfree(mhi_priv->sfr_info); + kfree(mhi_priv->timesync); + kfree(mhi_priv); +} + +void *mhi_controller_get_privdata(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + struct mhi_private *mhi_priv; + + if (!mhi_cntrl) + return NULL; + + mhi_dev = mhi_cntrl->mhi_dev; + if (!mhi_dev) + return NULL; + + mhi_priv = dev_get_drvdata(&mhi_dev->dev); + if (!mhi_priv) + return NULL; + + return mhi_priv->priv_data; +} +EXPORT_SYMBOL(mhi_controller_get_privdata); + +void mhi_controller_set_privdata(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_device *mhi_dev; + struct mhi_private *mhi_priv; + + if (!mhi_cntrl) + return; + + mhi_dev = mhi_cntrl->mhi_dev; + if (!mhi_dev) + return; + + mhi_priv = dev_get_drvdata(&mhi_dev->dev); + if (!mhi_priv) + return; + + mhi_priv->priv_data = priv; +} +EXPORT_SYMBOL(mhi_controller_set_privdata); + +static struct mhi_controller *find_mhi_controller_by_name(const char *name) +{ + struct mhi_private *mhi_priv, *tmp_priv; + struct mhi_controller *mhi_cntrl; + + list_for_each_entry_safe(mhi_priv, tmp_priv, &mhi_bus.controller_list, + node) { + mhi_cntrl = mhi_priv->mhi_cntrl; + if (mhi_cntrl->mhi_dev->name && (!strcmp(name, mhi_cntrl->mhi_dev->name))) + return mhi_cntrl; + } + + return NULL; +} + +struct mhi_controller *mhi_bdf_to_controller(u32 domain, + u32 bus, + u32 slot, + u32 dev_id) +{ + char name[32]; + + snprintf(name, sizeof(name), "mhi_%04x_%02u.%02u.%02u", dev_id, domain, + bus, slot); + + return find_mhi_controller_by_name(name); +} +EXPORT_SYMBOL(mhi_bdf_to_controller); + +static int mhi_notify_fatal_cb(struct device *dev, void *data) +{ + mhi_notify(to_mhi_device(dev), MHI_CB_FATAL_ERROR); + + return 0; +} + +int mhi_report_error(struct mhi_controller *mhi_cntrl) +{ + struct device *dev; + struct mhi_private *mhi_priv; + struct mhi_sfr_info *sfr_info; + enum mhi_pm_state cur_state; + + if (!mhi_cntrl) + return -EINVAL; + + dev = &mhi_cntrl->mhi_dev->dev; + mhi_priv = dev_get_drvdata(dev); + sfr_info = mhi_priv->sfr_info; + + write_lock_irq(&mhi_cntrl->pm_lock); + + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT); + if (cur_state != 
MHI_PM_SYS_ERR_DETECT) { + dev_err(mhi_cntrl->cntrl_dev, + "Failed to move to state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_SYS_ERR_DETECT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EPERM; + } + + /* force inactive/error state */ + mhi_cntrl->dev_state = MHI_STATE_SYS_ERR; + wake_up_all(&mhi_cntrl->state_event); + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* copy subsystem failure reason string if supported */ + if (sfr_info && sfr_info->buf_addr) { + memcpy(sfr_info->str, sfr_info->buf_addr, sfr_info->len); + MHI_ERR("mhi: %s sfr: %s\n", dev_name(dev), sfr_info->buf_addr); + } + + /* Notify fatal error to all client drivers to halt processing */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, + mhi_notify_fatal_cb); + + return 0; +} +EXPORT_SYMBOL(mhi_report_error); + +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_chan *mhi_chan; + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *ch_ctxt; + int er_index, chan; + + switch (dir) { + case DMA_TO_DEVICE: + mhi_chan = mhi_dev->ul_chan; + break; + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + case DMA_NONE: + mhi_chan = mhi_dev->dl_chan; + break; + default: + return -EINVAL; + } + + er_index = mhi_chan->er_index; + chan = mhi_chan->chan; + + for (; elements > 0; elements--, cfg_tbl++) { + /* update event context array */ + if (!strcmp(cfg_tbl->name, "ECA")) { + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index]; + if (sizeof(*er_ctxt) != cfg_tbl->len) { + MHI_ERR( + "Invalid ECA size, expected:%zu actual%zu\n", + sizeof(*er_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt)); + continue; + } + + /* update channel context array */ + if (!strcmp(cfg_tbl->name, "CCA")) { + ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan]; + if (cfg_tbl->len != sizeof(*ch_ctxt)) { + MHI_ERR( + "Invalid CCA size, expected:%zu actual:%zu\n", + sizeof(*ch_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt)); + continue; + } + + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(mhi_device_configure); + +void mhi_set_m2_timeout_ms(struct mhi_controller *mhi_cntrl, u32 timeout) +{ + struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); + + mhi_priv->m2_timeout_ms = timeout; +} +EXPORT_SYMBOL(mhi_set_m2_timeout_ms); + +int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_clients) +{ + struct mhi_chan *itr, *tmp; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + + MHI_VERB("Entered with PM state: %s, MHI state: %s notify: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + notify_clients ? 
"true" : "false"); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + read_lock_bh(&mhi_cntrl->pm_lock); + WARN_ON(mhi_cntrl->pm_state != MHI_PM_M3); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM + && mhi_is_active(mhi_cntrl)) { + mhi_cntrl->ee = MHI_EE_RDDM; + + MHI_ERR("RDDM event occurred!\n"); + + /* notify critical clients with early notifications */ + mhi_report_error(mhi_cntrl); + + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + wake_up_all(&mhi_cntrl->state_event); + + return 0; + } + + /* Notify clients about exiting LPM */ + if (notify_clients) { + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, + node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + } + + /* disable primary event ring processing to prevent interference */ + tasklet_disable(&mhi_cntrl->mhi_event->task); + + write_lock_irq(&mhi_cntrl->pm_lock); + + /* re-check to make sure no error has occurred before proceeding */ + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + write_unlock_irq(&mhi_cntrl->pm_lock); + tasklet_enable(&mhi_cntrl->mhi_event->task); + return -EIO; + } + + /* restore the states */ + mhi_cntrl->pm_state = mhi_priv->saved_pm_state; + mhi_cntrl->dev_state = mhi_priv->saved_dev_state; + + write_unlock_irq(&mhi_cntrl->pm_lock); + + switch (mhi_cntrl->pm_state) { + case MHI_PM_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_PM_M2: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); + break; + default: + MHI_ERR("Unexpected PM state:%s after restore\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + + /* enable primary event ring processing and check for events */ + tasklet_enable(&mhi_cntrl->mhi_event->task); + mhi_irq_handler(0, mhi_cntrl->mhi_event); + + return 0; +} +EXPORT_SYMBOL(mhi_pm_fast_resume); + +int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_clients) +{ + struct mhi_chan *itr, *tmp; + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + struct device *dev = &mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + enum mhi_pm_state new_state; + int ret; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* check if host/clients have any bus votes or packets to be sent */ + if (atomic_read(&mhi_cntrl->pending_pkts)) + return -EBUSY; + + /* wait for the device to attempt a low power mode (M2 entry) */ + wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M2, + msecs_to_jiffies(mhi_priv->m2_timeout_ms)); + + /* disable primary event ring processing to prevent interference */ + tasklet_disable(&mhi_cntrl->mhi_event->task); + + write_lock_irq(&mhi_cntrl->pm_lock); + + /* re-check if host/clients have any bus votes or packets to be sent */ + if (atomic_read(&mhi_cntrl->pending_pkts)) { + ret = -EBUSY; + goto error_suspend; + } + + /* re-check to make sure no error has occurred before proceeding */ + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + ret = -EIO; + goto error_suspend; + } + + MHI_VERB("Allowing Fast M3 transition with notify: %s\n", + notify_clients ? 
"true" : "false"); + + /* save the current states */ + mhi_priv->saved_pm_state = mhi_cntrl->pm_state; + mhi_priv->saved_dev_state = mhi_cntrl->dev_state; + + /* move from M2 to M0 as device can allow the transition but not host */ + if (mhi_cntrl->pm_state == MHI_PM_M2) { + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + if (new_state != MHI_PM_M0) { + MHI_ERR("Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M0), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_suspend; + } + } + + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + MHI_ERR("Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_suspend; + } + + /* set dev_state to M3_FAST and host pm_state to M3 */ + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + if (new_state != MHI_PM_M3) { + MHI_ERR("Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_suspend; + } + + mhi_cntrl->dev_state = MHI_STATE_M3_FAST; + mhi_cntrl->M3_fast++; + + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* enable primary event ring processing and check for events */ + tasklet_enable(&mhi_cntrl->mhi_event->task); + mhi_irq_handler(0, mhi_cntrl->mhi_event); + + /* Notify clients about entering LPM */ + if (notify_clients) { + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, + node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + } + + return 0; + +error_suspend: + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* enable primary event ring processing and check for events */ + tasklet_enable(&mhi_cntrl->mhi_event->task); + mhi_irq_handler(0, mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL(mhi_pm_fast_suspend); + +static void mhi_process_sfr(struct mhi_controller *mhi_cntrl, + struct file_info *info) +{ + struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u8 *sfr_buf, *file_offset = info->file_offset; + u32 file_size = info->file_size; + u32 rem_seg_len = info->rem_seg_len; + u32 seg_idx = info->seg_idx; + + sfr_buf = kzalloc(file_size + 1, GFP_KERNEL); + if (!sfr_buf) + return; + + while (file_size) { + /* file offset starting from seg base */ + if (!rem_seg_len) { + file_offset = mhi_buf[seg_idx].buf; + if (file_size > mhi_buf[seg_idx].len) + rem_seg_len = mhi_buf[seg_idx].len; + else + rem_seg_len = file_size; + } + + if (file_size <= rem_seg_len) { + memcpy(sfr_buf, file_offset, file_size); + break; + } + + memcpy(sfr_buf, file_offset, rem_seg_len); + sfr_buf += rem_seg_len; + file_size -= rem_seg_len; + rem_seg_len = 0; + seg_idx++; + if (seg_idx == mhi_cntrl->rddm_image->entries) { + MHI_ERR("invalid size for SFR file\n"); + goto err; + } + } + sfr_buf[info->file_size] = '\0'; + + /* force sfr string to log in kernel msg */ + MHI_ERR("%s\n", sfr_buf); +err: + kfree(sfr_buf); +} + +static int mhi_find_next_file_offset(struct mhi_controller *mhi_cntrl, + struct file_info *info, + struct rddm_table_info *table_info) +{ + struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (info->rem_seg_len >= table_info->size) { + info->file_offset += table_info->size; + info->rem_seg_len -= table_info->size; + return 0; + } + + info->file_size = 
table_info->size - info->rem_seg_len; + info->rem_seg_len = 0; + /* iterate over segments until eof is reached */ + while (info->file_size) { + info->seg_idx++; + if (info->seg_idx == mhi_cntrl->rddm_image->entries) { + MHI_ERR("invalid size for file %s\n", + table_info->file_name); + return -EINVAL; + } + if (info->file_size > mhi_buf[info->seg_idx].len) { + info->file_size -= mhi_buf[info->seg_idx].len; + } else { + info->file_offset = mhi_buf[info->seg_idx].buf + + info->file_size; + info->rem_seg_len = mhi_buf[info->seg_idx].len - + info->file_size; + info->file_size = 0; + } + } + + return 0; +} + +void mhi_dump_sfr(struct mhi_controller *mhi_cntrl) +{ + struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf; + struct rddm_header *rddm_header = + (struct rddm_header *)mhi_buf->buf; + struct rddm_table_info *table_info; + struct file_info info; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 table_size, n; + + memset(&info, 0, sizeof(info)); + + if (rddm_header->header_size > sizeof(*rddm_header) || + rddm_header->header_size < 8) { + MHI_ERR("invalid reported header size %u\n", + rddm_header->header_size); + return; + } + + table_size = (rddm_header->header_size - 8) / sizeof(*table_info); + if (!table_size) { + MHI_ERR("invalid rddm table size %u\n", table_size); + return; + } + + info.file_offset = (u8 *)rddm_header + rddm_header->header_size; + info.rem_seg_len = mhi_buf[0].len - rddm_header->header_size; + for (n = 0; n < table_size; n++) { + table_info = &rddm_header->table_info[n]; + + if (!strcmp(table_info->file_name, "Q6-SFR.bin")) { + info.file_size = table_info->size; + mhi_process_sfr(mhi_cntrl, &info); + return; + } + + if (mhi_find_next_file_offset(mhi_cntrl, &info, table_info)) + return; + } +} +EXPORT_SYMBOL(mhi_dump_sfr); + +bool mhi_scan_rddm_cookie(struct mhi_controller *mhi_cntrl, u32 cookie) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + u32 val; + + if (!mhi_cntrl->rddm_image || !cookie) + return false; + + MHI_VERB("Checking BHI debug register for 0x%x\n", cookie); + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return false; + + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_ERRDBG2, &val); + if (ret) + return false; + + MHI_VERB("BHI_ERRDBG2 value:0x%x\n", val); + if (val == cookie) + return true; + + return false; +} +EXPORT_SYMBOL(mhi_scan_rddm_cookie); + +void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + enum mhi_ee_type ee; + int i, ret; + u32 val; + void __iomem *mhi_base = mhi_cntrl->regs; + void __iomem *bhi_base = mhi_cntrl->bhi; + void __iomem *bhie_base = mhi_cntrl->bhie; + void __iomem *wake_db = mhi_cntrl->wake_db; + struct { + const char *name; + int offset; + void __iomem *base; + } debug_reg[] = { + { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, + { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, + { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, + { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, + { "BHI_EXECENV", BHI_EXECENV, bhi_base}, + { "BHI_STATUS", BHI_STATUS, bhi_base}, + { "MHI_CNTRL", MHICTRL, mhi_base}, + { "MHI_STATUS", MHISTATUS, mhi_base}, + { "MHI_WAKE_DB", 0, wake_db}, + { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, + { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, + { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, + { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, + { NULL }, + }; + + MHI_ERR("host pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + 
TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + + MHI_ERR("device ee: %s dev_state: %s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state)); + + for (i = 0; debug_reg[i].name; i++) { + if (!debug_reg[i].base) + continue; + ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base, + debug_reg[i].offset, &val); + MHI_ERR("reg: %s val: 0x%x, ret: %d\n", debug_reg[i].name, + val, ret); + } +} +EXPORT_SYMBOL(mhi_debug_reg_dump); + +int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us, + bool in_panic) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_dev->dev; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_dev->dev_wake++; + pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + + /* Return if client doesn't want us to wait */ + if (!timeout_us) { + if (mhi_cntrl->pm_state != MHI_PM_M0) + MHI_ERR("Return without waiting for M0\n"); + + mhi_cntrl->runtime_put(mhi_cntrl); + return 0; + } + + if (in_panic) { + while (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M0 && + !MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) && + timeout_us > 0) { + udelay(MHI_FORCE_WAKE_DELAY_US); + timeout_us -= MHI_FORCE_WAKE_DELAY_US; + } + } else { + while (mhi_cntrl->pm_state != MHI_PM_M0 && + !MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) && + timeout_us > 0) { + udelay(MHI_FORCE_WAKE_DELAY_US); + timeout_us -= MHI_FORCE_WAKE_DELAY_US; + } + } + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || timeout_us <= 0) { + MHI_ERR("Did not enter M0, cur_state: %s pm_state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_dev->dev_wake--; + mhi_cntrl->runtime_put(mhi_cntrl); + return -ETIMEDOUT; + } + + mhi_cntrl->runtime_put(mhi_cntrl); + + return 0; +} +EXPORT_SYMBOL(mhi_device_get_sync_atomic); + +static int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, + u32 capability, u32 *offset) +{ + u32 cur_cap, next_offset; + int ret; + + /* get the 1st supported capability offset */ + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET, + MISC_CAP_MASK, MISC_CAP_SHIFT, offset); + if (ret) + return ret; + do { + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_CAPID_MASK, CAP_CAPID_SHIFT, + &cur_cap); + if (ret) + return ret; + + if (cur_cap == capability) + return 0; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT, + &next_offset); + if (ret) + return ret; + + *offset = next_offset; + if (*offset >= MHI_REG_SIZE) + return -ENXIO; + } while (next_offset); + + return -ENXIO; +} + +/* to be used only if a single event ring with the type is present */ +static int mhi_get_er_index(struct mhi_controller *mhi_cntrl, + enum mhi_er_data_type type) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + /* find event ring for requested type */ + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->data_type == type) + return mhi_event->er_index; + } + + return -ENOENT; +} + +static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl, + void __iomem *bw_scale_db) +{ + 
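+ /*
+  * Bandwidth scaling setup: locate the BW scale capability in the
+  * MMIO capability chain, find the event ring dedicated to
+  * MHI_ER_BW_SCALE, then advertise host support by writing that
+  * ring's index into the capability's config register.
+  */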
struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + int ret, er_index; + u32 bw_cfg_offset; + + /* controller doesn't support dynamic bw switch */ + if (!mhi_priv->bw_scale) + return -ENODEV; + + ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID, + &bw_cfg_offset); + if (ret) + return ret; + + /* No ER configured to support BW scale */ + er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE); + if (er_index < 0) + return er_index; + + bw_cfg_offset += BW_SCALE_CFG_OFFSET; + + mhi_priv->bw_scale_db = bw_scale_db; + + /* advertise host support */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset, + MHI_BW_SCALE_SETUP(er_index)); + + MHI_VERB("Bandwidth scaling setup complete. Event ring:%d\n", + er_index); + + return 0; +} + +int mhi_controller_setup_timesync(struct mhi_controller *mhi_cntrl, + u64 (*time_get)(struct mhi_controller *c), + int (*lpm_disable)(struct mhi_controller *c), + int (*lpm_enable)(struct mhi_controller *c)) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_timesync *mhi_tsync = kzalloc(sizeof(*mhi_tsync), + GFP_KERNEL); + + if (!mhi_tsync) + return -ENOMEM; + + mhi_tsync->time_get = time_get; + mhi_tsync->lpm_disable = lpm_disable; + mhi_tsync->lpm_enable = lpm_enable; + + mhi_priv->timesync = mhi_tsync; + + return 0; +} +EXPORT_SYMBOL(mhi_controller_setup_timesync); + +static int mhi_init_timesync(struct mhi_controller *mhi_cntrl, + void __iomem *time_db) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_timesync *mhi_tsync = mhi_priv->timesync; + u32 time_offset; + int ret, er_index; + + if (!mhi_tsync) + return -EINVAL; + + ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID, + &time_offset); + if (ret) + return ret; + + /* save time_offset for obtaining time via MMIO register reads */ + mhi_tsync->time_reg = mhi_cntrl->regs + time_offset; + + mutex_init(&mhi_tsync->mutex); + + /* get timesync event ring configuration */ + er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_TIMESYNC); + if (er_index < 0) + return 0; + + spin_lock_init(&mhi_tsync->lock); + INIT_LIST_HEAD(&mhi_tsync->head); + + mhi_tsync->time_db = time_db; + + /* advertise host support */ + mhi_write_reg(mhi_cntrl, mhi_tsync->time_reg, TIMESYNC_CFG_OFFSET, + MHI_TIMESYNC_DB_SETUP(er_index)); + + MHI_VERB("Time synchronization DB mode setup complete. 
Event ring:%d\n", + er_index); + + return 0; +} + +int mhi_misc_init_mmio(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 chdb_off; + int ret; + + /* Read channel db offset */ + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, CHDBOFF, + CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &chdb_off); + if (ret) { + MHI_ERR("Unable to read CHDBOFF register\n"); + return -EIO; + } + + ret = mhi_init_bw_scale(mhi_cntrl, (mhi_cntrl->regs + chdb_off + + (8 * MHI_BW_SCALE_CHAN_DB))); + if (ret) + MHI_LOG("BW scale setup failure\n"); + + ret = mhi_init_timesync(mhi_cntrl, (mhi_cntrl->regs + chdb_off + + (8 * MHI_TIMESYNC_CHAN_DB))); + if (ret) + MHI_LOG("Time synchronization setup failure\n"); + + return 0; +} + +/* Recycle by fast forwarding WP to the last posted event */ +static void mhi_recycle_fwd_ev_ring_element + (struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) +{ + dma_addr_t ctxt_wp; + + /* update the WP */ + ring->wp += ring->el_size; + if (ring->wp >= (ring->base + ring->len)) + ring->wp = ring->base; + + /* update the context WP based on the RP to support fast forwarding */ + ctxt_wp = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = ctxt_wp; + + /* update the RP */ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + + /* visible to other cores */ + smp_wmb(); +} + +/* dedicated bw scale event ring processing */ +int mhi_process_misc_tsync_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_timesync *mhi_tsync = mhi_priv->timesync; + u32 sequence; + u64 remote_time; + int ret = 0; + + spin_lock_bh(&mhi_event->lock); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + if (ev_ring->rp == dev_rp) { + spin_unlock_bh(&mhi_event->lock); + goto exit_tsync_process; + } + + /* if rp points to base, we need to wrap it around */ + if (dev_rp == ev_ring->base) + dev_rp = ev_ring->base + ev_ring->len; + dev_rp--; + + /* fast forward to currently processed element and recycle er */ + ev_ring->rp = dev_rp; + ev_ring->wp = dev_rp - 1; + if (ev_ring->wp < ev_ring->base) + ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size; + mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring); + + if (WARN_ON(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_TSYNC_EVENT)) { + MHI_ERR("!TIMESYNC event\n"); + ret = -EINVAL; + spin_unlock_bh(&mhi_event->lock); + goto exit_tsync_process; + } + + sequence = MHI_TRE_GET_EV_SEQ(dev_rp); + remote_time = MHI_TRE_GET_EV_TIME(dev_rp); + + MHI_VERB("Received TSYNC event with seq: 0x%llx time: 0x%llx\n", + sequence, remote_time); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_event->lock); + + mutex_lock(&mhi_tsync->mutex); + + if (WARN_ON(mhi_tsync->int_sequence != sequence)) { + MHI_ERR("Unexpected response: 0x%llx Expected: 0x%llx\n", + sequence, mhi_tsync->int_sequence); + + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); + + mutex_unlock(&mhi_tsync->mutex); + ret = -EINVAL; + goto exit_tsync_process; + } + + do { + struct tsync_node *tsync_node; + + spin_lock(&mhi_tsync->lock); + tsync_node = 
list_first_entry_or_null(&mhi_tsync->head, + struct tsync_node, node); + if (!tsync_node) { + spin_unlock(&mhi_tsync->lock); + break; + } + + list_del(&tsync_node->node); + spin_unlock(&mhi_tsync->lock); + + tsync_node->cb_func(tsync_node->mhi_dev, + tsync_node->sequence, + mhi_tsync->local_time, remote_time); + kfree(tsync_node); + } while (true); + + mhi_tsync->db_pending = false; + mhi_tsync->remote_time = remote_time; + complete(&mhi_tsync->completion); + + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); + + mutex_unlock(&mhi_tsync->mutex); + +exit_tsync_process: + MHI_VERB("exit er_index: %u, ret: %d\n", mhi_event->er_index, ret); + + return ret; +} + +/* dedicated bw scale event ring processing */ +int mhi_process_misc_bw_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + u32 result = MHI_BW_SCALE_NACK; + int ret = -EINVAL; + + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + goto exit_bw_scale_process; + + spin_lock_bh(&mhi_event->lock); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + /* if rp points to base, we need to wrap it around */ + if (dev_rp == ev_ring->base) + dev_rp = ev_ring->base + ev_ring->len; + dev_rp--; + + /* fast forward to currently processed element and recycle er */ + ev_ring->rp = dev_rp; + ev_ring->wp = dev_rp - 1; + if (ev_ring->wp < ev_ring->base) + ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size; + mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring); + + if (WARN_ON(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT)) { + MHI_ERR("!BW SCALE REQ event\n"); + spin_unlock_bh(&mhi_event->lock); + goto exit_bw_scale_process; + } + + link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp); + link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp); + link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp); + + MHI_VERB("Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n", + link_info.sequence_num, + link_info.target_link_speed, + link_info.target_link_width); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_event->lock); + + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) + goto exit_bw_scale_process; + mhi_cntrl->runtime_get(mhi_cntrl); + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_priv->bw_scale(mhi_cntrl, &link_info); + if (!ret) { + *cur_info = link_info; + result = 0; + } + + write_lock_bh(&mhi_cntrl->pm_lock); + mhi_priv->bw_response = MHI_BW_SCALE_RESULT(result, + link_info.sequence_num); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) { + mhi_write_reg(mhi_cntrl, mhi_priv->bw_scale_db, 0, + mhi_priv->bw_response); + mhi_priv->bw_response = 0; + } else { + MHI_VERB("Cached BW response for seq: %u, result: %d\n", + link_info.sequence_num, mhi_priv->bw_response); + } + write_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); + + mutex_unlock(&mhi_cntrl->pm_mutex); + +exit_bw_scale_process: + MHI_VERB("exit er_index:%u ret:%d\n", mhi_event->er_index, ret); + + return ret; +} + +void mhi_misc_dbs_pending(struct 
mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + + if (mhi_priv->bw_scale && mhi_priv->bw_response) { + mhi_write_reg(mhi_cntrl, mhi_priv->bw_scale_db, 0, + mhi_priv->bw_response); + MHI_VERB("Completed BW response: %d\n", mhi_priv->bw_response); + mhi_priv->bw_response = 0; + } +} + +void mhi_controller_set_bw_scale_cb(struct mhi_controller *mhi_cntrl, + int (*cb_func)(struct mhi_controller *mhi_cntrl, + struct mhi_link_info *link_info)) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + + mhi_priv->bw_scale = cb_func; +} +EXPORT_SYMBOL(mhi_controller_set_bw_scale_cb); + +void mhi_controller_set_base(struct mhi_controller *mhi_cntrl, phys_addr_t base) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + + mhi_priv->base_addr = base; +} +EXPORT_SYMBOL(mhi_controller_set_base); + +int mhi_get_channel_db_base(struct mhi_device *mhi_dev, phys_addr_t *value) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + u32 offset; + int ret; + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return -EIO; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, CHDBOFF, + CHDBOFF_CHDBOFF_MASK, CHDBOFF_CHDBOFF_SHIFT, + &offset); + if (ret) + return -EIO; + + *value = mhi_priv->base_addr + offset; + + return ret; +} +EXPORT_SYMBOL(mhi_get_channel_db_base); + +int mhi_get_event_ring_db_base(struct mhi_device *mhi_dev, phys_addr_t *value) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + u32 offset; + int ret; + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return -EIO; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, ERDBOFF, + ERDBOFF_ERDBOFF_MASK, ERDBOFF_ERDBOFF_SHIFT, + &offset); + if (ret) + return -EIO; + + *value = mhi_priv->base_addr + offset; + + return ret; +} +EXPORT_SYMBOL(mhi_get_event_ring_db_base); + +struct mhi_device *mhi_get_device_for_channel(struct mhi_controller *mhi_cntrl, + u32 channel) +{ + if (channel >= mhi_cntrl->max_chan) + return NULL; + + return mhi_cntrl->mhi_chan[channel].mhi_dev; +} +EXPORT_SYMBOL(mhi_get_device_for_channel); + +#if !IS_ENABLED(CONFIG_MHI_DTR) +long mhi_device_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, + unsigned long arg) +{ + return -EIO; +} +EXPORT_SYMBOL(mhi_device_ioctl); +#endif + +int mhi_controller_set_sfr_support(struct mhi_controller *mhi_cntrl, size_t len) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_sfr_info *sfr_info; + + sfr_info = kzalloc(sizeof(*sfr_info), GFP_KERNEL); + if (!sfr_info) + return -ENOMEM; + + sfr_info->len = len; + sfr_info->str = kzalloc(len, GFP_KERNEL); + if (!sfr_info->str) + return -ENOMEM; + + mhi_priv->sfr_info = sfr_info; + + return 0; +} +EXPORT_SYMBOL(mhi_controller_set_sfr_support); + +void mhi_misc_mission_mode(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info; + u64 local, remote; + int ret = -EIO; + + /* Attempt to print local and remote SOC time delta for debug */ + ret = mhi_get_remote_time_sync(mhi_cntrl->mhi_dev, &local, &remote); + if (!ret) + 
MHI_LOG("Timesync: local: %llx, remote: %llx\n", local, remote); + + /* initialize SFR */ + if (!sfr_info) + return; + + /* do a clean-up if we reach here post SSR */ + memset(sfr_info->str, 0, sfr_info->len); + + sfr_info->buf_addr = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sfr_info->len, + &sfr_info->dma_addr, + GFP_KERNEL); + if (!sfr_info->buf_addr) { + MHI_ERR("Failed to allocate memory for sfr\n"); + return; + } + + init_completion(&sfr_info->completion); + + ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_SFR_CFG); + if (ret) { + MHI_ERR("Failed to send sfr cfg cmd\n"); + return; + } + + ret = wait_for_completion_timeout(&sfr_info->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || sfr_info->ccs != MHI_EV_CC_SUCCESS) + MHI_ERR("Failed to get sfr cfg cmd completion\n"); +} + +void mhi_misc_disable(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info; + + if (sfr_info && sfr_info->buf_addr) { + dma_free_coherent(mhi_cntrl->cntrl_dev, sfr_info->len, + sfr_info->buf_addr, sfr_info->dma_addr); + sfr_info->buf_addr = NULL; + } +} + +void mhi_misc_cmd_configure(struct mhi_controller *mhi_cntrl, unsigned int type, + u64 *ptr, u32 *dword0, u32 *dword1) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info; + + if (type == MHI_CMD_SFR_CFG && sfr_info) { + *ptr = MHI_TRE_CMD_SFR_CFG_PTR(sfr_info->dma_addr); + *dword0 = MHI_TRE_CMD_SFR_CFG_DWORD0(sfr_info->len - 1); + *dword1 = MHI_TRE_CMD_SFR_CFG_DWORD1; + } +} + +void mhi_misc_cmd_completion(struct mhi_controller *mhi_cntrl, + unsigned int type, unsigned int ccs) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info; + + if (type == MHI_CMD_SFR_CFG && sfr_info) { + sfr_info->ccs = ccs; + complete(&sfr_info->completion); + } +} + +int mhi_get_remote_time_sync(struct mhi_device *mhi_dev, + u64 *t_host, + u64 *t_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_timesync *mhi_tsync = mhi_priv->timesync; + u64 local_time; + u32 tdev_lo = U32_MAX, tdev_hi = U32_MAX; + int ret; + + /* not all devices support time features */ + if (!mhi_tsync) + return -EINVAL; + + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_ERR("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + mutex_lock(&mhi_tsync->mutex); + + /* return times from last async request completion */ + if (mhi_tsync->db_pending) { + local_time = mhi_tsync->local_time; + mutex_unlock(&mhi_tsync->mutex); + + ret = wait_for_completion_timeout(&mhi_tsync->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || !ret) { + MHI_ERR("Pending DB request did not complete, abort\n"); + return -EAGAIN; + } + + *t_host = local_time; + *t_dev = mhi_tsync->remote_time; + + return 0; + } + + /* bring to M0 state */ + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) + goto error_unlock; + mhi_cntrl->runtime_get(mhi_cntrl); + + /* disable link level low power modes */ + ret = mhi_tsync->lpm_disable(mhi_cntrl); + if (ret) + goto error_invalid_state; + + /* + * time critical code to 
fetch device times; + * the delay between these two steps should be + * as deterministic as possible. + */ + preempt_disable(); + local_irq_disable(); + + *t_host = mhi_tsync->time_get(mhi_cntrl); + + ret = mhi_read_reg(mhi_cntrl, mhi_tsync->time_reg, + TIMESYNC_TIME_LOW_OFFSET, &tdev_lo); + if (ret) + MHI_ERR("Time LOW register read error\n"); + + ret = mhi_read_reg(mhi_cntrl, mhi_tsync->time_reg, + TIMESYNC_TIME_HIGH_OFFSET, &tdev_hi); + if (ret) + MHI_ERR("Time HIGH register read error\n"); + + *t_dev = (u64) tdev_hi << 32 | tdev_lo; + + local_irq_enable(); + preempt_enable(); + + mhi_tsync->lpm_enable(mhi_cntrl); + +error_invalid_state: + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); +error_unlock: + mutex_unlock(&mhi_tsync->mutex); + return ret; +} +EXPORT_SYMBOL(mhi_get_remote_time_sync); + +int mhi_get_remote_time(struct mhi_device *mhi_dev, + u32 sequence, + void (*cb_func)(struct mhi_device *mhi_dev, + u32 sequence, + u64 local_time, + u64 remote_time)) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_private *mhi_priv = dev_get_drvdata(dev); + struct mhi_timesync *mhi_tsync = mhi_priv->timesync; + struct tsync_node *tsync_node; + int ret = 0; + + /* not all devices support all time features */ + if (!mhi_tsync || !mhi_tsync->time_db) + return -EINVAL; + + mutex_lock(&mhi_tsync->mutex); + + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) + goto error_unlock; + mhi_cntrl->runtime_get(mhi_cntrl); + + MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + /* + * technically we could use GFP_KERNEL, but we want to avoid + * scheduling out multiple times + */ + tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC); + if (!tsync_node) { + ret = -ENOMEM; + goto error_no_mem; + } + + tsync_node->sequence = sequence; + tsync_node->cb_func = cb_func; + tsync_node->mhi_dev = mhi_dev; + + if (mhi_tsync->db_pending) { + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); + goto skip_tsync_db; + } + + mhi_tsync->int_sequence++; + if (mhi_tsync->int_sequence == 0xFFFFFFFF) + mhi_tsync->int_sequence = 0; + + /* disable link level low power modes */ + ret = mhi_tsync->lpm_disable(mhi_cntrl); + if (ret) { + MHI_ERR("LPM disable request failed for %s!\n", mhi_dev->name); + goto error_invalid_state; + } + + /* + * time critical code; the delay between these two steps should be + * as deterministic as possible. 
+ */ + preempt_disable(); + local_irq_disable(); + + mhi_tsync->local_time = mhi_tsync->time_get(mhi_cntrl); + mhi_write_reg(mhi_cntrl, mhi_tsync->time_db, 0, mhi_tsync->int_sequence); + + /* write must go through immediately */ + wmb(); + + local_irq_enable(); + preempt_enable(); + + mhi_tsync->lpm_enable(mhi_cntrl); + + MHI_VERB("time DB request with seq:0x%llx\n", mhi_tsync->int_sequence); + + mhi_tsync->db_pending = true; + init_completion(&mhi_tsync->completion); + +skip_tsync_db: + spin_lock(&mhi_tsync->lock); + list_add_tail(&tsync_node->node, &mhi_tsync->head); + spin_unlock(&mhi_tsync->lock); + + mutex_unlock(&mhi_tsync->mutex); + + return 0; + +error_invalid_state: + kfree(tsync_node); +error_no_mem: + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); +error_unlock: + mutex_unlock(&mhi_tsync->mutex); + return ret; +} +EXPORT_SYMBOL(mhi_get_remote_time); diff --git a/mhi/core/misc.h b/mhi/core/misc.h new file mode 100644 index 0000000..fb14894 --- /dev/null +++ b/mhi/core/misc.h @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + */ + +#ifndef _MHI_CORE_MISC_H_ +#define _MHI_CORE_MISC_H_ + +#include <linux/mhi_misc.h> + +#define MHI_FORCE_WAKE_DELAY_US (100) +#define MHI_IPC_LOG_PAGES (100) +#define MAX_RDDM_TABLE_SIZE (7) +#define MHI_REG_SIZE (SZ_4K) + +/* MHI misc capability registers */ +#define MISC_OFFSET (0x24) +#define MISC_CAP_MASK (0xFFFFFFFF) +#define MISC_CAP_SHIFT (0) + +#define CAP_CAPID_MASK (0xFF000000) +#define CAP_CAPID_SHIFT (24) +#define CAP_NEXT_CAP_MASK (0x00FFF000) +#define CAP_NEXT_CAP_SHIFT (12) + +/* MHI Bandwidth scaling offsets */ +#define BW_SCALE_CFG_OFFSET (0x04) +#define BW_SCALE_CFG_CHAN_DB_ID_MASK (0xFE000000) +#define BW_SCALE_CFG_CHAN_DB_ID_SHIFT (25) +#define BW_SCALE_CFG_ENABLED_MASK (0x01000000) +#define BW_SCALE_CFG_ENABLED_SHIFT (24) +#define BW_SCALE_CFG_ER_ID_MASK (0x00F80000) +#define BW_SCALE_CFG_ER_ID_SHIFT (19) + +#define BW_SCALE_CAP_ID (3) +#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) (((tre)->dword[0] >> 8) & 0xFF) +#define MHI_BW_SCALE_CHAN_DB 126 + +#define MHI_BW_SCALE_SETUP(er_index) (((MHI_BW_SCALE_CHAN_DB << \ + BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK) | \ + ((1 << BW_SCALE_CFG_ENABLED_SHIFT) & BW_SCALE_CFG_ENABLED_MASK) | \ + (((er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & BW_SCALE_CFG_ER_ID_MASK)) + +#define MHI_BW_SCALE_RESULT(status, seq) (((status) & 0xF) << 8 | \ + ((seq) & 0xFF)) +#define MHI_BW_SCALE_NACK 0xF + +/* subsystem failure reason cfg command */ +#define MHI_TRE_CMD_SFR_CFG_PTR(ptr) (ptr) +#define MHI_TRE_CMD_SFR_CFG_DWORD0(len) (len) +#define MHI_TRE_CMD_SFR_CFG_DWORD1 (MHI_CMD_SFR_CFG << 16) + +/* MHI Timesync offsets */ +#define TIMESYNC_CFG_OFFSET (0x04) +#define TIMESYNC_CFG_ENABLED_MASK (0x80000000) +#define TIMESYNC_CFG_ENABLED_SHIFT (31) +#define TIMESYNC_CFG_CHAN_DB_ID_MASK (0x0000FF00) +#define TIMESYNC_CFG_CHAN_DB_ID_SHIFT (8) +#define TIMESYNC_CFG_ER_ID_MASK (0x000000FF) +#define TIMESYNC_CFG_ER_ID_SHIFT (0) + +#define TIMESYNC_TIME_LOW_OFFSET (0x8) +#define TIMESYNC_TIME_HIGH_OFFSET (0xC) + +#define MHI_TIMESYNC_CHAN_DB (125) +#define TIMESYNC_CAP_ID (2) + +#define MHI_TIMESYNC_DB_SETUP(er_index) ((MHI_TIMESYNC_CHAN_DB << \ + TIMESYNC_CFG_CHAN_DB_ID_SHIFT) & TIMESYNC_CFG_CHAN_DB_ID_MASK | \ + (1 << TIMESYNC_CFG_ENABLED_SHIFT) & TIMESYNC_CFG_ENABLED_MASK | \ + ((er_index) << TIMESYNC_CFG_ER_ID_SHIFT) & TIMESYNC_CFG_ER_ID_MASK) + +#define MHI_VERB(fmt, ...) 
do { \ + struct mhi_private *mhi_priv = \ + dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \ + dev_dbg(dev, "[D][%s] " fmt, __func__, ##__VA_ARGS__); \ + if (mhi_priv->log_lvl <= MHI_MSG_LVL_VERBOSE) \ + ipc_log_string(mhi_priv->log_buf, "[D][%s] " fmt, __func__, \ + ##__VA_ARGS__); \ +} while (0) + +#define MHI_LOG(fmt, ...) do { \ + struct mhi_private *mhi_priv = \ + dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \ + dev_dbg(dev, "[I][%s] " fmt, __func__, ##__VA_ARGS__); \ + if (mhi_priv->log_lvl <= MHI_MSG_LVL_INFO) \ + ipc_log_string(mhi_priv->log_buf, "[I][%s] " fmt, __func__, \ + ##__VA_ARGS__); \ +} while (0) + +#define MHI_ERR(fmt, ...) do { \ + struct mhi_private *mhi_priv = \ + dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \ + dev_err(dev, "[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + if (mhi_priv->log_lvl <= MHI_MSG_LVL_ERROR) \ + ipc_log_string(mhi_priv->log_buf, "[E][%s] " fmt, __func__, \ + ##__VA_ARGS__); \ +} while (0) + +#define MHI_CRITICAL(fmt, ...) do { \ + struct mhi_private *mhi_priv = \ + dev_get_drvdata(&mhi_cntrl->mhi_dev->dev); \ + dev_crit(dev, "[C][%s] " fmt, __func__, ##__VA_ARGS__); \ + if (mhi_priv->log_lvl <= MHI_MSG_LVL_CRITICAL) \ + ipc_log_string(mhi_priv->log_buf, "[C][%s] " fmt, __func__, \ + ##__VA_ARGS__); \ +} while (0) + +/** + * struct rddm_table_info - rddm table info + * @base_address - Start offset of the file + * @actual_phys_address - phys addr offset of file + * @size - size of file + * @description - file description + * @file_name - name of file + */ +struct rddm_table_info { + u64 base_address; + u64 actual_phys_address; + u64 size; + char description[20]; + char file_name[20]; +}; + +/** + * struct rddm_header - rddm header + * @version - header ver + * @header_size - size of header + * @rddm_table_info - array of rddm table info + */ +struct rddm_header { + u32 version; + u32 header_size; + struct rddm_table_info table_info[MAX_RDDM_TABLE_SIZE]; +}; + +/** + * struct file_info - keeping track of file info while traversing the rddm + * table header + * @file_offset - current file offset + * @seg_idx - mhi buf seg array index + * @rem_seg_len - remaining length of the segment containing current file + */ +struct file_info { + u8 *file_offset; + u32 file_size; + u32 seg_idx; + u32 rem_seg_len; +}; + +/** + * struct mhi_private - For private variables of an MHI controller + */ +struct mhi_private { + struct list_head node; + struct mhi_controller *mhi_cntrl; + enum MHI_DEBUG_LEVEL log_lvl; + void *log_buf; + u32 saved_pm_state; + enum mhi_state saved_dev_state; + u32 m2_timeout_ms; + void *priv_data; + void __iomem *bw_scale_db; + int (*bw_scale)(struct mhi_controller *mhi_cntrl, + struct mhi_link_info *link_info); + phys_addr_t base_addr; + u32 bw_response; + struct mhi_sfr_info *sfr_info; + struct mhi_timesync *timesync; +}; + +/** + * struct mhi_bus - For MHI controller debug + */ +struct mhi_bus { + struct list_head controller_list; + struct mutex lock; +}; + +/** + * struct mhi_sfr_info - For receiving MHI subsystem failure reason + */ +struct mhi_sfr_info { + void *buf_addr; + dma_addr_t dma_addr; + size_t len; + char *str; + unsigned int ccs; + struct completion completion; +}; + +/** + * struct mhi_timesync - For enabling use of MHI time synchronization feature + */ +struct mhi_timesync { + u64 (*time_get)(struct mhi_controller *mhi_cntrl); + int (*lpm_disable)(struct mhi_controller *mhi_cntrl); + int (*lpm_enable)(struct mhi_controller *mhi_cntrl); + void __iomem *time_reg; + void __iomem *time_db; + u32 int_sequence; + u64 local_time; + u64 
remote_time; + bool db_pending; + struct completion completion; + spinlock_t lock; /* list protection */ + struct list_head head; + struct mutex mutex; +}; + +/** + * struct tsync_node - Stores requests when using the timesync doorbell method + */ +struct tsync_node { + struct list_head node; + u32 sequence; + u64 remote_time; + struct mhi_device *mhi_dev; + void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence, + u64 local_time, u64 remote_time); +}; + +#ifdef CONFIG_MHI_BUS_MISC +void mhi_misc_init(void); +void mhi_misc_exit(void); +int mhi_misc_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_misc_register_controller(struct mhi_controller *mhi_cntrl); +void mhi_misc_unregister_controller(struct mhi_controller *mhi_cntrl); +int mhi_process_misc_bw_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_misc_tsync_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +void mhi_misc_mission_mode(struct mhi_controller *mhi_cntrl); +void mhi_misc_dbs_pending(struct mhi_controller *mhi_cntrl); +void mhi_misc_disable(struct mhi_controller *mhi_cntrl); +void mhi_misc_cmd_configure(struct mhi_controller *mhi_cntrl, + unsigned int type, u64 *ptr, u32 *dword0, + u32 *dword1); +void mhi_misc_cmd_completion(struct mhi_controller *mhi_cntrl, + unsigned int type, unsigned int ccs); +#else +static inline void mhi_misc_init(void) +{ +} + +static inline void mhi_misc_exit(void) +{ +} + +static inline int mhi_misc_init_mmio(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline int mhi_misc_register_controller(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline void mhi_misc_unregister_controller(struct mhi_controller + *mhi_cntrl) +{ +} + +static inline int mhi_process_misc_bw_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota) +{ + return 0; +} + +static inline int mhi_process_misc_tsync_ev_ring + (struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota) +{ + return 0; +} + +static inline void mhi_misc_mission_mode(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_special_dbs_pending(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_misc_disable(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_misc_cmd_configure(struct mhi_controller *mhi_cntrl, + unsigned int type, u64 *ptr, + u32 *dword0, u32 *dword1) +{ +} + +static inline void mhi_misc_cmd_completion(struct mhi_controller *mhi_cntrl, + unsigned int type, unsigned int ccs) +{ +} +#endif + +#endif /* _MHI_CORE_MISC_H_ */ diff --git a/mhi/core/pm.c b/mhi/core/pm.c new file mode 100644 index 0000000..5676f73 --- /dev/null +++ b/mhi/core/pm.c @@ -0,0 +1,1255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include "internal.h" + +/* + * Not all MHI state transitions are synchronous. Transitions like Linkdown, + * SYS_ERR, and shutdown can happen anytime asynchronously. This function will + * transition to a new state only if we're allowed to. + * + * Priority increases as we go down. 
For instance, from any state in L0, the + * transition can be made to states in L1, L2 and L3. A notable exception to + * this rule is state DISABLE. From DISABLE state we can only transition to + * POR state. Also, while in L2 state, user cannot jump back to previous + * L1 or L0 states. + * + * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 <--> M0 + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT + * SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> DISABLE + */ +static struct mhi_pm_transitions const dev_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE + }, +}; + +enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) + return cur_state; + + if (unlikely(dev_state_transitions[index].from_state != cur_state)) + return cur_state; + + if (unlikely(!(dev_state_transitions[index].to_states & state))) + return cur_state; + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + } else { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, + MHICTRL_MHISTATE_SHIFT, state); + } +} + +/* NOP for backward compatibility, host allowed to ring DB in M2 state */ +static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl) +{ +} + +static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->wake_get(mhi_cntrl, false); + mhi_cntrl->wake_put(mhi_cntrl, true); +} + +/* Add event ring elements and ring er db */ +static void mhi_setup_event_rings(struct mhi_controller *mhi_cntrl, bool 
add_el) +{ + struct mhi_event *mhi_event; + int i; + bool skip_er_setup; + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + /* skip HW event ring setup in ready state */ + if (mhi_cntrl->dev_state == MHI_STATE_READY) + skip_er_setup = mhi_event->hw_ring; + else + skip_er_setup = !mhi_event->hw_ring; + + /* if no er element to add, ring all er dbs */ + if (add_el && skip_er_setup) + continue; + + if (add_el) { + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = + ring->iommu_base + ring->len - ring->el_size; + /* Update all cores */ + smp_wmb(); + } + + /* Ring the event ring db */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } +} + +/* Handle device ready state transition */ +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 interval_us = 25000; /* poll register field every 25 milliseconds */ + int ret; + + /* Check if device entered error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Device link is not accessible\n"); + return -EIO; + } + + /* Wait for RESET to be cleared and READY bit to be set by the device */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, + interval_us); + if (ret) { + MHI_ERR("Device failed to clear MHI Reset\n"); + return ret; + } + + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1, + interval_us); + if (ret) { + MHI_ERR("Device failed to enter MHI Ready\n"); + return ret; + } + + MHI_VERB("Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + MHI_ERR("Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + MHI_ERR("Device registers not accessible\n"); + goto error_mmio; + } + + /* Configure MMIO registers */ + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + MHI_ERR("Error configuring MMIO registers\n"); + goto error_mmio; + } + + /* add SW event ring elements and ring SW event ring dbs */ + mhi_setup_event_rings(mhi_cntrl, true); + + /* Set MHI to M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + MHI_ERR("Unable to transition to M0 state\n"); + return -EIO; + } + mhi_cntrl->M0++; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + + /* Ring all event rings and CMD ring only if we're in mission mode */ + if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + struct mhi_cmd 
*mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + mhi_setup_event_rings(mhi_cntrl, false); + + /* Only ring primary cmd ring if ring is not empty */ + spin_lock_irq(&mhi_cmd->lock); + if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + spin_unlock_irq(&mhi_cmd->lock); + + /* ring misc doorbells for certain controllers */ + mhi_misc_dbs_pending(mhi_cntrl); + } + + /* Ring channel DB registers */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + if (mhi_chan->db_cfg.reset_req) { + write_lock_irq(&mhi_chan->lock); + mhi_chan->db_cfg.db_mode = true; + write_unlock_irq(&mhi_chan->lock); + } + + read_lock_irq(&mhi_chan->lock); + + /* Only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + + return 0; +} + +/* + * After receiving the MHI state change event from the device indicating the + * transition to M1 state, the host can transition the device to M2 state + * for keeping it in low power state. + */ +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (state == MHI_PM_M2) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + mhi_cntrl->dev_state = MHI_STATE_M2; + + write_unlock_irq(&mhi_cntrl->pm_lock); + + mhi_cntrl->M2++; + wake_up_all(&mhi_cntrl->state_event); + + /* If there are any pending resources, exit M2 immediately */ + if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || + atomic_read(&mhi_cntrl->dev_wake))) { + MHI_VERB( + "Exiting M2, pending_pkts: %d dev_wake: %d\n", + atomic_read(&mhi_cntrl->pending_pkts), + atomic_read(&mhi_cntrl->dev_wake)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); + } else { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); + } + } else { + write_unlock_irq(&mhi_cntrl->pm_lock); + } +} + +/* MHI M3 completion handler */ +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + MHI_ERR("Unable to transition to M3 state\n"); + return -EIO; + } + + mhi_cntrl->M3++; + wake_up_all(&mhi_cntrl->state_event); + + return 0; +} + +/* Handle device Mission Mode transition */ +static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee; + int ret; + + MHI_VERB("Processing Mission Mode transition\n"); + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + ee = mhi_get_exec_env(mhi_cntrl); + + if (!MHI_IN_MISSION_MODE(ee)) { + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + return -EIO; + } + mhi_cntrl->ee = ee; + write_unlock_irq(&mhi_cntrl->pm_lock); + + 
wake_up_all(&mhi_cntrl->state_event); + + device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee, + mhi_destroy_device); + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); + + /* Force MHI to be in M0 state before continuing */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + ret = -EIO; + goto error_mission_mode; + } + + /* Add elements to all HW event rings and ring HW event ring dbs */ + mhi_setup_event_rings(mhi_cntrl, true); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_misc_mission_mode(mhi_cntrl); + mhi_process_sleeping_events(mhi_cntrl); + + /* + * The MHI devices are only created when the client device switches its + * Execution Environment (EE) to either SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + + read_lock_bh(&mhi_cntrl->pm_lock); + +error_mission_mode: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +/* Handle shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + MHI_VERB("Processing disable transition with PM state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + mutex_lock(&mhi_cntrl->pm_mutex); + + /* Trigger MHI RESET so that the device will not access host memory */ + if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + MHI_VERB("Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, + 25000); + if (ret) + MHI_ERR("Device failed to clear MHI Reset\n"); + + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + MHI_VERB( + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + if (mhi_event->priority == MHI_ER_PRIORITY_HI_SLEEP) + cancel_work_sync(&mhi_event->work); + else + tasklet_kill(&mhi_event->task); + } + + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + + mhi_misc_disable(mhi_cntrl); + + MHI_VERB("Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + + MHI_VERB("Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + MHI_VERB("Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; 
i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* Move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + MHI_ERR("Error moving from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + + MHI_VERB("Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Handle system error transitions */ +static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state, prev_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + MHI_VERB("Transitioning from PM state: %s to: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + + /* We must notify MHI control driver so it can clean up first */ + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_SYS_ERR_PROCESS) { + MHI_ERR("Failed to transition from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + goto exit_sys_error_transition; + } + + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + /* Wake up threads waiting for state transition */ + wake_up_all(&mhi_cntrl->state_event); + + /* Trigger MHI RESET so that the device will not access host memory */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + MHI_VERB("Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, + &in_reset) || + !in_reset, timeout); + if (!ret || in_reset) { + MHI_ERR("Device failed to exit MHI Reset state\n"); + goto exit_sys_error_transition; + } + + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + MHI_VERB( + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + if (mhi_event->priority == MHI_ER_PRIORITY_HI_SLEEP) + cancel_work_sync(&mhi_event->work); + else + tasklet_kill(&mhi_event->task); + } + + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + + mhi_misc_disable(mhi_cntrl); + + MHI_VERB("Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + + 
MHI_VERB("Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + MHI_VERB("Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + mhi_ready_state_transition(mhi_cntrl); + +exit_sys_error_transition: + MHI_VERB("Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Queue a new work item and schedule work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if (!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); + + return 0; +} + +/* SYS_ERR worker */ +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + /* skip if controller supports RDDM */ + if (mhi_cntrl->rddm_image) { + MHI_VERB("Controller supports RDDM, skip SYS_ERROR\n"); + return; + } + + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR); +} + +/* Device State Transition worker */ +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + LIST_HEAD(head); + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + st_worker); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + spin_lock_irq(&mhi_cntrl->transition_lock); + list_splice_tail_init(&mhi_cntrl->transition_list, &head); + spin_unlock_irq(&mhi_cntrl->transition_lock); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + MHI_VERB("Handling state transition: %s\n", + TO_DEV_STATE_TRANS_STR(itr->state)); + + switch (itr->state) { + case DEV_ST_TRANSITION_PBL: + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (MHI_IN_PBL(mhi_cntrl->ee)) + mhi_fw_load_handler(mhi_cntrl); + break; + case DEV_ST_TRANSITION_SBL: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_SBL; + write_unlock_irq(&mhi_cntrl->pm_lock); + + mhi_process_sleeping_events(mhi_cntrl); + + /* + * The MHI devices are only created when the client + * device switches its Execution Environment (EE) to + * either SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + if 
(mhi_cntrl->fbc_download) + mhi_download_amss_image(mhi_cntrl); + break; + case DEV_ST_TRANSITION_MISSION_MODE: + mhi_pm_mission_mode_transition(mhi_cntrl); + break; + case DEV_ST_TRANSITION_READY: + ret = mhi_ready_state_transition(mhi_cntrl); + if (ret) { + mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; + wake_up_all(&mhi_cntrl->state_event); + } + break; + case DEV_ST_TRANSITION_SYS_ERR: + mhi_pm_sys_error_transition(mhi_cntrl); + break; + case DEV_ST_TRANSITION_DISABLE: + mhi_pm_disable_transition(mhi_cntrl); + break; + default: + break; + } + kfree(itr); + } +} + +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + struct mhi_chan *itr, *tmp; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state new_state; + int ret; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* Return busy if there are any pending resources */ + if (atomic_read(&mhi_cntrl->dev_wake) || + atomic_read(&mhi_cntrl->pending_pkts)) + return -EBUSY; + + /* Take MHI out of M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR( + "Could not enter M0/M1 state"); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + if (atomic_read(&mhi_cntrl->dev_wake) || + atomic_read(&mhi_cntrl->pending_pkts)) { + write_unlock_irq(&mhi_cntrl->pm_lock); + return -EBUSY; + } + + MHI_LOG("Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR( + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_LOG("Wait for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret) { + mhi_debug_reg_dump(mhi_cntrl); + panic("Timedout waiting for M3 ACK"); + return -EIO; + } else if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR( + "Did not enter M3 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Notify clients about entering LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_suspend); + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + struct mhi_chan *itr, *tmp; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state cur_state; + int ret; + + MHI_LOG("Entered with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) 
+ return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + if (mhi_cntrl->pm_state != MHI_PM_M3) + panic("mhi_pm_state != M3"); + + /* Notify clients about exiting LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_LOG( + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M0 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M2 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR( + "Did not enter M0 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_resume); + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->pm_state == MHI_PM_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +/* Assert device wake db */ +static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + + /* + * If force flag is set, then increment the wake count value and + * ring wake db + */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* + * If resources are already requested, then just increment + * the wake count value and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* De-assert device wake db */ +static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, + bool override) +{ + unsigned long flags; + + /* + * Only continue if there is a single resource, else just decrement + * and return + */ + if 
(likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + enum mhi_state state; + enum mhi_ee_type current_ee; + enum dev_st_transition next_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 val; + int ret; + + MHI_LOG("Requested to power ON\n"); + + /* Supply default wake routines if not provided by controller driver */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || + !mhi_cntrl->wake_toggle) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? + mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake; + } + + mutex_lock(&mhi_cntrl->pm_mutex); + mhi_cntrl->pm_state = MHI_PM_DISABLE; + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) + goto error_setup_irq; + + /* Setup BHI INTVEC */ + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + mhi_cntrl->pm_state = MHI_PM_POR; + mhi_cntrl->ee = MHI_EE_MAX; + current_ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* Confirm that the device is in valid exec env */ + if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { + MHI_ERR("%s is not a valid EE for power on\n", + TO_MHI_EXEC_STR(current_ee)); + ret = -EIO; + goto error_async_power_up; + } + + state = mhi_get_mhi_state(mhi_cntrl); + MHI_VERB("Attempt power on with EE: %s and state: %s\n", + TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state)); + + if (state == MHI_STATE_SYS_ERR) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, + &val) || + !val, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret) { + ret = -EIO; + MHI_LOG("Failed to reset MHI due to syserr state\n"); + goto error_async_power_up; + } + + /* + * device cleares INTVEC as part of RESET processing, + * re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + /* Transition to next state */ + next_state = MHI_IN_PBL(current_ee) ? + DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; + + mhi_queue_state_transition(mhi_cntrl, next_state); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Power on setup success\n"); + + return 0; + +error_async_power_up: + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum mhi_pm_state cur_state, transition_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + /* If it's not a graceful shutdown, force MHI to linkdown state */ + transition_state = (graceful) ? 
MHI_PM_SHUTDOWN_PROCESS : + MHI_PM_LD_ERR_FATAL_DETECT; + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state != transition_state) { + MHI_ERR("Failed to move to state: %s from: %s\n", + to_mhi_pm_state_str(transition_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + /* Force link down or error fatal detected state */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + } + + /* mark device inactive to avoid any further host processing */ + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + wake_up_all(&mhi_cntrl->state_event); + + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); + + /* Wait for shutdown to complete */ + flush_work(&mhi_cntrl->st_worker); + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + if (mhi_cntrl->fbc_image) + mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image); +} +EXPORT_SYMBOL_GPL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_MISSION_MODE(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; + if (ret && !mhi_cntrl->rddm_image) + mhi_power_down(mhi_cntrl, false); + + return ret; +} +EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Check if device is already in RDDM */ + if (mhi_cntrl->ee == MHI_EE_RDDM) + return 0; + + MHI_VERB("Triggering SYS_ERR to force RDDM state\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + /* Wait for RDDM event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = ret ? 
0 : -EIO; + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_dev->dev_wake++; + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + mhi_dev->dev_wake++; + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_dev->dev_wake--; + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_put); diff --git a/qmi/Makefile b/qmi/Makefile new file mode 100644 index 0000000..0007965 --- /dev/null +++ b/qmi/Makefile @@ -0,0 +1,3 @@ +ccflags-y += -I$(WLAN_PLATFORM_ROOT)/inc +obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o +qmi_helpers-y += qmi_encdec.o qmi_interface.o diff --git a/qmi/qmi_encdec.c b/qmi/qmi_encdec.c new file mode 100644 index 0000000..3aaab71 --- /dev/null +++ b/qmi/qmi_encdec.c @@ -0,0 +1,816 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/soc/qcom/qmi.h> + +#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \ + *p_dst++ = type; \ + *p_dst++ = ((u8)((length) & 0xFF)); \ + *p_dst++ = ((u8)(((length) >> 8) & 0xFF)); \ +} while (0) + +#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \ + *p_type = (u8)*p_src++; \ + *p_length = (u8)*p_src++; \ + *p_length |= ((u8)*p_src) << 8; \ +} while (0) + +#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \ +do { \ + memcpy(p_dst, p_src, size); \ + p_dst = (u8 *)p_dst + size; \ + p_src = (u8 *)p_src + size; \ +} while (0) + +#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \ +do { \ + memcpy(p_dst, p_src, size); \ + p_dst = (u8 *)p_dst + size; \ + p_src = (u8 *)p_src + size; \ +} while (0) + +#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \ + encoded_bytes, tlv_len, encode_tlv, rc) \ +do { \ + buf_dst = (u8 *)buf_dst + rc; \ + encoded_bytes += rc; \ + tlv_len += rc; \ + temp_si = temp_si + 1; \ + encode_tlv = 1; \ +} while (0) + +#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \ +do { \ + buf_src = (u8 *)buf_src + rc; \ + decoded_bytes += rc; \ +} while (0) + +#define TLV_LEN_SIZE sizeof(u16) +#define TLV_TYPE_SIZE sizeof(u8) +#define OPTIONAL_TLV_TYPE_START 0x10 + +static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf, + const void *in_c_struct, u32 out_buf_len, + int enc_level); + +static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct, + const void *in_buf, u32 in_buf_len, int dec_level); + +/** + * skip_to_next_elem() - Skip to next element in the structure to be encoded + * @ei_array: Struct info describing the element to be skipped. + * @level: Depth level of encoding/decoding to identify nested structures. 
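+ *
+ * A note on behaviour (derived from the loop below): at the top level this
+ * skips every consecutive entry that shares the same tlv_type, so an
+ * optional flag and the member(s) it guards are stepped over as one unit;
+ * inside nested structures (@level > 1) only a single entry is skipped.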
+ * + * This function is used while encoding optional elements. If the flag + * corresponding to an optional element is not set, then encoding the + * optional element can be skipped. This function can be used to perform + * that operation. + * + * Return: struct info of the next element that can be encoded. + */ +static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array, + int level) +{ + struct qmi_elem_info *temp_ei = ei_array; + u8 tlv_type; + + if (level > 1) { + temp_ei = temp_ei + 1; + } else { + do { + tlv_type = temp_ei->tlv_type; + temp_ei = temp_ei + 1; + } while (tlv_type == temp_ei->tlv_type); + } + + return temp_ei; +} + +/** + * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message + * @ei_array: Struct info array describing the structure. + * @level: Level to identify the depth of the nested structures. + * + * Return: Expected minimum length of the QMI message or 0 on error. + */ +static int qmi_calc_min_msg_len(struct qmi_elem_info *ei_array, + int level) +{ + int min_msg_len = 0; + struct qmi_elem_info *temp_ei = ei_array; + + if (!ei_array) + return min_msg_len; + + while (temp_ei->data_type != QMI_EOTI) { + /* Optional elements do not count in minimum length */ + if (temp_ei->data_type == QMI_OPT_FLAG) { + temp_ei = skip_to_next_elem(temp_ei, level); + continue; + } + + if (temp_ei->data_type == QMI_DATA_LEN) { + min_msg_len += (temp_ei->elem_size == sizeof(u8) ? + sizeof(u8) : sizeof(u16)); + temp_ei++; + continue; + } else if (temp_ei->data_type == QMI_STRUCT) { + min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array, + (level + 1)); + temp_ei++; + } else if (temp_ei->data_type == QMI_STRING) { + if (level > 1) + min_msg_len += temp_ei->elem_len <= U8_MAX ? + sizeof(u8) : sizeof(u16); + min_msg_len += temp_ei->elem_len * temp_ei->elem_size; + temp_ei++; + } else { + min_msg_len += (temp_ei->elem_len * temp_ei->elem_size); + temp_ei++; + } + + /* + * Type & Length info. not prepended for elements in the + * nested structure. + */ + if (level == 1) + min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + } + + return min_msg_len; +} + +/** + * qmi_encode_basic_elem() - Encodes elements of basic/primary data type + * @buf_dst: Buffer to store the encoded information. + * @buf_src: Buffer containing the elements to be encoded. + * @elem_len: Number of elements, in the buf_src, to be encoded. + * @elem_size: Size of a single instance of the element to be encoded. + * + * This function encodes the "elem_len" number of data elements, each of + * size "elem_size" bytes from the source buffer "buf_src" and stores the + * encoded information in the destination buffer "buf_dst". The elements are + * of primary data type which include u8 - u64 or similar. This + * function returns the number of bytes of encoded information. + * + * Return: The number of bytes of encoded information. + */ +static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src, + u32 elem_len, u32 elem_size) +{ + u32 i, rc = 0; + + for (i = 0; i < elem_len; i++) { + QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size); + rc += elem_size; + } + + return rc; +} + +/** + * qmi_encode_struct_elem() - Encodes elements of struct data type + * @ei_array: Struct info array descibing the struct element. + * @buf_dst: Buffer to store the encoded information. + * @buf_src: Buffer containing the elements to be encoded. + * @elem_len: Number of elements, in the buf_src, to be encoded. + * @out_buf_len: Available space in the encode buffer. 
+ * @enc_level: Depth of the nested structure from the main structure. + * + * This function encodes the "elem_len" number of struct elements, each of + * size "ei_array->elem_size" bytes from the source buffer "buf_src" and + * stores the encoded information in the destination buffer "buf_dst". The + * elements are of struct data type which includes any C structure. This + * function returns the number of bytes of encoded information. + * + * Return: The number of bytes of encoded information on success or negative + * errno on error. + */ +static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 elem_len, u32 out_buf_len, + int enc_level) +{ + int i, rc, encoded_bytes = 0; + struct qmi_elem_info *temp_ei = ei_array; + + for (i = 0; i < elem_len; i++) { + rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src, + out_buf_len - encoded_bytes, enc_level); + if (rc < 0) { + pr_err("%s: STRUCT Encode failure\n", __func__); + return rc; + } + buf_dst = buf_dst + rc; + buf_src = buf_src + temp_ei->elem_size; + encoded_bytes += rc; + } + + return encoded_bytes; +} + +/** + * qmi_encode_string_elem() - Encodes elements of string data type + * @ei_array: Struct info array descibing the string element. + * @buf_dst: Buffer to store the encoded information. + * @buf_src: Buffer containing the elements to be encoded. + * @out_buf_len: Available space in the encode buffer. + * @enc_level: Depth of the string element from the main structure. + * + * This function encodes a string element of maximum length "ei_array->elem_len" + * bytes from the source buffer "buf_src" and stores the encoded information in + * the destination buffer "buf_dst". This function returns the number of bytes + * of encoded information. + * + * Return: The number of bytes of encoded information on success or negative + * errno on error. + */ +static int qmi_encode_string_elem(struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 out_buf_len, int enc_level) +{ + int rc; + int encoded_bytes = 0; + struct qmi_elem_info *temp_ei = ei_array; + u32 string_len = 0; + u32 string_len_sz = 0; + + string_len = strlen(buf_src); + string_len_sz = temp_ei->elem_len <= U8_MAX ? + sizeof(u8) : sizeof(u16); + if (string_len > temp_ei->elem_len) { + pr_err("%s: String to be encoded is longer - %d > %d\n", + __func__, string_len, temp_ei->elem_len); + return -EINVAL; + } + + if (enc_level == 1) { + if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE > + out_buf_len) { + pr_err("%s: Output len %d > Out Buf len %d\n", + __func__, string_len, out_buf_len); + return -ETOOSMALL; + } + } else { + if (string_len + string_len_sz > out_buf_len) { + pr_err("%s: Output len %d > Out Buf len %d\n", + __func__, string_len, out_buf_len); + return -ETOOSMALL; + } + rc = qmi_encode_basic_elem(buf_dst, &string_len, + 1, string_len_sz); + encoded_bytes += rc; + } + + rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src, + string_len, temp_ei->elem_size); + encoded_bytes += rc; + + return encoded_bytes; +} + +/** + * qmi_encode() - Core Encode Function + * @ei_array: Struct info array describing the structure to be encoded. + * @out_buf: Buffer to hold the encoded QMI message. + * @in_c_struct: Pointer to the C structure to be encoded. + * @out_buf_len: Available space in the encode buffer. + * @enc_level: Encode level to indicate the depth of the nested structure, + * within the main structure, being encoded. 
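+ *
+ * At @enc_level 1 each element is written as a TLV: a one byte type, a
+ * two byte little-endian length and then the encoded value. As a rough
+ * illustration, an optional u8 member set to 5 under tlv_type 0x10 would
+ * appear on the wire as:
+ *
+ *	0x10 0x01 0x00 0x05   (type, 16-bit LE length, value)
+ *
+ * Members of nested structures (@enc_level > 1) are written back to back
+ * without the type/length prefix.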
+ * + * Return: The number of bytes of encoded information on success or negative + * errno on error. + */ +static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf, + const void *in_c_struct, u32 out_buf_len, + int enc_level) +{ + struct qmi_elem_info *temp_ei = ei_array; + u8 opt_flag_value = 0; + u32 data_len_value = 0, data_len_sz; + u8 *buf_dst = (u8 *)out_buf; + u8 *tlv_pointer; + u32 tlv_len; + u8 tlv_type; + u32 encoded_bytes = 0; + const void *buf_src; + int encode_tlv = 0; + int rc; + + if (!ei_array) + return 0; + + tlv_pointer = buf_dst; + tlv_len = 0; + if (enc_level == 1) + buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE); + + while (temp_ei->data_type != QMI_EOTI) { + buf_src = in_c_struct + temp_ei->offset; + tlv_type = temp_ei->tlv_type; + + if (temp_ei->array_type == NO_ARRAY) { + data_len_value = 1; + } else if (temp_ei->array_type == STATIC_ARRAY) { + data_len_value = temp_ei->elem_len; + } else if (data_len_value <= 0 || + temp_ei->elem_len < data_len_value) { + pr_err("%s: Invalid data length\n", __func__); + return -EINVAL; + } + + switch (temp_ei->data_type) { + case QMI_OPT_FLAG: + rc = qmi_encode_basic_elem(&opt_flag_value, buf_src, + 1, sizeof(u8)); + if (opt_flag_value) + temp_ei = temp_ei + 1; + else + temp_ei = skip_to_next_elem(temp_ei, enc_level); + break; + + case QMI_DATA_LEN: + memcpy(&data_len_value, buf_src, temp_ei->elem_size); + data_len_sz = temp_ei->elem_size == sizeof(u8) ? + sizeof(u8) : sizeof(u16); + /* Check to avoid out of range buffer access */ + if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE + + TLV_TYPE_SIZE) > out_buf_len) { + pr_err("%s: Too Small Buffer @DATA_LEN\n", + __func__); + return -ETOOSMALL; + } + rc = qmi_encode_basic_elem(buf_dst, &data_len_value, + 1, data_len_sz); + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + if (!data_len_value) + temp_ei = skip_to_next_elem(temp_ei, enc_level); + else + encode_tlv = 0; + break; + + case QMI_UNSIGNED_1_BYTE: + case QMI_UNSIGNED_2_BYTE: + case QMI_UNSIGNED_4_BYTE: + case QMI_UNSIGNED_8_BYTE: + case QMI_SIGNED_2_BYTE_ENUM: + case QMI_SIGNED_4_BYTE_ENUM: + /* Check to avoid out of range buffer access */ + if (((data_len_value * temp_ei->elem_size) + + encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) > + out_buf_len) { + pr_err("%s: Too Small Buffer @data_type:%d\n", + __func__, temp_ei->data_type); + return -ETOOSMALL; + } + rc = qmi_encode_basic_elem(buf_dst, buf_src, + data_len_value, + temp_ei->elem_size); + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + break; + + case QMI_STRUCT: + rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src, + data_len_value, + out_buf_len - encoded_bytes, + enc_level + 1); + if (rc < 0) + return rc; + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + break; + + case QMI_STRING: + rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src, + out_buf_len - encoded_bytes, + enc_level); + if (rc < 0) + return rc; + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + break; + default: + pr_err("%s: Unrecognized data type\n", __func__); + return -EINVAL; + } + + if (encode_tlv && enc_level == 1) { + QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer); + encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + tlv_pointer = buf_dst; + tlv_len = 0; + buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE; + encode_tlv = 0; + } + } + + return encoded_bytes; +} + +/** + * qmi_decode_basic_elem() - Decodes elements 
of basic/primary data type + * @buf_dst: Buffer to store the decoded element. + * @buf_src: Buffer containing the elements in QMI wire format. + * @elem_len: Number of elements to be decoded. + * @elem_size: Size of a single instance of the element to be decoded. + * + * This function decodes the "elem_len" number of elements in QMI wire format, + * each of size "elem_size" bytes from the source buffer "buf_src" and stores + * the decoded elements in the destination buffer "buf_dst". The elements are + * of primary data type which include u8 - u64 or similar. This + * function returns the number of bytes of decoded information. + * + * Return: The total size of the decoded data elements, in bytes. + */ +static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src, + u32 elem_len, u32 elem_size) +{ + u32 i, rc = 0; + + for (i = 0; i < elem_len; i++) { + QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size); + rc += elem_size; + } + + return rc; +} + +/** + * qmi_decode_struct_elem() - Decodes elements of struct data type + * @ei_array: Struct info array descibing the struct element. + * @buf_dst: Buffer to store the decoded element. + * @buf_src: Buffer containing the elements in QMI wire format. + * @elem_len: Number of elements to be decoded. + * @tlv_len: Total size of the encoded inforation corresponding to + * this struct element. + * @dec_level: Depth of the nested structure from the main structure. + * + * This function decodes the "elem_len" number of elements in QMI wire format, + * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src" + * and stores the decoded elements in the destination buffer "buf_dst". The + * elements are of struct data type which includes any C structure. This + * function returns the number of bytes of decoded information. + * + * Return: The total size of the decoded data elements on success, negative + * errno on error. + */ +static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 elem_len, u32 tlv_len, + int dec_level) +{ + int i, rc, decoded_bytes = 0; + struct qmi_elem_info *temp_ei = ei_array; + + for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) { + rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src, + tlv_len - decoded_bytes, dec_level); + if (rc < 0) + return rc; + buf_src = buf_src + rc; + buf_dst = buf_dst + temp_ei->elem_size; + decoded_bytes += rc; + } + + if ((dec_level <= 2 && decoded_bytes != tlv_len) || + (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) { + pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n", + __func__, dec_level, decoded_bytes, tlv_len, + i, elem_len); + return -EFAULT; + } + + return decoded_bytes; +} + +/** + * qmi_decode_string_elem() - Decodes elements of string data type + * @ei_array: Struct info array descibing the string element. + * @buf_dst: Buffer to store the decoded element. + * @buf_src: Buffer containing the elements in QMI wire format. + * @tlv_len: Total size of the encoded inforation corresponding to + * this string element. + * @dec_level: Depth of the string element from the main structure. + * + * This function decodes the string element of maximum length + * "ei_array->elem_len" from the source buffer "buf_src" and puts it into + * the destination buffer "buf_dst". This function returns number of bytes + * decoded from the input buffer. + * + * Return: The total size of the decoded data elements on success, negative + * errno on error. 
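+ *
+ * Note that a terminating NUL is written at buf_dst[string_len], so the
+ * destination buffer is expected to provide room for elem_len + 1 bytes
+ * (QMI generated structures typically size their string members this way).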
+ */ +static int qmi_decode_string_elem(struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 tlv_len, int dec_level) +{ + int rc; + int decoded_bytes = 0; + u32 string_len = 0; + u32 string_len_sz = 0; + struct qmi_elem_info *temp_ei = ei_array; + + if (dec_level == 1) { + string_len = tlv_len; + } else { + string_len_sz = temp_ei->elem_len <= U8_MAX ? + sizeof(u8) : sizeof(u16); + rc = qmi_decode_basic_elem(&string_len, buf_src, + 1, string_len_sz); + decoded_bytes += rc; + } + + if (string_len > temp_ei->elem_len) { + pr_err("%s: String len %d > Max Len %d\n", + __func__, string_len, temp_ei->elem_len); + return -ETOOSMALL; + } else if (string_len > tlv_len) { + pr_err("%s: String len %d > Input Buffer Len %d\n", + __func__, string_len, tlv_len); + return -EFAULT; + } + + rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes, + string_len, temp_ei->elem_size); + *((char *)buf_dst + string_len) = '\0'; + decoded_bytes += rc; + + return decoded_bytes; +} + +/** + * find_ei() - Find element info corresponding to TLV Type + * @ei_array: Struct info array of the message being decoded. + * @type: TLV Type of the element being searched. + * + * Every element that got encoded in the QMI message will have a type + * information associated with it. While decoding the QMI message, + * this function is used to find the struct info regarding the element + * that corresponds to the type being decoded. + * + * Return: Pointer to struct info, if found + */ +static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array, + u32 type) +{ + struct qmi_elem_info *temp_ei = ei_array; + + while (temp_ei->data_type != QMI_EOTI) { + if (temp_ei->tlv_type == (u8)type) + return temp_ei; + temp_ei = temp_ei + 1; + } + + return NULL; +} + +/** + * qmi_decode() - Core Decode Function + * @ei_array: Struct info array describing the structure to be decoded. + * @out_c_struct: Buffer to hold the decoded C struct + * @in_buf: Buffer containing the QMI message to be decoded + * @in_buf_len: Length of the QMI message to be decoded + * @dec_level: Decode level to indicate the depth of the nested structure, + * within the main structure, being decoded + * + * Return: The number of bytes of decoded information on success, negative + * errno on error. + */ +static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct, + const void *in_buf, u32 in_buf_len, + int dec_level) +{ + struct qmi_elem_info *temp_ei = ei_array; + u8 opt_flag_value = 1; + u32 data_len_value = 0, data_len_sz = 0; + u8 *buf_dst = out_c_struct; + const u8 *tlv_pointer; + u32 tlv_len = 0; + u32 tlv_type; + u32 decoded_bytes = 0; + const void *buf_src = in_buf; + int rc; + + while (decoded_bytes < in_buf_len) { + if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI) + return decoded_bytes; + + if (dec_level == 1) { + tlv_pointer = buf_src; + QMI_ENCDEC_DECODE_TLV(&tlv_type, + &tlv_len, tlv_pointer); + buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + temp_ei = find_ei(ei_array, tlv_type); + if (!temp_ei && tlv_type < OPTIONAL_TLV_TYPE_START) { + pr_err("%s: Inval element info\n", __func__); + return -EINVAL; + } else if (!temp_ei) { + UPDATE_DECODE_VARIABLES(buf_src, + decoded_bytes, tlv_len); + continue; + } + } else { + /* + * No length information for elements in nested + * structures. So use remaining decodable buffer space. 
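+ * The nested decode then stops either when this space is consumed or
+ * when the member list reaches QMI_EOTI (checked at the top of the loop).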
+ */ + tlv_len = in_buf_len - decoded_bytes; + } + + buf_dst = out_c_struct + temp_ei->offset; + if (temp_ei->data_type == QMI_OPT_FLAG) { + memcpy(buf_dst, &opt_flag_value, sizeof(u8)); + temp_ei = temp_ei + 1; + buf_dst = out_c_struct + temp_ei->offset; + } + + if (temp_ei->data_type == QMI_DATA_LEN) { + data_len_sz = temp_ei->elem_size == sizeof(u8) ? + sizeof(u8) : sizeof(u16); + rc = qmi_decode_basic_elem(&data_len_value, buf_src, + 1, data_len_sz); + memcpy(buf_dst, &data_len_value, sizeof(u32)); + temp_ei = temp_ei + 1; + buf_dst = out_c_struct + temp_ei->offset; + tlv_len -= data_len_sz; + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + } + + if (temp_ei->array_type == NO_ARRAY) { + data_len_value = 1; + } else if (temp_ei->array_type == STATIC_ARRAY) { + data_len_value = temp_ei->elem_len; + } else if (data_len_value > temp_ei->elem_len) { + pr_err("%s: Data len %d > max spec %d\n", + __func__, data_len_value, temp_ei->elem_len); + return -ETOOSMALL; + } + + switch (temp_ei->data_type) { + case QMI_UNSIGNED_1_BYTE: + case QMI_UNSIGNED_2_BYTE: + case QMI_UNSIGNED_4_BYTE: + case QMI_UNSIGNED_8_BYTE: + case QMI_SIGNED_2_BYTE_ENUM: + case QMI_SIGNED_4_BYTE_ENUM: + rc = qmi_decode_basic_elem(buf_dst, buf_src, + data_len_value, + temp_ei->elem_size); + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + break; + + case QMI_STRUCT: + rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src, + data_len_value, tlv_len, + dec_level + 1); + if (rc < 0) + return rc; + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + break; + + case QMI_STRING: + rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src, + tlv_len, dec_level); + if (rc < 0) + return rc; + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + break; + + default: + pr_err("%s: Unrecognized data type\n", __func__); + return -EINVAL; + } + temp_ei = temp_ei + 1; + } + + return decoded_bytes; +} + +/** + * qmi_encode_message() - Encode C structure as QMI encoded message + * @type: Type of QMI message + * @msg_id: Message ID of the message + * @len: Passed as max length of the message, updated to actual size + * @txn_id: Transaction ID + * @ei: QMI message descriptor + * @c_struct: Reference to structure to encode + * + * Return: Buffer with encoded message, or negative ERR_PTR() on error + */ +void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, + unsigned int txn_id, struct qmi_elem_info *ei, + const void *c_struct) +{ + struct qmi_header *hdr; + ssize_t msglen = 0; + void *msg; + int ret; + + /* Check the possibility of a zero length QMI message */ + if (!c_struct) { + ret = qmi_calc_min_msg_len(ei, 1); + if (ret) { + pr_err("%s: Calc. 
len %d != 0, but NULL c_struct\n", + __func__, ret); + return ERR_PTR(-EINVAL); + } + } + + msg = kzalloc(sizeof(*hdr) + *len, GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + /* Encode message, if we have a message */ + if (c_struct) { + msglen = qmi_encode(ei, msg + sizeof(*hdr), c_struct, *len, 1); + if (msglen < 0) { + kfree(msg); + return ERR_PTR(msglen); + } + } + + hdr = msg; + hdr->type = type; + hdr->txn_id = txn_id; + hdr->msg_id = msg_id; + hdr->msg_len = msglen; + + *len = sizeof(*hdr) + msglen; + + return msg; +} +EXPORT_SYMBOL(qmi_encode_message); + +/** + * qmi_decode_message() - Decode QMI encoded message to C structure + * @buf: Buffer with encoded message + * @len: Amount of data in @buf + * @ei: QMI message descriptor + * @c_struct: Reference to structure to decode into + * + * Return: The number of bytes of decoded information on success, negative + * errno on error. + */ +int qmi_decode_message(const void *buf, size_t len, + struct qmi_elem_info *ei, void *c_struct) +{ + if (!ei) + return -EINVAL; + + if (!c_struct || !buf || !len) + return -EINVAL; + + return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header), + len - sizeof(struct qmi_header), 1); +} +EXPORT_SYMBOL(qmi_decode_message); + +/* Common header in all QMI responses */ +struct qmi_elem_info qmi_response_type_v01_ei[] = { + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct qmi_response_type_v01, result), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct qmi_response_type_v01, error), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .elem_len = 0, + .elem_size = 0, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = 0, + .ei_array = NULL, + }, +}; +EXPORT_SYMBOL(qmi_response_type_v01_ei); + +MODULE_DESCRIPTION("QMI encoder/decoder helper"); +MODULE_LICENSE("GPL v2"); diff --git a/qmi/qmi_interface.c b/qmi/qmi_interface.c new file mode 100644 index 0000000..1a03eaa --- /dev/null +++ b/qmi/qmi_interface.c @@ -0,0 +1,849 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/qrtr.h> +#include <linux/net.h> +#include <linux/completion.h> +#include <linux/idr.h> +#include <linux/string.h> +#include <net/sock.h> +#include <linux/workqueue.h> +#include <linux/soc/qcom/qmi.h> + +static struct socket *qmi_sock_create(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq); + +/** + * qmi_recv_new_server() - handler of NEW_SERVER control message + * @qmi: qmi handle + * @service: service id of the new server + * @instance: instance id of the new server + * @node: node of the new server + * @port: port of the new server + * + * Calls the new_server callback to inform the client about a newly registered + * server matching the currently registered service lookup. 
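+ *
+ * The &struct qmi_service handed to the callback remains owned by the QMI
+ * core: on success it is kept on the handle's lookup_results list and is
+ * freed again when a matching DEL_SERVER arrives or the handle is
+ * released; a negative return value from the callback simply drops it.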
+ */ +static void qmi_recv_new_server(struct qmi_handle *qmi, + unsigned int service, unsigned int instance, + unsigned int node, unsigned int port) +{ + struct qmi_ops *ops = &qmi->ops; + struct qmi_service *svc; + int ret; + + if (!ops->new_server) + return; + + /* Ignore EOF marker */ + if (!node && !port) + return; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return; + + svc->service = service; + svc->version = instance & 0xff; + svc->instance = instance >> 8; + svc->node = node; + svc->port = port; + + ret = ops->new_server(qmi, svc); + if (ret < 0) + kfree(svc); + else + list_add(&svc->list_node, &qmi->lookup_results); +} + +/** + * qmi_recv_del_server() - handler of DEL_SERVER control message + * @qmi: qmi handle + * @node: node of the dying server, a value of -1 matches all nodes + * @port: port of the dying server, a value of -1 matches all ports + * + * Calls the del_server callback for each previously seen server, allowing the + * client to react to the disappearing server. + */ +static void qmi_recv_del_server(struct qmi_handle *qmi, + unsigned int node, unsigned int port) +{ + struct qmi_ops *ops = &qmi->ops; + struct qmi_service *svc; + struct qmi_service *tmp; + + list_for_each_entry_safe(svc, tmp, &qmi->lookup_results, list_node) { + if (node != -1 && svc->node != node) + continue; + if (port != -1 && svc->port != port) + continue; + + if (ops->del_server) + ops->del_server(qmi, svc); + + list_del(&svc->list_node); + kfree(svc); + } +} + +/** + * qmi_recv_bye() - handler of BYE control message + * @qmi: qmi handle + * @node: id of the dying node + * + * Signals the client that all previously registered services on this node are + * now gone and then calls the bye callback to allow the client client further + * cleaning up resources associated with this remote. + */ +static void qmi_recv_bye(struct qmi_handle *qmi, + unsigned int node) +{ + struct qmi_ops *ops = &qmi->ops; + + qmi_recv_del_server(qmi, node, -1); + + if (ops->bye) + ops->bye(qmi, node); +} + +/** + * qmi_recv_del_client() - handler of DEL_CLIENT control message + * @qmi: qmi handle + * @node: node of the dying client + * @port: port of the dying client + * + * Signals the client about a dying client, by calling the del_client callback. 
+ */ +static void qmi_recv_del_client(struct qmi_handle *qmi, + unsigned int node, unsigned int port) +{ + struct qmi_ops *ops = &qmi->ops; + + if (ops->del_client) + ops->del_client(qmi, node, port); +} + +static void qmi_recv_ctrl_pkt(struct qmi_handle *qmi, + const void *buf, size_t len) +{ + const struct qrtr_ctrl_pkt *pkt = buf; + + if (len < sizeof(struct qrtr_ctrl_pkt)) { + pr_debug("ignoring short control packet\n"); + return; + } + + switch (le32_to_cpu(pkt->cmd)) { + case QRTR_TYPE_BYE: + qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node)); + break; + case QRTR_TYPE_NEW_SERVER: + qmi_recv_new_server(qmi, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance), + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + case QRTR_TYPE_DEL_SERVER: + qmi_recv_del_server(qmi, + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + case QRTR_TYPE_DEL_CLIENT: + qmi_recv_del_client(qmi, + le32_to_cpu(pkt->client.node), + le32_to_cpu(pkt->client.port)); + break; + } +} + +static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + struct msghdr msg = { }; + struct kvec iv = { &pkt, sizeof(pkt) }; + int ret; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP); + pkt.server.service = cpu_to_le32(svc->service); + pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8); + + sq.sq_family = qmi->sq.sq_family; + sq.sq_node = qmi->sq.sq_node; + sq.sq_port = QRTR_PORT_CTRL; + + msg.msg_name = &sq; + msg.msg_namelen = sizeof(sq); + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) { + ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0) + pr_err("failed to send lookup registration: %d\n", ret); + } + mutex_unlock(&qmi->sock_lock); +} + +/** + * qmi_add_lookup() - register a new lookup with the name service + * @qmi: qmi handle + * @service: service id of the request + * @instance: instance id of the request + * @version: version number of the request + * + * Registering a lookup query with the name server will cause the name server + * to send NEW_SERVER and DEL_SERVER control messages to this socket as + * matching services are registered. + * + * Return: 0 on success, negative errno on failure. 
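+ *
+ * A minimal usage sketch (the client structure and the service/version/
+ * instance values below are placeholders, not real IDs); matching servers
+ * are then reported through the new_server callback:
+ *
+ *	ret = qmi_add_lookup(&client->qmi, 0x42, 1, 0);
+ *	if (ret < 0)
+ *		return ret;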
+ */ +int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance) +{ + struct qmi_service *svc; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return -ENOMEM; + + svc->service = service; + svc->version = version; + svc->instance = instance; + + list_add(&svc->list_node, &qmi->lookups); + + qmi_send_new_lookup(qmi, svc); + + return 0; +} +EXPORT_SYMBOL(qmi_add_lookup); + +static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + struct msghdr msg = { }; + struct kvec iv = { &pkt, sizeof(pkt) }; + int ret; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER); + pkt.server.service = cpu_to_le32(svc->service); + pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8); + pkt.server.node = cpu_to_le32(qmi->sq.sq_node); + pkt.server.port = cpu_to_le32(qmi->sq.sq_port); + + sq.sq_family = qmi->sq.sq_family; + sq.sq_node = qmi->sq.sq_node; + sq.sq_port = QRTR_PORT_CTRL; + + msg.msg_name = &sq; + msg.msg_namelen = sizeof(sq); + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) { + ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0) + pr_err("send service registration failed: %d\n", ret); + } + mutex_unlock(&qmi->sock_lock); +} + +/** + * qmi_add_server() - register a service with the name service + * @qmi: qmi handle + * @service: type of the service + * @instance: instance of the service + * @version: version of the service + * + * Register a new service with the name service. This allows clients to find + * and start sending messages to the client associated with @qmi. + * + * Return: 0 on success, negative errno on failure. + */ +int qmi_add_server(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance) +{ + struct qmi_service *svc; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return -ENOMEM; + + svc->service = service; + svc->version = version; + svc->instance = instance; + + list_add(&svc->list_node, &qmi->services); + + qmi_send_new_server(qmi, svc); + + return 0; +} +EXPORT_SYMBOL(qmi_add_server); + +/** + * qmi_txn_init() - allocate transaction id within the given QMI handle + * @qmi: QMI handle + * @txn: transaction context + * @ei: description of how to decode a matching response (optional) + * @c_struct: pointer to the object to decode the response into (optional) + * + * This allocates a transaction id within the QMI handle. If @ei and @c_struct + * are specified any responses to this transaction will be decoded as described + * by @ei into @c_struct. + * + * A client calling qmi_txn_init() must call either qmi_txn_wait() or + * qmi_txn_cancel() to free up the allocated resources. + * + * Return: Transaction id on success, negative errno on failure. 
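+ *
+ * The usual request/response sequence looks roughly like the following
+ * (the message ID, maximum length, request/response objects, their ei
+ * arrays and the timeout are placeholders; &sq is the server address,
+ * e.g. as reported by a new_server callback):
+ *
+ *	ret = qmi_txn_init(qmi, &txn, resp_ei, &resp);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	ret = qmi_send_request(qmi, &sq, &txn, MSG_ID, REQ_MAX_LEN,
+ *			       req_ei, &req);
+ *	if (ret < 0) {
+ *		qmi_txn_cancel(&txn);
+ *		return ret;
+ *	}
+ *
+ *	ret = qmi_txn_wait(&txn, 5 * HZ);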
+ */ +int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, + struct qmi_elem_info *ei, void *c_struct) +{ + int ret; + + memset(txn, 0, sizeof(*txn)); + + mutex_init(&txn->lock); + init_completion(&txn->completion); + txn->qmi = qmi; + txn->ei = ei; + txn->dest = c_struct; + + mutex_lock(&qmi->txn_lock); + ret = idr_alloc_cyclic(&qmi->txns, txn, 0, U16_MAX, GFP_KERNEL); + if (ret < 0) + pr_err("failed to allocate transaction id\n"); + + txn->id = ret; + mutex_unlock(&qmi->txn_lock); + + return ret; +} +EXPORT_SYMBOL(qmi_txn_init); + +/** + * qmi_txn_wait() - wait for a response on a transaction + * @txn: transaction handle + * @timeout: timeout, in jiffies + * + * If the transaction is decoded by the means of @ei and @c_struct the return + * value will be the returned value of qmi_decode_message(), otherwise it's up + * to the specified message handler to fill out the result. + * + * Return: the transaction response on success, negative errno on failure. + */ +int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout) +{ + struct qmi_handle *qmi = txn->qmi; + int ret; + + ret = wait_for_completion_timeout(&txn->completion, timeout); + + mutex_lock(&qmi->txn_lock); + mutex_lock(&txn->lock); + idr_remove(&qmi->txns, txn->id); + mutex_unlock(&txn->lock); + mutex_unlock(&qmi->txn_lock); + + if (ret == 0) + return -ETIMEDOUT; + else + return txn->result; +} +EXPORT_SYMBOL(qmi_txn_wait); + +/** + * qmi_txn_cancel() - cancel an ongoing transaction + * @txn: transaction id + */ +void qmi_txn_cancel(struct qmi_txn *txn) +{ + struct qmi_handle *qmi = txn->qmi; + + mutex_lock(&qmi->txn_lock); + mutex_lock(&txn->lock); + idr_remove(&qmi->txns, txn->id); + mutex_unlock(&txn->lock); + mutex_unlock(&qmi->txn_lock); +} +EXPORT_SYMBOL(qmi_txn_cancel); + +/** + * qmi_invoke_handler() - find and invoke a handler for a message + * @qmi: qmi handle + * @sq: sockaddr of the sender + * @txn: transaction object for the message + * @buf: buffer containing the message + * @len: length of @buf + * + * Find handler and invoke handler for the incoming message. + */ +static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *buf, size_t len) +{ + const struct qmi_msg_handler *handler; + const struct qmi_header *hdr = buf; + void *dest; + int ret; + + if (!qmi->handlers) + return; + + for (handler = qmi->handlers; handler->fn; handler++) { + if (handler->type == hdr->type && + handler->msg_id == hdr->msg_id) + break; + } + + if (!handler->fn) + return; + + dest = kzalloc(handler->decoded_size, GFP_KERNEL); + if (!dest) + return; + + ret = qmi_decode_message(buf, len, handler->ei, dest); + if (ret < 0) + pr_err("failed to decode incoming message\n"); + else + handler->fn(qmi, sq, txn, dest); + + kfree(dest); +} + +/** + * qmi_handle_net_reset() - invoked to handle ENETRESET on a QMI handle + * @qmi: the QMI context + * + * As a result of registering a name service with the QRTR all open sockets are + * flagged with ENETRESET and this function will be called. The typical case is + * the initial boot, where this signals that the local node id has been + * configured and as such any bound sockets needs to be rebound. So close the + * socket, inform the client and re-initialize the socket. + * + * For clients it's generally sufficient to react to the del_server callbacks, + * but server code is expected to treat the net_reset callback as a "bye" from + * all nodes. 
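+ *
+ * Internally the replacement socket is created first; the handle then
+ * drops every previously reported server (del_server with node and port
+ * of -1) and invokes the net_reset callback before the new socket is
+ * installed.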
+ * + * Finally the QMI handle will send out registration requests for any lookups + * and services. + */ +static void qmi_handle_net_reset(struct qmi_handle *qmi) +{ + struct sockaddr_qrtr sq; + struct qmi_service *svc; + struct socket *sock; + + sock = qmi_sock_create(qmi, &sq); + if (IS_ERR(sock)) + return; + + mutex_lock(&qmi->sock_lock); + sock_release(qmi->sock); + qmi->sock = NULL; + mutex_unlock(&qmi->sock_lock); + + qmi_recv_del_server(qmi, -1, -1); + + if (qmi->ops.net_reset) + qmi->ops.net_reset(qmi); + + mutex_lock(&qmi->sock_lock); + qmi->sock = sock; + qmi->sq = sq; + mutex_unlock(&qmi->sock_lock); + + list_for_each_entry(svc, &qmi->lookups, list_node) + qmi_send_new_lookup(qmi, svc); + + list_for_each_entry(svc, &qmi->services, list_node) + qmi_send_new_server(qmi, svc); +} + +static void qmi_handle_message(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + const void *buf, size_t len) +{ + const struct qmi_header *hdr; + struct qmi_txn tmp_txn; + struct qmi_txn *txn = NULL; + int ret; + + if (len < sizeof(*hdr)) { + pr_err("ignoring short QMI packet\n"); + return; + } + + hdr = buf; + + /* If this is a response, find the matching transaction handle */ + if (hdr->type == QMI_RESPONSE) { + mutex_lock(&qmi->txn_lock); + txn = idr_find(&qmi->txns, hdr->txn_id); + + /* Ignore unexpected responses */ + if (!txn) { + mutex_unlock(&qmi->txn_lock); + return; + } + + mutex_lock(&txn->lock); + mutex_unlock(&qmi->txn_lock); + + if (txn->dest && txn->ei) { + ret = qmi_decode_message(buf, len, txn->ei, txn->dest); + if (ret < 0) + pr_err("failed to decode incoming message\n"); + + txn->result = ret; + complete(&txn->completion); + } else { + qmi_invoke_handler(qmi, sq, txn, buf, len); + } + + mutex_unlock(&txn->lock); + } else { + /* Create a txn based on the txn_id of the incoming message */ + memset(&tmp_txn, 0, sizeof(tmp_txn)); + tmp_txn.id = hdr->txn_id; + + qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len); + } +} + +static void qmi_data_ready_work(struct work_struct *work) +{ + struct qmi_handle *qmi = container_of(work, struct qmi_handle, work); + struct qmi_ops *ops = &qmi->ops; + struct sockaddr_qrtr sq; + struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) }; + struct kvec iv; + ssize_t msglen; + + for (;;) { + iv.iov_base = qmi->recv_buf; + iv.iov_len = qmi->recv_buf_size; + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) + msglen = kernel_recvmsg(qmi->sock, &msg, &iv, 1, + iv.iov_len, MSG_DONTWAIT); + else + msglen = -EPIPE; + mutex_unlock(&qmi->sock_lock); + if (msglen == -EAGAIN) + break; + + if (msglen == -ENETRESET) { + qmi_handle_net_reset(qmi); + + /* The old qmi->sock is gone, our work is done */ + break; + } + + if (msglen < 0) { + pr_err("qmi recvmsg failed: %zd\n", msglen); + break; + } + + if (sq.sq_node == qmi->sq.sq_node && + sq.sq_port == QRTR_PORT_CTRL) { + qmi_recv_ctrl_pkt(qmi, qmi->recv_buf, msglen); + } else if (ops->msg_handler) { + ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen); + } else { + qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen); + } + } +} + +static void qmi_data_ready(struct sock *sk) +{ + struct qmi_handle *qmi = sk->sk_user_data; + + /* + * This will be NULL if we receive data while being in + * qmi_handle_release() + */ + if (!qmi) + return; + + queue_work(qmi->wq, &qmi->work); +} + +static struct socket *qmi_sock_create(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq) +{ + struct socket *sock; + int ret; + + ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM, + PF_QIPCRTR, &sock); + if (ret < 0) + return 
ERR_PTR(ret); + + ret = kernel_getsockname(sock, (struct sockaddr *)sq); + if (ret < 0) { + sock_release(sock); + return ERR_PTR(ret); + } + + sock->sk->sk_user_data = qmi; + sock->sk->sk_data_ready = qmi_data_ready; + sock->sk->sk_error_report = qmi_data_ready; + + return sock; +} + +/** + * qmi_handle_init() - initialize a QMI client handle + * @qmi: QMI handle to initialize + * @recv_buf_size: maximum size of incoming message + * @ops: reference to callbacks for QRTR notifications + * @handlers: NULL-terminated list of QMI message handlers + * + * This initializes the QMI client handle to allow sending and receiving QMI + * messages. As messages are received the appropriate handler will be invoked. + * + * Return: 0 on success, negative errno on failure. + */ +int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size, + const struct qmi_ops *ops, + const struct qmi_msg_handler *handlers) +{ + int ret; + + mutex_init(&qmi->txn_lock); + mutex_init(&qmi->sock_lock); + + idr_init(&qmi->txns); + + INIT_LIST_HEAD(&qmi->lookups); + INIT_LIST_HEAD(&qmi->lookup_results); + INIT_LIST_HEAD(&qmi->services); + + INIT_WORK(&qmi->work, qmi_data_ready_work); + + qmi->handlers = handlers; + if (ops) + qmi->ops = *ops; + + /* Make room for the header */ + recv_buf_size += sizeof(struct qmi_header); + /* Must also be sufficient to hold a control packet */ + if (recv_buf_size < sizeof(struct qrtr_ctrl_pkt)) + recv_buf_size = sizeof(struct qrtr_ctrl_pkt); + + qmi->recv_buf_size = recv_buf_size; + qmi->recv_buf = kzalloc(recv_buf_size, GFP_KERNEL); + if (!qmi->recv_buf) + return -ENOMEM; + + qmi->wq = alloc_workqueue("qmi_msg_handler", WQ_UNBOUND, 1); + if (!qmi->wq) { + ret = -ENOMEM; + goto err_free_recv_buf; + } + + qmi->sock = qmi_sock_create(qmi, &qmi->sq); + if (IS_ERR(qmi->sock)) { + if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) { + ret = -EPROBE_DEFER; + } else { + pr_err("failed to create QMI socket\n"); + ret = PTR_ERR(qmi->sock); + } + goto err_destroy_wq; + } + + return 0; + +err_destroy_wq: + destroy_workqueue(qmi->wq); +err_free_recv_buf: + kfree(qmi->recv_buf); + + return ret; +} +EXPORT_SYMBOL(qmi_handle_init); + +/** + * qmi_handle_release() - release the QMI client handle + * @qmi: QMI client handle + * + * This closes the underlying socket and stops any handling of QMI messages. 
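+ *
+ * This is the counterpart to qmi_handle_init(); a typical lifecycle is
+ * roughly (the client structure and its fields are placeholders):
+ *
+ *	ret = qmi_handle_init(&client->qmi, RESP_MAX_LEN, &client_ops,
+ *			      client_handlers);
+ *	...
+ *	qmi_handle_release(&client->qmi);
+ *
+ * Releasing the handle also destroys the receive workqueue and frees any
+ * lookup and service registrations still attached to it, so no further
+ * callbacks are delivered once it returns.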
+ */ +void qmi_handle_release(struct qmi_handle *qmi) +{ + struct socket *sock = qmi->sock; + struct qmi_service *svc, *tmp; + + sock->sk->sk_user_data = NULL; + cancel_work_sync(&qmi->work); + + qmi_recv_del_server(qmi, -1, -1); + + mutex_lock(&qmi->sock_lock); + sock_release(sock); + qmi->sock = NULL; + mutex_unlock(&qmi->sock_lock); + + destroy_workqueue(qmi->wq); + + idr_destroy(&qmi->txns); + + kfree(qmi->recv_buf); + + /* Free registered lookup requests */ + list_for_each_entry_safe(svc, tmp, &qmi->lookups, list_node) { + list_del(&svc->list_node); + kfree(svc); + } + + /* Free registered service information */ + list_for_each_entry_safe(svc, tmp, &qmi->services, list_node) { + list_del(&svc->list_node); + kfree(svc); + } +} +EXPORT_SYMBOL(qmi_handle_release); + +/** + * qmi_send_message() - send a QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @txn: transaction object to use for the message + * @type: type of message to send + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * This function encodes @c_struct using @ei into a message of type @type, + * with @msg_id and @txn into a buffer of maximum size @len, and sends this to + * @sq. + * + * Return: 0 on success, negative errno on failure. + */ +static ssize_t qmi_send_message(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, struct qmi_txn *txn, + int type, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct) +{ + struct msghdr msghdr = {}; + struct kvec iv; + void *msg; + int ret; + + msg = qmi_encode_message(type, + msg_id, &len, + txn->id, ei, + c_struct); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + iv.iov_base = msg; + iv.iov_len = len; + + if (sq) { + msghdr.msg_name = sq; + msghdr.msg_namelen = sizeof(*sq); + } + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) { + ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len); + if (ret < 0) + pr_err("failed to send QMI message\n"); + } else { + ret = -EPIPE; + } + mutex_unlock(&qmi->sock_lock); + + kfree(msg); + + return ret < 0 ? ret : 0; +} + +/** + * qmi_send_request() - send a request QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @txn: transaction object to use for the message + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * Return: 0 on success, negative errno on failure. + */ +ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct) +{ + return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei, + c_struct); +} +EXPORT_SYMBOL(qmi_send_request); + +/** + * qmi_send_response() - send a response QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @txn: transaction object to use for the message + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * Return: 0 on success, negative errno on failure. 
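+ *
+ * This is typically called from a &struct qmi_msg_handler fn callback,
+ * echoing back the @sq and @txn that were passed to the handler (the
+ * message ID, maximum length and response object below are placeholders):
+ *
+ *	qmi_send_response(qmi, sq, txn, MSG_ID, RESP_MAX_LEN,
+ *			  resp_ei, &resp);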
+ */ +ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct) +{ + return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei, + c_struct); +} +EXPORT_SYMBOL(qmi_send_response); + +/** + * qmi_send_indication() - send an indication QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * Return: 0 on success, negative errno on failure. + */ +ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + int msg_id, size_t len, struct qmi_elem_info *ei, + const void *c_struct) +{ + struct qmi_txn txn; + ssize_t rval; + int ret; + + ret = qmi_txn_init(qmi, &txn, NULL, NULL); + if (ret < 0) + return ret; + + rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei, + c_struct); + + /* We don't care about future messages on this txn */ + qmi_txn_cancel(&txn); + + return rval; +} +EXPORT_SYMBOL(qmi_send_indication); diff --git a/qrtr/Kconfig b/qrtr/Kconfig new file mode 100644 index 0000000..a5f069f --- /dev/null +++ b/qrtr/Kconfig @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Qualcomm IPC Router configuration +# + +config QRTR + tristate "Qualcomm IPC Router support" + help + Say Y if you intend to use Qualcomm IPC router protocol. The + protocol is used to communicate with services provided by other + hardware blocks in the system. + + In order to do service lookups, a userspace daemon is required to + maintain a service listing. + +if QRTR + +config QRTR_NODE_ID + int "QRTR Local Node ID" + default 1 + help + This option is used to configure the QRTR Node ID for the local + processor. The node ID published to other nodes within the system. + This value can be overridden by the name service application. This + option is for configurations where Node ID needs to be customized + but the name service application is not priveleged enough to use + netlink sockets. + +config QRTR_WAKEUP_MS + int "QRTR Wakeup timeout" + default 0 + help + This option is used to configure the wakesource timeout that QRTR + should take when a packet is received. The qrtr driver can guarantee + that the packet gets queued to the socket but cannot guarantee the + client process will get time to run if auto sleep is enabled. This + config will help mitigate missed packets on systems where auto sleep + is aggressive. + +config QRTR_SMD + tristate "SMD IPC Router channels" + depends on RPMSG || (COMPILE_TEST && RPMSG=n) + help + Say Y here to support SMD based ipcrouter channels. SMD is the + most common transport for IPC Router. + +config QRTR_TUN + tristate "TUN device for Qualcomm IPC Router" + help + Say Y here to expose a character device that allows user space to + implement endpoints of QRTR, for purpose of tunneling data to other + hosts or testing purposes. + +config QRTR_MHI + tristate "MHI IPC Router channels" + depends on MHI_BUS || (COMPILE_TEST && MHI_BUS=n) + help + Say Y here to support MHI based ipcrouter channels. MHI is the + transport used for external modem connections for IPC Router. The + MHI transport fakes synchronous sends by waiting for the uplink + callback from the MHI framework before returing to qrtr core. + +config QRTR_GUNYAH + tristate "Gunyah IPC Router channels" + help + Say Y here to support a fifo based ipcrouter channel with gunyah + hypervisor signaling. 
The gunyah transport layer enables IPC + Router communication between two virtual machines. The transport + uses dynamically shared memory and gunyah doorbells. + +endif # QRTR diff --git a/qrtr/Makefile b/qrtr/Makefile new file mode 100644 index 0000000..9aae41b --- /dev/null +++ b/qrtr/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_QRTR) := qrtr.o ns.o + +obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o +qrtr-smd-y := smd.o +obj-$(CONFIG_QRTR_TUN) += qrtr-tun.o +qrtr-tun-y := tun.o +obj-$(CONFIG_QRTR_MHI) += qrtr-mhi.o +qrtr-mhi-y := mhi.o +obj-$(CONFIG_QRTR_GUNYAH) += qrtr-gunyah.o +qrtr-gunyah-y := gunyah.o +ccflags-y += -DDEBUG diff --git a/qrtr/mhi.c b/qrtr/mhi.c new file mode 100644 index 0000000..6d41472 --- /dev/null +++ b/qrtr/mhi.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <linux/of.h> +#include <net/sock.h> + +#include "qrtr.h" + +struct qrtr_mhi_dev { + struct qrtr_endpoint ep; + struct mhi_device *mhi_dev; + struct device *dev; + struct completion prepared; +}; + +/* From MHI to QRTR */ +static void qcom_mhi_qrtr_dl_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev); + int rc; + + if (!qdev || mhi_res->transaction_status) + return; + + rc = qrtr_endpoint_post(&qdev->ep, mhi_res->buf_addr, + mhi_res->bytes_xferd); + if (rc == -EINVAL) + dev_err(qdev->dev, "invalid ipcrouter packet\n"); +} + +/* From QRTR to MHI */ +static void qcom_mhi_qrtr_ul_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct sk_buff *skb = mhi_res->buf_addr; + + if (skb->sk) + sock_put(skb->sk); + consume_skb(skb); +} + +/* Send data over MHI */ +static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) +{ + struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); + int rc; + + if (skb->sk) + sock_hold(skb->sk); + + rc = wait_for_completion_interruptible(&qdev->prepared); + if (rc) + goto free_skb; + + rc = skb_linearize(skb); + if (rc) + goto free_skb; + + rc = mhi_queue_skb(qdev->mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + if (rc) + goto free_skb; + + return rc; + +free_skb: + if (skb->sk) + sock_put(skb->sk); + kfree_skb(skb); + + return rc; +} + +static void qrtr_mhi_of_parse(struct mhi_device *mhi_dev, + u32 *net_id, bool *rt) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_node *np = NULL; + struct pci_dev *pci_device; + u32 dev_id, nid; + int rc; + + *net_id = QRTR_EP_NET_ID_AUTO; + *rt = false; + + np = of_find_compatible_node(np, NULL, "qcom,qrtr-mhi"); + if (!np) + return; + + rc = of_property_read_u32(np, "qcom,dev-id", &dev_id); + if (!rc) { + pci_device = to_pci_dev(mhi_cntrl->cntrl_dev); + if (pci_device->device == dev_id) { + rc = of_property_read_u32(np, "qcom,net-id", &nid); + if (!rc) + *net_id = nid; + *rt = of_property_read_bool(np, "qcom,low-latency"); + } + } + of_node_put(np); +} + +static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct qrtr_mhi_dev *qdev; + u32 net_id; + bool rt; + int rc; + + qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL); + if (!qdev){ + dev_dbg(&mhi_dev->dev, "qdev alloc failure\n"); + return -ENOMEM; + } + qdev->mhi_dev = 
mhi_dev; + qdev->dev = &mhi_dev->dev; + qdev->ep.xmit = qcom_mhi_qrtr_send; + init_completion(&qdev->prepared); + + dev_set_drvdata(&mhi_dev->dev, qdev); + + qrtr_mhi_of_parse(mhi_dev, &net_id, &rt); + dev_dbg(&mhi_dev->dev, "start endpoint_register\n"); + rc = qrtr_endpoint_register(&qdev->ep, net_id, rt); + if (rc){ + dev_dbg(&mhi_dev->dev, "endpoint register failure (%d)\n",rc); + return rc; + } + /* start channels */ + + rc = mhi_prepare_for_transfer(mhi_dev); + if (rc) { + dev_dbg(&mhi_dev->dev, "mhi_prepare_for_transfer failure (%d)\n",rc); + qrtr_endpoint_unregister(&qdev->ep); + dev_set_drvdata(&mhi_dev->dev, NULL); + return rc; + } + + complete_all(&qdev->prepared); + dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n"); + + return 0; +} + +static void qcom_mhi_qrtr_remove(struct mhi_device *mhi_dev) +{ + struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev); + + qrtr_endpoint_unregister(&qdev->ep); + mhi_unprepare_from_transfer(mhi_dev); + dev_set_drvdata(&mhi_dev->dev, NULL); +} + +static const struct mhi_device_id qcom_mhi_qrtr_id_table[] = { + { .chan = "IPCR" }, + {} +}; +MODULE_DEVICE_TABLE(mhi, qcom_mhi_qrtr_id_table); + +static struct mhi_driver qcom_mhi_qrtr_driver = { + .probe = qcom_mhi_qrtr_probe, + .remove = qcom_mhi_qrtr_remove, + .dl_xfer_cb = qcom_mhi_qrtr_dl_callback, + .ul_xfer_cb = qcom_mhi_qrtr_ul_callback, + .id_table = qcom_mhi_qrtr_id_table, + .driver = { + .name = "qcom_mhi_qrtr", + }, +}; + +module_mhi_driver(qcom_mhi_qrtr_driver); + +MODULE_AUTHOR("Chris Lew <clew@codeaurora.org>"); +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm IPC-Router MHI interface driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qrtr/ns.c b/qrtr/ns.c new file mode 100644 index 0000000..87c5dfd --- /dev/null +++ b/qrtr/ns.c @@ -0,0 +1,809 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (c) 2015, Sony Mobile Communications Inc. + * Copyright (c) 2013, 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2020, Linaro Ltd. + */ + +#define pr_fmt(fmt) "qrtr: %s(): " fmt, __func__ + +#include <linux/ipc_logging.h> +#include <linux/module.h> +#include <linux/qrtr.h> +#include <linux/workqueue.h> +#include <linux/xarray.h> +#include <net/sock.h> + +#include "qrtr.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/qrtr.h> + +#define NS_LOG_PAGE_CNT 4 +static void *ns_ilc; +#define NS_INFO(x, ...) 
ipc_log_string(ns_ilc, x, ##__VA_ARGS__) + +static DEFINE_XARRAY(nodes); + +static struct { + struct socket *sock; + struct sockaddr_qrtr bcast_sq; + struct list_head lookups; + struct kthread_worker kworker; + struct kthread_work work; + struct task_struct *task; + int local_node; +} qrtr_ns; + +static const char * const qrtr_ctrl_pkt_strings[] = { + [QRTR_TYPE_HELLO] = "hello", + [QRTR_TYPE_BYE] = "bye", + [QRTR_TYPE_NEW_SERVER] = "new-server", + [QRTR_TYPE_DEL_SERVER] = "del-server", + [QRTR_TYPE_DEL_CLIENT] = "del-client", + [QRTR_TYPE_RESUME_TX] = "resume-tx", + [QRTR_TYPE_EXIT] = "exit", + [QRTR_TYPE_PING] = "ping", + [QRTR_TYPE_NEW_LOOKUP] = "new-lookup", + [QRTR_TYPE_DEL_LOOKUP] = "del-lookup", +}; + +struct qrtr_server_filter { + unsigned int service; + unsigned int instance; + unsigned int ifilter; +}; + +struct qrtr_lookup { + unsigned int service; + unsigned int instance; + + struct sockaddr_qrtr sq; + struct list_head li; +}; + +struct qrtr_server { + unsigned int service; + unsigned int instance; + + unsigned int node; + unsigned int port; + + struct list_head qli; +}; + +struct qrtr_node { + unsigned int id; + struct xarray servers; +}; + +static struct qrtr_node *node_get(unsigned int node_id) +{ + struct qrtr_node *node; + + node = xa_load(&nodes, node_id); + if (node) + return node; + + /* If node didn't exist, allocate and insert it to the tree */ + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return NULL; + + node->id = node_id; + xa_init(&node->servers); + + xa_store(&nodes, node_id, node, GFP_KERNEL); + + return node; +} + +unsigned int qrtr_get_service_id(unsigned int node_id, unsigned int port_id) +{ + struct qrtr_server *srv; + struct qrtr_node *node; + unsigned long index; + + node = node_get(node_id); + if (!node) + return 0; + + xa_for_each(&node->servers, index, srv) { + if (srv->node == node_id && srv->port == port_id) + return srv->service; + } + + return 0; +} +EXPORT_SYMBOL(qrtr_get_service_id); + +static int server_match(const struct qrtr_server *srv, + const struct qrtr_server_filter *f) +{ + unsigned int ifilter = f->ifilter; + + if (f->service != 0 && srv->service != f->service) + return 0; + if (!ifilter && f->instance) + ifilter = ~0; + + return (srv->instance & ifilter) == f->instance; +} + +static int service_announce_new(struct sockaddr_qrtr *dest, + struct qrtr_server *srv) +{ + struct qrtr_ctrl_pkt pkt; + struct msghdr msg = { }; + struct kvec iv; + + trace_qrtr_ns_service_announce_new(srv->service, srv->instance, + srv->node, srv->port); + + NS_INFO("%s: [0x%x:0x%x]@[0x%x:0x%x]\n", __func__, srv->service, + srv->instance, srv->node, srv->port); + iv.iov_base = &pkt; + iv.iov_len = sizeof(pkt); + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER); + pkt.server.service = cpu_to_le32(srv->service); + pkt.server.instance = cpu_to_le32(srv->instance); + pkt.server.node = cpu_to_le32(srv->node); + pkt.server.port = cpu_to_le32(srv->port); + + msg.msg_name = (struct sockaddr *)dest; + msg.msg_namelen = sizeof(*dest); + + return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); +} + +static int service_announce_del(struct sockaddr_qrtr *dest, + struct qrtr_server *srv) +{ + struct qrtr_ctrl_pkt pkt; + struct msghdr msg = { }; + struct kvec iv; + int ret; + + trace_qrtr_ns_service_announce_del(srv->service, srv->instance, + srv->node, srv->port); + + NS_INFO("%s: [0x%x:0x%x]@[0x%x:0x%x]\n", __func__, srv->service, + srv->instance, srv->node, srv->port); + + iv.iov_base = &pkt; + iv.iov_len = sizeof(pkt); + + 
memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_SERVER); + pkt.server.service = cpu_to_le32(srv->service); + pkt.server.instance = cpu_to_le32(srv->instance); + pkt.server.node = cpu_to_le32(srv->node); + pkt.server.port = cpu_to_le32(srv->port); + + msg.msg_name = (struct sockaddr *)dest; + msg.msg_namelen = sizeof(*dest); + + ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0 && ret != -ENODEV) + pr_err_ratelimited("failed to announce del service %d\n", ret); + + return ret; +} + +static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv, + bool new) +{ + struct qrtr_ctrl_pkt pkt; + struct msghdr msg = { }; + struct kvec iv; + int ret; + + iv.iov_base = &pkt; + iv.iov_len = sizeof(pkt); + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = new ? cpu_to_le32(QRTR_TYPE_NEW_SERVER) : + cpu_to_le32(QRTR_TYPE_DEL_SERVER); + if (srv) { + pkt.server.service = cpu_to_le32(srv->service); + pkt.server.instance = cpu_to_le32(srv->instance); + pkt.server.node = cpu_to_le32(srv->node); + pkt.server.port = cpu_to_le32(srv->port); + } + + msg.msg_name = (struct sockaddr *)to; + msg.msg_namelen = sizeof(*to); + + ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0 && ret != -ENODEV) + pr_err_ratelimited("failed to send lookup notification %d\n", + ret); +} + +static int announce_servers(struct sockaddr_qrtr *sq) +{ + struct qrtr_server *srv; + struct qrtr_node *node; + unsigned long index; + int ret; + + node = node_get(qrtr_ns.local_node); + if (!node) + return 0; + + /* Announce the list of servers registered in this node */ + xa_for_each(&node->servers, index, srv) { + ret = service_announce_new(sq, srv); + if (ret < 0) { + if (ret == -ENODEV) + continue; + + pr_err("failed to announce new service %d\n", ret); + return ret; + } + } + + return 0; +} + +static struct qrtr_server *server_add(unsigned int service, + unsigned int instance, + unsigned int node_id, + unsigned int port) +{ + struct qrtr_server *srv; + struct qrtr_server *old; + struct qrtr_node *node; + + if (!service || !port) + return NULL; + + srv = kzalloc(sizeof(*srv), GFP_KERNEL); + if (!srv) + return NULL; + + srv->service = service; + srv->instance = instance; + srv->node = node_id; + srv->port = port; + + node = node_get(node_id); + if (!node) + goto err; + + /* Delete the old server on the same port */ + old = xa_store(&node->servers, port, srv, GFP_KERNEL); + if (old) { + if (xa_is_err(old)) { + pr_err("failed to add server [0x%x:0x%x] ret:%d\n", + srv->service, srv->instance, xa_err(old)); + goto err; + } else { + kfree(old); + } + } + + trace_qrtr_ns_server_add(srv->service, srv->instance, + srv->node, srv->port); + + NS_INFO("%s: [0x%x:0x%x]@[0x%x:0x%x]\n", __func__, srv->service, + srv->instance, srv->node, srv->port); + + return srv; + +err: + kfree(srv); + return NULL; +} + +static int server_del(struct qrtr_node *node, unsigned int port) +{ + struct qrtr_lookup *lookup; + struct qrtr_server *srv; + struct list_head *li; + + srv = xa_load(&node->servers, port); + if (!srv) + return 0; + + xa_erase(&node->servers, port); + + /* Broadcast the removal of local servers */ + if (srv->node == qrtr_ns.local_node) + service_announce_del(&qrtr_ns.bcast_sq, srv); + + /* Announce the service's disappearance to observers */ + list_for_each(li, &qrtr_ns.lookups) { + lookup = container_of(li, struct qrtr_lookup, li); + if (lookup->service && lookup->service != srv->service) + continue; + if (lookup->instance && lookup->instance != srv->instance) + continue; + 
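+		/* Matching observer: notify it that this server is gone */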
+ lookup_notify(&lookup->sq, srv, false); + } + + kfree(srv); + + return 0; +} + +static int say_hello(struct sockaddr_qrtr *dest) +{ + struct qrtr_ctrl_pkt pkt; + struct msghdr msg = { }; + struct kvec iv; + int ret; + + iv.iov_base = &pkt; + iv.iov_len = sizeof(pkt); + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_HELLO); + + msg.msg_name = (struct sockaddr *)dest; + msg.msg_namelen = sizeof(*dest); + + ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0) + pr_err("failed to send hello msg %d\n", ret); + + return ret; +} + +/* Announce the list of servers registered on the local node */ +static int ctrl_cmd_hello(struct sockaddr_qrtr *sq) +{ + int ret; + + ret = say_hello(sq); + if (ret < 0) + return ret; + + return announce_servers(sq); +} + +static int ctrl_cmd_bye(struct sockaddr_qrtr *from) +{ + struct qrtr_node *local_node; + struct qrtr_ctrl_pkt pkt; + struct qrtr_server *srv; + struct sockaddr_qrtr sq; + struct msghdr msg = { }; + struct qrtr_node *node; + unsigned long index; + struct kvec iv; + int ret; + + iv.iov_base = &pkt; + iv.iov_len = sizeof(pkt); + + node = node_get(from->sq_node); + if (!node) + return 0; + + /* Advertise removal of this client to all servers of remote node */ + xa_for_each(&node->servers, index, srv) + server_del(node, srv->port); + + /* Advertise the removal of this client to all local servers */ + local_node = node_get(qrtr_ns.local_node); + if (!local_node) + return 0; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE); + pkt.client.node = cpu_to_le32(from->sq_node); + + xa_for_each(&local_node->servers, index, srv) { + sq.sq_family = AF_QIPCRTR; + sq.sq_node = srv->node; + sq.sq_port = srv->port; + + msg.msg_name = (struct sockaddr *)&sq; + msg.msg_namelen = sizeof(sq); + + ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0 && ret != -ENODEV) + pr_err_ratelimited("send bye failed: [0x%x:0x%x] 0x%x ret: %d\n", + srv->service, srv->instance, + srv->port, ret); + } + + return 0; +} + +static int ctrl_cmd_del_client(struct sockaddr_qrtr *from, + unsigned int node_id, unsigned int port) +{ + struct qrtr_node *local_node; + struct qrtr_lookup *lookup; + struct qrtr_ctrl_pkt pkt; + struct msghdr msg = { }; + struct qrtr_server *srv; + struct sockaddr_qrtr sq; + struct qrtr_node *node; + struct list_head *tmp; + struct list_head *li; + unsigned long index; + struct kvec iv; + int ret; + + iv.iov_base = &pkt; + iv.iov_len = sizeof(pkt); + + /* Local DEL_CLIENT messages comes from the port being closed */ + if (from->sq_node == qrtr_ns.local_node && from->sq_port != port) + return -EINVAL; + + /* Remove any lookups by this client */ + list_for_each_safe(li, tmp, &qrtr_ns.lookups) { + lookup = container_of(li, struct qrtr_lookup, li); + if (lookup->sq.sq_node != node_id) + continue; + if (lookup->sq.sq_port != port) + continue; + + list_del(&lookup->li); + kfree(lookup); + } + + /* Remove the server belonging to this port */ + node = node_get(node_id); + if (node) + server_del(node, port); + + /* Advertise the removal of this client to all local servers */ + local_node = node_get(qrtr_ns.local_node); + if (!local_node) + return 0; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT); + pkt.client.node = cpu_to_le32(node_id); + pkt.client.port = cpu_to_le32(port); + + xa_for_each(&local_node->servers, index, srv) { + sq.sq_family = AF_QIPCRTR; + sq.sq_node = srv->node; + sq.sq_port = srv->port; + + msg.msg_name = (struct sockaddr *)&sq; + 
msg.msg_namelen = sizeof(sq); + + ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0 && ret != -ENODEV) + pr_err_ratelimited("del client cmd failed: [0x%x:0x%x] 0x%x %d\n", + srv->service, srv->instance, + srv->port, ret); + } + + return 0; +} + +static int ctrl_cmd_new_server(struct sockaddr_qrtr *from, + unsigned int service, unsigned int instance, + unsigned int node_id, unsigned int port) +{ + struct qrtr_lookup *lookup; + struct qrtr_server *srv; + struct list_head *li; + int ret = 0; + + /* Ignore specified node and port for local servers */ + if (from->sq_node == qrtr_ns.local_node) { + node_id = from->sq_node; + port = from->sq_port; + } + + srv = server_add(service, instance, node_id, port); + if (!srv) + return -EINVAL; + + if (srv->node == qrtr_ns.local_node) { + ret = service_announce_new(&qrtr_ns.bcast_sq, srv); + if (ret < 0) { + pr_err("failed to announce new service %d\n", ret); + return ret; + } + } + + /* Notify any potential lookups about the new server */ + list_for_each(li, &qrtr_ns.lookups) { + lookup = container_of(li, struct qrtr_lookup, li); + if (lookup->service && lookup->service != service) + continue; + if (lookup->instance && lookup->instance != instance) + continue; + + lookup_notify(&lookup->sq, srv, true); + } + + return ret; +} + +static int ctrl_cmd_del_server(struct sockaddr_qrtr *from, + unsigned int service, unsigned int instance, + unsigned int node_id, unsigned int port) +{ + struct qrtr_node *node; + + /* Ignore specified node and port for local servers*/ + if (from->sq_node == qrtr_ns.local_node) { + node_id = from->sq_node; + port = from->sq_port; + } + + /* Local servers may only unregister themselves */ + if (from->sq_node == qrtr_ns.local_node && from->sq_port != port) + return -EINVAL; + + node = node_get(node_id); + if (!node) + return -ENOENT; + + return server_del(node, port); +} + +static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from, + unsigned int service, unsigned int instance) +{ + struct qrtr_server_filter filter; + struct qrtr_lookup *lookup; + struct qrtr_server *srv; + struct qrtr_node *node; + unsigned long node_idx; + unsigned long srv_idx; + + /* Accept only local observers */ + if (from->sq_node != qrtr_ns.local_node) + return -EINVAL; + + lookup = kzalloc(sizeof(*lookup), GFP_KERNEL); + if (!lookup) + return -ENOMEM; + + lookup->sq = *from; + lookup->service = service; + lookup->instance = instance; + list_add_tail(&lookup->li, &qrtr_ns.lookups); + + memset(&filter, 0, sizeof(filter)); + filter.service = service; + filter.instance = instance; + + xa_for_each(&nodes, node_idx, node) { + xa_for_each(&node->servers, srv_idx, srv) { + if (!server_match(srv, &filter)) + continue; + + lookup_notify(from, srv, true); + } + } + + /* Empty notification, to indicate end of listing */ + lookup_notify(from, NULL, true); + + return 0; +} + +static void ctrl_cmd_del_lookup(struct sockaddr_qrtr *from, + unsigned int service, unsigned int instance) +{ + struct qrtr_lookup *lookup; + struct list_head *tmp; + struct list_head *li; + + list_for_each_safe(li, tmp, &qrtr_ns.lookups) { + lookup = container_of(li, struct qrtr_lookup, li); + if (lookup->sq.sq_node != from->sq_node) + continue; + if (lookup->sq.sq_port != from->sq_port) + continue; + if (lookup->service != service) + continue; + if (lookup->instance && lookup->instance != instance) + continue; + + list_del(&lookup->li); + kfree(lookup); + } +} + +static void ns_log_msg(const struct qrtr_ctrl_pkt *pkt, + struct sockaddr_qrtr *sq) +{ + unsigned int cmd = 
le32_to_cpu(pkt->cmd); + + if (cmd == QRTR_TYPE_HELLO || cmd == QRTR_TYPE_BYE) + NS_INFO("cmd:0x%x node[0x%x]\n", cmd, sq->sq_node); + else if (cmd == QRTR_TYPE_DEL_CLIENT) + NS_INFO("cmd:0x%x addr[0x%x:0x%x]\n", cmd, + le32_to_cpu(pkt->client.node), + le32_to_cpu(pkt->client.port)); + else if (cmd == QRTR_TYPE_NEW_SERVER || cmd == QRTR_TYPE_DEL_SERVER) + NS_INFO("cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n", cmd, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance), + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + else if (cmd == QRTR_TYPE_NEW_LOOKUP || cmd == QRTR_TYPE_DEL_LOOKUP) + NS_INFO("cmd:0x%x SVC[0x%x:0x%x]\n", cmd, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance)); +} + +static void qrtr_ns_worker(struct kthread_work *work) +{ + const struct qrtr_ctrl_pkt *pkt; + size_t recv_buf_size = 4096; + struct sockaddr_qrtr sq; + struct msghdr msg = { }; + unsigned int cmd; + ssize_t msglen; + void *recv_buf; + struct kvec iv; + int ret; + + msg.msg_name = (struct sockaddr *)&sq; + msg.msg_namelen = sizeof(sq); + + recv_buf = kzalloc(recv_buf_size, GFP_KERNEL); + if (!recv_buf) + return; + + for (;;) { + iv.iov_base = recv_buf; + iv.iov_len = recv_buf_size; + + msglen = kernel_recvmsg(qrtr_ns.sock, &msg, &iv, 1, + iv.iov_len, MSG_DONTWAIT); + + if (msglen == -EAGAIN) + break; + + if (msglen < 0) { + pr_err("error receiving packet: %zd\n", msglen); + break; + } + + pkt = recv_buf; + cmd = le32_to_cpu(pkt->cmd); + if (cmd < ARRAY_SIZE(qrtr_ctrl_pkt_strings) && + qrtr_ctrl_pkt_strings[cmd]) + trace_qrtr_ns_message(qrtr_ctrl_pkt_strings[cmd], + sq.sq_node, sq.sq_port); + + ns_log_msg(pkt, &sq); + + ret = 0; + switch (cmd) { + case QRTR_TYPE_HELLO: + ret = ctrl_cmd_hello(&sq); + break; + case QRTR_TYPE_BYE: + ret = ctrl_cmd_bye(&sq); + break; + case QRTR_TYPE_DEL_CLIENT: + ret = ctrl_cmd_del_client(&sq, + le32_to_cpu(pkt->client.node), + le32_to_cpu(pkt->client.port)); + break; + case QRTR_TYPE_NEW_SERVER: + ret = ctrl_cmd_new_server(&sq, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance), + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + case QRTR_TYPE_DEL_SERVER: + ret = ctrl_cmd_del_server(&sq, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance), + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + case QRTR_TYPE_EXIT: + case QRTR_TYPE_PING: + case QRTR_TYPE_RESUME_TX: + break; + case QRTR_TYPE_NEW_LOOKUP: + ret = ctrl_cmd_new_lookup(&sq, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance)); + break; + case QRTR_TYPE_DEL_LOOKUP: + ctrl_cmd_del_lookup(&sq, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance)); + break; + } + + if (ret < 0) + pr_err("failed while handling packet from %d:%d", + sq.sq_node, sq.sq_port); + } + + kfree(recv_buf); +} + +static void qrtr_ns_data_ready(struct sock *sk) +{ + kthread_queue_work(&qrtr_ns.kworker, &qrtr_ns.work); +} + +void qrtr_ns_init(void) +{ + struct sockaddr_qrtr sq; + int rx_buf_sz = INT_MAX; + int ret; + + INIT_LIST_HEAD(&qrtr_ns.lookups); + kthread_init_worker(&qrtr_ns.kworker); + kthread_init_work(&qrtr_ns.work, qrtr_ns_worker); + + ns_ilc = ipc_log_context_create(NS_LOG_PAGE_CNT, "qrtr_ns", 0); + + ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM, + PF_QIPCRTR, &qrtr_ns.sock); + if (ret < 0) + return; + + ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq); + if (ret < 0) { + pr_err("failed to get socket name\n"); + goto err_sock; + } 
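+	/* Start the kworker thread; qrtr_ns_data_ready() queues qrtr_ns_worker() on it */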
+ + qrtr_ns.task = kthread_run(kthread_worker_fn, &qrtr_ns.kworker, + "qrtr_ns"); + if (IS_ERR(qrtr_ns.task)) { + pr_err("failed to spawn worker thread %ld\n", + PTR_ERR(qrtr_ns.task)); + goto err_sock; + } + + qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready; + + sq.sq_port = QRTR_PORT_CTRL; + qrtr_ns.local_node = sq.sq_node; + + ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq)); + if (ret < 0) { + pr_err("failed to bind to socket\n"); + goto err_wq; + } + + sock_setsockopt(qrtr_ns.sock, SOL_SOCKET, SO_RCVBUF, + KERNEL_SOCKPTR((void *)&rx_buf_sz), sizeof(rx_buf_sz)); + + qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR; + qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST; + qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL; + + ret = say_hello(&qrtr_ns.bcast_sq); + if (ret < 0) + goto err_wq; + + return; + +err_wq: + kthread_stop(qrtr_ns.task); +err_sock: + sock_release(qrtr_ns.sock); +} +EXPORT_SYMBOL_GPL(qrtr_ns_init); + +void qrtr_ns_remove(void) +{ + kthread_flush_worker(&qrtr_ns.kworker); + kthread_stop(qrtr_ns.task); + sock_release(qrtr_ns.sock); +} +EXPORT_SYMBOL_GPL(qrtr_ns_remove); + +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm IPC Router Nameservice"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/qrtr/qrtr.c b/qrtr/qrtr.c new file mode 100644 index 0000000..3f21c45 --- /dev/null +++ b/qrtr/qrtr.c @@ -0,0 +1,2069 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, Sony Mobile Communications Inc. + * Copyright (c) 2013, 2018-2021 The Linux Foundation. All rights reserved. + */ +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/netlink.h> +#include <linux/qrtr.h> +#include <linux/termios.h> /* For TIOCINQ/OUTQ */ +#include <linux/numa.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/rwsem.h> +#include <linux/uidgid.h> +#include <linux/pm_wakeup.h> +#include <linux/ipc_logging.h> + +#include <net/sock.h> +#include <uapi/linux/sched/types.h> + +#include "qrtr.h" + +#define QRTR_LOG_PAGE_CNT 4 +#define QRTR_INFO(ctx, x, ...) 
\ + ipc_log_string(ctx, x, ##__VA_ARGS__) + +#define QRTR_PROTO_VER_1 1 +#define QRTR_PROTO_VER_2 3 + +/* auto-bind range */ +#define QRTR_MIN_EPH_SOCKET 0x4000 +#define QRTR_MAX_EPH_SOCKET 0x7fff + +#define QRTR_PORT_CTRL_LEGACY 0xffff + +/* qrtr socket states */ +#define QRTR_STATE_MULTI -2 +#define QRTR_STATE_INIT -1 + +#define AID_VENDOR_QRTR KGIDT_INIT(2906) + +#if defined(CONFIG_RPMSG_QCOM_GLINK_NATIVE) +extern bool glink_resume_pkt; +#endif +extern unsigned int qrtr_get_service_id(unsigned int node_id, + unsigned int port_id); +/** + * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1 + * @version: protocol version + * @type: packet type; one of QRTR_TYPE_* + * @src_node_id: source node + * @src_port_id: source port + * @confirm_rx: boolean; whether a resume-tx packet should be send in reply + * @size: length of packet, excluding this header + * @dst_node_id: destination node + * @dst_port_id: destination port + */ +struct qrtr_hdr_v1 { + __le32 version; + __le32 type; + __le32 src_node_id; + __le32 src_port_id; + __le32 confirm_rx; + __le32 size; + __le32 dst_node_id; + __le32 dst_port_id; +} __packed; + +/** + * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions + * @version: protocol version + * @type: packet type; one of QRTR_TYPE_* + * @flags: bitmask of QRTR_FLAGS_* + * @optlen: length of optional header data + * @size: length of packet, excluding this header and optlen + * @src_node_id: source node + * @src_port_id: source port + * @dst_node_id: destination node + * @dst_port_id: destination port + */ +struct qrtr_hdr_v2 { + u8 version; + u8 type; + u8 flags; + u8 optlen; + __le32 size; + __le16 src_node_id; + __le16 src_port_id; + __le16 dst_node_id; + __le16 dst_port_id; +}; + +#define QRTR_FLAGS_CONFIRM_RX BIT(0) + +struct qrtr_cb { + u32 src_node; + u32 src_port; + u32 dst_node; + u32 dst_port; + + u8 type; + u8 confirm_rx; +}; + +#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \ + sizeof(struct qrtr_hdr_v2)) + +struct qrtr_sock { + /* WARNING: sk must be the first member */ + struct sock sk; + struct sockaddr_qrtr us; + struct sockaddr_qrtr peer; + + int state; +}; + +static inline struct qrtr_sock *qrtr_sk(struct sock *sk) +{ + BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0); + return container_of(sk, struct qrtr_sock, sk); +} + +static unsigned int qrtr_local_nid = CONFIG_QRTR_NODE_ID; +static unsigned int qrtr_wakeup_ms = CONFIG_QRTR_WAKEUP_MS; + +/* for node ids */ +static RADIX_TREE(qrtr_nodes, GFP_ATOMIC); +static DEFINE_SPINLOCK(qrtr_nodes_lock); +/* broadcast list */ +static LIST_HEAD(qrtr_all_epts); +/* lock for qrtr_all_epts */ +static DECLARE_RWSEM(qrtr_epts_lock); + +/* local port allocation management */ +static DEFINE_IDR(qrtr_ports); +static DEFINE_SPINLOCK(qrtr_port_lock); + +/* backup buffers */ +#define QRTR_BACKUP_HI_NUM 5 +#define QRTR_BACKUP_HI_SIZE SZ_16K +#define QRTR_BACKUP_LO_NUM 20 +#define QRTR_BACKUP_LO_SIZE SZ_1K +static struct sk_buff_head qrtr_backup_lo; +static struct sk_buff_head qrtr_backup_hi; +static struct work_struct qrtr_backup_work; + +/** + * struct qrtr_node - endpoint node + * @ep_lock: lock for endpoint management and callbacks + * @ep: endpoint + * @ref: reference count for node + * @nid: node id + * @net_id: network cluster identifer + * @hello_sent: hello packet sent to endpoint + * @hello_rcvd: hello packet received from endpoint + * @qrtr_tx_flow: tree with tx counts per flow + * @resume_tx: waiters for a resume tx from the remote + * @qrtr_tx_lock: lock for qrtr_tx_flow + * 
@rx_queue: receive queue + * @item: list item for broadcast list + * @kworker: worker thread for recv work + * @task: task to run the worker thread + * @read_data: scheduled work for recv work + * @say_hello: scheduled work for initiating hello + * @ws: wakeupsource avoid system suspend + * @ilc: ipc logging context reference + */ +struct qrtr_node { + struct mutex ep_lock; + struct qrtr_endpoint *ep; + struct kref ref; + unsigned int nid; + unsigned int net_id; + atomic_t hello_sent; + atomic_t hello_rcvd; + + struct radix_tree_root qrtr_tx_flow; + struct wait_queue_head resume_tx; + struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */ + + struct sk_buff_head rx_queue; + struct list_head item; + + struct kthread_worker kworker; + struct task_struct *task; + struct kthread_work read_data; + struct kthread_work say_hello; + + struct wakeup_source *ws; + void *ilc; +}; + +struct qrtr_tx_flow_waiter { + struct list_head node; + struct sock *sk; +}; + +/** + * struct qrtr_tx_flow - tx flow control + * @pending: number of waiting senders + * @tx_failed: indicates that a message with confirm_rx flag was lost + * @waiters: list of ports to notify when this flow resumes + */ +struct qrtr_tx_flow { + atomic_t pending; + int tx_failed; + struct list_head waiters; +}; + +#define QRTR_TX_FLOW_HIGH 10 +#define QRTR_TX_FLOW_LOW 5 + +static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt); +static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, + int type, struct sockaddr_qrtr *from, + struct sockaddr_qrtr *to, unsigned int flags); +static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, + int type, struct sockaddr_qrtr *from, + struct sockaddr_qrtr *to, unsigned int flags); +static struct qrtr_sock *qrtr_port_lookup(int port); +static void qrtr_port_put(struct qrtr_sock *ipc); + +static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr, + struct sk_buff *skb) +{ + struct qrtr_ctrl_pkt pkt = {0,}; + u64 pl_buf = 0; + int type; + + if (!hdr || !skb) + return; + + type = le32_to_cpu(hdr->type); + if (type == QRTR_TYPE_DATA) { + skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pl_buf, sizeof(pl_buf)); + QRTR_INFO(node->ilc, + "TX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] [%s]\n", + hdr->size, hdr->confirm_rx, + hdr->src_node_id, hdr->src_port_id, + hdr->dst_node_id, hdr->dst_port_id, + (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32), + current->comm); + } else { + skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pkt, sizeof(pkt)); + if (type == QRTR_TYPE_NEW_SERVER || + type == QRTR_TYPE_DEL_SERVER) + QRTR_INFO(node->ilc, + "TX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n", + type, le32_to_cpu(pkt.server.service), + le32_to_cpu(pkt.server.instance), + le32_to_cpu(pkt.server.node), + le32_to_cpu(pkt.server.port)); + else if (type == QRTR_TYPE_DEL_CLIENT || + type == QRTR_TYPE_RESUME_TX) + QRTR_INFO(node->ilc, + "TX CTRL: cmd:0x%x addr[0x%x:0x%x]\n", + type, le32_to_cpu(pkt.client.node), + le32_to_cpu(pkt.client.port)); + else if (type == QRTR_TYPE_HELLO || + type == QRTR_TYPE_BYE) + QRTR_INFO(node->ilc, + "TX CTRL: cmd:0x%x node[0x%x]\n", + type, hdr->src_node_id); + else if (type == QRTR_TYPE_DEL_PROC) + QRTR_INFO(node->ilc, + "TX CTRL: cmd:0x%x node[0x%x]\n", + type, pkt.proc.node); + } +} + +#if defined(CONFIG_RPMSG_QCOM_GLINK_NATIVE) +static void qrtr_log_resume_pkt(struct qrtr_cb *cb, u64 pl_buf) +{ + unsigned int service_id; + + if (glink_resume_pkt) { + glink_resume_pkt = false; + service_id = 
qrtr_get_service_id(cb->src_node, cb->src_port); + pr_info("[QRTR RESUME PKT]:src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]: service[0x%x]\n", + cb->src_node, cb->src_port, + cb->dst_node, cb->dst_port, + (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32), + service_id); + } +} +#endif + +static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb) +{ + struct qrtr_ctrl_pkt pkt = {0,}; + struct qrtr_cb *cb; + u64 pl_buf = 0; + + if (!skb) + return; + + cb = (struct qrtr_cb *)skb->cb; + + if (cb->type == QRTR_TYPE_DATA) { + skb_copy_bits(skb, 0, &pl_buf, sizeof(pl_buf)); + QRTR_INFO(node->ilc, + "RX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]\n", + skb->len, cb->confirm_rx, cb->src_node, cb->src_port, + cb->dst_node, cb->dst_port, + (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32)); +#if defined(CONFIG_RPMSG_QCOM_GLINK_NATIVE) + qrtr_log_resume_pkt(cb, pl_buf); +#endif + } else { + skb_copy_bits(skb, 0, &pkt, sizeof(pkt)); + if (cb->type == QRTR_TYPE_NEW_SERVER || + cb->type == QRTR_TYPE_DEL_SERVER) + QRTR_INFO(node->ilc, + "RX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n", + cb->type, le32_to_cpu(pkt.server.service), + le32_to_cpu(pkt.server.instance), + le32_to_cpu(pkt.server.node), + le32_to_cpu(pkt.server.port)); + else if (cb->type == QRTR_TYPE_DEL_CLIENT || + cb->type == QRTR_TYPE_RESUME_TX) + QRTR_INFO(node->ilc, + "RX CTRL: cmd:0x%x addr[0x%x:0x%x]\n", + cb->type, le32_to_cpu(pkt.client.node), + le32_to_cpu(pkt.client.port)); + else if (cb->type == QRTR_TYPE_HELLO || + cb->type == QRTR_TYPE_BYE) + QRTR_INFO(node->ilc, + "RX CTRL: cmd:0x%x node[0x%x]\n", + cb->type, cb->src_node); + } +} + +static bool refcount_dec_and_rwsem_lock(refcount_t *r, + struct rw_semaphore *sem) +{ + if (refcount_dec_not_one(r)) + return false; + + down_write(sem); + if (!refcount_dec_and_test(r)) { + up_write(sem); + return false; + } + + return true; +} + +static inline int kref_put_rwsem_lock(struct kref *kref, + void (*release)(struct kref *kref), + struct rw_semaphore *sem) +{ + if (refcount_dec_and_rwsem_lock(&kref->refcount, sem)) { + release(kref); + return 1; + } + return 0; +} + +/* Release node resources and free the node. + * + * Do not call directly, use qrtr_node_release. To be used with + * kref_put_mutex. As such, the node mutex is expected to be locked on call. 
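+ * In this file that lock is qrtr_epts_lock, taken for write by
+ * kref_put_rwsem_lock() and released below once the node is unlinked.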
+ */ +static void __qrtr_node_release(struct kref *kref) +{ + struct qrtr_tx_flow_waiter *waiter; + struct qrtr_tx_flow_waiter *temp; + struct radix_tree_iter iter; + struct qrtr_tx_flow *flow; + struct qrtr_node *node = container_of(kref, struct qrtr_node, ref); + unsigned long flags; + void __rcu **slot; + + spin_lock_irqsave(&qrtr_nodes_lock, flags); + if (node->nid != QRTR_EP_NID_AUTO) { + radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) { + if (node == *slot) + radix_tree_iter_delete(&qrtr_nodes, &iter, + slot); + } + } + spin_unlock_irqrestore(&qrtr_nodes_lock, flags); + + list_del(&node->item); + up_write(&qrtr_epts_lock); + + /* Free tx flow counters */ + mutex_lock(&node->qrtr_tx_lock); + radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { + flow = *slot; + list_for_each_entry_safe(waiter, temp, &flow->waiters, node) { + list_del(&waiter->node); + sock_put(waiter->sk); + kfree(waiter); + } + radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot); + kfree(flow); + } + mutex_unlock(&node->qrtr_tx_lock); + + wakeup_source_unregister(node->ws); + kthread_flush_worker(&node->kworker); + kthread_stop(node->task); + + skb_queue_purge(&node->rx_queue); + kfree(node); +} + +/* Increment reference to node. */ +static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node) +{ + if (node) + kref_get(&node->ref); + return node; +} + +/* Decrement reference to node and release as necessary. */ +static void qrtr_node_release(struct qrtr_node *node) +{ + if (!node) + return; + kref_put_rwsem_lock(&node->ref, __qrtr_node_release, &qrtr_epts_lock); +} + +/** + * qrtr_tx_resume() - reset flow control counter + * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on + * @skb: resume_tx packet + */ +static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb) +{ + struct qrtr_tx_flow_waiter *waiter; + struct qrtr_tx_flow_waiter *temp; + struct qrtr_ctrl_pkt pkt = {0,}; + struct qrtr_tx_flow *flow; + struct sockaddr_qrtr src; + struct qrtr_sock *ipc; + struct sk_buff *skbn; + unsigned long key; + + skb_copy_bits(skb, 0, &pkt, sizeof(pkt)); + if (le32_to_cpu(pkt.cmd) != QRTR_TYPE_RESUME_TX) + return; + + src.sq_family = AF_QIPCRTR; + src.sq_node = le32_to_cpu(pkt.client.node); + src.sq_port = le32_to_cpu(pkt.client.port); + key = (u64)src.sq_node << 32 | src.sq_port; + + mutex_lock(&node->qrtr_tx_lock); + flow = radix_tree_lookup(&node->qrtr_tx_flow, key); + if (!flow) { + mutex_unlock(&node->qrtr_tx_lock); + return; + } + + atomic_set(&flow->pending, 0); + wake_up_interruptible_all(&node->resume_tx); + + list_for_each_entry_safe(waiter, temp, &flow->waiters, node) { + list_del(&waiter->node); + skbn = alloc_skb(0, GFP_KERNEL); + if (skbn) { + ipc = qrtr_sk(waiter->sk); + qrtr_local_enqueue(NULL, skbn, QRTR_TYPE_RESUME_TX, + &src, &ipc->us, 0); + } + sock_put(waiter->sk); + kfree(waiter); + } + mutex_unlock(&node->qrtr_tx_lock); + + consume_skb(skb); +} + +/** + * qrtr_tx_wait() - flow control for outgoing packets + * @node: qrtr_node that the packet is to be send to + * @dest_node: node id of the destination + * @dest_port: port number of the destination + * @type: type of message + * + * The flow control scheme is based around the low and high "watermarks". When + * the low watermark is passed the confirm_rx flag is set on the outgoing + * message, which will trigger the remote to send a control message of the type + * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit + * further transmision should be paused. 
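+ * For example, with QRTR_TX_FLOW_LOW = 5 and QRTR_TX_FLOW_HIGH = 10 the fifth
+ * outstanding message is sent with confirm_rx set, and once ten are pending the
+ * sender blocks (non-blocking senders are queued as waiters and get -EAGAIN)
+ * until the remote's QRTR_TYPE_RESUME_TX lets qrtr_tx_resume() reset the count.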
+ * + * Return: 1 if confirm_rx should be set, 0 otherwise or errno failure + */ +static int qrtr_tx_wait(struct qrtr_node *node, struct sockaddr_qrtr *to, + struct sock *sk, int type, unsigned int flags) +{ + unsigned long key = (u64)to->sq_node << 32 | to->sq_port; + struct qrtr_tx_flow_waiter *waiter; + struct qrtr_tx_flow *flow; + int confirm_rx = 0; + long timeo; + long ret; + int cond; + + /* Never set confirm_rx on non-data packets */ + if (type != QRTR_TYPE_DATA) + return 0; + + /* Assume sk is set correctly for all data type packets */ + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); + + mutex_lock(&node->qrtr_tx_lock); + flow = radix_tree_lookup(&node->qrtr_tx_flow, key); + if (!flow) { + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + if (flow) { + INIT_LIST_HEAD(&flow->waiters); + if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) { + kfree(flow); + flow = NULL; + } + } + } + mutex_unlock(&node->qrtr_tx_lock); + + /* Set confirm_rx if we where unable to find and allocate a flow */ + if (!flow) + return 1; + + ret = timeo; + for (;;) { + mutex_lock(&node->qrtr_tx_lock); + if (READ_ONCE(flow->tx_failed)) { + WRITE_ONCE(flow->tx_failed, 0); + confirm_rx = 1; + mutex_unlock(&node->qrtr_tx_lock); + break; + } + + if (atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH) { + confirm_rx = atomic_inc_return(&flow->pending) == + QRTR_TX_FLOW_LOW; + mutex_unlock(&node->qrtr_tx_lock); + break; + } + if (!ret) { + list_for_each_entry(waiter, &flow->waiters, node) { + if (waiter->sk == sk) { + mutex_unlock(&node->qrtr_tx_lock); + return -EAGAIN; + } + } + waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); + if (!waiter) { + mutex_unlock(&node->qrtr_tx_lock); + return -ENOMEM; + } + waiter->sk = sk; + sock_hold(sk); + list_add_tail(&waiter->node, &flow->waiters); + QRTR_INFO(node->ilc, "new waiter for [0x%x:0x%x]\n", + to->sq_node, to->sq_port); + mutex_unlock(&node->qrtr_tx_lock); + return -EAGAIN; + } + mutex_unlock(&node->qrtr_tx_lock); + + cond = (!node->ep || READ_ONCE(flow->tx_failed) || + atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH); + ret = wait_event_interruptible_timeout(node->resume_tx, + cond, timeo); + if (ret < 0) + return ret; + if (!node->ep) + return -EPIPE; + } + return confirm_rx; +} + +/** + * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed + * @node: qrtr_node that the packet is to be send to + * @dest_node: node id of the destination + * @dest_port: port number of the destination + * + * Signal that the transmission of a message with confirm_rx flag failed. The + * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH, + * at which point transmission would stall forever waiting for the resume TX + * message associated with the dropped confirm_rx message. + * Work around this by marking the flow as having a failed transmission and + * cause the next transmission attempt to be sent with the confirm_rx. + */ +static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node, + int dest_port) +{ + unsigned long key = (u64)dest_node << 32 | dest_port; + struct qrtr_tx_flow *flow; + + mutex_lock(&node->qrtr_tx_lock); + flow = radix_tree_lookup(&node->qrtr_tx_flow, key); + if (flow) + WRITE_ONCE(flow->tx_failed, 1); + mutex_unlock(&node->qrtr_tx_lock); +} + +/* Pass an outgoing packet socket buffer to the endpoint driver. 
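+ * A v1 router header is prepended and the payload is padded to a 4-byte
+ * boundary before the skb is handed to ep->xmit().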
*/ +static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, + int type, struct sockaddr_qrtr *from, + struct sockaddr_qrtr *to, unsigned int flags) +{ + struct qrtr_hdr_v1 *hdr; + size_t len = skb->len; + int rc = -ENODEV; + int confirm_rx; + + if (!atomic_read(&node->hello_sent) && type != QRTR_TYPE_HELLO) { + kfree_skb(skb); + return rc; + } + if (atomic_read(&node->hello_sent) && type == QRTR_TYPE_HELLO) { + kfree_skb(skb); + return 0; + } + + /* If sk is null, this is a forwarded packet and should not wait */ + if (!skb->sk) { + struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb; + + confirm_rx = cb->confirm_rx; + } else { + confirm_rx = qrtr_tx_wait(node, to, skb->sk, type, flags); + if (confirm_rx < 0) { + kfree_skb(skb); + return confirm_rx; + } + } + + hdr = skb_push(skb, sizeof(*hdr)); + hdr->version = cpu_to_le32(QRTR_PROTO_VER_1); + hdr->type = cpu_to_le32(type); + hdr->src_node_id = cpu_to_le32(from->sq_node); + hdr->src_port_id = cpu_to_le32(from->sq_port); + if (to->sq_node == QRTR_NODE_BCAST) + hdr->dst_node_id = cpu_to_le32(node->nid); + else + hdr->dst_node_id = cpu_to_le32(to->sq_node); + + hdr->dst_port_id = cpu_to_le32(to->sq_port); + hdr->size = cpu_to_le32(len); + hdr->confirm_rx = !!confirm_rx; + + qrtr_log_tx_msg(node, hdr, skb); + rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); + if (rc) { + pr_err("%s: failed to pad size %lu to %lu rc:%d\n", __func__, + len, ALIGN(len, 4) + sizeof(*hdr), rc); + return rc; + } + + mutex_lock(&node->ep_lock); + if (node->ep) + rc = node->ep->xmit(node->ep, skb); + else + kfree_skb(skb); + mutex_unlock(&node->ep_lock); + + /* Need to ensure that a subsequent message carries the otherwise lost + * confirm_rx flag if we dropped this one */ + if (rc && confirm_rx) + qrtr_tx_flow_failed(node, to->sq_node, to->sq_port); + if (type == QRTR_TYPE_HELLO) { + if (!rc) + atomic_inc(&node->hello_sent); + else + kthread_queue_work(&node->kworker, &node->say_hello); + } + + return rc; +} + +/* Lookup node by id. + * + * callers must release with qrtr_node_release() + */ +static struct qrtr_node *qrtr_node_lookup(unsigned int nid) +{ + struct qrtr_node *node; + unsigned long flags; + + down_read(&qrtr_epts_lock); + spin_lock_irqsave(&qrtr_nodes_lock, flags); + node = radix_tree_lookup(&qrtr_nodes, nid); + node = qrtr_node_acquire(node); + spin_unlock_irqrestore(&qrtr_nodes_lock, flags); + up_read(&qrtr_epts_lock); + + return node; +} + +/* Assign node id to node. + * + * This is mostly useful for automatic node id assignment, based on + * the source id in the incoming packet. + */ +static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid) +{ + unsigned long flags; + + if (nid == node->nid || nid == QRTR_EP_NID_AUTO) + return; + + spin_lock_irqsave(&qrtr_nodes_lock, flags); + if (!radix_tree_lookup(&qrtr_nodes, nid)) + radix_tree_insert(&qrtr_nodes, nid, node); + + if (node->nid == QRTR_EP_NID_AUTO) + node->nid = nid; + spin_unlock_irqrestore(&qrtr_nodes_lock, flags); +} + +/** + * qrtr_peek_pkt_size() - Peek into the packet header to get potential pkt size + * + * @data: Starting address of the packet which points to router header. + * + * @returns: potential packet size on success, < 0 on error. + * + * This function is used by the underlying transport abstraction layer to + * peek into the potential packet size of an incoming packet. 
This information + * is used to perform link layer fragmentation and re-assembly + */ +int qrtr_peek_pkt_size(const void *data) +{ + const struct qrtr_hdr_v1 *v1; + const struct qrtr_hdr_v2 *v2; + unsigned int hdrlen; + unsigned int size; + unsigned int ver; + + /* Version field in v1 is little endian, so this works for both cases */ + ver = *(u8 *)data; + + switch (ver) { + case QRTR_PROTO_VER_1: + v1 = data; + hdrlen = sizeof(*v1); + size = le32_to_cpu(v1->size); + break; + case QRTR_PROTO_VER_2: + v2 = data; + hdrlen = sizeof(*v2) + v2->optlen; + size = le32_to_cpu(v2->size); + break; + default: + pr_err("qrtr: Invalid version %d\n", ver); + return -EINVAL; + } + + return ALIGN(size, 4) + hdrlen; +} +EXPORT_SYMBOL(qrtr_peek_pkt_size); + +static void qrtr_alloc_backup(struct work_struct *work) +{ + struct sk_buff *skb; + int errcode; + + while (skb_queue_len(&qrtr_backup_lo) < QRTR_BACKUP_LO_NUM) { + skb = alloc_skb_with_frags(sizeof(struct qrtr_hdr_v1), + QRTR_BACKUP_LO_SIZE, 0, &errcode, + GFP_KERNEL); + if (!skb) + break; + skb_queue_tail(&qrtr_backup_lo, skb); + } + while (skb_queue_len(&qrtr_backup_hi) < QRTR_BACKUP_HI_NUM) { + skb = alloc_skb_with_frags(sizeof(struct qrtr_hdr_v1), + QRTR_BACKUP_HI_SIZE, 0, &errcode, + GFP_KERNEL); + if (!skb) + break; + skb_queue_tail(&qrtr_backup_hi, skb); + } +} + +static struct sk_buff *qrtr_get_backup(size_t len) +{ + struct sk_buff *skb = NULL; + + if (len < QRTR_BACKUP_LO_SIZE) + skb = skb_dequeue(&qrtr_backup_lo); + else if (len < QRTR_BACKUP_HI_SIZE) + skb = skb_dequeue(&qrtr_backup_hi); + + if (skb) + queue_work(system_unbound_wq, &qrtr_backup_work); + + return skb; +} + +static void qrtr_backup_init(void) +{ + skb_queue_head_init(&qrtr_backup_lo); + skb_queue_head_init(&qrtr_backup_hi); + INIT_WORK(&qrtr_backup_work, qrtr_alloc_backup); + queue_work(system_unbound_wq, &qrtr_backup_work); +} + +static void qrtr_backup_deinit(void) +{ + cancel_work_sync(&qrtr_backup_work); + skb_queue_purge(&qrtr_backup_lo); + skb_queue_purge(&qrtr_backup_hi); +} + +/** + * qrtr_endpoint_post() - post incoming data + * @ep: endpoint handle + * @data: data pointer + * @len: size of data in bytes + * + * Return: 0 on success; negative error code on failure + */ +int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) +{ + struct qrtr_node *node = ep->node; + const struct qrtr_hdr_v1 *v1; + const struct qrtr_hdr_v2 *v2; + struct qrtr_ctrl_pkt *pkt; + struct qrtr_sock *ipc; + struct sk_buff *skb; + struct qrtr_cb *cb; + unsigned int size; + unsigned int ver; + size_t hdrlen; + int errcode; + + if (len == 0 || len & 3) + return -EINVAL; + + skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC); + if (!skb) { + skb = qrtr_get_backup(len); + if (!skb) { + pr_err("qrtr: Unable to get skb with len:%lu\n", len); + return -ENOMEM; + } + } + + skb_reserve(skb, sizeof(*v1)); + cb = (struct qrtr_cb *)skb->cb; + + /* Version field in v1 is little endian, so this works for both cases */ + ver = *(u8*)data; + + switch (ver) { + case QRTR_PROTO_VER_1: + if (len < sizeof(*v1)) + goto err; + v1 = data; + hdrlen = sizeof(*v1); + + cb->type = le32_to_cpu(v1->type); + cb->src_node = le32_to_cpu(v1->src_node_id); + cb->src_port = le32_to_cpu(v1->src_port_id); + cb->confirm_rx = !!v1->confirm_rx; + cb->dst_node = le32_to_cpu(v1->dst_node_id); + cb->dst_port = le32_to_cpu(v1->dst_port_id); + + size = le32_to_cpu(v1->size); + break; + case QRTR_PROTO_VER_2: + if (len < sizeof(*v2)) + goto err; + v2 = data; + hdrlen = sizeof(*v2) + v2->optlen; + 
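+		/* Widen the 16-bit v2 addressing into the 32-bit control block fields */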
+ cb->type = v2->type; + cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX); + cb->src_node = le16_to_cpu(v2->src_node_id); + cb->src_port = le16_to_cpu(v2->src_port_id); + cb->dst_node = le16_to_cpu(v2->dst_node_id); + cb->dst_port = le16_to_cpu(v2->dst_port_id); + + if (cb->src_port == (u16)QRTR_PORT_CTRL) + cb->src_port = QRTR_PORT_CTRL; + if (cb->dst_port == (u16)QRTR_PORT_CTRL) + cb->dst_port = QRTR_PORT_CTRL; + + size = le32_to_cpu(v2->size); + break; + default: + pr_err("qrtr: Invalid version %d\n", ver); + goto err; + } + + if (cb->dst_port == QRTR_PORT_CTRL_LEGACY) + cb->dst_port = QRTR_PORT_CTRL; + + if (len != ALIGN(size, 4) + hdrlen) + goto err; + + if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA && + cb->type != QRTR_TYPE_RESUME_TX) + goto err; + + skb->data_len = size; + skb->len = size; + skb_store_bits(skb, 0, data + hdrlen, size); + + qrtr_node_assign(node, cb->src_node); + if (cb->type == QRTR_TYPE_NEW_SERVER) { + pkt = (void *)data + hdrlen; + qrtr_node_assign(node, le32_to_cpu(pkt->server.node)); + } + + qrtr_log_rx_msg(node, skb); + /* All control packets and non-local destined data packets should be + * queued to the worker for forwarding handling. + */ + if (cb->type != QRTR_TYPE_DATA || cb->dst_node != qrtr_local_nid) { + skb_queue_tail(&node->rx_queue, skb); + kthread_queue_work(&node->kworker, &node->read_data); + pm_wakeup_ws_event(node->ws, qrtr_wakeup_ms, true); + } else { + ipc = qrtr_port_lookup(cb->dst_port); + if (!ipc) { + kfree_skb(skb); + return -ENODEV; + } + + if (sock_queue_rcv_skb(&ipc->sk, skb)) { + qrtr_port_put(ipc); + goto err; + } + + /* Force wakeup for all packets except for sensors */ + if (node->nid != 9) + pm_wakeup_ws_event(node->ws, qrtr_wakeup_ms, true); + + qrtr_port_put(ipc); + } + + return 0; + +err: + kfree_skb(skb); + return -EINVAL; + +} +EXPORT_SYMBOL_GPL(qrtr_endpoint_post); + +/** + * qrtr_alloc_ctrl_packet() - allocate control packet skb + * @pkt: reference to qrtr_ctrl_pkt pointer + * + * Returns newly allocated sk_buff, or NULL on failure + * + * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and + * on success returns a reference to the control packet in @pkt. + */ +static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt) +{ + const int pkt_len = sizeof(struct qrtr_ctrl_pkt); + struct sk_buff *skb; + + skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, QRTR_HDR_MAX_SIZE); + *pkt = skb_put_zero(skb, pkt_len); + + return skb; +} + +static bool qrtr_must_forward(struct qrtr_node *src, + struct qrtr_node *dst, u32 type) +{ + /* Node structure is not maintained for local processor. + * Hence src is null in that case. 
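+ * A NULL src therefore means a locally generated packet, which is always
+ * forwarded; otherwise packets (never HELLO or RESUME_TX) are forwarded only
+ * between endpoints whose net_id values differ by more than one.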
+ */ + if (!src) + return true; + + if (!dst) + return false; + + if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX) + return false; + + if (dst == src || dst->nid == QRTR_EP_NID_AUTO) + return false; + + if (abs(dst->net_id - src->net_id) > 1) + return true; + + return false; +} + +static void qrtr_fwd_ctrl_pkt(struct qrtr_node *src, struct sk_buff *skb) +{ + struct qrtr_node *node; + struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb; + + down_read(&qrtr_epts_lock); + list_for_each_entry(node, &qrtr_all_epts, item) { + struct sockaddr_qrtr from; + struct sockaddr_qrtr to; + struct sk_buff *skbn; + + if (!qrtr_must_forward(src, node, cb->type)) + continue; + + skbn = skb_clone(skb, GFP_KERNEL); + if (!skbn) + break; + + from.sq_family = AF_QIPCRTR; + from.sq_node = cb->src_node; + from.sq_port = cb->src_port; + + to.sq_family = AF_QIPCRTR; + to.sq_node = node->nid; + to.sq_port = QRTR_PORT_CTRL; + + qrtr_node_enqueue(node, skbn, cb->type, &from, &to, 0); + } + up_read(&qrtr_epts_lock); +} + +static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb) +{ + struct sockaddr_qrtr from = {AF_QIPCRTR, cb->src_node, cb->src_port}; + struct sockaddr_qrtr to = {AF_QIPCRTR, cb->dst_node, cb->dst_port}; + struct qrtr_node *node; + + node = qrtr_node_lookup(cb->dst_node); + if (!node) { + kfree_skb(skb); + return; + } + + qrtr_node_enqueue(node, skb, cb->type, &from, &to, 0); + qrtr_node_release(node); +} + +static void qrtr_sock_queue_skb(struct qrtr_node *node, struct sk_buff *skb, + struct qrtr_sock *ipc) +{ + struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb; + int rc; + + /* Don't queue HELLO if control port already received */ + if (cb->type == QRTR_TYPE_HELLO) { + if (atomic_read(&node->hello_rcvd)) { + kfree_skb(skb); + return; + } + atomic_inc(&node->hello_rcvd); + } + + rc = sock_queue_rcv_skb(&ipc->sk, skb); + if (rc) { + pr_err("%s: qrtr pkt dropped flow[%d] rc[%d]\n", + __func__, cb->confirm_rx, rc); + kfree_skb(skb); + } +} + +/* Handle not atomic operations for a received packet. 
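+ * Runs in the node's kworker: control packets are mirrored to eligible
+ * endpoints, RESUME_TX and non-local DATA packets are routed onward, and
+ * everything else is queued to the destination socket.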
*/ +static void qrtr_node_rx_work(struct kthread_work *work) +{ + struct qrtr_node *node = container_of(work, struct qrtr_node, + read_data); + struct sk_buff *skb; + char name[32] = {0,}; + + if (unlikely(!node->ilc)) { + snprintf(name, sizeof(name), "qrtr_%d", node->nid); + node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0); + } + + while ((skb = skb_dequeue(&node->rx_queue)) != NULL) { + struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb; + struct qrtr_sock *ipc; + + if (cb->type != QRTR_TYPE_DATA) + qrtr_fwd_ctrl_pkt(node, skb); + + if (cb->type == QRTR_TYPE_RESUME_TX) { + if (cb->dst_node != qrtr_local_nid) { + qrtr_fwd_pkt(skb, cb); + continue; + } + qrtr_tx_resume(node, skb); + } else if (cb->dst_node != qrtr_local_nid && + cb->type == QRTR_TYPE_DATA) { + qrtr_fwd_pkt(skb, cb); + } else { + ipc = qrtr_port_lookup(cb->dst_port); + if (!ipc) { + kfree_skb(skb); + } else { + qrtr_sock_queue_skb(node, skb, ipc); + qrtr_port_put(ipc); + } + } + } +} + +static void qrtr_hello_work(struct kthread_work *work) +{ + struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL}; + struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL}; + struct qrtr_ctrl_pkt *pkt; + struct qrtr_node *node; + struct qrtr_sock *ctrl; + struct sk_buff *skb; + + ctrl = qrtr_port_lookup(QRTR_PORT_CTRL); + if (!ctrl) + return; + + skb = qrtr_alloc_ctrl_packet(&pkt); + if (!skb) { + qrtr_port_put(ctrl); + return; + } + + node = container_of(work, struct qrtr_node, say_hello); + pkt->cmd = cpu_to_le32(QRTR_TYPE_HELLO); + from.sq_node = qrtr_local_nid; + to.sq_node = node->nid; + qrtr_node_enqueue(node, skb, QRTR_TYPE_HELLO, &from, &to, 0); + qrtr_port_put(ctrl); +} + +/** + * qrtr_endpoint_register() - register a new endpoint + * @ep: endpoint to register + * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment + * @rt: flag to notify real time low latency endpoint + * Return: 0 on success; negative error code on failure + * + * The specified endpoint must have the xmit function pointer set on call. 
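+ * The node starts as QRTR_EP_NID_AUTO and learns its real node id from the
+ * source ids of incoming packets; @net_id only groups endpoints for the
+ * forwarding decisions made in qrtr_must_forward().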
+ */ +int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id, + bool rt) +{ + struct qrtr_node *node; + struct sched_param param = {.sched_priority = 1}; + + if (!ep || !ep->xmit){ + pr_err("%s: error qrtr ep=%x ex->xmit=%x\n", + __func__, ep,ep->xmit); + + return -EINVAL; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node){ + return -ENOMEM; + } + kref_init(&node->ref); + mutex_init(&node->ep_lock); + skb_queue_head_init(&node->rx_queue); + node->nid = QRTR_EP_NID_AUTO; + node->ep = ep; + atomic_set(&node->hello_sent, 0); + atomic_set(&node->hello_rcvd, 0); + + kthread_init_work(&node->read_data, qrtr_node_rx_work); + kthread_init_work(&node->say_hello, qrtr_hello_work); + kthread_init_worker(&node->kworker); + node->task = kthread_run(kthread_worker_fn, &node->kworker, "qrtr_rx"); + if (IS_ERR(node->task)) { + kfree(node); + return -ENOMEM; + } + if (rt) + sched_setscheduler(node->task, SCHED_FIFO, ¶m); + + mutex_init(&node->qrtr_tx_lock); + INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL); + init_waitqueue_head(&node->resume_tx); + + qrtr_node_assign(node, node->nid); + node->net_id = net_id; + + down_write(&qrtr_epts_lock); + list_add(&node->item, &qrtr_all_epts); + up_write(&qrtr_epts_lock); + ep->node = node; + + node->ws = wakeup_source_register(NULL, "qrtr_ws"); + + kthread_queue_work(&node->kworker, &node->say_hello); + return 0; +} +EXPORT_SYMBOL_GPL(qrtr_endpoint_register); + +static void qrtr_notify_bye(u32 nid) +{ + struct sockaddr_qrtr src = {AF_QIPCRTR, nid, QRTR_PORT_CTRL}; + struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL}; + struct qrtr_ctrl_pkt *pkt; + struct sk_buff *skb; + + skb = qrtr_alloc_ctrl_packet(&pkt); + if (!skb) + return; + + pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE); + qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst, 0); +} + +static u32 qrtr_calc_checksum(struct qrtr_ctrl_pkt *pkt) +{ + u32 checksum = 0; + u32 mask = 0xffff; + u16 upper_nb; + u16 lower_nb; + u32 *msg; + int i; + + if (!pkt) + return checksum; + msg = (u32 *)pkt; + + for (i = 0; i < sizeof(*pkt) / sizeof(*msg); i++) { + lower_nb = *msg & mask; + upper_nb = (*msg >> 16) & mask; + checksum += (upper_nb + lower_nb); + msg++; + } + while (checksum > 0xffff) + checksum = (checksum & mask) + ((checksum >> 16) & mask); + + checksum = ~checksum & mask; + + return checksum; +} + +static void qrtr_fwd_del_proc(struct qrtr_node *src, unsigned int nid) +{ + struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL}; + struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL}; + struct qrtr_ctrl_pkt *pkt; + struct qrtr_node *dst; + struct sk_buff *skb; + + list_for_each_entry(dst, &qrtr_all_epts, item) { + if (!qrtr_must_forward(src, dst, QRTR_TYPE_DEL_PROC)) + continue; + + skb = qrtr_alloc_ctrl_packet(&pkt); + if (!skb) + return; + + pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_PROC); + pkt->proc.rsvd = QRTR_DEL_PROC_MAGIC; + pkt->proc.node = cpu_to_le32(nid); + pkt->proc.rsvd = cpu_to_le32(qrtr_calc_checksum(pkt)); + + from.sq_node = src->nid; + to.sq_node = dst->nid; + qrtr_node_enqueue(dst, skb, QRTR_TYPE_DEL_PROC, &from, &to, 0); + } +} + +/** + * qrtr_endpoint_unregister - unregister endpoint + * @ep: endpoint to unregister + */ +void qrtr_endpoint_unregister(struct qrtr_endpoint *ep) +{ + struct radix_tree_iter iter; + struct qrtr_node *node = ep->node; + unsigned long flags; + void __rcu **slot; + + mutex_lock(&node->ep_lock); + node->ep = NULL; + mutex_unlock(&node->ep_lock); + + /* Notify the local controller about the event */ + 
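+	/* For each node id mapped to this endpoint, drop the spinlock around the
+	 * BYE/DEL_PROC notifications since they allocate with GFP_KERNEL.
+	 */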
down_read(&qrtr_epts_lock); + spin_lock_irqsave(&qrtr_nodes_lock, flags); + radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) { + if (node != *slot) + continue; + + spin_unlock_irqrestore(&qrtr_nodes_lock, flags); + + qrtr_notify_bye(iter.index); + qrtr_fwd_del_proc(node, iter.index); + + spin_lock_irqsave(&qrtr_nodes_lock, flags); + } + spin_unlock_irqrestore(&qrtr_nodes_lock, flags); + up_read(&qrtr_epts_lock); + + /* Wake up any transmitters waiting for resume-tx from the node */ + wake_up_interruptible_all(&node->resume_tx); + + qrtr_node_release(node); + ep->node = NULL; +} +EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister); + +/* Lookup socket by port. + * + * Callers must release with qrtr_port_put() + */ +static struct qrtr_sock *qrtr_port_lookup(int port) +{ + struct qrtr_sock *ipc; + unsigned long flags; + + if (port == QRTR_PORT_CTRL) + port = 0; + + spin_lock_irqsave(&qrtr_port_lock, flags); + ipc = idr_find(&qrtr_ports, port); + if (ipc) + sock_hold(&ipc->sk); + spin_unlock_irqrestore(&qrtr_port_lock, flags); + + return ipc; +} + +/* Release acquired socket. */ +static void qrtr_port_put(struct qrtr_sock *ipc) +{ + sock_put(&ipc->sk); +} + +static void qrtr_send_del_client(struct qrtr_sock *ipc) +{ + struct qrtr_ctrl_pkt *pkt; + struct sockaddr_qrtr to; + struct qrtr_node *node; + struct sk_buff *skbn; + struct sk_buff *skb; + int type = QRTR_TYPE_DEL_CLIENT; + + skb = qrtr_alloc_ctrl_packet(&pkt); + if (!skb) + return; + + to.sq_family = AF_QIPCRTR; + to.sq_node = QRTR_NODE_BCAST; + to.sq_port = QRTR_PORT_CTRL; + + pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT); + pkt->client.node = cpu_to_le32(ipc->us.sq_node); + pkt->client.port = cpu_to_le32(ipc->us.sq_port); + + skb_set_owner_w(skb, &ipc->sk); + + if (ipc->state == QRTR_STATE_MULTI) { + qrtr_bcast_enqueue(NULL, skb, type, &ipc->us, &to, 0); + return; + } + + if (ipc->state > QRTR_STATE_INIT) { + node = qrtr_node_lookup(ipc->state); + if (!node) + goto exit; + + skbn = skb_clone(skb, GFP_KERNEL); + if (!skbn) { + qrtr_node_release(node); + goto exit; + } + + skb_set_owner_w(skbn, &ipc->sk); + qrtr_node_enqueue(node, skbn, type, &ipc->us, &to, 0); + qrtr_node_release(node); + } +exit: + qrtr_local_enqueue(NULL, skb, type, &ipc->us, &to, 0); +} + +/* Remove port assignment. */ +static void qrtr_port_remove(struct qrtr_sock *ipc) +{ + int port = ipc->us.sq_port; + unsigned long flags; + + qrtr_send_del_client(ipc); + if (port == QRTR_PORT_CTRL) + port = 0; + + __sock_put(&ipc->sk); + + spin_lock_irqsave(&qrtr_port_lock, flags); + idr_remove(&qrtr_ports, port); + spin_unlock_irqrestore(&qrtr_port_lock, flags); +} + +/* Assign port number to socket. + * + * Specify port in the integer pointed to by port, and it will be adjusted + * on return as necesssary. 
+ * + * Port may be: + * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET] + * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN + * >QRTR_MIN_EPH_SOCKET: Specified; available to all + */ +static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) +{ + int rc; + + if (!*port) { + rc = idr_alloc_cyclic(&qrtr_ports, ipc, QRTR_MIN_EPH_SOCKET, + QRTR_MAX_EPH_SOCKET + 1, GFP_ATOMIC); + if (rc >= 0) + *port = rc; + } else if (*port < QRTR_MIN_EPH_SOCKET && + !(capable(CAP_NET_ADMIN) || + in_egroup_p(AID_VENDOR_QRTR) || + in_egroup_p(GLOBAL_ROOT_GID))) { + rc = -EACCES; + } else if (*port == QRTR_PORT_CTRL) { + rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); + } else { + rc = idr_alloc_cyclic(&qrtr_ports, ipc, *port, *port + 1, + GFP_ATOMIC); + if (rc >= 0) + *port = rc; + } + + if (rc == -ENOSPC) + return -EADDRINUSE; + else if (rc < 0) + return rc; + + sock_hold(&ipc->sk); + + return 0; +} + +/* Reset all non-control ports */ +static void qrtr_reset_ports(void) +{ + struct qrtr_sock *ipc; + int id; + + idr_for_each_entry(&qrtr_ports, ipc, id) { + /* Don't reset control port */ + if (id == 0) + continue; + + sock_hold(&ipc->sk); + ipc->sk.sk_err = ENETRESET; + if (ipc->sk.sk_error_report) + ipc->sk.sk_error_report(&ipc->sk); + sock_put(&ipc->sk); + } +} + +/* Bind socket to address. + * + * Socket should be locked upon call. + */ +static int __qrtr_bind(struct socket *sock, + const struct sockaddr_qrtr *addr, int zapped) +{ + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + unsigned long flags; + int port; + int rc; + + /* rebinding ok */ + if (!zapped && addr->sq_port == ipc->us.sq_port) + return 0; + + spin_lock_irqsave(&qrtr_port_lock, flags); + port = addr->sq_port; + rc = qrtr_port_assign(ipc, &port); + if (rc) { + spin_unlock_irqrestore(&qrtr_port_lock, flags); + return rc; + } + /* Notify all open ports about the new controller */ + if (port == QRTR_PORT_CTRL) + qrtr_reset_ports(); + spin_unlock_irqrestore(&qrtr_port_lock, flags); + + + if (port == QRTR_PORT_CTRL) { + struct qrtr_node *node; + + down_write(&qrtr_epts_lock); + list_for_each_entry(node, &qrtr_all_epts, item) { + atomic_set(&node->hello_sent, 0); + atomic_set(&node->hello_rcvd, 0); + } + up_write(&qrtr_epts_lock); + } + + /* unbind previous, if any */ + if (!zapped) + qrtr_port_remove(ipc); + ipc->us.sq_port = port; + sock_reset_flag(sk, SOCK_ZAPPED); + + return 0; +} + +/* Auto bind to an ephemeral port. */ +static int qrtr_autobind(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_qrtr addr; + + if (!sock_flag(sk, SOCK_ZAPPED)) + return 0; + + addr.sq_family = AF_QIPCRTR; + addr.sq_node = qrtr_local_nid; + addr.sq_port = 0; + + return __qrtr_bind(sock, &addr, 1); +} + +/* Bind socket to specified sockaddr. */ +static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + int rc; + + if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR) + return -EINVAL; + + if (addr->sq_node != ipc->us.sq_node) + return -EINVAL; + + lock_sock(sk); + rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED)); + release_sock(sk); + + return rc; +} + +/* Queue packet to local peer socket. 
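+ * Packets to a missing control port are dropped silently; other lookup
+ * failures return -ENODEV and a full receive queue returns -ENOSPC.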
*/ +static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, + int type, struct sockaddr_qrtr *from, + struct sockaddr_qrtr *to, unsigned int flags) +{ + struct qrtr_sock *ipc; + struct qrtr_cb *cb; + struct sock *sk = skb->sk; + + ipc = qrtr_port_lookup(to->sq_port); + if (!ipc && to->sq_port == QRTR_PORT_CTRL) { + kfree_skb(skb); + return 0; + } + if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */ + kfree_skb(skb); + return -ENODEV; + } + /* Keep resetting NETRESET until socket is closed */ + if (sk && sk->sk_err == ENETRESET) { + sock_hold(sk); + sk->sk_err = ENETRESET; + if (sk->sk_error_report) + sk->sk_error_report(sk); + sock_put(sk); + kfree_skb(skb); + return 0; + } + + cb = (struct qrtr_cb *)skb->cb; + cb->src_node = from->sq_node; + cb->src_port = from->sq_port; + + if (sock_queue_rcv_skb(&ipc->sk, skb)) { + qrtr_port_put(ipc); + kfree_skb(skb); + return -ENOSPC; + } + + qrtr_port_put(ipc); + + return 0; +} + +/* Queue packet for broadcast. */ +static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, + int type, struct sockaddr_qrtr *from, + struct sockaddr_qrtr *to, unsigned int flags) +{ + struct sk_buff *skbn; + + down_read(&qrtr_epts_lock); + list_for_each_entry(node, &qrtr_all_epts, item) { + if (node->nid == QRTR_EP_NID_AUTO && type != QRTR_TYPE_HELLO) + continue; + + skbn = skb_clone(skb, GFP_KERNEL); + if (!skbn) + break; + skb_set_owner_w(skbn, skb->sk); + qrtr_node_enqueue(node, skbn, type, from, to, flags); + } + up_read(&qrtr_epts_lock); + + qrtr_local_enqueue(NULL, skb, type, from, to, flags); + + return 0; +} + +static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name); + int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int, + struct sockaddr_qrtr *, struct sockaddr_qrtr *, + unsigned int); + __le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA); + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + struct qrtr_ctrl_pkt pkt; + struct qrtr_node *node; + struct qrtr_node *srv_node; + struct sk_buff *skb; + size_t plen; + u32 type; + int rc; + + if (msg->msg_flags & ~(MSG_DONTWAIT)) + return -EINVAL; + + if (len > 65535) + return -EMSGSIZE; + + lock_sock(sk); + + if (addr) { + if (msg->msg_namelen < sizeof(*addr)) { + release_sock(sk); + return -EINVAL; + } + + if (addr->sq_family != AF_QIPCRTR) { + release_sock(sk); + return -EINVAL; + } + + rc = qrtr_autobind(sock); + if (rc) { + release_sock(sk); + return rc; + } + } else if (sk->sk_state == TCP_ESTABLISHED) { + addr = &ipc->peer; + } else { + release_sock(sk); + return -ENOTCONN; + } + + node = NULL; + srv_node = NULL; + if (addr->sq_node == QRTR_NODE_BCAST) { + if (addr->sq_port != QRTR_PORT_CTRL && + qrtr_local_nid != QRTR_NODE_BCAST) { + release_sock(sk); + return -ENOTCONN; + } + enqueue_fn = qrtr_bcast_enqueue; + } else if (addr->sq_node == ipc->us.sq_node) { + enqueue_fn = qrtr_local_enqueue; + } else { + node = qrtr_node_lookup(addr->sq_node); + if (!node) { + release_sock(sk); + return -ECONNRESET; + } + enqueue_fn = qrtr_node_enqueue; + if (ipc->state > QRTR_STATE_INIT && ipc->state != node->nid) + ipc->state = QRTR_STATE_MULTI; + else if (ipc->state == QRTR_STATE_INIT) + ipc->state = node->nid; + } + + plen = (len + 3) & ~3; + skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE, + msg->msg_flags & MSG_DONTWAIT, &rc); + if (!skb) { + rc = -ENOMEM; + goto out_node; + } + + skb_reserve(skb, QRTR_HDR_MAX_SIZE); + + rc = memcpy_from_msg(skb_put(skb, 
len), msg, len); + if (rc) { + kfree_skb(skb); + goto out_node; + } + + if (ipc->us.sq_port == QRTR_PORT_CTRL || + addr->sq_port == QRTR_PORT_CTRL) { + if (len < 4) { + rc = -EINVAL; + kfree_skb(skb); + goto out_node; + } + + /* control messages already require the type as 'command' */ + skb_copy_bits(skb, 0, &qrtr_type, 4); + } + + type = le32_to_cpu(qrtr_type); + if (addr->sq_port == QRTR_PORT_CTRL && type == QRTR_TYPE_NEW_SERVER) { + ipc->state = QRTR_STATE_MULTI; + + /* drop new server cmds that are not forwardable to dst node*/ + skb_copy_bits(skb, 0, &pkt, sizeof(pkt)); + srv_node = qrtr_node_lookup(pkt.server.node); + if (!qrtr_must_forward(srv_node, node, type)) { + rc = 0; + kfree_skb(skb); + qrtr_node_release(srv_node); + goto out_node; + } + qrtr_node_release(srv_node); + } + + rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags); + if (rc >= 0) + rc = len; + +out_node: + qrtr_node_release(node); + release_sock(sk); + + return rc; +} + +static int qrtr_send_resume_tx(struct qrtr_cb *cb) +{ + struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port }; + struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port }; + struct qrtr_ctrl_pkt *pkt; + struct qrtr_node *node; + struct sk_buff *skb; + int ret; + + node = qrtr_node_lookup(remote.sq_node); + if (!node) + return -EINVAL; + + skb = qrtr_alloc_ctrl_packet(&pkt); + if (!skb) + return -ENOMEM; + + pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX); + pkt->client.node = cpu_to_le32(cb->dst_node); + pkt->client.port = cpu_to_le32(cb->dst_port); + + ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, + &local, &remote, 0); + + qrtr_node_release(node); + + return ret; +} + +static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name); + struct sock *sk = sock->sk; + struct sk_buff *skb; + struct qrtr_cb *cb; + int copied, rc; + + + if (sock_flag(sk, SOCK_ZAPPED)) + return -EADDRNOTAVAIL; + + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, + flags & MSG_DONTWAIT, &rc); + if (!skb) + return rc; + + lock_sock(sk); + cb = (struct qrtr_cb *)skb->cb; + + copied = skb->len; + if (copied > size) { + copied = size; + msg->msg_flags |= MSG_TRUNC; + } + + rc = skb_copy_datagram_msg(skb, 0, msg, copied); + if (rc < 0) + goto out; + rc = copied; + + if (addr) { + /* There is an anonymous 2-byte hole after sq_family, + * make sure to clear it. 
+ */ + memset(addr, 0, sizeof(*addr)); + + addr->sq_family = AF_QIPCRTR; + addr->sq_node = cb->src_node; + addr->sq_port = cb->src_port; + msg->msg_namelen = sizeof(*addr); + } + +out: + if (cb->confirm_rx) + qrtr_send_resume_tx(cb); + + skb_free_datagram(sk, skb); + release_sock(sk); + + return rc; +} + +static int qrtr_connect(struct socket *sock, struct sockaddr *saddr, + int len, int flags) +{ + DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + int rc; + + if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR) + return -EINVAL; + + lock_sock(sk); + + sk->sk_state = TCP_CLOSE; + sock->state = SS_UNCONNECTED; + + rc = qrtr_autobind(sock); + if (rc) { + release_sock(sk); + return rc; + } + + ipc->peer = *addr; + sock->state = SS_CONNECTED; + sk->sk_state = TCP_ESTABLISHED; + + release_sock(sk); + + return 0; +} + +static int qrtr_getname(struct socket *sock, struct sockaddr *saddr, + int peer) +{ + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sockaddr_qrtr qaddr; + struct sock *sk = sock->sk; + + lock_sock(sk); + if (peer) { + if (sk->sk_state != TCP_ESTABLISHED) { + release_sock(sk); + return -ENOTCONN; + } + + qaddr = ipc->peer; + } else { + qaddr = ipc->us; + } + release_sock(sk); + + qaddr.sq_family = AF_QIPCRTR; + + memcpy(saddr, &qaddr, sizeof(qaddr)); + + return sizeof(qaddr); +} + +static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct qrtr_sock *ipc = qrtr_sk(sock->sk); + struct sock *sk = sock->sk; + struct sockaddr_qrtr *sq; + struct sk_buff *skb; + struct ifreq ifr; + long len = 0; + int rc = 0; + + lock_sock(sk); + + switch (cmd) { + case TIOCOUTQ: + len = sk->sk_sndbuf - sk_wmem_alloc_get(sk); + if (len < 0) + len = 0; + rc = put_user(len, (int __user *)argp); + break; + case TIOCINQ: + skb = skb_peek(&sk->sk_receive_queue); + if (skb) + len = skb->len; + rc = put_user(len, (int __user *)argp); + break; + case SIOCGIFADDR: + if (copy_from_user(&ifr, argp, sizeof(ifr))) { + rc = -EFAULT; + break; + } + + sq = (struct sockaddr_qrtr *)&ifr.ifr_addr; + *sq = ipc->us; + if (copy_to_user(argp, &ifr, sizeof(ifr))) { + rc = -EFAULT; + break; + } + break; + case SIOCADDRT: + case SIOCDELRT: + case SIOCSIFADDR: + case SIOCGIFDSTADDR: + case SIOCSIFDSTADDR: + case SIOCGIFBRDADDR: + case SIOCSIFBRDADDR: + case SIOCGIFNETMASK: + case SIOCSIFNETMASK: + rc = -EINVAL; + break; + default: + rc = -ENOIOCTLCMD; + break; + } + + release_sock(sk); + + return rc; +} + +static int qrtr_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct qrtr_sock *ipc; + + if (!sk) + return 0; + + lock_sock(sk); + + ipc = qrtr_sk(sk); + sk->sk_shutdown = SHUTDOWN_MASK; + if (!sock_flag(sk, SOCK_DEAD)) + sk->sk_state_change(sk); + + sock_orphan(sk); + sock->sk = NULL; + + if (!sock_flag(sk, SOCK_ZAPPED)) + qrtr_port_remove(ipc); + + skb_queue_purge(&sk->sk_receive_queue); + + release_sock(sk); + sock_put(sk); + + return 0; +} + +static const struct proto_ops qrtr_proto_ops = { + .owner = THIS_MODULE, + .family = AF_QIPCRTR, + .bind = qrtr_bind, + .connect = qrtr_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .listen = sock_no_listen, + .sendmsg = qrtr_sendmsg, + .recvmsg = qrtr_recvmsg, + .getname = qrtr_getname, + .ioctl = qrtr_ioctl, + .gettstamp = sock_gettstamp, + .poll = datagram_poll, + .shutdown = sock_no_shutdown, + .release = qrtr_release, + .mmap = sock_no_mmap, + .sendpage = 
sock_no_sendpage, +}; + +static struct proto qrtr_proto = { + .name = "QIPCRTR", + .owner = THIS_MODULE, + .obj_size = sizeof(struct qrtr_sock), +}; + +static int qrtr_create(struct net *net, struct socket *sock, + int protocol, int kern) +{ + struct qrtr_sock *ipc; + struct sock *sk; + + if (sock->type != SOCK_DGRAM) + return -EPROTOTYPE; + + sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern); + if (!sk) + return -ENOMEM; + + sock_set_flag(sk, SOCK_ZAPPED); + + sock_init_data(sock, sk); + sock->ops = &qrtr_proto_ops; + + ipc = qrtr_sk(sk); + ipc->us.sq_family = AF_QIPCRTR; + ipc->us.sq_node = qrtr_local_nid; + ipc->us.sq_port = 0; + ipc->state = QRTR_STATE_INIT; + + return 0; +} + +static const struct net_proto_family qrtr_family = { + .owner = THIS_MODULE, + .family = AF_QIPCRTR, + .create = qrtr_create, +}; + +static int __init qrtr_proto_init(void) +{ + int rc; + + rc = proto_register(&qrtr_proto, 1); + if (rc) + return rc; + + rc = sock_register(&qrtr_family); + if (rc) { + proto_unregister(&qrtr_proto); + return rc; + } + + qrtr_ns_init(); + + qrtr_backup_init(); + + return rc; +} +postcore_initcall(qrtr_proto_init); + +static void __exit qrtr_proto_fini(void) +{ + qrtr_ns_remove(); + sock_unregister(qrtr_family.family); + proto_unregister(&qrtr_proto); + + qrtr_backup_deinit(); +} +module_exit(qrtr_proto_fini); + +MODULE_DESCRIPTION("Qualcomm IPC-router driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_NETPROTO(PF_QIPCRTR); diff --git a/qrtr/qrtr.h b/qrtr/qrtr.h new file mode 100644 index 0000000..1c7ad59 --- /dev/null +++ b/qrtr/qrtr.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QRTR_H_ +#define __QRTR_H_ + +#include <linux/types.h> + +struct sk_buff; + +/* endpoint node id auto assignment */ +#define QRTR_EP_NID_AUTO (-1) +#define QRTR_EP_NET_ID_AUTO (1) + +#define QRTR_DEL_PROC_MAGIC 0xe111 + +/** + * struct qrtr_endpoint - endpoint handle + * @xmit: Callback for outgoing packets + * + * The socket buffer passed to the xmit function becomes owned by the endpoint + * driver. As such, when the driver is done with the buffer, it should + * call kfree_skb() on failure, or consume_skb() on success. + */ +struct qrtr_endpoint { + int (*xmit)(struct qrtr_endpoint *ep, struct sk_buff *skb); + /* private: not for endpoint use */ + struct qrtr_node *node; +}; + +int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id, + bool rt); + +void qrtr_endpoint_unregister(struct qrtr_endpoint *ep); + +int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len); + +int qrtr_peek_pkt_size(const void *data); + +void qrtr_ns_init(void); + +void qrtr_ns_remove(void); + +int qrtr_peek_pkt_size(const void *data); + +unsigned int qrtr_get_service_id(unsigned int node_id, unsigned int port_id); +#endif |
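
Note on the qrtr_endpoint API declared in qrtr/qrtr.h above: the xmit callback takes ownership of the passed skb, so an endpoint driver frees it with kfree_skb() on failure or consume_skb() on success, and hands received frames back to the core with qrtr_endpoint_post(). A minimal sketch of such a driver follows; it is not part of this patch, and my_ep, my_probe() and my_transport_tx() are hypothetical names used only for illustration.

/* Illustrative sketch only, not part of this commit. Assumes a hypothetical
 * transport that exposes my_transport_tx(buf, len) for outgoing frames.
 */
#include <linux/skbuff.h>
#include "qrtr.h"

static int my_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
	int rc = my_transport_tx(skb->data, skb->len);	/* hypothetical send */

	if (rc) {
		kfree_skb(skb);		/* xmit owns the skb: free on failure */
		return rc;
	}
	consume_skb(skb);		/* ...and consume it on success */
	return 0;
}

static struct qrtr_endpoint my_ep = {
	.xmit = my_xmit,
};

static int my_probe(void)
{
	/* Let the core pick the net id; the final bool is the rt flag from
	 * the prototype above (left false here). Frames received by the
	 * transport are later injected with:
	 *   qrtr_endpoint_post(&my_ep, data, len);
	 */
	return qrtr_endpoint_register(&my_ep, QRTR_EP_NET_ID_AUTO, false);
}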