author    Angela Stegmaier <angelabaker@ti.com>    2013-05-29 11:55:25 -0500
committer Chris Ring <cring@ti.com>                2013-06-06 16:44:12 -0700
commit    cc3bc3db131523ef0ddfdaa2e9c39dcbd0b8bebb (patch)
tree      a6cb4970e21ec8f8bf565f2893f3a8e8cf468698
parent    ffebfa02dcdb71581b05471db91eb078bc5e7c82 (diff)
download  ipc-cc3bc3db131523ef0ddfdaa2e9c39dcbd0b8bebb.tar.gz
QNX IPC: RPMSG-RPC - Don't Wait for Response During Destroy
When calling rpmsg_rpc_destroy, all existing connections are disconnected so that the remote core may be reloaded and stale connections are not left open. By the time rpmsg_rpc_destroy is called, the remote processor is assumed to have already been stopped, so there is no need to wait for a response from the disconnect call. Instead, the connection can immediately be marked as disconnected and destroy can continue.

This patch adds a parameter to each remote core connection that indicates whether the destroy function is currently executing. The disconnect path checks this flag: if destroy is in progress, it does not send the message to the remote core or wait for a response, and simply marks the connection as disconnected.

Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
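For readers skimming the patch, the disconnect-time check it introduces follows a common pattern: skip the round trip to a peer that is already known to be stopped. Below is a minimal, self-contained C sketch of that pattern, not the actual rpmsg-rpc code; the names (conn_t, disconnect, send_disconnect_and_wait) are illustrative only.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative connection object; mirrors the idea of the new
 * 'destroy' flag added to rpmsg_rpc_conn_object in this patch. */
typedef struct {
    bool created;   /* connection is currently established   */
    bool destroy;   /* module-level destroy is in progress    */
} conn_t;

/* Placeholder for the normal path: send a disconnect message to the
 * remote core and block until it acknowledges. */
static int send_disconnect_and_wait(conn_t *c)
{
    printf("sending disconnect, waiting for remote ack\n");
    c->created = false;
    return 0;
}

/* Disconnect a connection. If a destroy is in progress, the remote
 * processor is assumed to be stopped already, so no message is sent
 * and no response is awaited; the connection is just marked torn down. */
static int disconnect(conn_t *c)
{
    if (!c->created)
        return -1;                       /* already destroyed */

    if (c->destroy) {
        c->created = false;              /* shutdown path: no round trip */
        return 0;
    }

    return send_disconnect_and_wait(c);  /* normal path */
}

int main(void)
{
    conn_t c = { .created = true, .destroy = false };

    c.destroy = true;   /* module destroy begins: flag every connection */
    disconnect(&c);     /* returns immediately, no wait on the remote   */
    return 0;
}
```

The key design choice, as in the patch, is that the flag is set on each connection object before tearing it down, so any disconnect triggered during shutdown short-circuits instead of blocking on a core that will never reply.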
-rw-r--r--   qnx/src/ipc3x_dev/ti/syslink/rpmsg-rpc/rpmsg-rpc.c   11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/qnx/src/ipc3x_dev/ti/syslink/rpmsg-rpc/rpmsg-rpc.c b/qnx/src/ipc3x_dev/ti/syslink/rpmsg-rpc/rpmsg-rpc.c
index 63baca0..0d4b6b8 100644
--- a/qnx/src/ipc3x_dev/ti/syslink/rpmsg-rpc/rpmsg-rpc.c
+++ b/qnx/src/ipc3x_dev/ti/syslink/rpmsg-rpc/rpmsg-rpc.c
@@ -175,6 +175,7 @@ typedef struct rpmsg_rpc_conn_object {
UInt16 procId;
ProcMgr_Handle procH;
UInt32 numFuncs;
+ Bool destroy;
} rpmsg_rpc_conn_object;
/*!
@@ -754,7 +755,7 @@ _rpmsg_rpc_destroy(resmgr_context_t *ctp, io_devctl_t *msg,
GT_0trace(curTrace, GT_4CLASS, "Already destroyed.");
status = (EINVAL);
}
- else {
+ else if (!rpc->conn->destroy) {
hdr = (struct rppc_msg_header *)buf;
hdr->msg_type = RPPC_MSG_DESTROY_INSTANCE;
hdr->msg_len = sizeof(struct rppc_instance_handle);
@@ -784,6 +785,11 @@ _rpmsg_rpc_destroy(resmgr_context_t *ctp, io_devctl_t *msg,
}
}
}
+ else {
+ /* This is the shutdown, remote proc has already been stopped,
+ * so just set created to false. */
+ rpc->created = FALSE;
+ }
return status;
}
@@ -2338,11 +2344,12 @@ rpmsg_rpc_destroy (Void)
WaitingReaders_t * wr = NULL;
struct _msg_info info;
- GT_0trace (curTrace, GT_ENTER, "_rpmsg_rpc_destroy");
+ GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_destroy");
for (i = 0; i < MAX_CONNS; i++) {
if (rpmsg_rpc_state.objects[i]) {
rpmsg_rpc_conn_object * obj = rpmsg_rpc_state.objects[i];
+ obj->destroy = TRUE;
_deinit_rpmsg_rpc_device(obj->dev);
ProcMgr_close(&obj->procH);
Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));