path: root/core/event_loop.cc
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "chre/core/event_loop.h"

#include "chre/core/event.h"
#include "chre/core/event_loop_manager.h"
#include "chre/core/nanoapp.h"
#include "chre/platform/context.h"
#include "chre/platform/fatal_error.h"
#include "chre/platform/log.h"
#include "chre/util/conditional_lock_guard.h"
#include "chre/util/lock_guard.h"
#include "chre/util/system/debug_dump.h"
#include "chre_api/chre/version.h"

namespace chre {

namespace {

/**
 * Populates a chreNanoappInfo structure using info from the given Nanoapp
 * instance.
 *
 * @param app A potentially null pointer to the Nanoapp to read from
 * @param info The structure to populate; should not be null, but this function
 *        handles a null pointer gracefully
 *
 * @return true if neither app nor info were null, and info was populated
 */
bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) {
  bool success = false;

  if (app != nullptr && info != nullptr) {
    info->appId      = app->getAppId();
    info->version    = app->getAppVersion();
    info->instanceId = app->getInstanceId();
    success = true;
  }

  return success;
}

}  // anonymous namespace

bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId,
                                             uint32_t *instanceId) const {
  CHRE_ASSERT(instanceId != nullptr);
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());

  bool found = false;
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if (app->getAppId() == appId) {
      *instanceId = app->getInstanceId();
      found = true;
      break;
    }
  }

  return found;
}

void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());

  for (const UniquePtr<Nanoapp>& nanoapp : mNanoapps) {
    callback(nanoapp.get(), data);
  }
}

void EventLoop::invokeMessageFreeFunction(
    uint64_t appId, chreMessageFreeFunction *freeFunction, void *message,
    size_t messageSize) {
  Nanoapp *nanoapp = lookupAppByAppId(appId);
  if (nanoapp == nullptr) {
    LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId);
  } else {
    auto prevCurrentApp = mCurrentApp;
    mCurrentApp = nanoapp;
    freeFunction(message, messageSize);
    mCurrentApp = prevCurrentApp;
  }
}

void EventLoop::run() {
  LOGI("EventLoop start");

  bool havePendingEvents = false;
  while (mRunning) {
    // Events are delivered in two stages: first they arrive in the inbound
    // event queue mEvents (potentially posted from another thread), then within
    // this context these events are distributed to smaller event queues
    // associated with each Nanoapp that should receive the event. Once the
    // event is delivered to all interested Nanoapps, its free callback is
    // invoked.
    if (!havePendingEvents || !mEvents.empty()) {
      if (mEvents.size() > mMaxEventPoolUsage) {
        mMaxEventPoolUsage = mEvents.size();
      }

      // mEvents.pop() will be a blocking call if mEvents.empty()
      distributeEvent(mEvents.pop());
    }

    havePendingEvents = deliverEvents();

    mPowerControlManager.postEventLoopProcess(mEvents.size());
  }

  // Deliver any events sitting in Nanoapps' own queues (we could drop them to
  // exit faster, but this is less code and should complete quickly under normal
  // conditions), then purge the main queue of events pending distribution. All
  // nanoapps should be prevented from sending events or messages at this point
  // via currentNanoappIsStopping() returning true.
  flushNanoappEventQueues();
  while (!mEvents.empty()) {
    freeEvent(mEvents.pop());
  }

  // Unload all running nanoapps
  while (!mNanoapps.empty()) {
    unloadNanoappAtIndex(mNanoapps.size() - 1);
  }

  LOGI("Exiting EventLoop");
}

bool EventLoop::startNanoapp(UniquePtr<Nanoapp>& nanoapp) {
  CHRE_ASSERT(!nanoapp.isNull());
  bool success = false;
  auto *eventLoopManager = EventLoopManagerSingleton::get();
  EventLoop& eventLoop = eventLoopManager->getEventLoop();
  uint32_t existingInstanceId;

  if (nanoapp.isNull()) {
    // no-op, invalid argument
  } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(),
                                                    &existingInstanceId)) {
    LOGE("App with ID 0x%016" PRIx64 " already exists as instance ID 0x%"
         PRIx32, nanoapp->getAppId(), existingInstanceId);
  } else if (!mNanoapps.prepareForPush()) {
    LOGE("Failed to allocate space for new nanoapp");
  } else {
    nanoapp->setInstanceId(eventLoopManager->getNextInstanceId());
    LOGD("Instance ID %" PRIu32 " assigned to app ID 0x%016" PRIx64,
         nanoapp->getInstanceId(), nanoapp->getAppId());

    Nanoapp *newNanoapp = nanoapp.get();
    {
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.push_back(std::move(nanoapp));
      // After this point, nanoapp is null as we've transferred ownership into
      // mNanoapps.back() - use newNanoapp to reference it
    }

    mCurrentApp = newNanoapp;
    success = newNanoapp->start();
    mCurrentApp = nullptr;
    if (!success) {
      // TODO: to be fully safe, need to purge/flush any events and messages
      // sent by the nanoapp here (but don't call nanoappEnd). For now, we just
      // destroy the Nanoapp instance.
      LOGE("Nanoapp %" PRIu32 " failed to start", newNanoapp->getInstanceId());

      // Note that this lock protects against concurrent read and modification
      // of mNanoapps, but we are assured that no new nanoapps were added since
      // we pushed the new nanoapp
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.pop_back();
    } else {
      notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp);
    }
  }

  return success;
}

bool EventLoop::unloadNanoapp(uint32_t instanceId,
                              bool allowSystemNanoappUnload) {
  bool unloaded = false;

  for (size_t i = 0; i < mNanoapps.size(); i++) {
    if (instanceId == mNanoapps[i]->getInstanceId()) {
      if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) {
        LOGE("Refusing to unload system nanoapp");
      } else {
        // Make sure all messages sent by this nanoapp at least have their
        // associated free callback processing pending in the event queue (i.e.
        // there are no messages pending delivery to the host)
        EventLoopManagerSingleton::get()->getHostCommsManager()
            .flushMessagesSentByNanoapp(mNanoapps[i]->getAppId());

        // Distribute all inbound events we have at this time - here we're
        // interested in handling any message free callbacks generated by
        // flushMessagesSentByNanoapp()
        flushInboundEventQueue();

        // Mark that this nanoapp is stopping early, so it can't send events or
        // messages during the nanoapp event queue flush
        mStoppingNanoapp = mNanoapps[i].get();

        // Process any pending events, with the intent of ensuring that we free
        // all events generated by this nanoapp
        flushNanoappEventQueues();

        // Post the unload event now (so we can reference the Nanoapp instance
        // directly), but nanoapps won't get it until after the unload completes
        notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp);

        // Finally, we are at a point where there should not be any pending
        // events or messages sent by the app that could potentially reference
        // the nanoapp's memory, so we are safe to unload it
        unloadNanoappAtIndex(i);
        mStoppingNanoapp = nullptr;

        // TODO: right now we assume that the nanoapp will clean up all of its
        // resource allocations in its nanoappEnd callback (memory, sensor
        // subscriptions, etc.), otherwise we're leaking resources. We should
        // perform resource cleanup automatically here to avoid these types of
        // potential leaks.

        LOGD("Unloaded nanoapp with instanceId %" PRIu32, instanceId);
        unloaded = true;
      }
      break;
    }
  }

  return unloaded;
}

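// Posts an event to the inbound queue while the loop is running. Non-system
// senders may only post while the event pool has headroom beyond the blocks
// reserved for system events; if an event passes those checks but still can't
// be allocated, that's treated as a fatal error.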
bool EventLoop::postEvent(uint16_t eventType, void *eventData,
    chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
    uint32_t targetInstanceId) {
  bool success = false;

  if (mRunning && (senderInstanceId == kSystemInstanceId ||
      mEventPool.getFreeBlockCount() > kMinReservedSystemEventCount)) {
    success = allocateAndPostEvent(eventType, eventData, freeCallback,
                                   senderInstanceId, targetInstanceId);
    if (!success) {
      // This can only happen if the event is a system event type. This
      // postEvent method will fail if a non-system event is posted when the
      // memory pool is close to full.
      FATAL_ERROR("Failed to allocate system event type %" PRIu16, eventType);
    }
  }

  return success;
}

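// Posts an event to the inbound queue without reserving headroom for system
// events. If the loop is running but the event can't be allocated or queued,
// the free callback is invoked immediately so the event data isn't leaked.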
bool EventLoop::postEventOrFree(uint16_t eventType, void *eventData,
    chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
    uint32_t targetInstanceId) {
  bool success = false;

  if (mRunning) {
    success = allocateAndPostEvent(eventType, eventData, freeCallback,
                                   senderInstanceId, targetInstanceId);
    if (!success) {
      freeCallback(eventType, eventData);
      LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu32,
           eventType, targetInstanceId);
    }
  }

  return success;
}

void EventLoop::stop() {
  // Post a null event to wake the loop in case it's blocked in mEvents.pop(),
  // then stop accepting new events and tell the main loop to finish
  postEvent(0, nullptr, nullptr, kSystemInstanceId, kSystemInstanceId);
  mRunning = false;
}

Nanoapp *EventLoop::findNanoappByInstanceId(uint32_t instanceId) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  return lookupAppByInstanceId(instanceId);
}

bool EventLoop::populateNanoappInfoForAppId(
    uint64_t appId, struct chreNanoappInfo *info) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  Nanoapp *app = lookupAppByAppId(appId);
  return populateNanoappInfo(app, info);
}

bool EventLoop::populateNanoappInfoForInstanceId(
    uint32_t instanceId, struct chreNanoappInfo *info) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  Nanoapp *app = lookupAppByInstanceId(instanceId);
  return populateNanoappInfo(app, info);
}

bool EventLoop::currentNanoappIsStopping() const {
  return (mCurrentApp == mStoppingNanoapp || !mRunning);
}

bool EventLoop::logStateToBuffer(char *buffer, size_t *bufferPos,
                                 size_t bufferSize) const {
  bool success = debugDumpPrint(buffer, bufferPos, bufferSize, "\nNanoapps:\n");
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    success &= app->logStateToBuffer(buffer, bufferPos, bufferSize);
  }

  success &= debugDumpPrint(buffer, bufferPos, bufferSize,
                            "\nEvent Loop:\n");
  success &= debugDumpPrint(buffer, bufferPos, bufferSize,
                            "  Max event pool usage: %zu/%zu\n",
                            mMaxEventPoolUsage, kMaxEventCount);
  return success;
}

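// Allocates an Event from the pool and pushes it onto the inbound queue,
// returning false if either step fails.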
bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData,
    chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
    uint32_t targetInstanceId) {
  bool success = false;

  Event *event = mEventPool.allocate(eventType, eventData, freeCallback,
                                     senderInstanceId, targetInstanceId);
  if (event != nullptr) {
    success = mEvents.push(event);
  }
  return success;
}

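// Gives each nanoapp with a pending event the chance to process one event, and
// returns true if any nanoapp still has events queued afterwards.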
bool EventLoop::deliverEvents() {
  bool havePendingEvents = false;

  // Do one loop of round-robin. We might want to have some kind of priority or
  // time sharing in the future, but this should be good enough for now.
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if (app->hasPendingEvent()) {
      havePendingEvents |= deliverNextEvent(app);
    }
  }

  return havePendingEvents;
}

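// Runs the nanoapp's handler for its next queued event with mCurrentApp set
// for the duration, frees the event if no nanoapp still references it, and
// returns whether this nanoapp has more events pending.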
bool EventLoop::deliverNextEvent(const UniquePtr<Nanoapp>& app) {
  // TODO: cleaner way to set/clear this? RAII-style?
  mCurrentApp = app.get();
  Event *event = app->processNextEvent();
  mCurrentApp = nullptr;

  if (event->isUnreferenced()) {
    freeEvent(event);
  }

  return app->hasPendingEvent();
}

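// Queues the event on every nanoapp it targets: a unicast match on the app's
// instance ID, or a broadcast event type the app has registered for. If no
// nanoapp takes a reference, the event is freed right away.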
void EventLoop::distributeEvent(Event *event) {
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if ((event->targetInstanceId == chre::kBroadcastInstanceId
            && app->isRegisteredForBroadcastEvent(event->eventType))
        || event->targetInstanceId == app->getInstanceId()) {
      app->postEvent(event);
    }
  }

  if (event->isUnreferenced()) {
    // Events sent to the system instance ID are processed via the free callback
    // and are not expected to be delivered to any nanoapp, so no need to log a
    // warning in that case
    if (event->senderInstanceId != kSystemInstanceId) {
      LOGW("Dropping event 0x%" PRIx16, event->eventType);
    }
    freeEvent(event);
  }
}

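// Distributes everything currently sitting in the inbound queue to the
// per-nanoapp event queues.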
void EventLoop::flushInboundEventQueue() {
  while (!mEvents.empty()) {
    distributeEvent(mEvents.pop());
  }
}

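// Keeps delivering events until every nanoapp's queue has been drained.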
void EventLoop::flushNanoappEventQueues() {
  while (deliverEvents());
}

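// Invokes the event's free callback, if any, with mCurrentApp set to the
// sending nanoapp (or nullptr for system-sent events), then returns the event
// to the pool.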
void EventLoop::freeEvent(Event *event) {
  if (event->freeCallback != nullptr) {
    // TODO: find a better way to set the context to the creator of the event
    mCurrentApp = lookupAppByInstanceId(event->senderInstanceId);
    event->freeCallback(event->eventType, event->eventData);
    mCurrentApp = nullptr;
  }

  mEventPool.deallocate(event);
}

Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const {
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if (app->getAppId() == appId) {
      return app.get();
    }
  }

  return nullptr;
}

Nanoapp *EventLoop::lookupAppByInstanceId(uint32_t instanceId) const {
  // The system instance ID always maps to a null Nanoapp pointer, so we can
  // skip iterating through the nanoapp list in that case
  if (instanceId != kSystemInstanceId) {
    for (const UniquePtr<Nanoapp>& app : mNanoapps) {
      if (app->getInstanceId() == instanceId) {
        return app.get();
      }
    }
  }

  return nullptr;
}

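// Heap-allocates a chreNanoappInfo describing the given nanoapp and posts it
// as the given event type, with freeEventDataCallback releasing the allocation
// once the event has been processed.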
void EventLoop::notifyAppStatusChange(uint16_t eventType,
                                      const Nanoapp& nanoapp) {
  auto *info = memoryAlloc<chreNanoappInfo>();
  if (info == nullptr) {
    LOGE("Couldn't alloc app status change event");
  } else {
    info->appId      = nanoapp.getAppId();
    info->version    = nanoapp.getAppVersion();
    info->instanceId = nanoapp.getInstanceId();

    postEvent(eventType, info, freeEventDataCallback);
  }
}

void EventLoop::unloadNanoappAtIndex(size_t index) {
  const UniquePtr<Nanoapp>& nanoapp = mNanoapps[index];

  // Let the app know it's going away
  mCurrentApp = nanoapp.get();
  nanoapp->end();
  mCurrentApp = nullptr;

  // Destroy the Nanoapp instance
  {
    LockGuard<Mutex> lock(mNanoappsLock);
    mNanoapps.erase(index);
  }
}

}  // namespace chre