1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "chre/core/event_loop.h"
18
19 #include "chre/core/event.h"
20 #include "chre/core/event_loop_manager.h"
21 #include "chre/core/nanoapp.h"
22 #include "chre/platform/context.h"
23 #include "chre/platform/fatal_error.h"
24 #include "chre/platform/log.h"
25 #include "chre/platform/system_time.h"
26 #include "chre/util/conditional_lock_guard.h"
27 #include "chre/util/lock_guard.h"
28 #include "chre/util/system/debug_dump.h"
29 #include "chre/util/time.h"
30 #include "chre_api/chre/version.h"
31
32 namespace chre {
33
34 namespace {
35
36 /**
37 * Populates a chreNanoappInfo structure using info from the given Nanoapp
38 * instance.
39 *
40 * @param app A potentially null pointer to the Nanoapp to read from
41 * @param info The structure to populate - should not be null, but this function
42 * will handle that input
43 *
44 * @return true if neither app nor info were null, and info was populated
45 */
populateNanoappInfo(const Nanoapp * app,struct chreNanoappInfo * info)46 bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) {
47 bool success = false;
48
49 if (app != nullptr && info != nullptr) {
50 info->appId = app->getAppId();
51 info->version = app->getAppVersion();
52 info->instanceId = app->getInstanceId();
53 success = true;
54 }
55
56 return success;
57 }
58
59 } // anonymous namespace
60
findNanoappInstanceIdByAppId(uint64_t appId,uint32_t * instanceId) const61 bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId,
62 uint32_t *instanceId) const {
63 CHRE_ASSERT(instanceId != nullptr);
64 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
65
66 bool found = false;
67 for (const UniquePtr<Nanoapp>& app : mNanoapps) {
68 if (app->getAppId() == appId) {
69 *instanceId = app->getInstanceId();
70 found = true;
71 break;
72 }
73 }
74
75 return found;
76 }
77
forEachNanoapp(NanoappCallbackFunction * callback,void * data)78 void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) {
79 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
80
81 for (const UniquePtr<Nanoapp>& nanoapp : mNanoapps) {
82 callback(nanoapp.get(), data);
83 }
84 }
85
invokeMessageFreeFunction(uint64_t appId,chreMessageFreeFunction * freeFunction,void * message,size_t messageSize)86 void EventLoop::invokeMessageFreeFunction(
87 uint64_t appId, chreMessageFreeFunction *freeFunction, void *message,
88 size_t messageSize) {
89 Nanoapp *nanoapp = lookupAppByAppId(appId);
90 if (nanoapp == nullptr) {
91 LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId);
92 } else {
93 auto prevCurrentApp = mCurrentApp;
94 mCurrentApp = nanoapp;
95 freeFunction(message, messageSize);
96 mCurrentApp = prevCurrentApp;
97 }
98 }
99
void EventLoop::run() {
  LOGI("EventLoop start");

  // True when at least one nanoapp still has an undelivered event in its own
  // queue after the last delivery pass; while true we must not block waiting
  // on the inbound queue, or those queued events would stall.
  bool havePendingEvents = false;
  while (mRunning) {
    // Events are delivered in two stages: first they arrive in the inbound
    // event queue mEvents (potentially posted from another thread), then within
    // this context these events are distributed to smaller event queues
    // associated with each Nanoapp that should receive the event. Once the
    // event is delivered to all interested Nanoapps, its free callback is
    // invoked.
    if (!havePendingEvents || !mEvents.empty()) {
      // Track the inbound queue's high-water mark for the debug dump (see
      // logStateToBuffer).
      if (mEvents.size() > mMaxEventPoolUsage) {
        mMaxEventPoolUsage = mEvents.size();
      }

      // mEvents.pop() will be a blocking call if mEvents.empty()
      distributeEvent(mEvents.pop());
    }

    // Round-robin: hand at most one event to each nanoapp with a non-empty
    // queue; returns whether any nanoapp still has events pending.
    havePendingEvents = deliverEvents();

    // Let the platform's power control hook react to how much inbound work
    // remains queued.
    mPowerControlManager.postEventLoopProcess(mEvents.size());
  }

  // Deliver any events sitting in Nanoapps' own queues (we could drop them to
  // exit faster, but this is less code and should complete quickly under normal
  // conditions), then purge the main queue of events pending distribution. All
  // nanoapps should be prevented from sending events or messages at this point
  // via currentNanoappIsStopping() returning true.
  flushNanoappEventQueues();
  while (!mEvents.empty()) {
    freeEvent(mEvents.pop());
  }

  // Unload all running nanoapps (from the back, so erase cost is minimal and
  // indices of remaining apps are unaffected)
  while (!mNanoapps.empty()) {
    unloadNanoappAtIndex(mNanoapps.size() - 1);
  }

  LOGI("Exiting EventLoop");
}
142
// Assigns an instance ID to the given nanoapp, adds it to the nanoapp list,
// and invokes its start() entry point. On success, ownership of the nanoapp
// is transferred into mNanoapps and a CHRE_EVENT_NANOAPP_STARTED event is
// broadcast; on failure, the nanoapp is destroyed. Returns true if start()
// succeeded.
bool EventLoop::startNanoapp(UniquePtr<Nanoapp>& nanoapp) {
  CHRE_ASSERT(!nanoapp.isNull());
  bool success = false;
  auto *eventLoopManager = EventLoopManagerSingleton::get();
  EventLoop& eventLoop = eventLoopManager->getEventLoop();
  uint32_t existingInstanceId;

  if (nanoapp.isNull()) {
    // no-op, invalid argument
  } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(),
                                                    &existingInstanceId)) {
    // Refuse to run two instances of the same app ID concurrently.
    LOGE("App with ID 0x%016" PRIx64 " already exists as instance ID 0x%"
         PRIx32, nanoapp->getAppId(), existingInstanceId);
  } else if (!mNanoapps.prepareForPush()) {
    LOG_OOM();
  } else {
    nanoapp->setInstanceId(eventLoopManager->getNextInstanceId());
    LOGD("Instance ID %" PRIu32 " assigned to app ID 0x%016" PRIx64,
         nanoapp->getInstanceId(), nanoapp->getAppId());

    Nanoapp *newNanoapp = nanoapp.get();
    {
      // Lock only around the list mutation so readers on other threads never
      // observe a partially-inserted entry.
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.push_back(std::move(nanoapp));
      // After this point, nanoapp is null as we've transferred ownership into
      // mNanoapps.back() - use newNanoapp to reference it
    }

    // Run the nanoapp's start() entry point with it set as the current app so
    // CHRE API calls made during startup are attributed to it.
    mCurrentApp = newNanoapp;
    success = newNanoapp->start();
    mCurrentApp = nullptr;
    if (!success) {
      // TODO: to be fully safe, need to purge/flush any events and messages
      // sent by the nanoapp here (but don't call nanoappEnd). For now, we just
      // destroy the Nanoapp instance.
      LOGE("Nanoapp %" PRIu32 " failed to start", newNanoapp->getInstanceId());

      // Note that this lock protects against concurrent read and modification
      // of mNanoapps, but we are assured that no new nanoapps were added since
      // we pushed the new nanoapp
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.pop_back();
    } else {
      notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp);
    }
  }

  return success;
}
192
// Stops and unloads the nanoapp with the given instance ID, first flushing
// all in-flight messages and events it produced so nothing can reference its
// memory after it is destroyed. System nanoapps are only unloaded when
// allowSystemNanoappUnload is true. Returns true if the nanoapp was found and
// unloaded. NOTE(review): must run on the event loop thread since it drains
// event queues directly — confirm against callers.
bool EventLoop::unloadNanoapp(uint32_t instanceId,
                              bool allowSystemNanoappUnload) {
  bool unloaded = false;

  for (size_t i = 0; i < mNanoapps.size(); i++) {
    if (instanceId == mNanoapps[i]->getInstanceId()) {
      if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) {
        LOGE("Refusing to unload system nanoapp");
      } else {
        // Make sure all messages sent by this nanoapp at least have their
        // associated free callback processing pending in the event queue (i.e.
        // there are no messages pending delivery to the host)
        EventLoopManagerSingleton::get()->getHostCommsManager()
            .flushMessagesSentByNanoapp(mNanoapps[i]->getAppId());

        // Distribute all inbound events we have at this time - here we're
        // interested in handling any message free callbacks generated by
        // flushMessagesSentByNanoapp()
        flushInboundEventQueue();

        // Mark that this nanoapp is stopping early, so it can't send events or
        // messages during the nanoapp event queue flush
        mStoppingNanoapp = mNanoapps[i].get();

        // Process any pending events, with the intent of ensuring that we free
        // all events generated by this nanoapp
        flushNanoappEventQueues();

        // Post the unload event now (so we can reference the Nanoapp instance
        // directly), but nanoapps won't get it until after the unload completes
        notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp);

        // Finally, we are at a point where there should not be any pending
        // events or messages sent by the app that could potentially reference
        // the nanoapp's memory, so we are safe to unload it
        unloadNanoappAtIndex(i);
        mStoppingNanoapp = nullptr;

        // TODO: right now we assume that the nanoapp will clean up all of its
        // resource allocations in its nanoappEnd callback (memory, sensor
        // subscriptions, etc.), otherwise we're leaking resources. We should
        // perform resource cleanup automatically here to avoid these types of
        // potential leaks.

        LOGD("Unloaded nanoapp with instanceId %" PRIu32, instanceId);
        unloaded = true;
      }
      // Instance IDs are unique, so stop scanning either way.
      break;
    }
  }

  return unloaded;
}
246
postEventOrDie(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t targetInstanceId)247 bool EventLoop::postEventOrDie(uint16_t eventType, void *eventData,
248 chreEventCompleteFunction *freeCallback,
249 uint32_t targetInstanceId) {
250 bool success = false;
251
252 if (mRunning) {
253 success = allocateAndPostEvent(eventType, eventData, freeCallback,
254 kSystemInstanceId, targetInstanceId);
255 if (!success) {
256 // This can only happen if the event is a system event type. This
257 // postEvent method will fail if a non-system event is posted when the
258 // memory pool is close to full.
259 FATAL_ERROR("Failed to allocate system event type %" PRIu16, eventType);
260 }
261 }
262
263 return success;
264 }
265
postLowPriorityEventOrFree(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t senderInstanceId,uint32_t targetInstanceId)266 bool EventLoop::postLowPriorityEventOrFree(
267 uint16_t eventType, void *eventData,
268 chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
269 uint32_t targetInstanceId) {
270 bool success = false;
271
272 if (mRunning) {
273 if (mEventPool.getFreeBlockCount() > kMinReservedHighPriorityEventCount) {
274 success = allocateAndPostEvent(eventType, eventData, freeCallback,
275 senderInstanceId, targetInstanceId);
276 }
277 if (!success) {
278 if (freeCallback != nullptr) {
279 freeCallback(eventType, eventData);
280 }
281 LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu32,
282 eventType, targetInstanceId);
283 }
284 }
285
286 return success;
287 }
288
stop()289 void EventLoop::stop() {
290 auto callback = [](uint16_t /* type */, void * /* data */) {
291 EventLoopManagerSingleton::get()->getEventLoop().onStopComplete();
292 };
293
294 // Stop accepting new events and tell the main loop to finish.
295 postEventOrDie(0, nullptr, callback, kSystemInstanceId);
296 }
297
onStopComplete()298 void EventLoop::onStopComplete() {
299 mRunning = false;
300 }
301
findNanoappByInstanceId(uint32_t instanceId) const302 Nanoapp *EventLoop::findNanoappByInstanceId(uint32_t instanceId) const {
303 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
304 return lookupAppByInstanceId(instanceId);
305 }
306
populateNanoappInfoForAppId(uint64_t appId,struct chreNanoappInfo * info) const307 bool EventLoop::populateNanoappInfoForAppId(
308 uint64_t appId, struct chreNanoappInfo *info) const {
309 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
310 Nanoapp *app = lookupAppByAppId(appId);
311 return populateNanoappInfo(app, info);
312 }
313
populateNanoappInfoForInstanceId(uint32_t instanceId,struct chreNanoappInfo * info) const314 bool EventLoop::populateNanoappInfoForInstanceId(
315 uint32_t instanceId, struct chreNanoappInfo *info) const {
316 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
317 Nanoapp *app = lookupAppByInstanceId(instanceId);
318 return populateNanoappInfo(app, info);
319 }
320
currentNanoappIsStopping() const321 bool EventLoop::currentNanoappIsStopping() const {
322 return (mCurrentApp == mStoppingNanoapp || !mRunning);
323 }
324
logStateToBuffer(char * buffer,size_t * bufferPos,size_t bufferSize) const325 void EventLoop::logStateToBuffer(char *buffer, size_t *bufferPos,
326 size_t bufferSize) const {
327 debugDumpPrint(buffer, bufferPos, bufferSize, "\nNanoapps:\n");
328 for (const UniquePtr<Nanoapp>& app : mNanoapps) {
329 app->logStateToBuffer(buffer, bufferPos, bufferSize);
330 }
331
332 debugDumpPrint(buffer, bufferPos, bufferSize, "\nEvent Loop:\n");
333 debugDumpPrint(buffer, bufferPos, bufferSize,
334 " Max event pool usage: %zu/%zu\n",
335 mMaxEventPoolUsage, kMaxEventCount);
336 }
337
allocateAndPostEvent(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t senderInstanceId,uint32_t targetInstanceId)338 bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData,
339 chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
340 uint32_t targetInstanceId) {
341 bool success = false;
342
343 Milliseconds receivedTime = Nanoseconds(SystemTime::getMonotonicTime());
344 // The event loop should never contain more than 65 seconds worth of data
345 // unless something has gone terribly wrong so use uint16_t to save space.
346 uint16_t receivedTimeMillis = receivedTime.getMilliseconds();
347
348 Event *event = mEventPool.allocate(eventType, receivedTimeMillis, eventData,
349 freeCallback, senderInstanceId,
350 targetInstanceId);
351
352 if (event != nullptr) {
353 success = mEvents.push(event);
354 }
355 return success;
356 }
357
deliverEvents()358 bool EventLoop::deliverEvents() {
359 bool havePendingEvents = false;
360
361 // Do one loop of round-robin. We might want to have some kind of priority or
362 // time sharing in the future, but this should be good enough for now.
363 for (const UniquePtr<Nanoapp>& app : mNanoapps) {
364 if (app->hasPendingEvent()) {
365 havePendingEvents |= deliverNextEvent(app);
366 }
367 }
368
369 return havePendingEvents;
370 }
371
deliverNextEvent(const UniquePtr<Nanoapp> & app)372 bool EventLoop::deliverNextEvent(const UniquePtr<Nanoapp>& app) {
373 // TODO: cleaner way to set/clear this? RAII-style?
374 mCurrentApp = app.get();
375 Event *event = app->processNextEvent();
376 mCurrentApp = nullptr;
377
378 if (event->isUnreferenced()) {
379 freeEvent(event);
380 }
381
382 return app->hasPendingEvent();
383 }
384
distributeEvent(Event * event)385 void EventLoop::distributeEvent(Event *event) {
386 for (const UniquePtr<Nanoapp>& app : mNanoapps) {
387 if ((event->targetInstanceId == chre::kBroadcastInstanceId
388 && app->isRegisteredForBroadcastEvent(event->eventType))
389 || event->targetInstanceId == app->getInstanceId()) {
390 app->postEvent(event);
391 }
392 }
393
394 if (event->isUnreferenced()) {
395 // Events sent to the system instance ID are processed via the free callback
396 // and are not expected to be delivered to any nanoapp, so no need to log a
397 // warning in that case
398 if (event->senderInstanceId != kSystemInstanceId) {
399 LOGW("Dropping event 0x%" PRIx16, event->eventType);
400 }
401 freeEvent(event);
402 }
403 }
404
flushInboundEventQueue()405 void EventLoop::flushInboundEventQueue() {
406 while (!mEvents.empty()) {
407 distributeEvent(mEvents.pop());
408 }
409 }
410
flushNanoappEventQueues()411 void EventLoop::flushNanoappEventQueues() {
412 while (deliverEvents());
413 }
414
freeEvent(Event * event)415 void EventLoop::freeEvent(Event *event) {
416 if (event->freeCallback != nullptr) {
417 // TODO: find a better way to set the context to the creator of the event
418 mCurrentApp = lookupAppByInstanceId(event->senderInstanceId);
419 event->freeCallback(event->eventType, event->eventData);
420 mCurrentApp = nullptr;
421 }
422
423 mEventPool.deallocate(event);
424 }
425
lookupAppByAppId(uint64_t appId) const426 Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const {
427 for (const UniquePtr<Nanoapp>& app : mNanoapps) {
428 if (app->getAppId() == appId) {
429 return app.get();
430 }
431 }
432
433 return nullptr;
434 }
435
lookupAppByInstanceId(uint32_t instanceId) const436 Nanoapp *EventLoop::lookupAppByInstanceId(uint32_t instanceId) const {
437 // The system instance ID always has nullptr as its Nanoapp pointer, so can
438 // skip iterating through the nanoapp list for that case
439 if (instanceId != kSystemInstanceId) {
440 for (const UniquePtr<Nanoapp>& app : mNanoapps) {
441 if (app->getInstanceId() == instanceId) {
442 return app.get();
443 }
444 }
445 }
446
447 return nullptr;
448 }
449
notifyAppStatusChange(uint16_t eventType,const Nanoapp & nanoapp)450 void EventLoop::notifyAppStatusChange(uint16_t eventType,
451 const Nanoapp& nanoapp) {
452 auto *info = memoryAlloc<chreNanoappInfo>();
453 if (info == nullptr) {
454 LOG_OOM();
455 } else {
456 info->appId = nanoapp.getAppId();
457 info->version = nanoapp.getAppVersion();
458 info->instanceId = nanoapp.getInstanceId();
459
460 postEventOrDie(eventType, info, freeEventDataCallback);
461 }
462 }
463
// Invokes the end() entry point of the nanoapp at the given index in
// mNanoapps and then destroys it by erasing it from the list.
// NOTE(review): assumes index < mNanoapps.size(); callers pass indices they
// just validated — confirm before reuse elsewhere.
void EventLoop::unloadNanoappAtIndex(size_t index) {
  const UniquePtr<Nanoapp>& nanoapp = mNanoapps[index];

  // Lock here to prevent the nanoapp instance from being accessed between the
  // time it is ended and fully erased
  LockGuard<Mutex> lock(mNanoappsLock);

  // Let the app know it's going away; run end() with it set as the current
  // app so its final CHRE API calls are attributed correctly
  mCurrentApp = nanoapp.get();
  nanoapp->end();
  mCurrentApp = nullptr;

  // Destroy the Nanoapp instance
  mNanoapps.erase(index);
}
479
480 } // namespace chre
481