/*
** Copyright 2014, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#include <log/log_properties.h>

#include <ctype.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <algorithm>

#include <private/android_logger.h>

#include "logger_write.h"

#ifdef __ANDROID__
#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>

static pthread_mutex_t lock_loggable = PTHREAD_MUTEX_INITIALIZER;

static int lock() {
  /*
   * If we trigger a signal handler in the middle of locked activity and the
   * signal handler logs a message, we could get into a deadlock state.
   */
  /*
   * Under any contention we can fall back to the non-cached method in less
   * time than the system call a contended mutex would cost.
   */
  return pthread_mutex_trylock(&lock_loggable);
}

static void unlock() {
  pthread_mutex_unlock(&lock_loggable);
}

struct cache {
  const prop_info* pinfo;
  uint32_t serial;
};

struct cache_char {
  struct cache cache;
  unsigned char c;
};

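/*
 * Returns nonzero when the property behind a cache entry has been updated
 * since the cached serial number was recorded.
 */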
static int check_cache(struct cache* cache) {
  return cache->pinfo && __system_property_serial(cache->pinfo) != cache->serial;
}

#define BOOLEAN_TRUE 0xFF
#define BOOLEAN_FALSE 0xFE
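
/*
 * These sentinels sit above the 7-bit ASCII range, so they can never collide
 * with a single-character priority letter such as 'V' or 'S'.
 */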

static void refresh_cache(struct cache_char* cache, const char* key) {
  char buf[PROP_VALUE_MAX];

  if (!cache->cache.pinfo) {
    cache->cache.pinfo = __system_property_find(key);
    if (!cache->cache.pinfo) {
      return;
    }
  }
  cache->cache.serial = __system_property_serial(cache->cache.pinfo);
  __system_property_read(cache->cache.pinfo, 0, buf);
  switch (buf[0]) {
    case 't':
    case 'T':
      cache->c = strcasecmp(buf + 1, "rue") ? buf[0] : BOOLEAN_TRUE;
      break;
    case 'f':
    case 'F':
      cache->c = strcasecmp(buf + 1, "alse") ? buf[0] : BOOLEAN_FALSE;
      break;
    default:
      cache->c = buf[0];
  }
}

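/*
 * Resolve the per-tag log level from the system properties described below.
 * Returns an ANDROID_LOG_* priority, or -1 when no matching property is set
 * or its value is unrecognized.
 */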
static int __android_log_level(const char* tag, size_t len) {
  /* sizeof() is used on this array below */
  static const char log_namespace[] = "persist.log.tag.";
  static const size_t base_offset = 8; /* skip "persist." */

  if (tag == nullptr || len == 0) {
    auto& tag_string = GetDefaultTag();
    tag = tag_string.c_str();
    len = tag_string.size();
  }

  /* sizeof(log_namespace) = strlen(log_namespace) + 1 */
  char key[sizeof(log_namespace) + len];
  char* kp;
  size_t i;
  char c = 0;
  /*
   * Single layer cache of four properties. Priorities are:
   *    log.tag.<tag>
   *    persist.log.tag.<tag>
   *    log.tag
   *    persist.log.tag
   * Where the missing tag matches all tags and becomes the
   * system global default. We do not support ro.log.tag* .
   */
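  /*
   * For example, "adb shell setprop log.tag.MyTag D" (MyTag being an
   * arbitrary tag) limits that tag to DEBUG and above, while
   * "setprop log.tag S" silences every tag without a per-tag override.
   */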
  static char* last_tag;
  static size_t last_tag_len;
  static uint32_t global_serial;
  /* some compilers erroneously warn of uninitialized use; only read when !not_locked */
  uint32_t current_global_serial = 0;
  static struct cache_char tag_cache[2];
  static struct cache_char global_cache[2];
  int change_detected;
  int global_change_detected;
  int not_locked;

  strcpy(key, log_namespace);

  global_change_detected = change_detected = not_locked = lock();

  if (!not_locked) {
    /*
     * Check all known serial numbers for changes.
     */
    for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
      if (check_cache(&tag_cache[i].cache)) {
        change_detected = 1;
      }
    }
    for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
      if (check_cache(&global_cache[i].cache)) {
        global_change_detected = 1;
      }
    }

    current_global_serial = __system_property_area_serial();
    if (current_global_serial != global_serial) {
      change_detected = 1;
      global_change_detected = 1;
    }
  }

  if (len) {
    int local_change_detected = change_detected;
    if (!not_locked) {
      if (!last_tag || !last_tag[0] || (last_tag[0] != tag[0]) ||
          strncmp(last_tag + 1, tag + 1, last_tag_len - 1)) {
        /* invalidate log.tag.<tag> cache */
        for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
          tag_cache[i].cache.pinfo = NULL;
          tag_cache[i].c = '\0';
        }
        if (last_tag) last_tag[0] = '\0';
        local_change_detected = 1;
      }
      if (!last_tag || !last_tag[0]) {
        if (!last_tag) {
          last_tag = static_cast<char*>(calloc(1, len + 1));
          last_tag_len = 0;
          if (last_tag) last_tag_len = len + 1;
        } else if (len >= last_tag_len) {
          last_tag = static_cast<char*>(realloc(last_tag, len + 1));
          last_tag_len = 0;
          if (last_tag) last_tag_len = len + 1;
        }
        if (last_tag) {
          strncpy(last_tag, tag, len);
          last_tag[len] = '\0';
        }
      }
    }
    strncpy(key + sizeof(log_namespace) - 1, tag, len);
    key[sizeof(log_namespace) - 1 + len] = '\0';

    kp = key;
    for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
      struct cache_char* cache = &tag_cache[i];
      struct cache_char temp_cache;

      if (not_locked) {
        temp_cache.cache.pinfo = NULL;
        temp_cache.c = '\0';
        cache = &temp_cache;
      }
      if (local_change_detected) {
        refresh_cache(cache, kp);
      }

      if (cache->c) {
        c = cache->c;
        break;
      }

      kp = key + base_offset;
    }
  }

  switch (toupper(c)) { /* if invalid, resort to global */
    case 'V':
    case 'D':
    case 'I':
    case 'W':
    case 'E':
    case 'F': /* Not officially supported */
    case 'A':
    case 'S':
    case BOOLEAN_FALSE: /* Not officially supported */
      break;
    default:
      /* clear '.' after log.tag */
      key[sizeof(log_namespace) - 2] = '\0';

      kp = key;
      for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
        struct cache_char* cache = &global_cache[i];
        struct cache_char temp_cache;

        if (not_locked) {
          temp_cache = *cache;
          if (temp_cache.cache.pinfo != cache->cache.pinfo) { /* check atomic */
            temp_cache.cache.pinfo = NULL;
            temp_cache.c = '\0';
          }
          cache = &temp_cache;
        }
        if (global_change_detected) {
          refresh_cache(cache, kp);
        }

        if (cache->c) {
          c = cache->c;
          break;
        }

        kp = key + base_offset;
      }
      break;
  }

  if (!not_locked) {
    global_serial = current_global_serial;
    unlock();
  }

  switch (toupper(c)) {
    /* clang-format off */
    case 'V': return ANDROID_LOG_VERBOSE;
    case 'D': return ANDROID_LOG_DEBUG;
    case 'I': return ANDROID_LOG_INFO;
    case 'W': return ANDROID_LOG_WARN;
    case 'E': return ANDROID_LOG_ERROR;
    case 'F': /* FALLTHRU */ /* Not officially supported */
    case 'A': return ANDROID_LOG_FATAL;
    case BOOLEAN_FALSE: /* FALLTHRU */ /* Not officially supported */
    case 'S': return ANDROID_LOG_SILENT;
      /* clang-format on */
  }
  return -1;
}

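/*
 * A message is loggable when its priority reaches whichever is lower of the
 * property-derived level above and the process-wide minimum priority; when
 * neither is set, default_prio decides.
 */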
int __android_log_is_loggable_len(int prio, const char* tag, size_t len, int default_prio) {
  int minimum_log_priority = __android_log_get_minimum_priority();
  int property_log_level = __android_log_level(tag, len);

  if (property_log_level >= 0 && minimum_log_priority != ANDROID_LOG_DEFAULT) {
    return prio >= std::min(property_log_level, minimum_log_priority);
  } else if (property_log_level >= 0) {
    return prio >= property_log_level;
  } else if (minimum_log_priority != ANDROID_LOG_DEFAULT) {
    return prio >= minimum_log_priority;
  } else {
    return prio >= default_prio;
  }
}

int __android_log_is_loggable(int prio, const char* tag, int default_prio) {
  auto len = tag ? strlen(tag) : 0;
  return __android_log_is_loggable_len(prio, tag, len, default_prio);
}

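/*
 * ro.debuggable is a read-only boot property, so the result is captured once
 * in a function-local static and reused for the life of the process.
 */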
int __android_log_is_debuggable() {
  static int is_debuggable = [] {
    char value[PROP_VALUE_MAX] = {};
    return __system_property_get("ro.debuggable", value) > 0 && !strcmp(value, "1");
  }();

  return is_debuggable;
}

/*
 * For properties that are read often but generally remain constant.
 * Since a change is rare, we accept a trylock failure gracefully.
 * Use a separate lock from is_loggable to keep contention down (b/25563384).
 */
struct cache2_char {
  pthread_mutex_t lock;
  uint32_t serial;
  const char* key_persist;
  struct cache_char cache_persist;
  const char* key_ro;
  struct cache_char cache_ro;
  unsigned char (*const evaluate)(const struct cache2_char* self);
};

static inline unsigned char do_cache2_char(struct cache2_char* self) {
  uint32_t current_serial;
  int change_detected;
  unsigned char c;

  if (pthread_mutex_trylock(&self->lock)) {
    /* We are willing to accept some race in this context */
    return self->evaluate(self);
  }

  change_detected = check_cache(&self->cache_persist.cache) || check_cache(&self->cache_ro.cache);
  current_serial = __system_property_area_serial();
  if (current_serial != self->serial) {
    change_detected = 1;
  }
  if (change_detected) {
    refresh_cache(&self->cache_persist, self->key_persist);
    refresh_cache(&self->cache_ro, self->key_ro);
    self->serial = current_serial;
  }
  c = self->evaluate(self);

  pthread_mutex_unlock(&self->lock);

  return c;
}

/*
 * Security state generally remains constant, but the device owner (DO) must
 * be able to turn off logging should it become spammy after an attack is
 * detected.
 */
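/*
 * Security logging is enabled only when persist.logd.security is "true" and
 * ro.organization_owned is set to something other than "false".
 */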
static unsigned char evaluate_security(const struct cache2_char* self) {
  unsigned char c = self->cache_ro.c;

  return (c != BOOLEAN_FALSE) && c && (self->cache_persist.c == BOOLEAN_TRUE);
}

int __android_log_security() {
  static struct cache2_char security = {
      PTHREAD_MUTEX_INITIALIZER, 0,
      "persist.logd.security",   {{NULL, 0xFFFFFFFF}, BOOLEAN_FALSE},
      "ro.organization_owned",   {{NULL, 0xFFFFFFFF}, BOOLEAN_FALSE},
      evaluate_security};

  return do_cache2_char(&security);
}

/*
 * Interface exposing the logd buffer size determination so that others need
 * not guess our intentions.
 */

/* cache structure */
struct cache_property {
  struct cache cache;
  char property[PROP_VALUE_MAX];
};

static void refresh_cache_property(struct cache_property* cache, const char* key) {
  if (!cache->cache.pinfo) {
    cache->cache.pinfo = __system_property_find(key);
    if (!cache->cache.pinfo) {
      return;
    }
  }
  cache->cache.serial = __system_property_serial(cache->cache.pinfo);
  __system_property_read(cache->cache.pinfo, 0, cache->property);
}

bool __android_logger_valid_buffer_size(unsigned long value) {
  return LOG_BUFFER_MIN_SIZE <= value && value <= LOG_BUFFER_MAX_SIZE;
}

struct cache2_property_size {
  pthread_mutex_t lock;
  uint32_t serial;
  const char* key_persist;
  struct cache_property cache_persist;
  const char* key_ro;
  struct cache_property cache_ro;
  unsigned long (*const evaluate)(const struct cache2_property_size* self);
};

static inline unsigned long do_cache2_property_size(struct cache2_property_size* self) {
  uint32_t current_serial;
  int change_detected;
  unsigned long v;

  if (pthread_mutex_trylock(&self->lock)) {
    /* We are willing to accept some race in this context */
    return self->evaluate(self);
  }

  change_detected = check_cache(&self->cache_persist.cache) || check_cache(&self->cache_ro.cache);
  current_serial = __system_property_area_serial();
  if (current_serial != self->serial) {
    change_detected = 1;
  }
  if (change_detected) {
    refresh_cache_property(&self->cache_persist, self->key_persist);
    refresh_cache_property(&self->cache_ro, self->key_ro);
    self->serial = current_serial;
  }
  v = self->evaluate(self);

  pthread_mutex_unlock(&self->lock);

  return v;
}

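/*
 * Parses the cached property as a byte count with an optional 'k'/'K' or
 * 'm'/'M' suffix, e.g. "1M" parses as 1048576 and "256K" as 262144; an
 * unknown trailing character, or a result outside
 * [LOG_BUFFER_MIN_SIZE, LOG_BUFFER_MAX_SIZE], yields 0.
 */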
static unsigned long property_get_size_from_cache(const struct cache_property* cache) {
  char* cp;
  unsigned long value = strtoul(cache->property, &cp, 10);

  switch (*cp) {
    case 'm':
    case 'M':
      value *= 1024;
      [[fallthrough]];
    case 'k':
    case 'K':
      value *= 1024;
      [[fallthrough]];
    case '\0':
      break;

    default:
      value = 0;
  }

  if (!__android_logger_valid_buffer_size(value)) {
    value = 0;
  }

  return value;
}

static unsigned long evaluate_property_get_size(const struct cache2_property_size* self) {
  unsigned long size = property_get_size_from_cache(&self->cache_persist);
  if (size) {
    return size;
  }
  return property_get_size_from_cache(&self->cache_ro);
}

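/*
 * Resolution order for a given buffer: persist.logd.size.<buffer>, then
 * ro.logd.size.<buffer>, then the global persist.logd.size / ro.logd.size,
 * and finally the compiled-in defaults. For the main buffer, for instance,
 * the per-buffer keys are persist.logd.size.main and ro.logd.size.main.
 */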
unsigned long __android_logger_get_buffer_size(log_id_t logId) {
  static const char global_tunable[] = "persist.logd.size"; /* Settings App */
  static const char global_default[] = "ro.logd.size";      /* BoardConfig.mk */
  static struct cache2_property_size global = {
      /* clang-format off */
    PTHREAD_MUTEX_INITIALIZER, 0,
    global_tunable, { { NULL, 0xFFFFFFFF }, {} },
    global_default, { { NULL, 0xFFFFFFFF }, {} },
    evaluate_property_get_size
      /* clang-format on */
  };
  char key_persist[strlen(global_tunable) + strlen(".security") + 1];
  char key_ro[strlen(global_default) + strlen(".security") + 1];
  struct cache2_property_size local = {
      /* clang-format off */
    PTHREAD_MUTEX_INITIALIZER, 0,
    key_persist, { { NULL, 0xFFFFFFFF }, {} },
    key_ro,      { { NULL, 0xFFFFFFFF }, {} },
    evaluate_property_get_size
      /* clang-format on */
  };
  unsigned long property_size, default_size;

  default_size = do_cache2_property_size(&global);
  if (!default_size) {
    char value[PROP_VALUE_MAX] = {};
    if (__system_property_get("ro.config.low_ram", value) == 0 || strcmp(value, "true") != 0) {
      default_size = LOG_BUFFER_SIZE;
    } else {
      default_size = LOG_BUFFER_MIN_SIZE;
    }
  }

  snprintf(key_persist, sizeof(key_persist), "%s.%s", global_tunable,
           android_log_id_to_name(logId));
  snprintf(key_ro, sizeof(key_ro), "%s.%s", global_default, android_log_id_to_name(logId));
  property_size = do_cache2_property_size(&local);

  if (!property_size) {
    property_size = default_size;
  }

  if (!property_size) {
    property_size = LOG_BUFFER_SIZE;
  }

  return property_size;
}

#else

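/*
 * Host (non-Android) fallback: there is no property system, so loggability
 * depends only on the process-wide minimum priority (INFO when unset) and
 * the build is always treated as debuggable.
 */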
int __android_log_is_loggable(int prio, const char*, int) {
  int minimum_priority = __android_log_get_minimum_priority();
  if (minimum_priority == ANDROID_LOG_DEFAULT) {
    minimum_priority = ANDROID_LOG_INFO;
  }
  return prio >= minimum_priority;
}

int __android_log_is_loggable_len(int prio, const char*, size_t, int def) {
  return __android_log_is_loggable(prio, nullptr, def);
}

int __android_log_is_debuggable() {
  return 1;
}

#endif