liblog: free event tag map in __android_log_close()
There is no leak, since a reference always remained and could be
reused; still, it makes sense to also close the event tag map when
logging is closed. Doing so exposes a tagArray leak in
android_closeEventTagMap(), which is fixed here as well.
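For clarity, a minimal sketch of the fixed close path follows (only
the fields the diff touches are shown; the struct layout and the NULL
check are assumptions for illustration, not the verbatim liblog source):

    #include <stdlib.h>
    #include <sys/mman.h>

    /* Stand-in for the real structure; only the fields used by the
     * close path are shown (layout assumed). */
    typedef struct EventTagMap {
        void *mapAddr;   /* mmap()ed tag file */
        size_t mapLen;
        void *tagArray;  /* heap-allocated index, previously leaked */
    } EventTagMap;

    void android_closeEventTagMap(EventTagMap *map)
    {
        if (!map) return;

        munmap(map->mapAddr, map->mapLen);
        free(map->tagArray); /* the leak fixed by this change */
        free(map);
    }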
NB: __android_log_close() already assumes that no other thread is
logging at the same time, which is true for all current callers. Some
partial mitigation strategies exist if __android_log_close() is called
asynchronously, but the race is not completely solved; closing it
fully would cost a lock on every logging call.
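The mitigation in the logger_write.c hunk below boils down to:
detach the shared pointer atomically while holding the lock, then
close the detached map after dropping it. A minimal standalone sketch
of that pattern, with hypothetical my_lock()/my_unlock() stand-ins
for the liblog lock helpers:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct EventTagMap EventTagMap;      /* opaque here */
    extern void android_closeEventTagMap(EventTagMap *map);
    extern void my_lock(void);                   /* hypothetical lock helpers */
    extern void my_unlock(void);

    static atomic_uintptr_t tagMap;              /* shared map pointer */

    void detach_and_close(void)
    {
        my_lock();
        /* Swap to NULL under the lock so a writer checking afterwards
         * re-opens the map instead of reusing the one we free. */
        EventTagMap *m = (EventTagMap *)atomic_exchange(&tagMap, (uintptr_t)0);
        my_unlock();

        /* Close outside the lock; -1 is the writer's "open failed"
         * sentinel and must not be passed to the close routine. */
        if (m && (m != (EventTagMap *)(uintptr_t)-1LL))
            android_closeEventTagMap(m);
    }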
Test: gTest liblog-unit-tests
Bug: 30963384
Bug: 31456426
Change-Id: Ib76ad9302dba4d3f35250213f4dfef22498af238
diff --git a/liblog/event_tag_map.c b/liblog/event_tag_map.c
index 345f0d3..3e0b6b5 100644
--- a/liblog/event_tag_map.c
+++ b/liblog/event_tag_map.c
@@ -120,6 +120,7 @@
return;
munmap(map->mapAddr, map->mapLen);
+ free(map->tagArray);
free(map);
}
diff --git a/liblog/logger_write.c b/liblog/logger_write.c
index c7b5a84..08e6348 100644
--- a/liblog/logger_write.c
+++ b/liblog/logger_write.c
@@ -132,12 +132,20 @@
}
return kLogNotAvailable;
}
+
+#if defined(__BIONIC__)
+static atomic_uintptr_t tagMap;
+#endif
+
/*
* Release any logger resources. A new log write will immediately re-acquire.
*/
LIBLOG_ABI_PUBLIC void __android_log_close()
{
struct android_log_transport_write *transport;
+#if defined(__BIONIC__)
+ EventTagMap *m;
+#endif
__android_log_lock();
@@ -165,7 +173,28 @@
}
}
+#if defined(__BIONIC__)
+ /*
+ * The additional risk here is somewhat mitigated by the immediate
+ * unlock flushing the processor cache. The multi-threaded race that we
+ * choose to accept, to minimize locking, is an atomic_load in a writer
+ * picking up a value just prior to entering this routine; that results
+ * in a use-after-free.
+ *
+ * Again, anyone calling this to release the logging resources is most
+ * probably going to quiesce and then shut down, or to restart after a
+ * fork, so the risk should be non-existent. For this reason we choose
+ * a mitigation stance for efficiency instead of incurring the cost of
+ * a lock for every log write.
+ */
+ m = (EventTagMap *)atomic_exchange(&tagMap, (uintptr_t)0);
+#endif
+
__android_log_unlock();
+
+#if defined(__BIONIC__)
+ if (m != (EventTagMap *)(uintptr_t)-1LL) android_closeEventTagMap(m);
+#endif
+
}
/* log_init_lock assumed */
@@ -250,7 +279,6 @@
return -EPERM;
}
} else if (log_id == LOG_ID_EVENTS) {
- static atomic_uintptr_t map;
const char *tag;
EventTagMap *m, *f;
@@ -260,11 +288,11 @@
tag = NULL;
f = NULL;
- m = (EventTagMap *)atomic_load(&map);
+ m = (EventTagMap *)atomic_load(&tagMap);
if (!m) {
ret = __android_log_trylock();
- m = (EventTagMap *)atomic_load(&map); /* trylock flush cache */
+ m = (EventTagMap *)atomic_load(&tagMap); /* trylock flush cache */
if (!m) {
m = android_openEventTagMap(EVENT_TAG_MAP_FILE);
if (ret) { /* trylock failed, use local copy, mark for close */
@@ -273,7 +301,7 @@
if (!m) { /* One chance to open map file */
m = (EventTagMap *)(uintptr_t)-1LL;
}
- atomic_store(&map, (uintptr_t)m);
+ atomic_store(&tagMap, (uintptr_t)m);
}
}
if (!ret) { /* trylock succeeded, unlock */
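For reference, the writer-side lazy initialization this hunk renames
follows a double-checked pattern built on a trylock. A minimal sketch
under the same -1 sentinel convention, with hypothetical
my_trylock()/my_unlock() helpers in place of the liblog internals:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef struct EventTagMap EventTagMap;
    extern EventTagMap *android_openEventTagMap(const char *fileName);
    extern int my_trylock(void);   /* 0 on success, like __android_log_trylock() */
    extern void my_unlock(void);

    #define EVENT_TAG_MAP_FILE "/system/etc/event-log-tags" /* path per AOSP headers */

    static atomic_uintptr_t tagMap;

    /* Returns the shared map, opening it on first use; *close_after_use
     * is set when the trylock failed and the caller got a private copy
     * that it must close itself after the write. */
    static EventTagMap *get_event_tag_map(int *close_after_use)
    {
        *close_after_use = 0;
        EventTagMap *m = (EventTagMap *)atomic_load(&tagMap);
        if (!m) {
            int ret = my_trylock();
            m = (EventTagMap *)atomic_load(&tagMap); /* re-check: lock flushed cache */
            if (!m) {
                m = android_openEventTagMap(EVENT_TAG_MAP_FILE);
                if (ret) { /* trylock failed: private copy, mark for close */
                    *close_after_use = (m != NULL);
                } else {
                    if (!m) /* one chance to open: publish failure sentinel */
                        m = (EventTagMap *)(uintptr_t)-1LL;
                    atomic_store(&tagMap, (uintptr_t)m);
                }
            }
            if (!ret) my_unlock(); /* trylock succeeded, release it */
        }
        return (m == (EventTagMap *)(uintptr_t)-1LL) ? NULL : m;
    }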