Merge changes I40c9ea47,I593aeed5
* changes:
  trusty: storage: add tests
  trusty: storage: add client lib for testing
diff --git a/adb/adb_trace.h b/adb/adb_trace.h
index d50f947..5206a99 100644
--- a/adb/adb_trace.h
+++ b/adb/adb_trace.h
@@ -41,7 +41,7 @@
};
#define VLOG_IS_ON(TAG) \
- ((adb_trace_mask & (1 << TAG)) != 0)
+ ((adb_trace_mask & (1 << (TAG))) != 0)
#define VLOG(TAG) \
if (LIKELY(!VLOG_IS_ON(TAG))) \
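
Why the extra parentheses around TAG matter: an unparenthesized macro parameter expands with whatever precedence the caller's expression happens to have. A minimal sketch of the failure mode, with hypothetical names that are not part of this change:

    #include <cstdio>

    static int mask = 6;  // bits 1 and 2 set

    #define IS_ON_BAD(TAG)  ((mask & (1 << TAG)) != 0)
    #define IS_ON_GOOD(TAG) ((mask & (1 << (TAG))) != 0)

    int main() {
        // The caller means bit (1 | 2), i.e. bit 3. Since << binds tighter
        // than |, IS_ON_BAD expands to (mask & ((1 << 1) | 2)) and wrongly
        // reports the bit as set; IS_ON_GOOD tests (mask & (1 << 3)).
        printf("bad=%d good=%d\n", IS_ON_BAD(1 | 2), IS_ON_GOOD(1 | 2));  // bad=1 good=0
        return 0;
    }

The same hygiene rule motivates the NOTE_ALIGN, SI_DETHREAD, _BITS, ION_NUM_HEAP_IDS, ARRAY_SIZE, container_of, and liblog transport-macro hunks later in this change.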
diff --git a/adb/mutex_list.h b/adb/mutex_list.h
index b59c9f2..4a188ee 100644
--- a/adb/mutex_list.h
+++ b/adb/mutex_list.h
@@ -8,7 +8,6 @@
#endif
ADB_MUTEX(basename_lock)
ADB_MUTEX(dirname_lock)
-ADB_MUTEX(socket_list_lock)
ADB_MUTEX(transport_lock)
#if ADB_HOST
ADB_MUTEX(local_transports_lock)
diff --git a/adb/sockets.cpp b/adb/sockets.cpp
index aecaba2..b2555d0 100644
--- a/adb/sockets.cpp
+++ b/adb/sockets.cpp
@@ -26,6 +26,7 @@
#include <unistd.h>
#include <algorithm>
+#include <mutex>
#include <string>
#include <vector>
@@ -35,17 +36,14 @@
#include "adb.h"
#include "adb_io.h"
+#include "sysdeps/mutex.h"
#include "transport.h"
-ADB_MUTEX_DEFINE( socket_list_lock );
-
-static void local_socket_close_locked(asocket *s);
-
+static std::recursive_mutex& local_socket_list_lock = *new std::recursive_mutex();
static unsigned local_socket_next_id = 1;
static asocket local_socket_list = {
- .next = &local_socket_list,
- .prev = &local_socket_list,
+ .next = &local_socket_list, .prev = &local_socket_list,
};
/* the list of currently closing local sockets.
@@ -53,62 +51,53 @@
** write to their fd.
*/
static asocket local_socket_closing_list = {
- .next = &local_socket_closing_list,
- .prev = &local_socket_closing_list,
+ .next = &local_socket_closing_list, .prev = &local_socket_closing_list,
};
// Parse the global list of sockets to find one with id |local_id|.
// If |peer_id| is not 0, also check that it is connected to a peer
// with id |peer_id|. Returns an asocket handle on success, NULL on failure.
-asocket *find_local_socket(unsigned local_id, unsigned peer_id)
-{
- asocket *s;
- asocket *result = NULL;
+asocket* find_local_socket(unsigned local_id, unsigned peer_id) {
+ asocket* s;
+ asocket* result = NULL;
- adb_mutex_lock(&socket_list_lock);
+ std::lock_guard<std::recursive_mutex> lock(local_socket_list_lock);
for (s = local_socket_list.next; s != &local_socket_list; s = s->next) {
- if (s->id != local_id)
+ if (s->id != local_id) {
continue;
+ }
if (peer_id == 0 || (s->peer && s->peer->id == peer_id)) {
result = s;
}
break;
}
- adb_mutex_unlock(&socket_list_lock);
return result;
}
-static void
-insert_local_socket(asocket* s, asocket* list)
-{
- s->next = list;
- s->prev = s->next->prev;
+static void insert_local_socket(asocket* s, asocket* list) {
+ s->next = list;
+ s->prev = s->next->prev;
s->prev->next = s;
s->next->prev = s;
}
-
-void install_local_socket(asocket *s)
-{
- adb_mutex_lock(&socket_list_lock);
+void install_local_socket(asocket* s) {
+ std::lock_guard<std::recursive_mutex> lock(local_socket_list_lock);
s->id = local_socket_next_id++;
// Socket ids should never be 0.
- if (local_socket_next_id == 0)
- local_socket_next_id = 1;
+ if (local_socket_next_id == 0) {
+ fatal("local socket id overflow");
+ }
insert_local_socket(s, &local_socket_list);
-
- adb_mutex_unlock(&socket_list_lock);
}
-void remove_socket(asocket *s)
-{
+void remove_socket(asocket* s) {
// socket_list_lock should already be held
- if (s->prev && s->next)
- {
+ if (s->prev && s->next) {
s->prev->next = s->next;
s->next->prev = s->prev;
s->next = 0;
@@ -117,50 +106,47 @@
}
}
-void close_all_sockets(atransport *t)
-{
- asocket *s;
+void close_all_sockets(atransport* t) {
+ asocket* s;
- /* this is a little gross, but since s->close() *will* modify
- ** the list out from under you, your options are limited.
- */
- adb_mutex_lock(&socket_list_lock);
+ /* this is a little gross, but since s->close() *will* modify
+ ** the list out from under you, your options are limited.
+ */
+ std::lock_guard<std::recursive_mutex> lock(local_socket_list_lock);
restart:
- for(s = local_socket_list.next; s != &local_socket_list; s = s->next){
- if(s->transport == t || (s->peer && s->peer->transport == t)) {
- local_socket_close_locked(s);
+ for (s = local_socket_list.next; s != &local_socket_list; s = s->next) {
+ if (s->transport == t || (s->peer && s->peer->transport == t)) {
+ s->close(s);
goto restart;
}
}
- adb_mutex_unlock(&socket_list_lock);
}
-static int local_socket_enqueue(asocket *s, apacket *p)
-{
+static int local_socket_enqueue(asocket* s, apacket* p) {
D("LS(%d): enqueue %d", s->id, p->len);
p->ptr = p->data;
- /* if there is already data queue'd, we will receive
- ** events when it's time to write. just add this to
- ** the tail
- */
- if(s->pkt_first) {
+ /* if there is already data queue'd, we will receive
+ ** events when it's time to write. just add this to
+ ** the tail
+ */
+ if (s->pkt_first) {
goto enqueue;
}
- /* write as much as we can, until we
- ** would block or there is an error/eof
- */
- while(p->len > 0) {
+ /* write as much as we can, until we
+ ** would block or there is an error/eof
+ */
+ while (p->len > 0) {
int r = adb_write(s->fd, p->ptr, p->len);
- if(r > 0) {
+ if (r > 0) {
p->len -= r;
p->ptr += r;
continue;
}
- if((r == 0) || (errno != EAGAIN)) {
- D( "LS(%d): not ready, errno=%d: %s", s->id, errno, strerror(errno) );
+ if ((r == 0) || (errno != EAGAIN)) {
+ D("LS(%d): not ready, errno=%d: %s", s->id, errno, strerror(errno));
put_apacket(p);
s->has_write_error = true;
s->close(s);
@@ -170,55 +156,46 @@
}
}
- if(p->len == 0) {
+ if (p->len == 0) {
put_apacket(p);
return 0; /* ready for more data */
}
enqueue:
p->next = 0;
- if(s->pkt_first) {
+ if (s->pkt_first) {
s->pkt_last->next = p;
} else {
s->pkt_first = p;
}
s->pkt_last = p;
- /* make sure we are notified when we can drain the queue */
+ /* make sure we are notified when we can drain the queue */
fdevent_add(&s->fde, FDE_WRITE);
return 1; /* not ready (backlog) */
}
-static void local_socket_ready(asocket *s)
-{
+static void local_socket_ready(asocket* s) {
/* far side is ready for data, pay attention to
readable events */
fdevent_add(&s->fde, FDE_READ);
}
-static void local_socket_close(asocket *s)
-{
- adb_mutex_lock(&socket_list_lock);
- local_socket_close_locked(s);
- adb_mutex_unlock(&socket_list_lock);
-}
-
// be sure to hold the socket list lock when calling this
-static void local_socket_destroy(asocket *s)
-{
+static void local_socket_destroy(asocket* s) {
apacket *p, *n;
int exit_on_close = s->exit_on_close;
D("LS(%d): destroying fde.fd=%d", s->id, s->fde.fd);
- /* IMPORTANT: the remove closes the fd
- ** that belongs to this socket
- */
+ /* IMPORTANT: the remove closes the fd
+ ** that belongs to this socket
+ */
fdevent_remove(&s->fde);
- /* dispose of any unwritten data */
- for(p = s->pkt_first; p; p = n) {
+ /* dispose of any unwritten data */
+ for (p = s->pkt_first; p; p = n) {
D("LS(%d): discarding %d bytes", s->id, p->len);
n = p->next;
put_apacket(p);
@@ -232,41 +209,35 @@
}
}
-
-static void local_socket_close_locked(asocket *s)
-{
- D("entered local_socket_close_locked. LS(%d) fd=%d", s->id, s->fd);
- if(s->peer) {
- D("LS(%d): closing peer. peer->id=%d peer->fd=%d",
- s->id, s->peer->id, s->peer->fd);
+static void local_socket_close(asocket* s) {
+ D("entered local_socket_close. LS(%d) fd=%d", s->id, s->fd);
+ std::lock_guard<std::recursive_mutex> lock(local_socket_list_lock);
+ if (s->peer) {
+ D("LS(%d): closing peer. peer->id=%d peer->fd=%d", s->id, s->peer->id, s->peer->fd);
/* Note: it's important to call shutdown before disconnecting from
* the peer, this ensures that remote sockets can still get the id
* of the local socket they're connected to, to send a CLOSE()
* protocol event. */
- if (s->peer->shutdown)
- s->peer->shutdown(s->peer);
- s->peer->peer = 0;
- // tweak to avoid deadlock
- if (s->peer->close == local_socket_close) {
- local_socket_close_locked(s->peer);
- } else {
- s->peer->close(s->peer);
+ if (s->peer->shutdown) {
+ s->peer->shutdown(s->peer);
}
- s->peer = 0;
+ s->peer->peer = nullptr;
+ s->peer->close(s->peer);
+ s->peer = nullptr;
}
- /* If we are already closing, or if there are no
- ** pending packets, destroy immediately
- */
+ /* If we are already closing, or if there are no
+ ** pending packets, destroy immediately
+ */
if (s->closing || s->has_write_error || s->pkt_first == NULL) {
- int id = s->id;
+ int id = s->id;
local_socket_destroy(s);
D("LS(%d): closed", id);
return;
}
- /* otherwise, put on the closing list
- */
+ /* otherwise, put on the closing list
+ */
D("LS(%d): closing", s->id);
s->closing = 1;
fdevent_del(&s->fde, FDE_READ);
@@ -276,8 +247,7 @@
CHECK_EQ(FDE_WRITE, s->fde.state & FDE_WRITE);
}
-static void local_socket_event_func(int fd, unsigned ev, void* _s)
-{
+static void local_socket_event_func(int fd, unsigned ev, void* _s) {
asocket* s = reinterpret_cast<asocket*>(_s);
D("LS(%d): event_func(fd=%d(==%d), ev=%04x)", s->id, s->fd, fd, ev);
@@ -334,10 +304,9 @@
s->peer->ready(s->peer);
}
-
if (ev & FDE_READ) {
- apacket *p = get_apacket();
- unsigned char *x = p->data;
+ apacket* p = get_apacket();
+ unsigned char* x = p->data;
const size_t max_payload = s->get_max_payload();
size_t avail = max_payload;
int r = 0;
@@ -345,8 +314,8 @@
while (avail > 0) {
r = adb_read(fd, x, avail);
- D("LS(%d): post adb_read(fd=%d,...) r=%d (errno=%d) avail=%zu",
- s->id, s->fd, r, r < 0 ? errno : 0, avail);
+ D("LS(%d): post adb_read(fd=%d,...) r=%d (errno=%d) avail=%zu", s->id, s->fd, r,
+ r < 0 ? errno : 0, avail);
if (r == -1) {
if (errno == EAGAIN) {
break;
@@ -361,8 +330,8 @@
is_eof = 1;
break;
}
- D("LS(%d): fd=%d post avail loop. r=%d is_eof=%d forced_eof=%d",
- s->id, s->fd, r, is_eof, s->fde.force_eof);
+ D("LS(%d): fd=%d post avail loop. r=%d is_eof=%d forced_eof=%d", s->id, s->fd, r, is_eof,
+ s->fde.force_eof);
if ((avail == max_payload) || (s->peer == 0)) {
put_apacket(p);
} else {
@@ -376,48 +345,48 @@
D("LS(%u): fd=%d post peer->enqueue(). r=%d", saved_id, saved_fd, r);
if (r < 0) {
- /* error return means they closed us as a side-effect
- ** and we must return immediately.
- **
- ** note that if we still have buffered packets, the
- ** socket will be placed on the closing socket list.
- ** this handler function will be called again
- ** to process FDE_WRITE events.
- */
+ /* error return means they closed us as a side-effect
+ ** and we must return immediately.
+ **
+ ** note that if we still have buffered packets, the
+ ** socket will be placed on the closing socket list.
+ ** this handler function will be called again
+ ** to process FDE_WRITE events.
+ */
return;
}
if (r > 0) {
- /* if the remote cannot accept further events,
- ** we disable notification of READs. They'll
- ** be enabled again when we get a call to ready()
- */
+ /* if the remote cannot accept further events,
+ ** we disable notification of READs. They'll
+ ** be enabled again when we get a call to ready()
+ */
fdevent_del(&s->fde, FDE_READ);
}
}
/* Don't allow a forced eof if data is still there */
if ((s->fde.force_eof && !r) || is_eof) {
- D(" closing because is_eof=%d r=%d s->fde.force_eof=%d",
- is_eof, r, s->fde.force_eof);
+ D(" closing because is_eof=%d r=%d s->fde.force_eof=%d", is_eof, r, s->fde.force_eof);
s->close(s);
return;
}
}
- if (ev & FDE_ERROR){
- /* this should be caught be the next read or write
- ** catching it here means we may skip the last few
- ** bytes of readable data.
- */
+ if (ev & FDE_ERROR) {
+ /* this should be caught by the next read or write;
+ ** catching it here means we may skip the last few
+ ** bytes of readable data.
+ */
D("LS(%d): FDE_ERROR (fd=%d)", s->id, s->fd);
return;
}
}
-asocket *create_local_socket(int fd)
-{
- asocket *s = reinterpret_cast<asocket*>(calloc(1, sizeof(asocket)));
- if (s == NULL) fatal("cannot allocate socket");
+asocket* create_local_socket(int fd) {
+ asocket* s = reinterpret_cast<asocket*>(calloc(1, sizeof(asocket)));
+ if (s == NULL) {
+ fatal("cannot allocate socket");
+ }
s->fd = fd;
s->enqueue = local_socket_enqueue;
s->ready = local_socket_ready;
@@ -430,32 +399,33 @@
return s;
}
-asocket *create_local_service_socket(const char *name,
- const atransport* transport)
-{
+asocket* create_local_service_socket(const char* name, const atransport* transport) {
#if !ADB_HOST
- if (!strcmp(name,"jdwp")) {
+ if (!strcmp(name, "jdwp")) {
return create_jdwp_service_socket();
}
- if (!strcmp(name,"track-jdwp")) {
+ if (!strcmp(name, "track-jdwp")) {
return create_jdwp_tracker_service_socket();
}
#endif
int fd = service_to_fd(name, transport);
- if(fd < 0) return 0;
+ if (fd < 0) {
+ return 0;
+ }
asocket* s = create_local_socket(fd);
D("LS(%d): bound to '%s' via %d", s->id, name, fd);
#if !ADB_HOST
char debug[PROPERTY_VALUE_MAX];
- if (!strncmp(name, "root:", 5))
+ if (!strncmp(name, "root:", 5)) {
property_get("ro.debuggable", debug, "");
+ }
- if ((!strncmp(name, "root:", 5) && getuid() != 0 && strcmp(debug, "1") == 0)
- || (!strncmp(name, "unroot:", 7) && getuid() == 0)
- || !strncmp(name, "usb:", 4)
- || !strncmp(name, "tcpip:", 6)) {
+ if ((!strncmp(name, "root:", 5) && getuid() != 0 && strcmp(debug, "1") == 0) ||
+ (!strncmp(name, "unroot:", 7) && getuid() == 0) ||
+ !strncmp(name, "usb:", 4) ||
+ !strncmp(name, "tcpip:", 6)) {
D("LS(%d): enabling exit_on_close", s->id);
s->exit_on_close = 1;
}
@@ -465,9 +435,8 @@
}
#if ADB_HOST
-static asocket *create_host_service_socket(const char *name, const char* serial)
-{
- asocket *s;
+static asocket* create_host_service_socket(const char* name, const char* serial) {
+ asocket* s;
s = host_service_to_socket(name, serial);
@@ -480,10 +449,8 @@
}
#endif /* ADB_HOST */
-static int remote_socket_enqueue(asocket *s, apacket *p)
-{
- D("entered remote_socket_enqueue RS(%d) WRITE fd=%d peer.fd=%d",
- s->id, s->fd, s->peer->fd);
+static int remote_socket_enqueue(asocket* s, apacket* p) {
+ D("entered remote_socket_enqueue RS(%d) WRITE fd=%d peer.fd=%d", s->id, s->fd, s->peer->fd);
p->msg.command = A_WRTE;
p->msg.arg0 = s->peer->id;
p->msg.arg1 = s->id;
@@ -492,40 +459,35 @@
return 1;
}
-static void remote_socket_ready(asocket *s)
-{
- D("entered remote_socket_ready RS(%d) OKAY fd=%d peer.fd=%d",
- s->id, s->fd, s->peer->fd);
- apacket *p = get_apacket();
+static void remote_socket_ready(asocket* s) {
+ D("entered remote_socket_ready RS(%d) OKAY fd=%d peer.fd=%d", s->id, s->fd, s->peer->fd);
+ apacket* p = get_apacket();
p->msg.command = A_OKAY;
p->msg.arg0 = s->peer->id;
p->msg.arg1 = s->id;
send_packet(p, s->transport);
}
-static void remote_socket_shutdown(asocket *s)
-{
- D("entered remote_socket_shutdown RS(%d) CLOSE fd=%d peer->fd=%d",
- s->id, s->fd, s->peer?s->peer->fd:-1);
- apacket *p = get_apacket();
+static void remote_socket_shutdown(asocket* s) {
+ D("entered remote_socket_shutdown RS(%d) CLOSE fd=%d peer->fd=%d", s->id, s->fd,
+ s->peer ? s->peer->fd : -1);
+ apacket* p = get_apacket();
p->msg.command = A_CLSE;
- if(s->peer) {
+ if (s->peer) {
p->msg.arg0 = s->peer->id;
}
p->msg.arg1 = s->id;
send_packet(p, s->transport);
}
-static void remote_socket_close(asocket *s)
-{
+static void remote_socket_close(asocket* s) {
if (s->peer) {
s->peer->peer = 0;
- D("RS(%d) peer->close()ing peer->id=%d peer->fd=%d",
- s->id, s->peer->id, s->peer->fd);
+ D("RS(%d) peer->close()ing peer->id=%d peer->fd=%d", s->id, s->peer->id, s->peer->fd);
s->peer->close(s->peer);
}
- D("entered remote_socket_close RS(%d) CLOSE fd=%d peer->fd=%d",
- s->id, s->fd, s->peer?s->peer->fd:-1);
+ D("entered remote_socket_close RS(%d) CLOSE fd=%d peer->fd=%d", s->id, s->fd,
+ s->peer ? s->peer->fd : -1);
D("RS(%d): closed", s->id);
free(s);
}
@@ -534,12 +496,15 @@
// |t|. Where |id| is the socket id of the corresponding service on the other
// side of the transport (it is allocated by the remote side and _cannot_ be 0).
// Returns a new non-NULL asocket handle.
-asocket *create_remote_socket(unsigned id, atransport *t)
-{
- if (id == 0) fatal("invalid remote socket id (0)");
+asocket* create_remote_socket(unsigned id, atransport* t) {
+ if (id == 0) {
+ fatal("invalid remote socket id (0)");
+ }
asocket* s = reinterpret_cast<asocket*>(calloc(1, sizeof(asocket)));
- if (s == NULL) fatal("cannot allocate socket");
+ if (s == NULL) {
+ fatal("cannot allocate socket");
+ }
s->id = id;
s->enqueue = remote_socket_enqueue;
s->ready = remote_socket_ready;
@@ -551,13 +516,12 @@
return s;
}
-void connect_to_remote(asocket *s, const char *destination)
-{
+void connect_to_remote(asocket* s, const char* destination) {
D("Connect_to_remote call RS(%d) fd=%d", s->id, s->fd);
- apacket *p = get_apacket();
+ apacket* p = get_apacket();
size_t len = strlen(destination) + 1;
- if(len > (s->get_max_payload()-1)) {
+ if (len > (s->get_max_payload() - 1)) {
fatal("destination oversized");
}
@@ -565,15 +529,13 @@
p->msg.command = A_OPEN;
p->msg.arg0 = s->id;
p->msg.data_length = len;
- strcpy((char*) p->data, destination);
+ strcpy((char*)p->data, destination);
send_packet(p, s->transport);
}
-
/* this is used by magic sockets to rig local sockets to
send the go-ahead message when they connect */
-static void local_socket_ready_notify(asocket *s)
-{
+static void local_socket_ready_notify(asocket* s) {
s->ready = local_socket_ready;
s->shutdown = NULL;
s->close = local_socket_close;
@@ -584,8 +546,7 @@
/* this is used by magic sockets to rig local sockets to
send the failure message if they are closed before
connected (to avoid closing them without a status message) */
-static void local_socket_close_notify(asocket *s)
-{
+static void local_socket_close_notify(asocket* s) {
s->ready = local_socket_ready;
s->shutdown = NULL;
s->close = local_socket_close;
@@ -593,28 +554,41 @@
s->close(s);
}
-static unsigned unhex(unsigned char *s, int len)
-{
+static unsigned unhex(unsigned char* s, int len) {
unsigned n = 0, c;
- while(len-- > 0) {
- switch((c = *s++)) {
- case '0': case '1': case '2':
- case '3': case '4': case '5':
- case '6': case '7': case '8':
- case '9':
- c -= '0';
- break;
- case 'a': case 'b': case 'c':
- case 'd': case 'e': case 'f':
- c = c - 'a' + 10;
- break;
- case 'A': case 'B': case 'C':
- case 'D': case 'E': case 'F':
- c = c - 'A' + 10;
- break;
- default:
- return 0xffffffff;
+ while (len-- > 0) {
+ switch ((c = *s++)) {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ c -= '0';
+ break;
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'e':
+ case 'f':
+ c = c - 'a' + 10;
+ break;
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'E':
+ case 'F':
+ c = c - 'A' + 10;
+ break;
+ default:
+ return 0xffffffff;
}
n = (n << 4) | c;
@@ -671,31 +645,29 @@
} // namespace internal
-#endif // ADB_HOST
+#endif // ADB_HOST
-static int smart_socket_enqueue(asocket *s, apacket *p)
-{
+static int smart_socket_enqueue(asocket* s, apacket* p) {
unsigned len;
#if ADB_HOST
- char *service = nullptr;
+ char* service = nullptr;
char* serial = nullptr;
TransportType type = kTransportAny;
#endif
D("SS(%d): enqueue %d", s->id, p->len);
- if(s->pkt_first == 0) {
+ if (s->pkt_first == 0) {
s->pkt_first = p;
s->pkt_last = p;
} else {
- if((s->pkt_first->len + p->len) > s->get_max_payload()) {
+ if ((s->pkt_first->len + p->len) > s->get_max_payload()) {
D("SS(%d): overflow", s->id);
put_apacket(p);
goto fail;
}
- memcpy(s->pkt_first->data + s->pkt_first->len,
- p->data, p->len);
+ memcpy(s->pkt_first->data + s->pkt_first->len, p->data, p->len);
s->pkt_first->len += p->len;
put_apacket(p);
@@ -703,7 +675,9 @@
}
/* don't bother if we can't decode the length */
- if(p->len < 4) return 0;
+ if (p->len < 4) {
+ return 0;
+ }
len = unhex(p->data, 4);
if ((len < 1) || (len > MAX_PAYLOAD_V1)) {
@@ -711,27 +685,27 @@
goto fail;
}
- D("SS(%d): len is %d", s->id, len );
+ D("SS(%d): len is %d", s->id, len);
/* can't do anything until we have the full header */
- if((len + 4) > p->len) {
- D("SS(%d): waiting for %d more bytes", s->id, len+4 - p->len);
+ if ((len + 4) > p->len) {
+ D("SS(%d): waiting for %d more bytes", s->id, len + 4 - p->len);
return 0;
}
p->data[len + 4] = 0;
- D("SS(%d): '%s'", s->id, (char*) (p->data + 4));
+ D("SS(%d): '%s'", s->id, (char*)(p->data + 4));
#if ADB_HOST
- service = (char *)p->data + 4;
- if(!strncmp(service, "host-serial:", strlen("host-serial:"))) {
+ service = (char*)p->data + 4;
+ if (!strncmp(service, "host-serial:", strlen("host-serial:"))) {
char* serial_end;
service += strlen("host-serial:");
// serial number should follow "host:" and could be a host:port string.
serial_end = internal::skip_host_serial(service);
if (serial_end) {
- *serial_end = 0; // terminate string
+ *serial_end = 0; // terminate string
serial = service;
service = serial_end + 1;
}
@@ -749,42 +723,42 @@
}
if (service) {
- asocket *s2;
+ asocket* s2;
- /* some requests are handled immediately -- in that
- ** case the handle_host_request() routine has sent
- ** the OKAY or FAIL message and all we have to do
- ** is clean up.
- */
- if(handle_host_request(service, type, serial, s->peer->fd, s) == 0) {
- /* XXX fail message? */
- D( "SS(%d): handled host service '%s'", s->id, service );
+ /* some requests are handled immediately -- in that
+ ** case the handle_host_request() routine has sent
+ ** the OKAY or FAIL message and all we have to do
+ ** is clean up.
+ */
+ if (handle_host_request(service, type, serial, s->peer->fd, s) == 0) {
+ /* XXX fail message? */
+ D("SS(%d): handled host service '%s'", s->id, service);
goto fail;
}
if (!strncmp(service, "transport", strlen("transport"))) {
- D( "SS(%d): okay transport", s->id );
+ D("SS(%d): okay transport", s->id);
p->len = 0;
return 0;
}
- /* try to find a local service with this name.
- ** if no such service exists, we'll fail out
- ** and tear down here.
- */
+ /* try to find a local service with this name.
+ ** if no such service exists, we'll fail out
+ ** and tear down here.
+ */
s2 = create_host_service_socket(service, serial);
- if(s2 == 0) {
- D( "SS(%d): couldn't create host service '%s'", s->id, service );
+ if (s2 == 0) {
+ D("SS(%d): couldn't create host service '%s'", s->id, service);
SendFail(s->peer->fd, "unknown host service");
goto fail;
}
- /* we've connected to a local host service,
- ** so we make our peer back into a regular
- ** local socket and bind it to the new local
- ** service socket, acknowledge the successful
- ** connection, and close this smart socket now
- ** that its work is done.
- */
+ /* we've connected to a local host service,
+ ** so we make our peer back into a regular
+ ** local socket and bind it to the new local
+ ** service socket, acknowledge the successful
+ ** connection, and close this smart socket now
+ ** that its work is done.
+ */
SendOkay(s->peer->fd);
s->peer->ready = local_socket_ready;
@@ -793,10 +767,10 @@
s->peer->peer = s2;
s2->peer = s->peer;
s->peer = 0;
- D( "SS(%d): okay", s->id );
+ D("SS(%d): okay", s->id);
s->close(s);
- /* initial state is "ready" */
+ /* initial state is "ready" */
s2->ready(s2);
return 0;
}
@@ -811,53 +785,50 @@
}
#endif
- if(!(s->transport) || (s->transport->connection_state == kCsOffline)) {
- /* if there's no remote we fail the connection
- ** right here and terminate it
- */
+ if (!(s->transport) || (s->transport->connection_state == kCsOffline)) {
+ /* if there's no remote we fail the connection
+ ** right here and terminate it
+ */
SendFail(s->peer->fd, "device offline (x)");
goto fail;
}
-
- /* instrument our peer to pass the success or fail
- ** message back once it connects or closes, then
- ** detach from it, request the connection, and
- ** tear down
- */
+ /* instrument our peer to pass the success or fail
+ ** message back once it connects or closes, then
+ ** detach from it, request the connection, and
+ ** tear down
+ */
s->peer->ready = local_socket_ready_notify;
s->peer->shutdown = nullptr;
s->peer->close = local_socket_close_notify;
s->peer->peer = 0;
- /* give him our transport and upref it */
+ /* give him our transport and upref it */
s->peer->transport = s->transport;
- connect_to_remote(s->peer, (char*) (p->data + 4));
+ connect_to_remote(s->peer, (char*)(p->data + 4));
s->peer = 0;
s->close(s);
return 1;
fail:
- /* we're going to close our peer as a side-effect, so
- ** return -1 to signal that state to the local socket
- ** who is enqueueing against us
- */
+ /* we're going to close our peer as a side-effect, so
+ ** return -1 to signal that state to the local socket
+ ** who is enqueueing against us
+ */
s->close(s);
return -1;
}
-static void smart_socket_ready(asocket *s)
-{
+static void smart_socket_ready(asocket* s) {
D("SS(%d): ready", s->id);
}
-static void smart_socket_close(asocket *s)
-{
+static void smart_socket_close(asocket* s) {
D("SS(%d): closed", s->id);
- if(s->pkt_first){
+ if (s->pkt_first) {
put_apacket(s->pkt_first);
}
- if(s->peer) {
+ if (s->peer) {
s->peer->peer = 0;
s->peer->close(s->peer);
s->peer = 0;
@@ -865,10 +836,9 @@
free(s);
}
-static asocket *create_smart_socket(void)
-{
+static asocket* create_smart_socket(void) {
D("Creating smart socket");
- asocket *s = reinterpret_cast<asocket*>(calloc(1, sizeof(asocket)));
+ asocket* s = reinterpret_cast<asocket*>(calloc(1, sizeof(asocket)));
if (s == NULL) fatal("cannot allocate socket");
s->enqueue = smart_socket_enqueue;
s->ready = smart_socket_ready;
@@ -879,10 +849,9 @@
return s;
}
-void connect_to_smartsocket(asocket *s)
-{
+void connect_to_smartsocket(asocket* s) {
D("Connecting to smart socket");
- asocket *ss = create_smart_socket();
+ asocket* ss = create_smart_socket();
s->peer = ss;
ss->peer = s;
s->ready(s);
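
Two idioms carry the sockets.cpp rewrite. The list mutex is created with *new and deliberately leaked so it is still valid if exit-time code touches the socket lists, and it is recursive so local_socket_close can take the lock even when a peer's close() re-enters it, which is what lets the old local_socket_close_locked split and its "tweak to avoid deadlock" be deleted. Locking is also now scoped RAII, so early returns such as the one in find_local_socket can no longer leak a held lock. A condensed sketch of the pattern, with hypothetical container names:

    #include <mutex>
    #include <vector>

    // Leaked on purpose: never destroyed, so it stays usable during
    // static destruction at process exit.
    static std::recursive_mutex& g_list_lock = *new std::recursive_mutex();
    static std::vector<int>& g_ids = *new std::vector<int>();

    int find_id(int wanted) {
        // RAII replaces the manual adb_mutex_lock/adb_mutex_unlock pairs:
        // every return path releases the lock.
        std::lock_guard<std::recursive_mutex> lock(g_list_lock);
        for (int id : g_ids) {
            if (id == wanted) return id;
        }
        return -1;
    }

    void close_all() {
        std::lock_guard<std::recursive_mutex> lock(g_list_lock);
        // Re-entering a function that also takes g_list_lock is fine,
        // mirroring s->close(s) running under the list lock above.
        (void)find_id(0);
    }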
diff --git a/adb/sysdeps/mutex.h b/adb/sysdeps/mutex.h
new file mode 100644
index 0000000..73c9e6e
--- /dev/null
+++ b/adb/sysdeps/mutex.h
@@ -0,0 +1,107 @@
+#pragma once
+
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(_WIN32)
+
+#include <windows.h>
+
+#include <android-base/macros.h>
+
+#include "adb.h"
+
+// The prebuilt version of mingw we use doesn't support mutex or recursive_mutex.
+// Therefore, implement our own using the Windows primitives.
+// Put them directly into the std namespace, so that when they're actually available, the build
+// breaks until they're removed.
+
+#include <mutex>
+namespace std {
+
+// CRITICAL_SECTION is recursive, so just wrap it in a Mutex-compatible class.
+class recursive_mutex {
+ public:
+ recursive_mutex() {
+ InitializeCriticalSection(&mutex_);
+ }
+
+ ~recursive_mutex() {
+ DeleteCriticalSection(&mutex_);
+ }
+
+ void lock() {
+ EnterCriticalSection(&mutex_);
+ }
+
+ bool try_lock() {
+ return TryEnterCriticalSection(&mutex_);
+ }
+
+ void unlock() {
+ LeaveCriticalSection(&mutex_);
+ }
+
+ private:
+ CRITICAL_SECTION mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(recursive_mutex);
+};
+
+class mutex {
+ public:
+ mutex() {
+ }
+
+ ~mutex() {
+ }
+
+ void lock() {
+ mutex_.lock();
+ if (++lock_count_ != 1) {
+ fatal("non-recursive mutex locked reentrantly");
+ }
+ }
+
+ void unlock() {
+ if (--lock_count_ != 0) {
+ fatal("non-recursive mutex unlock resulted in unexpected lock count: %d", lock_count_);
+ }
+ mutex_.unlock();
+ }
+
+ bool try_lock() {
+ if (!mutex_.try_lock()) {
+ return false;
+ }
+
+ if (lock_count_ != 0) {
+ mutex_.unlock();
+ return false;
+ }
+
+ ++lock_count_;
+ return true;
+ }
+
+ private:
+ recursive_mutex mutex_;
+ size_t lock_count_ = 0;
+};
+
+}
+
+#endif
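
The point of the std::mutex shim is fail-fast behavior: CRITICAL_SECTION is recursive by nature, so a thin wrapper would silently allow a same-thread double lock that deadlocks on POSIX. The lock_count_ bookkeeping turns that divergence into an immediate abort. A sketch of the observable behavior, assuming the header above:

    #include "sysdeps/mutex.h"  // the Windows-only shim above

    static std::mutex m;

    void demo() {
        m.lock();
        m.try_lock();  // returns false: lock_count_ is already 1
        m.lock();      // the CRITICAL_SECTION would happily recurse, but
                       // lock_count_ hits 2 and lock() calls
                       // fatal("non-recursive mutex locked reentrantly")
    }

The sysdeps_test.cpp hunk below pins exactly this behavior with EXPECT_DEATH.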
diff --git a/adb/sysdeps_test.cpp b/adb/sysdeps_test.cpp
index fde344a..395d22d 100644
--- a/adb/sysdeps_test.cpp
+++ b/adb/sysdeps_test.cpp
@@ -244,3 +244,60 @@
adb_close(fd);
}
}
+
+#include "sysdeps/mutex.h"
+TEST(sysdeps_mutex, mutex_smoke) {
+ static std::atomic<bool> finished(false);
+ static std::mutex &m = *new std::mutex();
+ m.lock();
+ ASSERT_FALSE(m.try_lock());
+ adb_thread_create([](void*) {
+ ASSERT_FALSE(m.try_lock());
+ m.lock();
+ finished.store(true);
+ adb_sleep_ms(200);
+ m.unlock();
+ }, nullptr);
+
+ ASSERT_FALSE(finished.load());
+ adb_sleep_ms(100);
+ ASSERT_FALSE(finished.load());
+ m.unlock();
+ adb_sleep_ms(100);
+ m.lock();
+ ASSERT_TRUE(finished.load());
+ m.unlock();
+}
+
+// Our implementation on Windows aborts on double lock.
+#if defined(_WIN32)
+TEST(sysdeps_mutex, mutex_reentrant_lock) {
+ std::mutex &m = *new std::mutex();
+
+ m.lock();
+ ASSERT_FALSE(m.try_lock());
+ EXPECT_DEATH(m.lock(), "non-recursive mutex locked reentrantly");
+}
+#endif
+
+TEST(sysdeps_mutex, recursive_mutex_smoke) {
+ static std::recursive_mutex &m = *new std::recursive_mutex();
+
+ m.lock();
+ ASSERT_TRUE(m.try_lock());
+ m.unlock();
+
+ adb_thread_create([](void*) {
+ ASSERT_FALSE(m.try_lock());
+ m.lock();
+ adb_sleep_ms(500);
+ m.unlock();
+ }, nullptr);
+
+ adb_sleep_ms(100);
+ m.unlock();
+ adb_sleep_ms(100);
+ ASSERT_FALSE(m.try_lock());
+ m.lock();
+ m.unlock();
+}
diff --git a/adb/transport_local.cpp b/adb/transport_local.cpp
index 4121f47..c1c88a9 100644
--- a/adb/transport_local.cpp
+++ b/adb/transport_local.cpp
@@ -167,7 +167,9 @@
D("server: new connection on fd %d", fd);
close_on_exec(fd);
disable_tcp_nagle(fd);
- register_socket_transport(fd, "host", port, 1);
+ if (register_socket_transport(fd, "host", port, 1) != 0) {
+ adb_close(fd);
+ }
}
}
D("transport: server_socket_thread() exiting");
@@ -261,8 +263,8 @@
/* Host is connected. Register the transport, and start the
* exchange. */
std::string serial = android::base::StringPrintf("host-%d", fd);
- register_socket_transport(fd, serial.c_str(), port, 1);
- if (!WriteFdExactly(fd, _start_req, strlen(_start_req))) {
+ if (register_socket_transport(fd, serial.c_str(), port, 1) != 0 ||
+ !WriteFdExactly(fd, _start_req, strlen(_start_req))) {
adb_close(fd);
}
}
diff --git a/adb/usb_linux_client.cpp b/adb/usb_linux_client.cpp
index 0ba6b4b..c10b48c 100644
--- a/adb/usb_linux_client.cpp
+++ b/adb/usb_linux_client.cpp
@@ -400,35 +400,33 @@
v2_descriptor.os_header = os_desc_header;
v2_descriptor.os_desc = os_desc_compat;
- if (h->control < 0) { // might have already done this before
- D("OPENING %s", USB_FFS_ADB_EP0);
- h->control = adb_open(USB_FFS_ADB_EP0, O_RDWR);
- if (h->control < 0) {
- D("[ %s: cannot open control endpoint: errno=%d]", USB_FFS_ADB_EP0, errno);
+ D("OPENING %s", USB_FFS_ADB_EP0);
+ h->control = adb_open(USB_FFS_ADB_EP0, O_RDWR);
+ if (h->control < 0) {
+ D("[ %s: cannot open control endpoint: errno=%d]", USB_FFS_ADB_EP0, errno);
+ goto err;
+ }
+
+ ret = adb_write(h->control, &v2_descriptor, sizeof(v2_descriptor));
+ if (ret < 0) {
+ v1_descriptor.header.magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC);
+ v1_descriptor.header.length = cpu_to_le32(sizeof(v1_descriptor));
+ v1_descriptor.header.fs_count = 3;
+ v1_descriptor.header.hs_count = 3;
+ v1_descriptor.fs_descs = fs_descriptors;
+ v1_descriptor.hs_descs = hs_descriptors;
+ D("[ %s: Switching to V1_descriptor format errno=%d ]", USB_FFS_ADB_EP0, errno);
+ ret = adb_write(h->control, &v1_descriptor, sizeof(v1_descriptor));
+ if (ret < 0) {
+ D("[ %s: write descriptors failed: errno=%d ]", USB_FFS_ADB_EP0, errno);
goto err;
}
+ }
- ret = adb_write(h->control, &v2_descriptor, sizeof(v2_descriptor));
- if (ret < 0) {
- v1_descriptor.header.magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC);
- v1_descriptor.header.length = cpu_to_le32(sizeof(v1_descriptor));
- v1_descriptor.header.fs_count = 3;
- v1_descriptor.header.hs_count = 3;
- v1_descriptor.fs_descs = fs_descriptors;
- v1_descriptor.hs_descs = hs_descriptors;
- D("[ %s: Switching to V1_descriptor format errno=%d ]", USB_FFS_ADB_EP0, errno);
- ret = adb_write(h->control, &v1_descriptor, sizeof(v1_descriptor));
- if (ret < 0) {
- D("[ %s: write descriptors failed: errno=%d ]", USB_FFS_ADB_EP0, errno);
- goto err;
- }
- }
-
- ret = adb_write(h->control, &strings, sizeof(strings));
- if (ret < 0) {
- D("[ %s: writing strings failed: errno=%d]", USB_FFS_ADB_EP0, errno);
- goto err;
- }
+ ret = adb_write(h->control, &strings, sizeof(strings));
+ if (ret < 0) {
+ D("[ %s: writing strings failed: errno=%d]", USB_FFS_ADB_EP0, errno);
+ goto err;
}
h->bulk_out = adb_open(USB_FFS_ADB_OUT, O_RDWR);
@@ -556,6 +554,7 @@
h->kicked = false;
adb_close(h->bulk_out);
adb_close(h->bulk_in);
+ adb_close(h->control);
// Notify usb_adb_open_thread to open a new connection.
adb_mutex_lock(&h->lock);
h->open_new_connection = true;
diff --git a/debuggerd/elf_utils.cpp b/debuggerd/elf_utils.cpp
index 9959f2e..3d99cab 100644
--- a/debuggerd/elf_utils.cpp
+++ b/debuggerd/elf_utils.cpp
@@ -29,7 +29,7 @@
#include "elf_utils.h"
-#define NOTE_ALIGN(size) ((size + 3) & ~3)
+#define NOTE_ALIGN(size) (((size) + 3) & ~3)
template <typename HdrType, typename PhdrType, typename NhdrType>
static bool get_build_id(
diff --git a/debuggerd/test/host_signal_fixup.h b/debuggerd/test/host_signal_fixup.h
index c7796ef..762bae5 100644
--- a/debuggerd/test/host_signal_fixup.h
+++ b/debuggerd/test/host_signal_fixup.h
@@ -57,7 +57,7 @@
#endif
#if !defined(SI_DETHREAD)
-#define SI_DETHREAD -7
+#define SI_DETHREAD (-7)
#endif
#endif
diff --git a/include/cutils/aref.h b/include/cutils/aref.h
deleted file mode 100644
index 3bd36ea..0000000
--- a/include/cutils/aref.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _CUTILS_AREF_H_
-#define _CUTILS_AREF_H_
-
-#include <stddef.h>
-#include <sys/cdefs.h>
-
-#include <cutils/atomic.h>
-
-__BEGIN_DECLS
-
-#define AREF_TO_ITEM(aref, container, member) \
- (container *) (((char*) (aref)) - offsetof(container, member))
-
-struct aref
-{
- volatile int32_t count;
-};
-
-static inline void aref_init(struct aref *r)
-{
- r->count = 1;
-}
-
-static inline int32_t aref_count(struct aref *r)
-{
- return r->count;
-}
-
-static inline void aref_get(struct aref *r)
-{
- android_atomic_inc(&r->count);
-}
-
-static inline void aref_put(struct aref *r, void (*release)(struct aref *))
-{
- if (android_atomic_dec(&r->count) == 1)
- release(r);
-}
-
-__END_DECLS
-
-#endif // _CUTILS_AREF_H_
diff --git a/include/utils/RefBase.h b/include/utils/RefBase.h
index eac6a78..14d9cb1 100644
--- a/include/utils/RefBase.h
+++ b/include/utils/RefBase.h
@@ -17,7 +17,7 @@
#ifndef ANDROID_REF_BASE_H
#define ANDROID_REF_BASE_H
-#include <cutils/atomic.h>
+#include <atomic>
#include <stdint.h>
#include <sys/types.h>
@@ -176,16 +176,17 @@
public:
inline LightRefBase() : mCount(0) { }
inline void incStrong(__attribute__((unused)) const void* id) const {
- android_atomic_inc(&mCount);
+ mCount.fetch_add(1, std::memory_order_relaxed);
}
inline void decStrong(__attribute__((unused)) const void* id) const {
- if (android_atomic_dec(&mCount) == 1) {
+ if (mCount.fetch_sub(1, std::memory_order_release) == 1) {
+ std::atomic_thread_fence(std::memory_order_acquire);
delete static_cast<const T*>(this);
}
}
//! DEBUGGING ONLY: Get current strong ref count.
inline int32_t getStrongCount() const {
- return mCount;
+ return mCount.load(std::memory_order_relaxed);
}
typedef LightRefBase<T> basetype;
@@ -200,7 +201,7 @@
const void* old_id, const void* new_id) { }
private:
- mutable volatile int32_t mCount;
+ mutable std::atomic<int32_t> mCount;
};
// This is a wrapper around LightRefBase that simply enforces a virtual
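
The LightRefBase hunk is the canonical atomic reference-count recipe: increments are relaxed because taking a reference publishes nothing, the decrement is a release so every access to the object happens-before the count reaching zero, and the deleting thread issues an acquire fence to pull those accesses in before the destructor runs. The same recipe isolated in a sketch (hypothetical class, not part of this change):

    #include <atomic>
    #include <cstdint>

    template <typename T>
    class Counted {
      public:
        void incStrong() const {
            // No ordering needed: nothing is published by an increment.
            mCount.fetch_add(1, std::memory_order_relaxed);
        }
        void decStrong() const {
            if (mCount.fetch_sub(1, std::memory_order_release) == 1) {
                // Pairs with the release decrements above: all accesses
                // made while the object was alive are visible here.
                std::atomic_thread_fence(std::memory_order_acquire);
                delete static_cast<const T*>(this);
            }
        }
      private:
        mutable std::atomic<int32_t> mCount{0};
    };

The long comment block added to libutils/RefBase.cpp below spells out the same argument for the full RefBase/weakref_impl machinery.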
diff --git a/libbacktrace/GetPss.cpp b/libbacktrace/GetPss.cpp
index b4dc48d..6d750ea 100644
--- a/libbacktrace/GetPss.cpp
+++ b/libbacktrace/GetPss.cpp
@@ -24,7 +24,7 @@
// This is an extremely simplified version of libpagemap.
-#define _BITS(x, offset, bits) (((x) >> offset) & ((1LL << (bits)) - 1))
+#define _BITS(x, offset, bits) (((x) >> (offset)) & ((1LL << (bits)) - 1))
#define PAGEMAP_PRESENT(x) (_BITS(x, 63, 1))
#define PAGEMAP_SWAPPED(x) (_BITS(x, 62, 1))
diff --git a/libbacktrace/backtrace_test.cpp b/libbacktrace/backtrace_test.cpp
index df6c6c1..7066c79 100644
--- a/libbacktrace/backtrace_test.cpp
+++ b/libbacktrace/backtrace_test.cpp
@@ -1420,7 +1420,7 @@
#if defined(ENABLE_PSS_TESTS)
#include "GetPss.h"
-#define MAX_LEAK_BYTES 32*1024UL
+#define MAX_LEAK_BYTES (32*1024UL)
void CheckForLeak(pid_t pid, pid_t tid) {
// Do a few runs to get the PSS stable.
diff --git a/libion/kernel-headers/linux/ion.h b/libion/kernel-headers/linux/ion.h
index 5af39d0..3c28080 100644
--- a/libion/kernel-headers/linux/ion.h
+++ b/libion/kernel-headers/linux/ion.h
@@ -38,7 +38,7 @@
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
-#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
#define ION_FLAG_CACHED 1
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_FLAG_CACHED_NEEDS_SYNC 2
diff --git a/liblog/config_read.h b/liblog/config_read.h
index 67f4c20..49a3b75 100644
--- a/liblog/config_read.h
+++ b/liblog/config_read.h
@@ -27,21 +27,21 @@
extern LIBLOG_HIDDEN struct listnode __android_log_persist_read;
#define read_transport_for_each(transp, transports) \
- for (transp = node_to_item((transports)->next, \
+ for ((transp) = node_to_item((transports)->next, \
struct android_log_transport_read, node); \
- (transp != node_to_item(transports, \
+ ((transp) != node_to_item(transports, \
struct android_log_transport_read, node)); \
- transp = node_to_item(transp->node.next, \
+ (transp) = node_to_item((transp)->node.next, \
struct android_log_transport_read, node)) \
#define read_transport_for_each_safe(transp, n, transports) \
- for (transp = node_to_item((transports)->next, \
+ for ((transp) = node_to_item((transports)->next, \
struct android_log_transport_read, node), \
- n = transp->node.next; \
- (transp != node_to_item(transports, \
+ (n) = (transp)->node.next; \
+ ((transp) != node_to_item(transports, \
struct android_log_transport_read, node)); \
- transp = node_to_item(n, struct android_log_transport_read, node), \
- n = transp->node.next)
+ (transp) = node_to_item(n, struct android_log_transport_read, node), \
+ (n) = (transp)->node.next)
LIBLOG_HIDDEN void __android_log_config_read();
diff --git a/liblog/config_write.h b/liblog/config_write.h
index 3a02a4e..3b01a9a 100644
--- a/liblog/config_write.h
+++ b/liblog/config_write.h
@@ -27,21 +27,21 @@
extern LIBLOG_HIDDEN struct listnode __android_log_persist_write;
#define write_transport_for_each(transp, transports) \
- for (transp = node_to_item((transports)->next, \
- struct android_log_transport_write, node); \
- (transp != node_to_item(transports, \
+ for ((transp) = node_to_item((transports)->next, \
+ struct android_log_transport_write, node); \
+ ((transp) != node_to_item(transports, \
struct android_log_transport_write, node)); \
- transp = node_to_item(transp->node.next, \
- struct android_log_transport_write, node)) \
+ (transp) = node_to_item((transp)->node.next, \
+ struct android_log_transport_write, node)) \
#define write_transport_for_each_safe(transp, n, transports) \
- for (transp = node_to_item((transports)->next, \
- struct android_log_transport_write, node), \
- n = transp->node.next; \
- (transp != node_to_item(transports, \
- struct android_log_transport_write, node)); \
- transp = node_to_item(n, struct android_log_transport_write, node), \
- n = transp->node.next)
+ for ((transp) = node_to_item((transports)->next, \
+ struct android_log_transport_write, node), \
+ (n) = (transp)->node.next; \
+ ((transp) != node_to_item(transports, \
+ struct android_log_transport_write, node)); \
+ (transp) = node_to_item(n, struct android_log_transport_write, node), \
+ (n) = (transp)->node.next)
LIBLOG_HIDDEN void __android_log_config_write();
diff --git a/liblog/logger.h b/liblog/logger.h
index c727f29..5087256 100644
--- a/liblog/logger.h
+++ b/liblog/logger.h
@@ -124,23 +124,23 @@
/* assumes caller has structures read-locked, single threaded, or fenced */
#define transport_context_for_each(transp, logger_list) \
- for (transp = node_to_item((logger_list)->transport.next, \
+ for ((transp) = node_to_item((logger_list)->transport.next, \
struct android_log_transport_context, \
node); \
- (transp != node_to_item(&(logger_list)->transport, \
+ ((transp) != node_to_item(&(logger_list)->transport, \
struct android_log_transport_context, \
node)) && \
- (transp->parent == (logger_list)); \
- transp = node_to_item(transp->node.next, \
+ ((transp)->parent == (logger_list)); \
+ (transp) = node_to_item((transp)->node.next, \
struct android_log_transport_context, node))
#define logger_for_each(logp, logger_list) \
- for (logp = node_to_item((logger_list)->logger.next, \
+ for ((logp) = node_to_item((logger_list)->logger.next, \
struct android_log_logger, node); \
- (logp != node_to_item(&(logger_list)->logger, \
+ ((logp) != node_to_item(&(logger_list)->logger, \
struct android_log_logger, node)) && \
- (logp->parent == (logger_list)); \
- logp = node_to_item((logp)->node.next, \
+ ((logp)->parent == (logger_list)); \
+ (logp) = node_to_item((logp)->node.next, \
struct android_log_logger, node))
/* OS specific dribs and drabs */
diff --git a/liblog/logger_read.c b/liblog/logger_read.c
index d04a8c1..00157b7 100644
--- a/liblog/logger_read.c
+++ b/liblog/logger_read.c
@@ -125,7 +125,7 @@
ssize_t ret = -EINVAL; \
struct android_log_transport_context *transp; \
struct android_log_logger *logger_internal = \
- (struct android_log_logger *)logger; \
+ (struct android_log_logger *)(logger); \
\
if (!logger_internal) { \
return ret; \
@@ -186,7 +186,7 @@
#define LOGGER_LIST_FUNCTION(logger_list, def, func, args...) \
struct android_log_transport_context *transp; \
struct android_log_logger_list *logger_list_internal = \
- (struct android_log_logger_list *)logger_list; \
+ (struct android_log_logger_list *)(logger_list); \
\
ssize_t ret = init_transport_context(logger_list_internal); \
if (ret < 0) { \
diff --git a/liblog/tests/benchmark.h b/liblog/tests/benchmark.h
index 7f96e6d..57b3748 100644
--- a/liblog/tests/benchmark.h
+++ b/liblog/tests/benchmark.h
@@ -141,7 +141,7 @@
void StopBenchmarkTiming(uint64_t);
#define BENCHMARK(f) \
- static ::testing::Benchmark* _benchmark_##f __attribute__((unused)) = \
- (::testing::Benchmark*)::testing::BenchmarkFactory(#f, f)
+ static ::testing::Benchmark* _benchmark_##f __attribute__((unused)) = /* NOLINT */ \
+ (::testing::Benchmark*)::testing::BenchmarkFactory(#f, f) /* NOLINT */
#endif // BIONIC_BENCHMARK_H_
diff --git a/libmemtrack/memtrack.c b/libmemtrack/memtrack.c
index 21d9ebd..b528214 100644
--- a/libmemtrack/memtrack.c
+++ b/libmemtrack/memtrack.c
@@ -26,7 +26,7 @@
#include <hardware/memtrack.h>
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
static const memtrack_module_t *module;
diff --git a/libmemunreachable/tests/LeakFolding_test.cpp b/libmemunreachable/tests/LeakFolding_test.cpp
index 879a3a0..e85df5f 100644
--- a/libmemunreachable/tests/LeakFolding_test.cpp
+++ b/libmemunreachable/tests/LeakFolding_test.cpp
@@ -37,10 +37,10 @@
Heap heap_;
};
-#define buffer_begin(buffer) reinterpret_cast<uintptr_t>(&buffer[0])
-#define buffer_end(buffer) (reinterpret_cast<uintptr_t>(&buffer[0]) + sizeof(buffer))
+#define buffer_begin(buffer) reinterpret_cast<uintptr_t>(&(buffer)[0])
+#define buffer_end(buffer) (reinterpret_cast<uintptr_t>(&(buffer)[0]) + sizeof(buffer))
#define ALLOCATION(heap_walker, buffer) \
- ASSERT_EQ(true, heap_walker.Allocation(buffer_begin(buffer), buffer_end(buffer)))
+ ASSERT_EQ(true, (heap_walker).Allocation(buffer_begin(buffer), buffer_end(buffer)))
TEST_F(LeakFoldingTest, one) {
void* buffer1[1] = {nullptr};
diff --git a/libnetutils/dhcp_utils.c b/libnetutils/dhcp_utils.c
index c6b9fe4..56e1d59 100644
--- a/libnetutils/dhcp_utils.c
+++ b/libnetutils/dhcp_utils.c
@@ -243,12 +243,8 @@
property_set(result_prop_name, "");
/* Start the daemon and wait until it's ready */
- if (property_get(HOSTNAME_PROP_NAME, prop_value, NULL) && (prop_value[0] != '\0'))
- snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s:-f %s -h %s %s", DAEMON_NAME,
- p2p_interface, DHCP_CONFIG_PATH, prop_value, interface);
- else
- snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s:-f %s %s", DAEMON_NAME,
- p2p_interface, DHCP_CONFIG_PATH, interface);
+ snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s", DAEMON_NAME,
+ p2p_interface);
memset(prop_value, '\0', PROPERTY_VALUE_MAX);
property_set(ctrl_prop, daemon_cmd);
if (wait_for_property(daemon_prop_name, desired_status, 10) < 0) {
@@ -288,7 +284,8 @@
DAEMON_PROP_NAME,
p2p_interface);
- snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s", DAEMON_NAME, p2p_interface);
+ snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s", DAEMON_NAME,
+ p2p_interface);
/* Stop the daemon and wait until it's reported to be stopped */
property_set(ctrl_prop, daemon_cmd);
@@ -317,7 +314,8 @@
DAEMON_PROP_NAME,
p2p_interface);
- snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s", DAEMON_NAME, p2p_interface);
+ snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s", DAEMON_NAME,
+ p2p_interface);
/* Stop the daemon and wait until it's reported to be stopped */
property_set(ctrl_prop, daemon_cmd);
@@ -357,8 +355,8 @@
property_set(result_prop_name, "");
/* Start the renew daemon and wait until it's ready */
- snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s:%s", DAEMON_NAME_RENEW,
- p2p_interface, interface);
+ snprintf(daemon_cmd, sizeof(daemon_cmd), "%s_%s", DAEMON_NAME_RENEW,
+ p2p_interface);
memset(prop_value, '\0', PROPERTY_VALUE_MAX);
property_set(ctrl_prop, daemon_cmd);
diff --git a/libsparse/output_file.c b/libsparse/output_file.c
index cd30800..d284736 100644
--- a/libsparse/output_file.c
+++ b/libsparse/output_file.c
@@ -57,7 +57,7 @@
#define CHUNK_HEADER_LEN (sizeof(chunk_header_t))
#define container_of(inner, outer_t, elem) \
- ((outer_t *)((char *)inner - offsetof(outer_t, elem)))
+ ((outer_t *)((char *)(inner) - offsetof(outer_t, elem)))
struct output_file_ops {
int (*open)(struct output_file *, int fd);
diff --git a/libutils/RefBase.cpp b/libutils/RefBase.cpp
index 22162fa..085b314 100644
--- a/libutils/RefBase.cpp
+++ b/libutils/RefBase.cpp
@@ -27,7 +27,6 @@
#include <utils/RefBase.h>
-#include <utils/Atomic.h>
#include <utils/CallStack.h>
#include <utils/Log.h>
#include <utils/threads.h>
@@ -57,6 +56,68 @@
namespace android {
+// Usage, invariants, etc:
+
+// It is normally OK just to keep weak pointers to an object. The object will
+// be deallocated by decWeak when the last weak reference disappears.
+// Once a strong reference has been created, the object will disappear once
+// the last strong reference does (decStrong).
+// AttemptIncStrong will succeed if the object has a strong reference, or if it
+// has a weak reference and has never had a strong reference.
+// AttemptIncWeak really does succeed only if there is already a WEAK
+// reference, and thus may fail when attemptIncStrong would succeed.
+// OBJECT_LIFETIME_WEAK changes this behavior to retain the object
+// unconditionally until the last reference of either kind disappears. The
+// client ensures that the extendObjectLifetime call happens before the dec
+// call that would otherwise have deallocated the object, or before an
+// attemptIncStrong call that might rely on it. We do not worry about
+// concurrent changes to the object lifetime.
+// mStrong is the strong reference count. mWeak is the weak reference count.
+// Between calls, and ignoring memory ordering effects, mWeak includes strong
+// references, and is thus >= mStrong.
+//
+// A weakref_impl is allocated as the value of mRefs in a RefBase object on
+// construction.
+// In the OBJECT_LIFETIME_STRONG case, it is deallocated in the RefBase
+// destructor iff the strong reference count was never incremented. The
+// destructor can be invoked either from decStrong, or from decWeak if there
+// was never a strong reference. If the reference count had been incremented,
+// it is deallocated directly in decWeak, and hence still lives as long as
+// the last weak reference.
+// In the OBJECT_LIFETIME_WEAK case, it is always deallocated from the RefBase
+// destructor, which is always invoked by decWeak. DecStrong explicitly avoids
+// the deletion in this case.
+//
+// Memory ordering:
+// The client must ensure that every inc() call, together with all other
+// accesses to the object, happens before the corresponding dec() call.
+//
+// We try to keep memory ordering constraints on atomics as weak as possible,
+// since memory fences or ordered memory accesses are likely to be a major
+// performance cost for this code. All accesses to mStrong, mWeak, and mFlags
+// explicitly relax memory ordering in some way.
+//
+// The only operations that are not memory_order_relaxed are reference count
+// decrements. All reference count decrements are release operations. In
+// addition, the final decrement leading the deallocation is followed by an
+// acquire fence, which we can view informally as also turning it into an
+// acquire operation. (See 29.8p4 [atomics.fences] for details. We could
+// alternatively use acq_rel operations for all decrements. This is probably
+// slower on most current (2016) hardware, especially on ARMv7, but that may
+// not be true indefinitely.)
+//
+// This convention ensures that the second-to-last decrement synchronizes with
+// (in the language of 1.10 in the C++ standard) the final decrement of a
+// reference count. Since reference counts are only updated using atomic
+// read-modify-write operations, this also extends to any earlier decrements.
+// (See "release sequence" in 1.10.)
+//
+// Since all operations on an object happen before the corresponding reference
+// count decrement, and all reference count decrements happen before the final
+// one, we are guaranteed that all other object accesses happen before the
+// object is destroyed.
+
+
#define INITIAL_STRONG_VALUE (1<<28)
// ---------------------------------------------------------------------------
@@ -64,10 +125,10 @@
class RefBase::weakref_impl : public RefBase::weakref_type
{
public:
- volatile int32_t mStrong;
- volatile int32_t mWeak;
- RefBase* const mBase;
- volatile int32_t mFlags;
+ std::atomic<int32_t> mStrong;
+ std::atomic<int32_t> mWeak;
+ RefBase* const mBase;
+ std::atomic<int32_t> mFlags;
#if !DEBUG_REFS
@@ -141,7 +202,7 @@
void addStrongRef(const void* id) {
//ALOGD_IF(mTrackEnabled,
// "addStrongRef: RefBase=%p, id=%p", mBase, id);
- addRef(&mStrongRefs, id, mStrong);
+ addRef(&mStrongRefs, id, mStrong.load(std::memory_order_relaxed));
}
void removeStrongRef(const void* id) {
@@ -150,7 +211,7 @@
if (!mRetain) {
removeRef(&mStrongRefs, id);
} else {
- addRef(&mStrongRefs, id, -mStrong);
+ addRef(&mStrongRefs, id, -mStrong.load(std::memory_order_relaxed));
}
}
@@ -162,14 +223,14 @@
}
void addWeakRef(const void* id) {
- addRef(&mWeakRefs, id, mWeak);
+ addRef(&mWeakRefs, id, mWeak.load(std::memory_order_relaxed));
}
void removeWeakRef(const void* id) {
if (!mRetain) {
removeRef(&mWeakRefs, id);
} else {
- addRef(&mWeakRefs, id, -mWeak);
+ addRef(&mWeakRefs, id, -mWeak.load(std::memory_order_relaxed));
}
}
@@ -330,7 +391,7 @@
refs->incWeak(id);
refs->addStrongRef(id);
- const int32_t c = android_atomic_inc(&refs->mStrong);
+ const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
@@ -339,7 +400,10 @@
return;
}
- android_atomic_add(-INITIAL_STRONG_VALUE, &refs->mStrong);
+ int32_t old = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
+ std::memory_order_relaxed);
+ // A decStrong() must still happen after us.
+ ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
refs->mBase->onFirstRef();
}
@@ -347,27 +411,39 @@
{
weakref_impl* const refs = mRefs;
refs->removeStrongRef(id);
- const int32_t c = android_atomic_dec(&refs->mStrong);
+ const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
ALOG_ASSERT(c >= 1, "decStrong() called on %p too many times", refs);
if (c == 1) {
+ std::atomic_thread_fence(std::memory_order_acquire);
refs->mBase->onLastStrongRef(id);
- if ((refs->mFlags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
+ int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
+ if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
delete this;
+ // Since mStrong had been incremented, the destructor did not
+ // delete refs.
}
}
+ // Note that even with only strong reference operations, the thread
+ // deallocating this may not be the same as the thread deallocating refs.
+ // That's OK: all accesses to this happen before its deletion here,
+ // and all accesses to refs happen before its deletion in the final decWeak.
+ // The destructor can safely access mRefs because either it's deleting
+ // mRefs itself, or it's running entirely before the final mWeak decrement.
refs->decWeak(id);
}
void RefBase::forceIncStrong(const void* id) const
{
+ // Allows initial mStrong of 0 in addition to INITIAL_STRONG_VALUE.
+ // TODO: Better document assumptions.
weakref_impl* const refs = mRefs;
refs->incWeak(id);
refs->addStrongRef(id);
- const int32_t c = android_atomic_inc(&refs->mStrong);
+ const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
ALOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count underflow",
refs);
#if PRINT_REFS
@@ -376,7 +452,8 @@
switch (c) {
case INITIAL_STRONG_VALUE:
- android_atomic_add(-INITIAL_STRONG_VALUE, &refs->mStrong);
+ refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
+ std::memory_order_relaxed);
// fall through...
case 0:
refs->mBase->onFirstRef();
@@ -385,7 +462,8 @@
int32_t RefBase::getStrongCount() const
{
- return mRefs->mStrong;
+ // Debugging only; No memory ordering guarantees.
+ return mRefs->mStrong.load(std::memory_order_relaxed);
}
RefBase* RefBase::weakref_type::refBase() const
@@ -397,7 +475,8 @@
{
weakref_impl* const impl = static_cast<weakref_impl*>(this);
impl->addWeakRef(id);
- const int32_t c __unused = android_atomic_inc(&impl->mWeak);
+ const int32_t c __unused = impl->mWeak.fetch_add(1,
+ std::memory_order_relaxed);
ALOG_ASSERT(c >= 0, "incWeak called on %p after last weak ref", this);
}
@@ -406,16 +485,19 @@
{
weakref_impl* const impl = static_cast<weakref_impl*>(this);
impl->removeWeakRef(id);
- const int32_t c = android_atomic_dec(&impl->mWeak);
+ const int32_t c = impl->mWeak.fetch_sub(1, std::memory_order_release);
ALOG_ASSERT(c >= 1, "decWeak called on %p too many times", this);
if (c != 1) return;
+ atomic_thread_fence(std::memory_order_acquire);
- if ((impl->mFlags&OBJECT_LIFETIME_WEAK) == OBJECT_LIFETIME_STRONG) {
+ int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
+ if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
// This is the regular lifetime case. The object is destroyed
// when the last strong reference goes away. Since weakref_impl
// outlives the object, it is not destroyed in the dtor, and
// we'll have to do it here.
- if (impl->mStrong == INITIAL_STRONG_VALUE) {
+ if (impl->mStrong.load(std::memory_order_relaxed)
+ == INITIAL_STRONG_VALUE) {
// Special case: we never had a strong reference, so we need to
// destroy the object now.
delete impl->mBase;
@@ -424,13 +506,10 @@
delete impl;
}
} else {
- // less common case: lifetime is OBJECT_LIFETIME_{WEAK|FOREVER}
+ // This is the OBJECT_LIFETIME_WEAK case. The last weak-reference
+ // is gone, we can destroy the object.
impl->mBase->onLastWeakRef(id);
- if ((impl->mFlags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_WEAK) {
- // this is the OBJECT_LIFETIME_WEAK case. The last weak-reference
- // is gone, we can destroy the object.
- delete impl->mBase;
- }
+ delete impl->mBase;
}
}
@@ -439,7 +518,7 @@
incWeak(id);
weakref_impl* const impl = static_cast<weakref_impl*>(this);
- int32_t curCount = impl->mStrong;
+ int32_t curCount = impl->mStrong.load(std::memory_order_relaxed);
ALOG_ASSERT(curCount >= 0,
"attemptIncStrong called on %p after underflow", this);
@@ -447,19 +526,20 @@
while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
// we're in the easy/common case of promoting a weak-reference
// from an existing strong reference.
- if (android_atomic_cmpxchg(curCount, curCount+1, &impl->mStrong) == 0) {
+ if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
+ std::memory_order_relaxed)) {
break;
}
// the strong count has changed on us, we need to re-assert our
- // situation.
- curCount = impl->mStrong;
+ // situation. curCount was updated by compare_exchange_weak.
}
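
The dropped re-read of mStrong is deliberate: unlike android_atomic_cmpxchg(),
compare_exchange_weak() writes the value it actually found back into its
"expected" argument on failure, so the loop retries with fresh data for free
(and, being the weak form, it may also fail spuriously, which is why it must
sit in a loop). A standalone sketch of that contract:

    #include <atomic>
    #include <cstdint>

    // Same retry shape as the promotion loop above: on failure,
    // compare_exchange_weak updates 'expected' to the current value,
    // so no separate reload is needed.
    bool incrementIfPositive(std::atomic<int32_t>& count) {
        int32_t expected = count.load(std::memory_order_relaxed);
        while (expected > 0) {
            if (count.compare_exchange_weak(expected, expected + 1,
                                            std::memory_order_relaxed)) {
                return true;  // we won the race and incremented
            }
            // 'expected' now holds the value another thread installed.
        }
        return false;  // count was never positive while we looked
    }
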
if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
// we're now in the harder case of either:
// - there never was a strong reference on us
// - or, all strong references have been released
- if ((impl->mFlags&OBJECT_LIFETIME_WEAK) == OBJECT_LIFETIME_STRONG) {
+ int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
+ if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
// this object has a "normal" life-time, i.e.: it gets destroyed
// when the last strong reference goes away
if (curCount <= 0) {
@@ -473,13 +553,13 @@
// there never was a strong-reference, so we can try to
// promote this object; we need to do that atomically.
while (curCount > 0) {
- if (android_atomic_cmpxchg(curCount, curCount + 1,
- &impl->mStrong) == 0) {
+ if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
+ std::memory_order_relaxed)) {
break;
}
// the strong count has changed on us, we need to re-assert our
// situation (e.g.: another thread has inc/decStrong'ed us)
- curCount = impl->mStrong;
+ // curCount has been updated.
}
if (curCount <= 0) {
@@ -499,7 +579,7 @@
}
// grab a strong-reference, which is always safe due to the
// extended life-time.
- curCount = android_atomic_inc(&impl->mStrong);
+ curCount = impl->mStrong.fetch_add(1, std::memory_order_relaxed);
}
// If the strong reference count has already been incremented by
@@ -518,21 +598,16 @@
ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif
- // now we need to fix-up the count if it was INITIAL_STRONG_VALUE
- // this must be done safely, i.e.: handle the case where several threads
+ // curCount is the value of mStrong before we incremented it.
+ // Now we need to fix up the count if it was INITIAL_STRONG_VALUE.
+ // This must be done safely, i.e.: handle the case where several threads
// were here in attemptIncStrong().
- curCount = impl->mStrong;
- while (curCount >= INITIAL_STRONG_VALUE) {
- ALOG_ASSERT(curCount > INITIAL_STRONG_VALUE,
- "attemptIncStrong in %p underflowed to INITIAL_STRONG_VALUE",
- this);
- if (android_atomic_cmpxchg(curCount, curCount-INITIAL_STRONG_VALUE,
- &impl->mStrong) == 0) {
- break;
- }
- // the strong-count changed on us, we need to re-assert the situation,
- // for e.g.: it's possible the fix-up happened in another thread.
- curCount = impl->mStrong;
+ // curCount > INITIAL_STRONG_VALUE is OK, and can happen if we're doing
+ // this in the middle of another incStrong. The subtraction is handled
+ // by the thread that started with INITIAL_STRONG_VALUE.
+ if (curCount == INITIAL_STRONG_VALUE) {
+ impl->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
+ std::memory_order_relaxed);
}
return true;
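
attemptIncStrong() is the machinery behind wp<T>::promote(); from a caller's
point of view, all of the above collapses into a null check on the returned
sp<>. A hypothetical caller for illustration:

    #include <utils/RefBase.h>

    // Hypothetical caller: promote() yields a null sp<> when
    // attemptIncStrong() fails, e.g. because the object is already
    // being torn down on another thread.
    void useIfAlive(const android::wp<android::RefBase>& weak) {
        android::sp<android::RefBase> strong = weak.promote();
        if (strong.get() != nullptr) {
            // We now hold a strong reference; the object cannot be
            // deleted until 'strong' goes out of scope.
        }
    }
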
@@ -542,14 +617,15 @@
{
weakref_impl* const impl = static_cast<weakref_impl*>(this);
- int32_t curCount = impl->mWeak;
+ int32_t curCount = impl->mWeak.load(std::memory_order_relaxed);
ALOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow",
this);
while (curCount > 0) {
- if (android_atomic_cmpxchg(curCount, curCount+1, &impl->mWeak) == 0) {
+ if (impl->mWeak.compare_exchange_weak(curCount, curCount+1,
+ std::memory_order_relaxed)) {
break;
}
- curCount = impl->mWeak;
+ // curCount has been updated.
}
if (curCount > 0) {
@@ -561,7 +637,9 @@
int32_t RefBase::weakref_type::getWeakCount() const
{
- return static_cast<const weakref_impl*>(this)->mWeak;
+ // Debug only!
+ return static_cast<const weakref_impl*>(this)->mWeak
+ .load(std::memory_order_relaxed);
}
void RefBase::weakref_type::printRefs() const
@@ -592,17 +670,19 @@
RefBase::~RefBase()
{
- if (mRefs->mStrong == INITIAL_STRONG_VALUE) {
+ if (mRefs->mStrong.load(std::memory_order_relaxed)
+ == INITIAL_STRONG_VALUE) {
// we never acquired a strong (and/or weak) reference on this object.
delete mRefs;
} else {
- // life-time of this object is extended to WEAK or FOREVER, in
+ // life-time of this object is extended to WEAK, in
// which case weakref_impl doesn't out-live the object and we
// can free it now.
- if ((mRefs->mFlags & OBJECT_LIFETIME_MASK) != OBJECT_LIFETIME_STRONG) {
+ int32_t flags = mRefs->mFlags.load(std::memory_order_relaxed);
+ if ((flags & OBJECT_LIFETIME_MASK) != OBJECT_LIFETIME_STRONG) {
// It's possible that the weak count is not 0 if the object
// re-acquired a weak reference in its destructor
- if (mRefs->mWeak == 0) {
+ if (mRefs->mWeak.load(std::memory_order_relaxed) == 0) {
delete mRefs;
}
}
@@ -613,7 +693,9 @@
void RefBase::extendObjectLifetime(int32_t mode)
{
- android_atomic_or(mode, &mRefs->mFlags);
+ // Must be happens-before ordered with respect to construction or any
+ // operation that could destroy the object.
+ mRefs->mFlags.fetch_or(mode, std::memory_order_relaxed);
}
void RefBase::onFirstRef()
diff --git a/libutils/SharedBuffer.cpp b/libutils/SharedBuffer.cpp
index c7dd1ab..f3d6d8f 100644
--- a/libutils/SharedBuffer.cpp
+++ b/libutils/SharedBuffer.cpp
@@ -20,7 +20,6 @@
#include <string.h>
#include <log/log.h>
-#include <utils/Atomic.h>
#include "SharedBuffer.h"
@@ -37,18 +36,19 @@
SharedBuffer* sb = static_cast<SharedBuffer *>(malloc(sizeof(SharedBuffer) + size));
if (sb) {
- sb->mRefs = 1;
+ // Should be std::atomic_init(&sb->mRefs, 1);
+ // But that generates a warning with some compilers.
+ // The following is OK on Android-supported platforms.
+ sb->mRefs.store(1, std::memory_order_relaxed);
sb->mSize = size;
}
return sb;
}
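
The relaxed store in alloc() substitutes for atomic initialization: the header
has just come out of malloc() and has not been published to any other thread,
and (as the comment notes) Android's toolchains lay out std::atomic<int32_t>
like a plain int32_t, although strictly speaking the standard would want
placement-new here. A reduced sketch of the pattern; Header and allocHeader
are invented names:

    #include <atomic>
    #include <cstdint>
    #include <cstdlib>

    // Invented stand-in for SharedBuffer's header-plus-payload layout.
    struct Header {
        std::atomic<int32_t> refs;
        size_t size;
    };

    Header* allocHeader(size_t payload) {
        Header* h = static_cast<Header*>(malloc(sizeof(Header) + payload));
        if (h) {
            // Stands in for std::atomic_init(&h->refs, 1); no other
            // thread can race with this store on a fresh allocation.
            h->refs.store(1, std::memory_order_relaxed);
            h->size = payload;
        }
        return h;
    }
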
-ssize_t SharedBuffer::dealloc(const SharedBuffer* released)
+void SharedBuffer::dealloc(const SharedBuffer* released)
{
- if (released->mRefs != 0) return -1; // XXX: invalid operation
free(const_cast<SharedBuffer*>(released));
- return 0;
}
SharedBuffer* SharedBuffer::edit() const
@@ -108,14 +108,15 @@
}
void SharedBuffer::acquire() const {
- android_atomic_inc(&mRefs);
+ mRefs.fetch_add(1, std::memory_order_relaxed);
}
int32_t SharedBuffer::release(uint32_t flags) const
{
int32_t prev = 1;
- if (onlyOwner() || ((prev = android_atomic_dec(&mRefs)) == 1)) {
- mRefs = 0;
+ if (onlyOwner() || (((prev = mRefs.fetch_sub(1, std::memory_order_release)) == 1)
+ && (atomic_thread_fence(std::memory_order_acquire), true))) {
+ mRefs.store(0, std::memory_order_relaxed);
if ((flags & eKeepStorage) == 0) {
free(const_cast<SharedBuffer*>(this));
}
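
The doubled parentheses around the assignment are load-bearing: "==" binds more
tightly than "=", so without them prev would receive the boolean result of the
comparison rather than the previous reference count. A self-contained
illustration:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
        std::atomic<int32_t> refs{2};
        int32_t prev = 0;
        // Without the inner parentheses this would parse as
        // prev = (refs.fetch_sub(...) == 1), storing 0 or 1 in prev.
        bool last =
                ((prev = refs.fetch_sub(1, std::memory_order_release)) == 1);
        assert(prev == 2 && !last);  // prev is the old count, not a bool
        return 0;
    }
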
diff --git a/libutils/SharedBuffer.h b/libutils/SharedBuffer.h
index b670953..48358cd 100644
--- a/libutils/SharedBuffer.h
+++ b/libutils/SharedBuffer.h
@@ -14,9 +14,14 @@
* limitations under the License.
*/
+/*
+ * DEPRECATED. DO NOT USE FOR NEW CODE.
+ */
+
#ifndef ANDROID_SHARED_BUFFER_H
#define ANDROID_SHARED_BUFFER_H
+#include <atomic>
#include <stdint.h>
#include <sys/types.h>
@@ -43,7 +48,7 @@
* In other words, the buffer must have been release by all its
* users.
*/
- static ssize_t dealloc(const SharedBuffer* released);
+ static void dealloc(const SharedBuffer* released);
//! access the data for read
inline const void* data() const;
@@ -94,12 +99,16 @@
SharedBuffer(const SharedBuffer&);
SharedBuffer& operator = (const SharedBuffer&);
- // 16 bytes. must be sized to preserve correct alignment.
- mutable int32_t mRefs;
- size_t mSize;
- uint32_t mReserved[2];
+ // Must be sized to preserve correct alignment.
+ mutable std::atomic<int32_t> mRefs;
+ size_t mSize;
+ uint32_t mReserved[2];
};
+static_assert(sizeof(SharedBuffer) % 8 == 0
+ && (sizeof(size_t) > 4 || sizeof(SharedBuffer) == 16),
+ "SharedBuffer has unexpected size");
+
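The static_assert makes the old "16 bytes" comment enforceable: payload memory
starts immediately after the header (data() returns this + 1), so
sizeof(SharedBuffer) must stay a multiple of 8 to keep the payload 8-byte
aligned on both 32- and 64-bit builds. A sketch of the invariant with a
stand-in struct:

    #include <cstddef>
    #include <cstdint>

    // Stand-in for SharedBuffer's fields (mRefs is atomic in the real
    // class, which does not change the layout on supported targets).
    struct HeaderLike {
        int32_t refs;
        size_t size;
        uint32_t reserved[2];
    };

    static_assert(sizeof(HeaderLike) % 8 == 0,
                  "payload following the header would be misaligned");

    void* payloadOf(HeaderLike* h) {
        return h + 1;  // mirrors SharedBuffer::data()
    }
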
// ---------------------------------------------------------------------------
const void* SharedBuffer::data() const {
@@ -127,7 +136,7 @@
}
bool SharedBuffer::onlyOwner() const {
- return (mRefs == 1);
+ return (mRefs.load(std::memory_order_acquire) == 1);
}
}; // namespace android
diff --git a/lmkd/lmkd.c b/lmkd/lmkd.c
index aa3db8a..37fbdb8 100644
--- a/lmkd/lmkd.c
+++ b/lmkd/lmkd.c
@@ -114,7 +114,7 @@
static struct proc *pidhash[PIDHASH_SZ];
#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
-#define ADJTOSLOT(adj) (adj + -OOM_ADJUST_MIN)
+#define ADJTOSLOT(adj) ((adj) + -OOM_ADJUST_MIN)
static struct adjslot_list procadjslot_list[ADJTOSLOT(OOM_ADJUST_MAX) + 1];
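
Wrapping the macro parameter in parentheses guards against argument expressions
built from lower-precedence operators; the log_id_for_each change in logd below
is the same hygiene fix. An illustration, with OOM_ADJUST_MIN's value assumed
as -16 for the example:

    #include <cassert>

    #define OOM_ADJUST_MIN (-16)                        // assumed value
    #define ADJTOSLOT_OLD(adj) (adj + -OOM_ADJUST_MIN)  // unparenthesized
    #define ADJTOSLOT(adj)     ((adj) + -OOM_ADJUST_MIN)

    int main() {
        int x = 1;
        // "<<" binds less tightly than "+", so the old macro expands to
        // (x << 1 + 16), i.e. x << 17, instead of (x << 1) + 16.
        assert(ADJTOSLOT(x << 1) == 18);
        assert(ADJTOSLOT_OLD(x << 1) == (1 << 17));
        return 0;
    }
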
/*
diff --git a/logd/LogStatistics.h b/logd/LogStatistics.h
index 6f7d264..b32c27d 100644
--- a/logd/LogStatistics.h
+++ b/logd/LogStatistics.h
@@ -33,7 +33,7 @@
#include "LogUtils.h"
#define log_id_for_each(i) \
- for (log_id_t i = LOG_ID_MIN; i < LOG_ID_MAX; i = (log_id_t) (i + 1))
+ for (log_id_t i = LOG_ID_MIN; (i) < LOG_ID_MAX; (i) = (log_id_t) ((i) + 1))
class LogStatistics;