Clean up the pthread-only atomic stuff a little.

It looks like we can probably just use the generic GCC builtins instead
of the architecture-specific assembler versions; the generated code looks
pretty similar. We should come back to that.

These routines are only used by the pthread implementation, and
__bionic_atomic_inc isn't used, so we can remove it.
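
For illustration only (nothing below is part of this change, and the lock
variable and helper name are made up), "just use the generic GCC builtins"
would look roughly like this at a call site, given that __bionic_cmpxchg
returns 0 on success:

    #include <stdint.h>

    /* Same contract as __bionic_cmpxchg in the header below: 0 on success. */
    static inline int cmpxchg_like_bionic(int32_t old_value, int32_t new_value,
                                          volatile int32_t* ptr) {
      return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
    }

    static volatile int32_t lock;  /* hypothetical: 0 == unlocked, 1 == held */

    void try_lock_both_ways(void) {
      /* Via the bionic-style wrapper: 0 means the swap happened. */
      if (cmpxchg_like_bionic(0, 1, &lock) == 0) {
        lock = 0;  /* release */
      }
      /* Via the GCC builtin directly: non-zero means the swap happened. */
      if (__sync_bool_compare_and_swap(&lock, 0, 1)) {
        lock = 0;  /* release */
      }
    }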

Change-Id: I8b5b8cb30a1b159f0e85c3675aee06ddef39b429
diff --git a/libc/private/bionic_atomic_gcc_builtin.h b/libc/private/bionic_atomic_gcc_builtin.h
index 2919f7f..9e5e5aa 100644
--- a/libc/private/bionic_atomic_gcc_builtin.h
+++ b/libc/private/bionic_atomic_gcc_builtin.h
@@ -16,46 +16,35 @@
 #ifndef BIONIC_ATOMIC_GCC_BUILTIN_H
 #define BIONIC_ATOMIC_GCC_BUILTIN_H
 
-/* This header file is used by default if we don't have optimized atomic
+/*
+ * This header file is used by default if we don't have optimized atomic
  * routines for a given platform. See bionic_atomic_arm.h and
  * bionic_atomic_x86.h for examples.
+ *
+ * Note that the GCC builtins include barriers that aren't present in
+ * the architecture-specific assembler versions.
  */
 
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier(void)
-{
-    __sync_synchronize();
+__ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
+  __sync_synchronize();
 }
 
-__ATOMIC_INLINE__ int
-__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
-{
-    /* We must return 0 on success */
-    return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
+__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
+  /* We must return 0 on success. */
+  return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
 }
 
-__ATOMIC_INLINE__ int32_t
-__bionic_swap(int32_t new_value, volatile int32_t* ptr)
-{
-    int32_t old_value;
-    do {
-        old_value = *ptr;
-    } while (__sync_val_compare_and_swap(ptr, old_value, new_value) != old_value);
-    return old_value;
+__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
+  int32_t old_value;
+  do {
+    old_value = *ptr;
+  } while (__sync_val_compare_and_swap(ptr, old_value, new_value) != old_value);
+  return old_value;
 }
 
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_inc(volatile int32_t* ptr)
-{
-    /* We must return the old value */
-    return __sync_fetch_and_add(ptr, 1);
-}
-
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_dec(volatile int32_t* ptr)
-{
-    /* We must return the old value */
-    return __sync_fetch_and_add(ptr, -1);
+__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
+  /* We must return the old value. */
+  return __sync_fetch_and_add(ptr, -1);
 }
 
 #endif /* BIONIC_ATOMIC_GCC_BUILTIN_H */
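
Aside (not part of the change): the new comment above notes that the GCC
builtins include barriers the architecture-specific assembler versions lack.
A rough sketch of why that matters for lock-style code, with made-up variable
names: __sync_val_compare_and_swap is a full barrier, so an acquire through
the builtin already orders the critical section, whereas a barrier-free
assembler cmpxchg would need explicit __bionic_memory_barrier()
(i.e. __sync_synchronize()) calls.

    #include <stdint.h>

    static volatile int32_t lock;   /* hypothetical: 0 == unlocked, 1 == held */
    static int32_t shared_counter;  /* hypothetical shared state */

    void locked_increment(void) {
      /* The builtin CAS is a full barrier, so the critical section cannot be
       * reordered before the acquire. */
      while (__sync_val_compare_and_swap(&lock, 0, 1) != 0) {
        /* spin */
      }
      shared_counter++;
      /* Make the update visible before dropping the lock; with a barrier-free
       * assembler swap, this is where __bionic_memory_barrier() would go. */
      __sync_synchronize();
      lock = 0;
    }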