/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef BIONIC_ATOMIC_X86_H
#define BIONIC_ATOMIC_X86_H

/* Define a full memory barrier; this is only needed when building the
 * platform for a multi-core (SMP) device.
 */
#if defined(ANDROID_SMP) && ANDROID_SMP == 1
__ATOMIC_INLINE__ void
__bionic_memory_barrier()
{
    __asm__ __volatile__ ( "mfence" : : : "memory" );
}
#else
__ATOMIC_INLINE__ void
__bionic_memory_barrier()
{
    /* A simple compiler barrier. */
    __asm__ __volatile__ ( "" : : : "memory" );
}
#endif
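/* Usage sketch (illustrative only, not part of the original header): a
 * hypothetical release-style store built from the barrier above. On SMP
 * builds the barrier emits an mfence; on uniprocessor builds it merely
 * stops the compiler from reordering accesses across it.
 *
 *   static void example_store_release(volatile int32_t* ptr, int32_t value) {
 *       __bionic_memory_barrier();  // complete earlier accesses first
 *       *ptr = value;               // then publish the new value
 *   }
 */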

/* Compare-and-swap, without any explicit barriers. Note that this function
 * returns 0 on success, and 1 on failure. The opposite convention is typically
 * used on other platforms.
 */
__ATOMIC_INLINE__ int
__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev;
    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                          : "=a" (prev)
                          : "q" (new_value), "m" (*ptr), "0" (old_value)
                          : "memory");
    return prev != old_value;
}
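/* Usage sketch (illustrative only, not part of the original header): a
 * hypothetical atomic add built as a compare-and-swap loop. Because
 * __bionic_cmpxchg() returns 0 on success, the loop retries while the
 * result is non-zero.
 *
 *   static int32_t example_atomic_add(int32_t amount, volatile int32_t* ptr) {
 *       int32_t old;
 *       do {
 *           old = *ptr;
 *       } while (__bionic_cmpxchg(old, old + amount, ptr) != 0);
 *       return old;  // the value *ptr held before the add
 *   }
 */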

/* Swap, without any explicit barriers. Note that on x86, xchgl with a
 * memory operand is implicitly locked, so no 'lock' prefix is needed.
 */
__ATOMIC_INLINE__ int32_t
__bionic_swap(int32_t new_value, volatile int32_t *ptr)
{
    __asm__ __volatile__ ("xchgl %1, %0"
                          : "=r" (new_value)
                          : "m" (*ptr), "0" (new_value)
                          : "memory");
    return new_value;
}
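/* Usage sketch (illustrative only, not part of the original header): a
 * hypothetical test-and-set spinlock acquired with __bionic_swap(). The
 * returned previous value tells us whether the lock was already held.
 *
 *   static void example_spin_lock(volatile int32_t* lock) {
 *       while (__bionic_swap(1, lock) != 0) {
 *           // spin: the old value was 1, so another thread holds the lock
 *       }
 *   }
 *
 *   static void example_spin_unlock(volatile int32_t* lock) {
 *       *lock = 0;  // plain store; insert a barrier first if release
 *                   // ordering against earlier writes is required
 *   }
 */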

/* Atomic increment, without explicit barriers. */
__ATOMIC_INLINE__ int32_t
__bionic_atomic_inc(volatile int32_t *ptr)
{
    int increment = 1;
    __asm__ __volatile__ ("lock; xaddl %0, %1"
                          : "+r" (increment), "+m" (*ptr)
                          : : "memory");
    /* increment now holds the old value of *ptr */
    return increment;
}

/* Atomic decrement, without explicit barriers. */
__ATOMIC_INLINE__ int32_t
__bionic_atomic_dec(volatile int32_t *ptr)
{
    int increment = -1;
    __asm__ __volatile__ ("lock; xaddl %0, %1"
                          : "+r" (increment), "+m" (*ptr)
                          : : "memory");
    /* increment now holds the old value of *ptr */
    return increment;
}
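/* Usage sketch (illustrative only, not part of the original header): a
 * hypothetical reference counter built on the two functions above. Both
 * return the *old* value of the counter, so the thread that observes a
 * previous count of 1 is the one that dropped the last reference.
 *
 *   static void example_ref(volatile int32_t* refcount) {
 *       __bionic_atomic_inc(refcount);
 *   }
 *
 *   static void example_unref(volatile int32_t* refcount, void (*destroy)(void)) {
 *       if (__bionic_atomic_dec(refcount) == 1) {
 *           destroy();  // old value was 1, so the count just hit 0
 *       }
 *   }
 */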

#endif /* BIONIC_ATOMIC_X86_H */