blob: e94129ca67cc597d9a1c1bb0c697deb08a916afc [file] [log] [blame]
David 'Digit' Turnere31bfae2011-11-15 15:47:02 +01001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#ifndef BIONIC_ATOMIC_ARM_H
17#define BIONIC_ATOMIC_ARM_H
18
19#include <machine/cpu-features.h>
20
/* Some of the hardware instructions used below are not available in Thumb-1
 * mode (they are if you build in ARM or Thumb-2 mode though). To solve this
 * problem, we're going to use the same technique as libatomics_ops,
 * which is to temporarily switch to ARM, do the operation, then switch
 * back to Thumb-1.
26 *
27 * This results in two 'bx' jumps, just like a normal function call, but
28 * everything is kept inlined, avoids loading or computing the function's
29 * address, and prevents a little I-cache trashing too.
30 *
31 * However, it is highly recommended to avoid compiling any C library source
32 * file that use these functions in Thumb-1 mode.
33 *
34 * Define three helper macros to implement this:
35 */
/* Helper macros used by the inline-assembly blocks below.
 *
 * __ATOMIC_SWITCH_TO_ARM:   emitted before an ldrex/strex sequence to switch
 *                           the assembler into ARM mode when compiling for
 *                           Thumb-1, where those instructions are unavailable.
 * __ATOMIC_SWITCH_TO_THUMB: switches back to Thumb mode afterwards.
 * __ATOMIC_CLOBBERS:        extra register(s) clobbered by the mode switch
 *                           (r3 is used as the scratch register for the jump).
 */
#if defined(__thumb__) && !defined(__thumb2__)
# define __ATOMIC_SWITCH_TO_ARM \
    "adr r3, 5f\n" \
    "bx r3\n" \
    ".align\n" \
    ".arm\n" \
    "5:\n"
/* note: the leading \n below is intentional */
# define __ATOMIC_SWITCH_TO_THUMB \
    "\n" \
    "adr r3, 6f\n" \
    "bx r3\n" \
    ".thumb\n" \
    "6:\n"
/* FIX: ".thumb" previously lacked its trailing "\n"; adjacent string literals
 * concatenate, so the assembler saw the single token ".thumb6:" instead of a
 * ".thumb" directive followed by the label "6:", and the switch back to Thumb
 * mode never took effect. */

# define __ATOMIC_CLOBBERS "r3" /* list of clobbered registers */

/* Warn the user that ARM mode should really be preferred! */
# warning Rebuilding this source file in ARM mode is highly recommended for performance!!

#else
# define __ATOMIC_SWITCH_TO_ARM /* nothing */
# define __ATOMIC_SWITCH_TO_THUMB /* nothing */
# define __ATOMIC_CLOBBERS /* nothing */
#endif
61
62
/* Define a full memory barrier; this is only needed if we build the
 * platform for a multi-core device. For the record, using a 'dmb'
 * instruction on a Nexus One device can take up to 180 ns even if
 * it is completely unnecessary on this device.
 *
 * NOTE: This is where the platform and NDK atomic headers are
 * going to diverge. With the NDK, we don't know if the generated
 * code is going to run on a single or multi-core device, so we
 * need to be cautious.
72 *
David 'Digit' Turnere31bfae2011-11-15 15:47:02 +010073 * I.e. on single-core devices, the helper immediately returns,
74 * on multi-core devices, it uses "dmb" or any other means to
75 * perform a full-memory barrier.
76 *
77 * There are three cases to consider for the platform:
78 *
79 * - multi-core ARMv7-A => use the 'dmb' hardware instruction
80 * - multi-core ARMv6 => use the coprocessor
Nick Kraleviche91f7172013-07-03 14:14:06 -070081 * - single core ARMv6+ => do not use any hardware barrier
David 'Digit' Turnere31bfae2011-11-15 15:47:02 +010082 */
#if defined(ANDROID_SMP) && ANDROID_SMP == 1

/* Multi-core build: emit a real hardware barrier.
 * For ARMv7-A, we can use the 'dmb' instruction directly.
 * (__ATOMIC_INLINE__ is assumed to be defined by an including header
 *  — not visible in this file.) */
__ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
  /* Note: we always build in ARM or Thumb-2 on ARMv7-A, so don't
   * bother with __ATOMIC_SWITCH_TO_ARM */
  __asm__ __volatile__ ( "dmb" : : : "memory" );
}

#else /* !ANDROID_SMP */

/* Single-core build: no hardware barrier is emitted; we only stop the
 * compiler from reordering memory accesses across this call. */
__ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
  /* A simple compiler barrier: the empty asm with a "memory" clobber
   * forbids the compiler from caching memory values across it, but
   * generates no instructions. */
  __asm__ __volatile__ ( "" : : : "memory" );
}

#endif /* !ANDROID_SMP */
Nick Kraleviche91f7172013-07-03 14:14:06 -0700100
/* Compare-and-swap, without any explicit barriers. Note that this function
 * returns 0 on success, and 1 on failure. The opposite convention is typically
 * used on other platforms.
 */
/* Atomically: if *ptr == old_value then *ptr = new_value.
 * Returns 0 on success, 1 on failure (the value did not match old_value).
 * No memory barriers are implied; callers must add them if needed. */
__ATOMIC_INLINE__ int
__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%3]\n"    /* prev = *ptr (exclusive load) */
            "mov %1, #0\n"        /* status = 0: assume success */
            "teq %0, %4\n"        /* compare prev with old_value */
#ifdef __thumb2__
            "it eq\n"             /* Thumb-2 requires an IT block before
                                   * the conditional store below */
#endif
            "strexeq %1, %5, [%3]" /* if equal: try *ptr = new_value;
                                    * status = 1 if the exclusive
                                    * reservation was lost */
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (status), "+m"(*ptr)
            : "r" (ptr), "Ir" (old_value), "r" (new_value)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0)); /* retry only if strex failed */
    return prev != old_value;  /* 0 = swapped, 1 = value mismatch */
}
David 'Digit' Turnere31bfae2011-11-15 15:47:02 +0100126
/* Swap operation, without any explicit barriers.
 * Atomically sets *ptr = new_value and returns the previous value of *ptr. */
__ATOMIC_INLINE__ int32_t
__bionic_swap(int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%3]\n"   /* prev = *ptr (exclusive load) */
            "strex %1, %4, [%3]" /* *ptr = new_value; status = 0 on success */
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (status), "+m" (*ptr)
            : "r" (ptr), "r" (new_value)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0)); /* retry if reservation lost */
    return prev;
}
David 'Digit' Turnere31bfae2011-11-15 15:47:02 +0100144
/* Atomic increment - without any barriers.
 * Atomically performs (*ptr)++ and returns the old value.
 */
__ATOMIC_INLINE__ int32_t
__bionic_atomic_inc(volatile int32_t* ptr)
{
    int32_t prev, tmp, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%4]\n"   /* prev = *ptr (exclusive load) */
            "add %1, %0, #1\n"   /* tmp = prev + 1 */
            "strex %2, %1, [%4]" /* *ptr = tmp; status = 0 on success */
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
            : "r" (ptr)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0)); /* retry if reservation lost */
    return prev;
}
David 'Digit' Turnere31bfae2011-11-15 15:47:02 +0100165
/* Atomic decrement - without any barriers.
 * Atomically performs (*ptr)-- and returns the old value.
 */
__ATOMIC_INLINE__ int32_t
__bionic_atomic_dec(volatile int32_t* ptr)
{
    int32_t prev, tmp, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%4]\n"   /* prev = *ptr (exclusive load) */
            "sub %1, %0, #1\n"   /* tmp = prev - 1 */
            "strex %2, %1, [%4]" /* *ptr = tmp; status = 0 on success */
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
            : "r" (ptr)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0)); /* retry if reservation lost */
    return prev;
}
David 'Digit' Turnere31bfae2011-11-15 15:47:02 +0100186
#endif /* BIONIC_ATOMIC_ARM_H */