/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "atomic.h"

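// ARM has ldrexd/strexd and x86 has cmpxchg8b and 8-byte SSE moves, so both
// can perform 64-bit atomic operations natively; every other target falls
// back to guarding wide accesses with the striped mutexes below.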
#define NEED_SWAP_MUTEXES !defined(__arm__) && !defined(__i386__)

#if NEED_SWAP_MUTEXES
#include <vector>
#include "base/mutex.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "thread.h"
#endif

namespace art {

#if NEED_SWAP_MUTEXES
// We stripe across a bunch of different mutexes to reduce contention.
static const size_t kSwapMutexCount = 32;
static std::vector<Mutex*>* gSwapMutexes;

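// Map an address to its stripe. int64_t values are normally 8-byte aligned,
// so the low three bits of the address carry no information; shift them off
// before taking the index mod the stripe count.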
static Mutex& GetSwapMutex(const volatile int64_t* addr) {
  return *(*gSwapMutexes)[(reinterpret_cast<uintptr_t>(addr) >> 3U) % kSwapMutexCount];
}
#endif

void QuasiAtomic::Startup() {
#if NEED_SWAP_MUTEXES
  gSwapMutexes = new std::vector<Mutex*>;
  for (size_t i = 0; i < kSwapMutexCount; ++i) {
45 gSwapMutexes->push_back(new Mutex(StringPrintf("QuasiAtomic stripe %d", i).c_str()));
  }
#endif
}

void QuasiAtomic::Shutdown() {
#if NEED_SWAP_MUTEXES
  STLDeleteElements(gSwapMutexes);
  delete gSwapMutexes;
#endif
}

int64_t QuasiAtomic::Read64(volatile const int64_t* addr) {
  int64_t value;
#if defined(__arm__)
  // Exclusive loads are defined not to tear, so clearing the exclusive state
  // isn't necessary. If we have LPAE (such as Cortex-A15) then a plain ldrd
  // would suffice.
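  // %H0 is gcc's operand modifier for the high register of a 64-bit register
  // pair, so ldrexd fills both halves of |value| in a single atomic access.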
  __asm__ __volatile__("@ QuasiAtomic::Read64\n"
      "ldrexd     %0, %H0, [%1]"
      : "=&r" (value)
      : "r" (addr));
#elif defined(__i386__)
  __asm__ __volatile__(
      "movq     %1, %0\n"
      : "=x" (value)
      : "m" (*addr));
#else
  MutexLock mu(Thread::Current(), GetSwapMutex(addr));
  return *addr;
#endif
  return value;
}

void QuasiAtomic::Write64(volatile int64_t* addr, int64_t value) {
#if defined(__arm__)
  // The write is done as a swap so that the cache-line is in the exclusive
  // state for the store. If we know the ARM architecture has LPAE (such as
  // Cortex-A15) this isn't necessary and a plain strd will suffice.
  int64_t prev;
  int status;
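  // strexd writes 0 to status on success and non-zero if the exclusive
  // reservation was lost to an intervening access, in which case we retry.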
  do {
    __asm__ __volatile__("@ QuasiAtomic::Write64\n"
        "ldrexd     %0, %H0, [%3]\n"
        "strexd     %1, %4, %H4, [%3]"
        : "=&r" (prev), "=&r" (status), "+m"(*addr)
        : "r" (addr), "r" (value)
        : "cc");
  } while (__builtin_expect(status != 0, 0));
#elif defined(__i386__)
  __asm__ __volatile__(
      "movq     %1, %0"
      : "=m" (*addr)
      : "x" (value));
#else
  MutexLock mu(Thread::Current(), GetSwapMutex(addr));
  *addr = value;
#endif
}

bool QuasiAtomic::Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
#if defined(__arm__)
  int64_t prev;
  int status;
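  // Each pass loads the current 64-bit value exclusively, clears status, and
  // compares both 32-bit halves against old_value; strexdeq attempts the
  // store only if they matched. status is non-zero only when that store lost
  // the exclusive reservation, so the loop retries until the comparison
  // failed or the store succeeded.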
  do {
    __asm__ __volatile__("@ QuasiAtomic::Cas64\n"
        "ldrexd     %0, %H0, [%3]\n"
        "mov        %1, #0\n"
        "teq        %0, %4\n"
        "teqeq      %H0, %H4\n"
        "strexdeq   %1, %5, %H5, [%3]"
        : "=&r" (prev), "=&r" (status), "+m"(*addr)
        : "r" (addr), "Ir" (old_value), "r" (new_value)
        : "cc");
  } while (__builtin_expect(status != 0, 0));
  return prev == old_value;
#elif defined(__i386__)
  // cmpxchg8b implicitly uses %ebx, which is also the PIC register.
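  // It compares %edx:%eax with the memory operand and, if they are equal,
  // stores %ecx:%ebx there and sets ZF; %ebx is saved and restored by hand
  // so the compiler never sees it change.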
  int8_t status;
  __asm__ __volatile__ (
      "pushl          %%ebx\n"
      "movl           (%3), %%ebx\n"
      "movl           4(%3), %%ecx\n"
      "lock cmpxchg8b %1\n"
      "sete           %0\n"
      "popl           %%ebx"
      : "=R" (status), "+m" (*addr)
      : "A" (old_value), "D" (&new_value)
      : "%ecx"
      );
  return status != 0;
#else
  MutexLock mu(Thread::Current(), GetSwapMutex(addr));
  if (*addr == old_value) {
    *addr = new_value;
    return true;
  }
  return false;
#endif
}

bool QuasiAtomic::LongAtomicsUseMutexes() {
#if NEED_SWAP_MUTEXES
  return true;
#else
  return false;
#endif
}

}  // namespace art