blob: 2577dc00eea4deaa723db15eee27cf011230fb34 [file] [log] [blame]
/*
2 * Copyright (C) 2007 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "SharedBufferStack"
18
19#include <stdint.h>
20#include <sys/types.h>
21
22#include <utils/Debug.h>
23#include <utils/Log.h>
24#include <utils/threads.h>
25
Mathias Agopian9cce3252010-02-09 17:46:37 -080026#include <private/surfaceflinger/SharedBufferStack.h>
Mathias Agopiancbb288b2009-09-07 16:32:45 -070027
28#include <ui/Rect.h>
29#include <ui/Region.h>
30
31#define DEBUG_ATOMICS 0
32
33namespace android {
34// ----------------------------------------------------------------------------
35
// Construct the per-process shared control block. The mutex and condition
// variable are created process-shared (Mutex::SHARED / Condition::SHARED)
// because this object lives in memory mapped into both the client process
// and the SurfaceFlinger server process.
SharedClient::SharedClient()
    : lock(Mutex::SHARED), cv(Condition::SHARED)
{
}
40
// Nothing to tear down explicitly; the shared memory region itself is
// managed by the owner of the mapping.
SharedClient::~SharedClient() {
}
43
44
45// these functions are used by the clients
46status_t SharedClient::validate(size_t i) const {
Mathias Agopianbb641242010-05-18 17:06:55 -070047 if (uint32_t(i) >= uint32_t(SharedBufferStack::NUM_LAYERS_MAX))
Mathias Agopiancbb288b2009-09-07 16:32:45 -070048 return BAD_INDEX;
49 return surfaces[i].status;
50}
51
52uint32_t SharedClient::getIdentity(size_t token) const {
53 return uint32_t(surfaces[token].identity);
54}
55
Mathias Agopiancbb288b2009-09-07 16:32:45 -070056// ----------------------------------------------------------------------------
57
58
// Intentionally empty: the object lives in shared memory and is initialized
// explicitly via init() by the server side.
SharedBufferStack::SharedBufferStack()
{
}
62
Mathias Agopian48d819a2009-09-10 19:41:18 -070063void SharedBufferStack::init(int32_t i)
64{
65 inUse = -1;
66 status = NO_ERROR;
67 identity = i;
68}
69
Mathias Agopiancc08e682010-04-15 18:48:26 -070070status_t SharedBufferStack::setCrop(int buffer, const Rect& crop)
71{
72 if (uint32_t(buffer) >= NUM_BUFFER_MAX)
73 return BAD_INDEX;
74
75 buffers[buffer].crop.l = uint16_t(crop.left);
76 buffers[buffer].crop.t = uint16_t(crop.top);
77 buffers[buffer].crop.r = uint16_t(crop.right);
78 buffers[buffer].crop.b = uint16_t(crop.bottom);
79 return NO_ERROR;
80}
81
Mathias Agopiancbb288b2009-09-07 16:32:45 -070082status_t SharedBufferStack::setDirtyRegion(int buffer, const Region& dirty)
83{
84 if (uint32_t(buffer) >= NUM_BUFFER_MAX)
85 return BAD_INDEX;
86
Mathias Agopian245e4d72010-04-21 15:24:11 -070087 FlatRegion& reg(buffers[buffer].dirtyRegion);
88 if (dirty.isEmpty()) {
89 reg.count = 0;
90 return NO_ERROR;
91 }
92
Mathias Agopian1100c8b2010-04-05 16:21:53 -070093 size_t count;
94 Rect const* r = dirty.getArray(&count);
Mathias Agopian1100c8b2010-04-05 16:21:53 -070095 if (count > FlatRegion::NUM_RECT_MAX) {
96 const Rect bounds(dirty.getBounds());
97 reg.count = 1;
Mathias Agopiancc08e682010-04-15 18:48:26 -070098 reg.rects[0].l = uint16_t(bounds.left);
99 reg.rects[0].t = uint16_t(bounds.top);
100 reg.rects[0].r = uint16_t(bounds.right);
101 reg.rects[0].b = uint16_t(bounds.bottom);
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700102 } else {
103 reg.count = count;
104 for (size_t i=0 ; i<count ; i++) {
Mathias Agopiancc08e682010-04-15 18:48:26 -0700105 reg.rects[i].l = uint16_t(r[i].left);
106 reg.rects[i].t = uint16_t(r[i].top);
107 reg.rects[i].r = uint16_t(r[i].right);
108 reg.rects[i].b = uint16_t(r[i].bottom);
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700109 }
110 }
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700111 return NO_ERROR;
112}
113
114Region SharedBufferStack::getDirtyRegion(int buffer) const
115{
116 Region res;
117 if (uint32_t(buffer) >= NUM_BUFFER_MAX)
118 return res;
119
Mathias Agopiancc08e682010-04-15 18:48:26 -0700120 const FlatRegion& reg(buffers[buffer].dirtyRegion);
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700121 if (reg.count > FlatRegion::NUM_RECT_MAX)
122 return res;
123
124 if (reg.count == 1) {
Mathias Agopiancc08e682010-04-15 18:48:26 -0700125 const Rect r(
126 reg.rects[0].l,
127 reg.rects[0].t,
128 reg.rects[0].r,
129 reg.rects[0].b);
130 res.set(r);
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700131 } else {
132 for (size_t i=0 ; i<reg.count ; i++) {
133 const Rect r(
Mathias Agopiancc08e682010-04-15 18:48:26 -0700134 reg.rects[i].l,
135 reg.rects[i].t,
136 reg.rects[i].r,
137 reg.rects[i].b);
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700138 res.orSelf(r);
139 }
140 }
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700141 return res;
142}
143
144// ----------------------------------------------------------------------------
145
// Bind this helper to one surface slot of the shared control block.
// 'identity' is remembered so later waits can detect that the slot was
// reassigned to another surface.
SharedBufferBase::SharedBufferBase(SharedClient* sharedClient,
        int surface, int32_t identity)
    : mSharedClient(sharedClient),
      mSharedStack(sharedClient->surfaces + surface),
      mIdentity(identity)
{
}
153
// Non-owning: the shared memory is managed elsewhere, nothing to release.
SharedBufferBase::~SharedBufferBase()
{
}
157
Mathias Agopian0b3ad462009-10-02 18:12:30 -0700158status_t SharedBufferBase::getStatus() const
159{
160 SharedBufferStack& stack( *mSharedStack );
161 return stack.status;
162}
163
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700164size_t SharedBufferBase::getFrontBuffer() const
165{
166 SharedBufferStack& stack( *mSharedStack );
167 return size_t( stack.head );
168}
169
170String8 SharedBufferBase::dump(char const* prefix) const
171{
172 const size_t SIZE = 1024;
173 char buffer[SIZE];
174 String8 result;
175 SharedBufferStack& stack( *mSharedStack );
176 snprintf(buffer, SIZE,
Mathias Agopianbb641242010-05-18 17:06:55 -0700177 "%s[ head=%2d, available=%2d, queued=%2d ] "
Mathias Agopianb5c45772010-05-17 18:54:19 -0700178 "reallocMask=%08x, inUse=%2d, identity=%d, status=%d",
Mathias Agopianbb641242010-05-18 17:06:55 -0700179 prefix, stack.head, stack.available, stack.queued,
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700180 stack.reallocMask, stack.inUse, stack.identity, stack.status);
181 result.append(buffer);
Mathias Agopianbb641242010-05-18 17:06:55 -0700182 result.append("\n");
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700183 return result;
184}
185
Mathias Agopianb2965332010-04-27 16:41:19 -0700186status_t SharedBufferBase::waitForCondition(const ConditionBase& condition)
187{
188 const SharedBufferStack& stack( *mSharedStack );
189 SharedClient& client( *mSharedClient );
190 const nsecs_t TIMEOUT = s2ns(1);
191 const int identity = mIdentity;
192
193 Mutex::Autolock _l(client.lock);
194 while ((condition()==false) &&
195 (stack.identity == identity) &&
196 (stack.status == NO_ERROR))
197 {
198 status_t err = client.cv.waitRelative(client.lock, TIMEOUT);
199 // handle errors and timeouts
200 if (CC_UNLIKELY(err != NO_ERROR)) {
201 if (err == TIMED_OUT) {
202 if (condition()) {
203 LOGE("waitForCondition(%s) timed out (identity=%d), "
204 "but condition is true! We recovered but it "
205 "shouldn't happen." , condition.name(), stack.identity);
206 break;
207 } else {
208 LOGW("waitForCondition(%s) timed out "
209 "(identity=%d, status=%d). "
210 "CPU may be pegged. trying again.", condition.name(),
211 stack.identity, stack.status);
212 }
213 } else {
214 LOGE("waitForCondition(%s) error (%s) ",
215 condition.name(), strerror(-err));
216 return err;
217 }
218 }
219 }
220 return (stack.identity != mIdentity) ? status_t(BAD_INDEX) : stack.status;
221}
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700222// ============================================================================
223// conditions and updates
224// ============================================================================
225
// Predicate for dequeue(): true when at least one buffer is available
// for the client to draw into.
SharedBufferClient::DequeueCondition::DequeueCondition(
        SharedBufferClient* sbc) : ConditionBase(sbc) {
}
bool SharedBufferClient::DequeueCondition::operator()() const {
    return stack.available > 0;
}
232
// Predicate for lock(): true when buffer 'buf' is safe for the client to
// write — i.e. it is not the front buffer, or something is queued and the
// server is not holding this buffer in use.
SharedBufferClient::LockCondition::LockCondition(
        SharedBufferClient* sbc, int buf) : ConditionBase(sbc), buf(buf) {
}
bool SharedBufferClient::LockCondition::operator()() const {
    // NOTE: if stack.head is messed up, we could crash the client
    // or cause some drawing artifacts. This is okay, as long as it is
    // limited to the client.
    return (buf != stack.index[stack.head] ||
            (stack.queued > 0 && stack.inUse != buf));
}
243
// Predicate for assertReallocate(): true once 'buf' is no longer the front
// buffer. Unlike the client-side predicates, this one runs in the server,
// so a corrupted head (written by the client) must not be dereferenced:
// instead the stack is flagged BAD_VALUE and the wait aborts.
SharedBufferServer::ReallocateCondition::ReallocateCondition(
        SharedBufferBase* sbb, int buf) : ConditionBase(sbb), buf(buf) {
}
bool SharedBufferServer::ReallocateCondition::operator()() const {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX) {
        // if stack.head is messed up, we cannot allow the server to
        // crash (since stack.head is mapped on the client side)
        stack.status = BAD_VALUE;
        return false;
    }
    // TODO: we should also check that buf has been dequeued
    return (buf != stack.index[head]);
}
258
259// ----------------------------------------------------------------------------
260
// Update functor for queue(): atomically bumps the number of queued
// buffers so the server can retire one.
SharedBufferClient::QueueUpdate::QueueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
ssize_t SharedBufferClient::QueueUpdate::operator()() {
    android_atomic_inc(&stack.queued);
    return NO_ERROR;
}
268
// Update functor for undoDequeue(): atomically gives back the buffer that
// dequeue() consumed by re-incrementing the available count.
SharedBufferClient::UndoDequeueUpdate::UndoDequeueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
ssize_t SharedBufferClient::UndoDequeueUpdate::operator()() {
    android_atomic_inc(&stack.available);
    return NO_ERROR;
}
276
// Update functor for unlock(): releases the server's "in use" claim on
// 'lockedBuffer'. Fails with BAD_VALUE if that buffer is not the one
// currently held (a -1 write means "nothing locked").
SharedBufferServer::UnlockUpdate::UnlockUpdate(
        SharedBufferBase* sbb, int lockedBuffer)
    : UpdateBase(sbb), lockedBuffer(lockedBuffer) {
}
ssize_t SharedBufferServer::UnlockUpdate::operator()() {
    if (stack.inUse != lockedBuffer) {
        LOGE("unlocking %d, but currently locked buffer is %d",
                lockedBuffer, stack.inUse);
        return BAD_VALUE;
    }
    android_atomic_write(-1, &stack.inUse);
    return NO_ERROR;
}
290
// Update functor for retireAndLock(): advance the front buffer by one and
// lock the new front buffer for the server. Returns the new head slot
// index, NOT_ENOUGH_DATA if nothing is queued, or BAD_VALUE if the shared
// head is out of range (it is client-writable and untrusted).
SharedBufferServer::RetireUpdate::RetireUpdate(
        SharedBufferBase* sbb, int numBuffers)
    : UpdateBase(sbb), numBuffers(numBuffers) {
}
ssize_t SharedBufferServer::RetireUpdate::operator()() {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    // Preventively lock the current buffer before updating queued.
    android_atomic_write(stack.index[head], &stack.inUse);

    // Decrement the number of queued buffers
    // (CAS loop: another thread may be queueing concurrently).
    int32_t queued;
    do {
        queued = stack.queued;
        if (queued == 0) {
            return NOT_ENOUGH_DATA;
        }
    } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));

    // lock the buffer before advancing head, which automatically unlocks
    // the buffer we preventively locked upon entering this function

    head = (head + 1) % numBuffers;
    android_atomic_write(stack.index[head], &stack.inUse);

    // head is only modified here, so we don't need to use cmpxchg
    android_atomic_write(head, &stack.head);

    // now that head has moved, we can increment the number of available buffers
    android_atomic_inc(&stack.available);
    return head;
}
325
Mathias Agopianb58b5d72009-09-10 16:55:13 -0700326SharedBufferServer::StatusUpdate::StatusUpdate(
327 SharedBufferBase* sbb, status_t status)
328 : UpdateBase(sbb), status(status) {
329}
330
331ssize_t SharedBufferServer::StatusUpdate::operator()() {
332 android_atomic_write(status, &stack.status);
333 return NO_ERROR;
334}
335
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700336// ============================================================================
337
// Client-side view of one surface's buffer stack. 'num' is the current
// buffer count; tail/queued_head are derived from the shared state so a
// client attaching to an existing stack starts in sync with the server.
SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num), tail(0), undoDequeueTail(0)
{
    SharedBufferStack& stack( *mSharedStack );
    tail = computeTail();
    queued_head = stack.head;
}
347
Mathias Agopianbb641242010-05-18 17:06:55 -0700348int32_t SharedBufferClient::computeTail() const
349{
350 SharedBufferStack& stack( *mSharedStack );
351 return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
352}
353
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700354ssize_t SharedBufferClient::dequeue()
355{
Mathias Agopian40d57992009-09-11 19:18:20 -0700356 SharedBufferStack& stack( *mSharedStack );
357
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700358 if (stack.head == tail && stack.available == mNumBuffers) {
Mathias Agopian40d57992009-09-11 19:18:20 -0700359 LOGW("dequeue: tail=%d, head=%d, avail=%d, queued=%d",
360 tail, stack.head, stack.available, stack.queued);
361 }
Mathias Agopianbb641242010-05-18 17:06:55 -0700362
363 RWLock::AutoRLock _rd(mLock);
364
Mathias Agopian86f73292009-09-17 01:35:28 -0700365 const nsecs_t dequeueTime = systemTime(SYSTEM_TIME_THREAD);
Mathias Agopian40d57992009-09-11 19:18:20 -0700366
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700367 //LOGD("[%d] about to dequeue a buffer",
368 // mSharedStack->identity);
369 DequeueCondition condition(this);
370 status_t err = waitForCondition(condition);
371 if (err != NO_ERROR)
372 return ssize_t(err);
373
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700374 // NOTE: 'stack.available' is part of the conditions, however
375 // decrementing it, never changes any conditions, so we don't need
376 // to do this as part of an update.
377 if (android_atomic_dec(&stack.available) == 0) {
378 LOGW("dequeue probably called from multiple threads!");
379 }
380
Mathias Agopianc0a91642010-04-27 21:08:20 -0700381 undoDequeueTail = tail;
382 int dequeued = stack.index[tail];
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700383 tail = ((tail+1 >= mNumBuffers) ? 0 : tail+1);
Mathias Agopianc0a91642010-04-27 21:08:20 -0700384 LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail++=%d, %s",
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700385 dequeued, tail, dump("").string());
Mathias Agopian40d57992009-09-11 19:18:20 -0700386
Mathias Agopian86f73292009-09-17 01:35:28 -0700387 mDequeueTime[dequeued] = dequeueTime;
388
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700389 return dequeued;
390}
391
// Roll back the most recent dequeue(): returns the buffer to the available
// pool and restores the local tail. 'buf' is currently unused — only the
// previous dequeue can be undone.
status_t SharedBufferClient::undoDequeue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    // TODO: we can only undo the previous dequeue, we should
    // enforce that in the api
    UndoDequeueUpdate update(this);
    status_t err = updateCondition( update );
    if (err == NO_ERROR) {
        tail = undoDequeueTail;
    }
    return err;
}
405
406status_t SharedBufferClient::lock(int buf)
407{
Mathias Agopianbb641242010-05-18 17:06:55 -0700408 RWLock::AutoRLock _rd(mLock);
409
Mathias Agopianc0a91642010-04-27 21:08:20 -0700410 SharedBufferStack& stack( *mSharedStack );
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700411 LockCondition condition(this, buf);
Mathias Agopian86f73292009-09-17 01:35:28 -0700412 status_t err = waitForCondition(condition);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700413 return err;
414}
415
// Hand buffer 'buf' to the server for composition: record it in the shared
// index ring at the next queued position, then atomically bump the queued
// count (QueueUpdate) so the server can retire it. Also accumulates
// dequeue-to-queue timing statistics.
status_t SharedBufferClient::queue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    SharedBufferStack& stack( *mSharedStack );

    // Publish the buffer index BEFORE incrementing 'queued', so the server
    // never observes a count that includes an unwritten slot.
    queued_head = (queued_head + 1) % mNumBuffers;
    stack.index[queued_head] = buf;

    QueueUpdate update(this);
    status_t err = updateCondition( update );
    LOGD_IF(DEBUG_ATOMICS, "queued=%d, %s", buf, dump("").string());

    const nsecs_t now = systemTime(SYSTEM_TIME_THREAD);
    stack.stats.totalTime = ns2us(now - mDequeueTime[buf]);
    return err;
}
433
Mathias Agopianc0a91642010-04-27 21:08:20 -0700434bool SharedBufferClient::needNewBuffer(int buf) const
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700435{
436 SharedBufferStack& stack( *mSharedStack );
Mathias Agopiana0b3f1d2010-05-21 14:51:33 -0700437 const uint32_t mask = 1<<(31-buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700438 return (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0;
439}
440
Mathias Agopianc0a91642010-04-27 21:08:20 -0700441status_t SharedBufferClient::setCrop(int buf, const Rect& crop)
Mathias Agopiancc08e682010-04-15 18:48:26 -0700442{
443 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianc0a91642010-04-27 21:08:20 -0700444 return stack.setCrop(buf, crop);
Mathias Agopiancc08e682010-04-15 18:48:26 -0700445}
446
Mathias Agopianc0a91642010-04-27 21:08:20 -0700447status_t SharedBufferClient::setDirtyRegion(int buf, const Region& reg)
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700448{
449 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianc0a91642010-04-27 21:08:20 -0700450 return stack.setDirtyRegion(buf, reg);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700451}
452
Mathias Agopianbb641242010-05-18 17:06:55 -0700453status_t SharedBufferClient::setBufferCount(
454 int bufferCount, const SetBufferCountCallback& ipc)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700455{
Mathias Agopianb5c45772010-05-17 18:54:19 -0700456 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianbb641242010-05-18 17:06:55 -0700457 if (uint32_t(bufferCount) >= SharedBufferStack::NUM_BUFFER_MAX)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700458 return BAD_VALUE;
Mathias Agopianbb641242010-05-18 17:06:55 -0700459
Mathias Agopianf10d7fd2010-05-21 14:19:50 -0700460 if (uint32_t(bufferCount) < SharedBufferStack::NUM_BUFFER_MIN)
461 return BAD_VALUE;
462
Mathias Agopianbb641242010-05-18 17:06:55 -0700463 RWLock::AutoWLock _wr(mLock);
464
465 status_t err = ipc(bufferCount);
466 if (err == NO_ERROR) {
467 mNumBuffers = bufferCount;
468 queued_head = (stack.head + stack.queued) % mNumBuffers;
469 }
470 return err;
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700471}
472
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700473// ----------------------------------------------------------------------------
474
// Server-side view of one surface's buffer stack. Initializes the shared
// state: head starts at the last slot so the first retire moves to slot 0,
// all 'num' buffers are available, and the index ring is the identity
// mapping (slot i holds buffer i).
SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num)
{
    mSharedStack->init(identity);
    mSharedStack->head = num-1;
    mSharedStack->available = num;
    mSharedStack->queued = 0;
    mSharedStack->reallocMask = 0;
    memset(mSharedStack->buffers, 0, sizeof(mSharedStack->buffers));
    for (int i=0 ; i<num ; i++) {
        mBufferList.add(i);
        mSharedStack->index[i] = i;
    }
}
491
// Retire the oldest queued buffer and lock it for composition. Returns the
// buffer index (translated through the shared index ring), or a negative
// error. The slot value returned by RetireUpdate is range-checked before
// being used to index the client-writable ring.
ssize_t SharedBufferServer::retireAndLock()
{
    RWLock::AutoRLock _l(mLock);

    RetireUpdate update(this, mNumBuffers);
    ssize_t buf = updateCondition( update );
    if (buf >= 0) {
        if (uint32_t(buf) >= SharedBufferStack::NUM_BUFFER_MAX)
            return BAD_VALUE;
        SharedBufferStack& stack( *mSharedStack );
        buf = stack.index[buf];
        LOGD_IF(DEBUG_ATOMICS && buf>=0, "retire=%d, %s",
                int(buf), dump("").string());
    }
    return buf;
}
508
Mathias Agopianc0a91642010-04-27 21:08:20 -0700509status_t SharedBufferServer::unlock(int buf)
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700510{
Mathias Agopianc0a91642010-04-27 21:08:20 -0700511 UnlockUpdate update(this, buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700512 status_t err = updateCondition( update );
513 return err;
514}
515
Mathias Agopianb58b5d72009-09-10 16:55:13 -0700516void SharedBufferServer::setStatus(status_t status)
517{
Mathias Agopian0b3ad462009-10-02 18:12:30 -0700518 if (status < NO_ERROR) {
519 StatusUpdate update(this, status);
520 updateCondition( update );
521 }
Mathias Agopianb58b5d72009-09-10 16:55:13 -0700522}
523
Mathias Agopiana138f892010-05-21 17:24:35 -0700524status_t SharedBufferServer::reallocateAll()
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700525{
Mathias Agopianbb641242010-05-18 17:06:55 -0700526 RWLock::AutoRLock _l(mLock);
527
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700528 SharedBufferStack& stack( *mSharedStack );
Mathias Agopiana0b3f1d2010-05-21 14:51:33 -0700529 uint32_t mask = mBufferList.getMask();
Mathias Agopiana138f892010-05-21 17:24:35 -0700530 android_atomic_or(mask, &stack.reallocMask);
531 return NO_ERROR;
532}
533
// Same as reallocateAll(), but leaves 'buffer' untouched (e.g. the buffer
// currently on screen). Works on a copy of the list so the member list is
// not modified.
status_t SharedBufferServer::reallocateAllExcept(int buffer)
{
    RWLock::AutoRLock _l(mLock);

    SharedBufferStack& stack( *mSharedStack );
    BufferList temp(mBufferList);
    temp.remove(buffer);
    uint32_t mask = temp.getMask();
    android_atomic_or(mask, &stack.reallocMask);
    return NO_ERROR;
}
545
Mathias Agopiane7005012009-10-07 16:44:10 -0700546int32_t SharedBufferServer::getQueuedCount() const
547{
548 SharedBufferStack& stack( *mSharedStack );
549 return stack.queued;
550}
551
Mathias Agopianc0a91642010-04-27 21:08:20 -0700552status_t SharedBufferServer::assertReallocate(int buf)
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700553{
Mathias Agopianbb641242010-05-18 17:06:55 -0700554 /*
555 * NOTE: it's safe to hold mLock for read while waiting for
556 * the ReallocateCondition because that condition is not updated
557 * by the thread that holds mLock for write.
558 */
559 RWLock::AutoRLock _l(mLock);
560
Mathias Agopiand606de62010-05-10 20:06:11 -0700561 // TODO: need to validate "buf"
Mathias Agopianc0a91642010-04-27 21:08:20 -0700562 ReallocateCondition condition(this, buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700563 status_t err = waitForCondition(condition);
564 return err;
565}
566
Mathias Agopianc0a91642010-04-27 21:08:20 -0700567Region SharedBufferServer::getDirtyRegion(int buf) const
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700568{
569 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianc0a91642010-04-27 21:08:20 -0700570 return stack.getDirtyRegion(buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700571}
572
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700573/*
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700574 * NOTE: this is not thread-safe on the server-side, meaning
575 * 'head' cannot move during this operation. The client-side
576 * can safely operate an usual.
577 *
578 */
579status_t SharedBufferServer::resize(int newNumBuffers)
580{
Mathias Agopianbb641242010-05-18 17:06:55 -0700581 if (uint32_t(newNumBuffers) >= SharedBufferStack::NUM_BUFFER_MAX)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700582 return BAD_VALUE;
583
Mathias Agopianbb641242010-05-18 17:06:55 -0700584 RWLock::AutoWLock _l(mLock);
585
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700586 // for now we're not supporting shrinking
587 const int numBuffers = mNumBuffers;
588 if (newNumBuffers < numBuffers)
589 return BAD_VALUE;
590
591 SharedBufferStack& stack( *mSharedStack );
592 const int extra = newNumBuffers - numBuffers;
593
594 // read the head, make sure it's valid
595 int32_t head = stack.head;
Mathias Agopianbb641242010-05-18 17:06:55 -0700596 if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700597 return BAD_VALUE;
598
599 int base = numBuffers;
600 int32_t avail = stack.available;
601 int tail = head - avail + 1;
Mathias Agopiand6297f72010-05-17 17:27:26 -0700602
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700603 if (tail >= 0) {
604 int8_t* const index = const_cast<int8_t*>(stack.index);
605 const int nb = numBuffers - head;
606 memmove(&index[head + extra], &index[head], nb);
607 base = head;
608 // move head 'extra' ahead, this doesn't impact stack.index[head];
609 stack.head = head + extra;
610 }
611 stack.available += extra;
612
613 // fill the new free space with unused buffers
614 BufferList::const_iterator curr(mBufferList.free_begin());
615 for (int i=0 ; i<extra ; i++) {
Mathias Agopiand6297f72010-05-17 17:27:26 -0700616 stack.index[base+i] = *curr;
617 mBufferList.add(*curr);
618 ++curr;
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700619 }
620
621 mNumBuffers = newNumBuffers;
622 return NO_ERROR;
623}
624
Mathias Agopian86f73292009-09-17 01:35:28 -0700625SharedBufferStack::Statistics SharedBufferServer::getStats() const
626{
627 SharedBufferStack& stack( *mSharedStack );
628 return stack.stats;
629}
630
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700631// ---------------------------------------------------------------------------
632status_t SharedBufferServer::BufferList::add(int value)
633{
634 if (uint32_t(value) >= mCapacity)
635 return BAD_VALUE;
636 uint32_t mask = 1<<(31-value);
637 if (mList & mask)
638 return ALREADY_EXISTS;
639 mList |= mask;
640 return NO_ERROR;
641}
642
643status_t SharedBufferServer::BufferList::remove(int value)
644{
645 if (uint32_t(value) >= mCapacity)
646 return BAD_VALUE;
647 uint32_t mask = 1<<(31-value);
648 if (!(mList & mask))
649 return NAME_NOT_FOUND;
650 mList &= ~mask;
651 return NO_ERROR;
652}
653
Mathias Agopian86f73292009-09-17 01:35:28 -0700654
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700655// ---------------------------------------------------------------------------
656}; // namespace android