blob: 8d031453b03ccc2f1f76bea794f3e08b9cae6867 [file] [log] [blame]
Mathias Agopiancbb288b2009-09-07 16:32:45 -07001/*
2 * Copyright (C) 2007 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "SharedBufferStack"
18
19#include <stdint.h>
20#include <sys/types.h>
21
22#include <utils/Debug.h>
23#include <utils/Log.h>
24#include <utils/threads.h>
25
Mathias Agopian9cce3252010-02-09 17:46:37 -080026#include <private/surfaceflinger/SharedBufferStack.h>
Mathias Agopiancbb288b2009-09-07 16:32:45 -070027
28#include <ui/Rect.h>
29#include <ui/Region.h>
30
31#define DEBUG_ATOMICS 0
32
33namespace android {
34// ----------------------------------------------------------------------------
35
// SharedClient lives in a memory block mapped into both the client and the
// SurfaceFlinger process, so its mutex and condition variable must be
// created in process-shared mode.
SharedClient::SharedClient()
    : lock(Mutex::SHARED), cv(Condition::SHARED)
{
}

SharedClient::~SharedClient() {
}
43
44
45// these functions are used by the clients
46status_t SharedClient::validate(size_t i) const {
Mathias Agopianbb641242010-05-18 17:06:55 -070047 if (uint32_t(i) >= uint32_t(SharedBufferStack::NUM_LAYERS_MAX))
Mathias Agopiancbb288b2009-09-07 16:32:45 -070048 return BAD_INDEX;
49 return surfaces[i].status;
50}
51
Mathias Agopiancbb288b2009-09-07 16:32:45 -070052// ----------------------------------------------------------------------------
53
54
SharedBufferStack::SharedBufferStack()
{
}

// (Re)initializes the shared stack for a surface: no buffer is locked by
// the server (inUse == -1), the status is cleared, and the identity token
// is recorded so clients can detect a recycled/replaced surface.
void SharedBufferStack::init(int32_t i)
{
    inUse = -1;
    status = NO_ERROR;
    identity = i;
}
65
Mathias Agopiancc08e682010-04-15 18:48:26 -070066status_t SharedBufferStack::setCrop(int buffer, const Rect& crop)
67{
68 if (uint32_t(buffer) >= NUM_BUFFER_MAX)
69 return BAD_INDEX;
70
71 buffers[buffer].crop.l = uint16_t(crop.left);
72 buffers[buffer].crop.t = uint16_t(crop.top);
73 buffers[buffer].crop.r = uint16_t(crop.right);
74 buffers[buffer].crop.b = uint16_t(crop.bottom);
75 return NO_ERROR;
76}
77
Mathias Agopiancbb288b2009-09-07 16:32:45 -070078status_t SharedBufferStack::setDirtyRegion(int buffer, const Region& dirty)
79{
80 if (uint32_t(buffer) >= NUM_BUFFER_MAX)
81 return BAD_INDEX;
82
Mathias Agopian245e4d72010-04-21 15:24:11 -070083 FlatRegion& reg(buffers[buffer].dirtyRegion);
84 if (dirty.isEmpty()) {
85 reg.count = 0;
86 return NO_ERROR;
87 }
88
Mathias Agopian1100c8b2010-04-05 16:21:53 -070089 size_t count;
90 Rect const* r = dirty.getArray(&count);
Mathias Agopian1100c8b2010-04-05 16:21:53 -070091 if (count > FlatRegion::NUM_RECT_MAX) {
92 const Rect bounds(dirty.getBounds());
93 reg.count = 1;
Mathias Agopiancc08e682010-04-15 18:48:26 -070094 reg.rects[0].l = uint16_t(bounds.left);
95 reg.rects[0].t = uint16_t(bounds.top);
96 reg.rects[0].r = uint16_t(bounds.right);
97 reg.rects[0].b = uint16_t(bounds.bottom);
Mathias Agopian1100c8b2010-04-05 16:21:53 -070098 } else {
99 reg.count = count;
100 for (size_t i=0 ; i<count ; i++) {
Mathias Agopiancc08e682010-04-15 18:48:26 -0700101 reg.rects[i].l = uint16_t(r[i].left);
102 reg.rects[i].t = uint16_t(r[i].top);
103 reg.rects[i].r = uint16_t(r[i].right);
104 reg.rects[i].b = uint16_t(r[i].bottom);
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700105 }
106 }
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700107 return NO_ERROR;
108}
109
// Rebuilds a Region from the FlatRegion stored in shared memory for
// 'buffer'. Returns an empty Region on a bad index or if the stored
// count is corrupt (shared memory is writable by the other process).
Region SharedBufferStack::getDirtyRegion(int buffer) const
{
    Region res;
    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
        return res;

    const FlatRegion& reg(buffers[buffer].dirtyRegion);
    if (reg.count > FlatRegion::NUM_RECT_MAX)
        return res;  // defensive: count can't be trusted blindly

    if (reg.count == 1) {
        // common case: single rect, set directly
        const Rect r(
                reg.rects[0].l,
                reg.rects[0].t,
                reg.rects[0].r,
                reg.rects[0].b);
        res.set(r);
    } else {
        // general case: OR each stored rect into the region
        for (size_t i=0 ; i<reg.count ; i++) {
            const Rect r(
                    reg.rects[i].l,
                    reg.rects[i].t,
                    reg.rects[i].r,
                    reg.rects[i].b);
            res.orSelf(r);
        }
    }
    return res;
}
139
140// ----------------------------------------------------------------------------
141
// Base class shared by the client- and server-side views of one surface's
// buffer stack. 'surface' indexes into the SharedClient's surfaces array;
// 'identity' is the token used to detect surface recycling.
SharedBufferBase::SharedBufferBase(SharedClient* sharedClient,
        int surface, int32_t identity)
    : mSharedClient(sharedClient),
      mSharedStack(sharedClient->surfaces + surface),
      mIdentity(identity)
{
}

SharedBufferBase::~SharedBufferBase()
{
}
153
Mathias Agopian0b3ad462009-10-02 18:12:30 -0700154status_t SharedBufferBase::getStatus() const
155{
156 SharedBufferStack& stack( *mSharedStack );
157 return stack.status;
158}
159
Mathias Agopian7e27f052010-05-28 14:22:23 -0700160int32_t SharedBufferBase::getIdentity() const
161{
162 SharedBufferStack& stack( *mSharedStack );
163 return stack.identity;
164}
165
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700166size_t SharedBufferBase::getFrontBuffer() const
167{
168 SharedBufferStack& stack( *mSharedStack );
169 return size_t( stack.head );
170}
171
// Formats a one-line human-readable snapshot of the shared stack state
// (head/available/queued counters plus flags), prefixed with 'prefix'.
// For debugging/logging only.
String8 SharedBufferBase::dump(char const* prefix) const
{
    const size_t SIZE = 1024;
    char buffer[SIZE];
    String8 result;
    SharedBufferStack& stack( *mSharedStack );
    snprintf(buffer, SIZE,
            "%s[ head=%2d, available=%2d, queued=%2d ] "
            "reallocMask=%08x, inUse=%2d, identity=%d, status=%d",
            prefix, stack.head, stack.available, stack.queued,
            stack.reallocMask, stack.inUse, stack.identity, stack.status);
    result.append(buffer);
    result.append("\n");
    return result;
}
187
Mathias Agopianb2965332010-04-27 16:41:19 -0700188status_t SharedBufferBase::waitForCondition(const ConditionBase& condition)
189{
190 const SharedBufferStack& stack( *mSharedStack );
191 SharedClient& client( *mSharedClient );
192 const nsecs_t TIMEOUT = s2ns(1);
193 const int identity = mIdentity;
194
195 Mutex::Autolock _l(client.lock);
196 while ((condition()==false) &&
197 (stack.identity == identity) &&
198 (stack.status == NO_ERROR))
199 {
200 status_t err = client.cv.waitRelative(client.lock, TIMEOUT);
201 // handle errors and timeouts
202 if (CC_UNLIKELY(err != NO_ERROR)) {
203 if (err == TIMED_OUT) {
204 if (condition()) {
205 LOGE("waitForCondition(%s) timed out (identity=%d), "
206 "but condition is true! We recovered but it "
207 "shouldn't happen." , condition.name(), stack.identity);
208 break;
209 } else {
210 LOGW("waitForCondition(%s) timed out "
211 "(identity=%d, status=%d). "
212 "CPU may be pegged. trying again.", condition.name(),
213 stack.identity, stack.status);
214 }
215 } else {
216 LOGE("waitForCondition(%s) error (%s) ",
217 condition.name(), strerror(-err));
218 return err;
219 }
220 }
221 }
222 return (stack.identity != mIdentity) ? status_t(BAD_INDEX) : stack.status;
223}
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700224// ============================================================================
225// conditions and updates
226// ============================================================================
227
// Condition used by dequeue(): satisfied as soon as at least one buffer
// is available to the client.
SharedBufferClient::DequeueCondition::DequeueCondition(
        SharedBufferClient* sbc) : ConditionBase(sbc)  {
}
bool SharedBufferClient::DequeueCondition::operator()() const {
    return stack.available > 0;
}
234
// Condition used by lock(): satisfied when 'buf' is no longer the
// displayed front buffer, or when frames are queued behind it and the
// server isn't holding it (inUse != buf).
SharedBufferClient::LockCondition::LockCondition(
        SharedBufferClient* sbc, int buf) : ConditionBase(sbc), buf(buf) {
}
bool SharedBufferClient::LockCondition::operator()() const {
    // NOTE: if stack.head is messed up, we could crash the client
    // or cause some drawing artifacts. This is okay, as long as it is
    // limited to the client.
    return (buf != stack.index[stack.head] ||
            (stack.queued > 0 && stack.inUse != buf));
}
245
// Condition used by assertReallocate(): satisfied once 'buf' is no
// longer the front buffer. The head index is validated first because it
// lives in client-writable shared memory and must not crash the server.
SharedBufferServer::ReallocateCondition::ReallocateCondition(
        SharedBufferBase* sbb, int buf) : ConditionBase(sbb), buf(buf) {
}
bool SharedBufferServer::ReallocateCondition::operator()() const {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX) {
        // if stack.head is messed up, we cannot allow the server to
        // crash (since stack.head is mapped on the client side)
        stack.status = BAD_VALUE;
        return false;
    }
    // TODO: we should also check that buf has been dequeued
    return (buf != stack.index[head]);
}
260
261// ----------------------------------------------------------------------------
262
// Update applied when the client queues a buffer: atomically bumps the
// shared 'queued' counter (the server consumes it in RetireUpdate).
SharedBufferClient::QueueUpdate::QueueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
ssize_t SharedBufferClient::QueueUpdate::operator()() {
    android_atomic_inc(&stack.queued);
    return NO_ERROR;
}
270
// Update applied when the client cancels a dequeue: atomically gives the
// buffer back by re-incrementing the shared 'available' counter.
SharedBufferClient::UndoDequeueUpdate::UndoDequeueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
ssize_t SharedBufferClient::UndoDequeueUpdate::operator()() {
    android_atomic_inc(&stack.available);
    return NO_ERROR;
}
278
279SharedBufferServer::UnlockUpdate::UnlockUpdate(
280 SharedBufferBase* sbb, int lockedBuffer)
281 : UpdateBase(sbb), lockedBuffer(lockedBuffer) {
282}
283ssize_t SharedBufferServer::UnlockUpdate::operator()() {
284 if (stack.inUse != lockedBuffer) {
285 LOGE("unlocking %d, but currently locked buffer is %d",
286 lockedBuffer, stack.inUse);
287 return BAD_VALUE;
288 }
289 android_atomic_write(-1, &stack.inUse);
290 return NO_ERROR;
291}
292
// Update applied when the server retires the current front buffer and
// advances to the next queued one. Returns the new head index, BAD_VALUE
// on a corrupt head, or NOT_ENOUGH_DATA if nothing is queued.
SharedBufferServer::RetireUpdate::RetireUpdate(
        SharedBufferBase* sbb, int numBuffers)
    : UpdateBase(sbb), numBuffers(numBuffers) {
}
ssize_t SharedBufferServer::RetireUpdate::operator()() {
    int32_t head = stack.head;
    // head lives in client-writable memory; validate before indexing
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    // Preventively lock the current buffer before updating queued.
    android_atomic_write(stack.index[head], &stack.inUse);

    // Decrement the number of queued buffers
    int32_t queued;
    do {
        queued = stack.queued;
        if (queued == 0) {
            return NOT_ENOUGH_DATA;
        }
        // retry if the client queued concurrently
    } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));

    // lock the buffer before advancing head, which automatically unlocks
    // the buffer we preventively locked upon entering this function

    head = (head + 1) % numBuffers;
    android_atomic_write(stack.index[head], &stack.inUse);

    // head is only modified here, so we don't need to use cmpxchg
    android_atomic_write(head, &stack.head);

    // now that head has moved, we can increment the number of available buffers
    android_atomic_inc(&stack.available);
    return head;
}
327
Mathias Agopianb58b5d72009-09-10 16:55:13 -0700328SharedBufferServer::StatusUpdate::StatusUpdate(
329 SharedBufferBase* sbb, status_t status)
330 : UpdateBase(sbb), status(status) {
331}
332
333ssize_t SharedBufferServer::StatusUpdate::operator()() {
334 android_atomic_write(status, &stack.status);
335 return NO_ERROR;
336}
337
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700338// ============================================================================
339
// Client-side view of a surface's buffer stack. Derives the local tail
// pointer from the server-initialized shared state, and starts queuing
// at the current head position.
SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num), tail(0), undoDequeueTail(0)
{
    SharedBufferStack& stack( *mSharedStack );
    tail = computeTail();
    queued_head = stack.head;
}
349
Mathias Agopianbb641242010-05-18 17:06:55 -0700350int32_t SharedBufferClient::computeTail() const
351{
352 SharedBufferStack& stack( *mSharedStack );
353 return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
354}
355
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700356ssize_t SharedBufferClient::dequeue()
357{
Mathias Agopian40d57992009-09-11 19:18:20 -0700358 SharedBufferStack& stack( *mSharedStack );
359
Mathias Agopian1100c8b2010-04-05 16:21:53 -0700360 if (stack.head == tail && stack.available == mNumBuffers) {
Mathias Agopian40d57992009-09-11 19:18:20 -0700361 LOGW("dequeue: tail=%d, head=%d, avail=%d, queued=%d",
362 tail, stack.head, stack.available, stack.queued);
363 }
Mathias Agopianbb641242010-05-18 17:06:55 -0700364
365 RWLock::AutoRLock _rd(mLock);
366
Mathias Agopian86f73292009-09-17 01:35:28 -0700367 const nsecs_t dequeueTime = systemTime(SYSTEM_TIME_THREAD);
Mathias Agopian40d57992009-09-11 19:18:20 -0700368
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700369 //LOGD("[%d] about to dequeue a buffer",
370 // mSharedStack->identity);
371 DequeueCondition condition(this);
372 status_t err = waitForCondition(condition);
373 if (err != NO_ERROR)
374 return ssize_t(err);
375
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700376 // NOTE: 'stack.available' is part of the conditions, however
377 // decrementing it, never changes any conditions, so we don't need
378 // to do this as part of an update.
379 if (android_atomic_dec(&stack.available) == 0) {
380 LOGW("dequeue probably called from multiple threads!");
381 }
382
Mathias Agopianc0a91642010-04-27 21:08:20 -0700383 undoDequeueTail = tail;
384 int dequeued = stack.index[tail];
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700385 tail = ((tail+1 >= mNumBuffers) ? 0 : tail+1);
Mathias Agopianc0a91642010-04-27 21:08:20 -0700386 LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail++=%d, %s",
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700387 dequeued, tail, dump("").string());
Mathias Agopian40d57992009-09-11 19:18:20 -0700388
Mathias Agopian86f73292009-09-17 01:35:28 -0700389 mDequeueTime[dequeued] = dequeueTime;
390
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700391 return dequeued;
392}
393
// Cancels the most recent dequeue(): returns the buffer to the shared
// 'available' pool and restores the tail saved by dequeue().
status_t SharedBufferClient::undoDequeue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    // TODO: we can only undo the previous dequeue, we should
    // enforce that in the api
    UndoDequeueUpdate update(this);
    status_t err = updateCondition( update );
    if (err == NO_ERROR) {
        tail = undoDequeueTail;
    }
    return err;
}
407
408status_t SharedBufferClient::lock(int buf)
409{
Mathias Agopianbb641242010-05-18 17:06:55 -0700410 RWLock::AutoRLock _rd(mLock);
411
Mathias Agopianc0a91642010-04-27 21:08:20 -0700412 SharedBufferStack& stack( *mSharedStack );
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700413 LockCondition condition(this, buf);
Mathias Agopian86f73292009-09-17 01:35:28 -0700414 status_t err = waitForCondition(condition);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700415 return err;
416}
417
// Hands buffer 'buf' to the server for display: records it at the next
// queued slot in the shared index, then atomically bumps the shared
// 'queued' counter. Also updates per-frame timing statistics.
status_t SharedBufferClient::queue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    SharedBufferStack& stack( *mSharedStack );

    // publish the buffer index before the queued counter is incremented
    queued_head = (queued_head + 1) % mNumBuffers;
    stack.index[queued_head] = buf;

    QueueUpdate update(this);
    status_t err = updateCondition( update );
    LOGD_IF(DEBUG_ATOMICS, "queued=%d, %s", buf, dump("").string());

    // time from dequeue() to queue(), in microseconds
    const nsecs_t now = systemTime(SYSTEM_TIME_THREAD);
    stack.stats.totalTime = ns2us(now - mDequeueTime[buf]);
    return err;
}
435
Mathias Agopianc0a91642010-04-27 21:08:20 -0700436bool SharedBufferClient::needNewBuffer(int buf) const
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700437{
438 SharedBufferStack& stack( *mSharedStack );
Mathias Agopiana0b3f1d2010-05-21 14:51:33 -0700439 const uint32_t mask = 1<<(31-buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700440 return (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0;
441}
442
Mathias Agopianc0a91642010-04-27 21:08:20 -0700443status_t SharedBufferClient::setCrop(int buf, const Rect& crop)
Mathias Agopiancc08e682010-04-15 18:48:26 -0700444{
445 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianc0a91642010-04-27 21:08:20 -0700446 return stack.setCrop(buf, crop);
Mathias Agopiancc08e682010-04-15 18:48:26 -0700447}
448
Mathias Agopianc0a91642010-04-27 21:08:20 -0700449status_t SharedBufferClient::setDirtyRegion(int buf, const Region& reg)
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700450{
451 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianc0a91642010-04-27 21:08:20 -0700452 return stack.setDirtyRegion(buf, reg);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700453}
454
Mathias Agopianbb641242010-05-18 17:06:55 -0700455status_t SharedBufferClient::setBufferCount(
456 int bufferCount, const SetBufferCountCallback& ipc)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700457{
Mathias Agopianb5c45772010-05-17 18:54:19 -0700458 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianbb641242010-05-18 17:06:55 -0700459 if (uint32_t(bufferCount) >= SharedBufferStack::NUM_BUFFER_MAX)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700460 return BAD_VALUE;
Mathias Agopianbb641242010-05-18 17:06:55 -0700461
Mathias Agopianf10d7fd2010-05-21 14:19:50 -0700462 if (uint32_t(bufferCount) < SharedBufferStack::NUM_BUFFER_MIN)
463 return BAD_VALUE;
464
Mathias Agopianbb641242010-05-18 17:06:55 -0700465 RWLock::AutoWLock _wr(mLock);
466
467 status_t err = ipc(bufferCount);
468 if (err == NO_ERROR) {
469 mNumBuffers = bufferCount;
470 queued_head = (stack.head + stack.queued) % mNumBuffers;
471 }
472 return err;
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700473}
474
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700475// ----------------------------------------------------------------------------
476
// Server-side view of a surface's buffer stack. The server owns
// initialization of the shared state: all 'num' buffers start available,
// nothing queued, and the index table maps slot i to buffer i.
SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num)
{
    mSharedStack->init(identity);
    // head starts at the last slot so the first retire advances to slot 0
    mSharedStack->head = num-1;
    mSharedStack->available = num;
    mSharedStack->queued = 0;
    mSharedStack->reallocMask = 0;
    memset(mSharedStack->buffers, 0, sizeof(mSharedStack->buffers));
    for (int i=0 ; i<num ; i++) {
        mBufferList.add(i);
        mSharedStack->index[i] = i;
    }
}
493
// Retires the current front buffer and locks the next queued one for
// display. Returns the retired buffer's identity (via the shared index
// table), or a negative error (e.g. NOT_ENOUGH_DATA when nothing is
// queued, BAD_VALUE on corrupt shared state).
ssize_t SharedBufferServer::retireAndLock()
{
    RWLock::AutoRLock _l(mLock);

    RetireUpdate update(this, mNumBuffers);
    ssize_t buf = updateCondition( update );
    if (buf >= 0) {
        // 'buf' is a slot position; re-validate before indexing since it
        // derives from client-writable shared memory
        if (uint32_t(buf) >= SharedBufferStack::NUM_BUFFER_MAX)
            return BAD_VALUE;
        SharedBufferStack& stack( *mSharedStack );
        buf = stack.index[buf];
        LOGD_IF(DEBUG_ATOMICS && buf>=0, "retire=%d, %s",
                int(buf), dump("").string());
    }
    return buf;
}
510
Mathias Agopianc0a91642010-04-27 21:08:20 -0700511status_t SharedBufferServer::unlock(int buf)
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700512{
Mathias Agopianc0a91642010-04-27 21:08:20 -0700513 UnlockUpdate update(this, buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700514 status_t err = updateCondition( update );
515 return err;
516}
517
Mathias Agopianb58b5d72009-09-10 16:55:13 -0700518void SharedBufferServer::setStatus(status_t status)
519{
Mathias Agopian0b3ad462009-10-02 18:12:30 -0700520 if (status < NO_ERROR) {
521 StatusUpdate update(this, status);
522 updateCondition( update );
523 }
Mathias Agopianb58b5d72009-09-10 16:55:13 -0700524}
525
Mathias Agopiana138f892010-05-21 17:24:35 -0700526status_t SharedBufferServer::reallocateAll()
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700527{
Mathias Agopianbb641242010-05-18 17:06:55 -0700528 RWLock::AutoRLock _l(mLock);
529
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700530 SharedBufferStack& stack( *mSharedStack );
Mathias Agopiana0b3f1d2010-05-21 14:51:33 -0700531 uint32_t mask = mBufferList.getMask();
Mathias Agopiana138f892010-05-21 17:24:35 -0700532 android_atomic_or(mask, &stack.reallocMask);
533 return NO_ERROR;
534}
535
536status_t SharedBufferServer::reallocateAllExcept(int buffer)
537{
538 RWLock::AutoRLock _l(mLock);
539
540 SharedBufferStack& stack( *mSharedStack );
541 BufferList temp(mBufferList);
542 temp.remove(buffer);
543 uint32_t mask = temp.getMask();
544 android_atomic_or(mask, &stack.reallocMask);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700545 return NO_ERROR;
546}
547
Mathias Agopiane7005012009-10-07 16:44:10 -0700548int32_t SharedBufferServer::getQueuedCount() const
549{
550 SharedBufferStack& stack( *mSharedStack );
551 return stack.queued;
552}
553
Mathias Agopianc0a91642010-04-27 21:08:20 -0700554status_t SharedBufferServer::assertReallocate(int buf)
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700555{
Mathias Agopianbb641242010-05-18 17:06:55 -0700556 /*
557 * NOTE: it's safe to hold mLock for read while waiting for
558 * the ReallocateCondition because that condition is not updated
559 * by the thread that holds mLock for write.
560 */
561 RWLock::AutoRLock _l(mLock);
562
Mathias Agopiand606de62010-05-10 20:06:11 -0700563 // TODO: need to validate "buf"
Mathias Agopianc0a91642010-04-27 21:08:20 -0700564 ReallocateCondition condition(this, buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700565 status_t err = waitForCondition(condition);
566 return err;
567}
568
Mathias Agopianc0a91642010-04-27 21:08:20 -0700569Region SharedBufferServer::getDirtyRegion(int buf) const
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700570{
571 SharedBufferStack& stack( *mSharedStack );
Mathias Agopianc0a91642010-04-27 21:08:20 -0700572 return stack.getDirtyRegion(buf);
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700573}
574
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700575/*
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700576 * NOTE: this is not thread-safe on the server-side, meaning
577 * 'head' cannot move during this operation. The client-side
578 * can safely operate an usual.
579 *
580 */
581status_t SharedBufferServer::resize(int newNumBuffers)
582{
Mathias Agopianbb641242010-05-18 17:06:55 -0700583 if (uint32_t(newNumBuffers) >= SharedBufferStack::NUM_BUFFER_MAX)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700584 return BAD_VALUE;
585
Mathias Agopianbb641242010-05-18 17:06:55 -0700586 RWLock::AutoWLock _l(mLock);
587
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700588 // for now we're not supporting shrinking
589 const int numBuffers = mNumBuffers;
590 if (newNumBuffers < numBuffers)
591 return BAD_VALUE;
592
593 SharedBufferStack& stack( *mSharedStack );
594 const int extra = newNumBuffers - numBuffers;
595
596 // read the head, make sure it's valid
597 int32_t head = stack.head;
Mathias Agopianbb641242010-05-18 17:06:55 -0700598 if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700599 return BAD_VALUE;
600
601 int base = numBuffers;
602 int32_t avail = stack.available;
603 int tail = head - avail + 1;
Mathias Agopiand6297f72010-05-17 17:27:26 -0700604
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700605 if (tail >= 0) {
606 int8_t* const index = const_cast<int8_t*>(stack.index);
607 const int nb = numBuffers - head;
608 memmove(&index[head + extra], &index[head], nb);
609 base = head;
610 // move head 'extra' ahead, this doesn't impact stack.index[head];
611 stack.head = head + extra;
612 }
613 stack.available += extra;
614
615 // fill the new free space with unused buffers
616 BufferList::const_iterator curr(mBufferList.free_begin());
617 for (int i=0 ; i<extra ; i++) {
Mathias Agopiand6297f72010-05-17 17:27:26 -0700618 stack.index[base+i] = *curr;
619 mBufferList.add(*curr);
620 ++curr;
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700621 }
622
623 mNumBuffers = newNumBuffers;
624 return NO_ERROR;
625}
626
Mathias Agopian86f73292009-09-17 01:35:28 -0700627SharedBufferStack::Statistics SharedBufferServer::getStats() const
628{
629 SharedBufferStack& stack( *mSharedStack );
630 return stack.stats;
631}
632
Mathias Agopianb5b7f262010-05-07 15:58:44 -0700633// ---------------------------------------------------------------------------
634status_t SharedBufferServer::BufferList::add(int value)
635{
636 if (uint32_t(value) >= mCapacity)
637 return BAD_VALUE;
638 uint32_t mask = 1<<(31-value);
639 if (mList & mask)
640 return ALREADY_EXISTS;
641 mList |= mask;
642 return NO_ERROR;
643}
644
645status_t SharedBufferServer::BufferList::remove(int value)
646{
647 if (uint32_t(value) >= mCapacity)
648 return BAD_VALUE;
649 uint32_t mask = 1<<(31-value);
650 if (!(mList & mask))
651 return NAME_NOT_FOUND;
652 mList &= ~mask;
653 return NO_ERROR;
654}
655
Mathias Agopian86f73292009-09-17 01:35:28 -0700656
Mathias Agopiancbb288b2009-09-07 16:32:45 -0700657// ---------------------------------------------------------------------------
658}; // namespace android