blob: c6b1609dc5e26bbcf8746b2e1e3a90d6d2ced1f8 [file] [log] [blame]
Chris Craikb565df12015-10-05 13:00:52 -07001/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "OpReorderer.h"
18
19#include "utils/PaintUtils.h"
20#include "RenderNode.h"
Chris Craik0b7e8242015-10-28 16:50:44 -070021#include "LayerUpdateQueue.h"
Chris Craikb565df12015-10-05 13:00:52 -070022
23#include "SkCanvas.h"
24#include "utils/Trace.h"
25
26namespace android {
27namespace uirenderer {
28
29class BatchBase {
30
31public:
32 BatchBase(batchid_t batchId, BakedOpState* op, bool merging)
33 : mBatchId(batchId)
34 , mMerging(merging) {
35 mBounds = op->computedState.clippedBounds;
36 mOps.push_back(op);
37 }
38
39 bool intersects(const Rect& rect) const {
40 if (!rect.intersects(mBounds)) return false;
41
42 for (const BakedOpState* op : mOps) {
43 if (rect.intersects(op->computedState.clippedBounds)) {
44 return true;
45 }
46 }
47 return false;
48 }
49
50 batchid_t getBatchId() const { return mBatchId; }
51 bool isMerging() const { return mMerging; }
52
53 const std::vector<BakedOpState*>& getOps() const { return mOps; }
54
55 void dump() const {
Chris Craik6fe991e52015-10-20 09:39:42 -070056 ALOGD(" Batch %p, id %d, merging %d, count %d, bounds " RECT_STRING,
57 this, mBatchId, mMerging, mOps.size(), RECT_ARGS(mBounds));
Chris Craikb565df12015-10-05 13:00:52 -070058 }
59protected:
60 batchid_t mBatchId;
61 Rect mBounds;
62 std::vector<BakedOpState*> mOps;
63 bool mMerging;
64};
65
66class OpBatch : public BatchBase {
67public:
68 static void* operator new(size_t size, LinearAllocator& allocator) {
69 return allocator.alloc(size);
70 }
71
72 OpBatch(batchid_t batchId, BakedOpState* op)
73 : BatchBase(batchId, op, false) {
74 }
75
76 void batchOp(BakedOpState* op) {
77 mBounds.unionWith(op->computedState.clippedBounds);
78 mOps.push_back(op);
79 }
80};
81
/**
 * A batch of ops sharing a mergeid that may be drawn with a single merged (multi-draw)
 * call, provided each newly added op passes canMergeWith().
 */
class MergingOpBatch : public BatchBase {
public:
    // Batches live in the frame's LinearAllocator, and are freed with the frame.
    static void* operator new(size_t size, LinearAllocator& allocator) {
        return allocator.alloc(size);
    }

    MergingOpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, true) {
    }

    /*
     * Helper for determining if a new op can merge with a MergingDrawBatch based on their bounds
     * and clip side flags. Positive bounds delta means new bounds fit in old.
     */
    static inline bool checkSide(const int currentFlags, const int newFlags, const int side,
            float boundsDelta) {
        bool currentClipExists = currentFlags & side;
        bool newClipExists = newFlags & side;

        // if current is clipped, we must be able to fit new bounds in current
        if (boundsDelta > 0 && currentClipExists) return false;

        // if new is clipped, we must be able to fit current bounds in new
        if (boundsDelta < 0 && newClipExists) return false;

        return true;
    }

    // True if the paint carries no attribute that would affect merged output:
    // fully opaque, no color filter, no shader. Such a paint is interchangeable
    // with a missing (null) paint.
    static bool paintIsDefault(const SkPaint& paint) {
        return paint.getAlpha() == 255
                && paint.getColorFilter() == nullptr
                && paint.getShader() == nullptr;
    }

    // True if the two paints agree on every attribute consumed by a merged draw.
    // NOTE: color filter and shader are compared by pointer, so equivalent but
    // distinct objects will not be considered mergeable.
    static bool paintsAreEquivalent(const SkPaint& a, const SkPaint& b) {
        return a.getAlpha() == b.getAlpha()
                && a.getColorFilter() == b.getColorFilter()
                && a.getShader() == b.getShader();
    }

    /*
     * Checks if a (mergeable) op can be merged into this batch
     *
     * If true, the op's multiDraw must be guaranteed to handle both ops simultaneously, so it is
     * important to consider all paint attributes used in the draw calls in deciding both a) if an
     * op tries to merge at all, and b) if the op can merge with another set of ops
     *
     * False positives can lead to information from the paints of subsequent merged operations being
     * dropped, so we make simplifying qualifications on the ops that can merge, per op type.
     */
    bool canMergeWith(BakedOpState* op) const {
        bool isTextBatch = getBatchId() == OpBatchType::Text
                || getBatchId() == OpBatchType::ColorText;

        // Overlapping other operations is only allowed for text without shadow. For other ops,
        // multiDraw isn't guaranteed to overdraw correctly
        if (!isTextBatch || PaintUtils::hasTextShadow(op->op->paint)) {
            if (intersects(op->computedState.clippedBounds)) return false;
        }

        // compare the incoming op against the first op already in the batch
        const BakedOpState* lhs = op;
        const BakedOpState* rhs = mOps[0];

        if (!MathUtils::areEqual(lhs->alpha, rhs->alpha)) return false;

        // Identical round rect clip state means both ops will clip in the same way, or not at all.
        // As the state objects are const, we can compare their pointers to determine mergeability
        if (lhs->roundRectClipState != rhs->roundRectClipState) return false;
        if (lhs->projectionPathMask != rhs->projectionPathMask) return false;

        /* Clipping compatibility check
         *
         * Exploits the fact that if a op or batch is clipped on a side, its bounds will equal its
         * clip for that side.
         */
        const int currentFlags = mClipSideFlags;
        const int newFlags = op->computedState.clipSideFlags;
        if (currentFlags != OpClipSideFlags::None || newFlags != OpClipSideFlags::None) {
            const Rect& opBounds = op->computedState.clippedBounds;
            float boundsDelta = mBounds.left - opBounds.left;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Left, boundsDelta)) return false;
            boundsDelta = mBounds.top - opBounds.top;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Top, boundsDelta)) return false;

            // right and bottom delta calculation reversed to account for direction
            boundsDelta = opBounds.right - mBounds.right;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Right, boundsDelta)) return false;
            boundsDelta = opBounds.bottom - mBounds.bottom;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Bottom, boundsDelta)) return false;
        }

        const SkPaint* newPaint = op->op->paint;
        const SkPaint* oldPaint = mOps[0]->op->paint;

        if (newPaint == oldPaint) {
            // if paints are equal, then modifiers + paint attribs don't need to be compared
            return true;
        } else if (newPaint && !oldPaint) {
            return paintIsDefault(*newPaint);
        } else if (!newPaint && oldPaint) {
            return paintIsDefault(*oldPaint);
        }
        return paintsAreEquivalent(*newPaint, *oldPaint);
    }

    // Adds an op to this batch, growing the batch bounds, and extending the merged
    // clip rect on any side the new op is clipped on.
    void mergeOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);

        const int newClipSideFlags = op->computedState.clipSideFlags;
        mClipSideFlags |= newClipSideFlags;

        const Rect& opClip = op->computedState.clipRect;
        if (newClipSideFlags & OpClipSideFlags::Left) mClipRect.left = opClip.left;
        if (newClipSideFlags & OpClipSideFlags::Top) mClipRect.top = opClip.top;
        if (newClipSideFlags & OpClipSideFlags::Right) mClipRect.right = opClip.right;
        if (newClipSideFlags & OpClipSideFlags::Bottom) mClipRect.bottom = opClip.bottom;
    }

private:
    // union of the clip side flags of all merged ops
    int mClipSideFlags = 0;
    // merged clip rect; only the sides with a corresponding flag set are meaningful
    Rect mClipRect;
};
205
// Constructs the reorderer for a single layer's worth of ops.
// renderNode is non-null only for render-node-backed (HW) layers; in that case the target
// offscreenBuffer is the node's existing layer (which may itself be null if not yet allocated).
// beginLayerOp is set instead for saveLayer-style layers created during deferral.
OpReorderer::LayerReorderer::LayerReorderer(uint32_t width, uint32_t height,
        const BeginLayerOp* beginLayerOp, RenderNode* renderNode)
        : width(width)
        , height(height)
        , offscreenBuffer(renderNode ? renderNode->getLayer() : nullptr)
        , beginLayerOp(beginLayerOp)
        , renderNode(renderNode) {}
213
// iterate back toward target to see if anything drawn since should overlap the new op
// if no target, merging ops still iterate to find similar batch to insert after
//
// On return:
//   *targetBatch is nulled if a later batch overlaps the new op's bounds (the op can't be
//   reordered past that batch), and *insertBatchIndex holds the position at which a new
//   batch should be inserted - just after the rearmost batch with the same batchId that
//   was reached before any overlap.
void OpReorderer::LayerReorderer::locateInsertIndex(int batchId, const Rect& clippedBounds,
        BatchBase** targetBatch, size_t* insertBatchIndex) const {
    for (int i = mBatches.size() - 1; i >= 0; i--) {
        BatchBase* overBatch = mBatches[i];

        // reached the batch we hoped to merge into without hitting any overlap - safe to merge
        if (overBatch == *targetBatch) break;

        // TODO: also consider shader shared between batch types
        if (batchId == overBatch->getBatchId()) {
            *insertBatchIndex = i + 1;
            if (!*targetBatch) break; // found insert position, quit
        }

        if (overBatch->intersects(clippedBounds)) {
            // NOTE: it may be possible to optimize for special cases where two operations
            // of the same batch/paint could swap order, such as with a non-mergeable
            // (clipped) and a mergeable text operation
            *targetBatch = nullptr;
            break;
        }
    }
}
238
239void OpReorderer::LayerReorderer::deferUnmergeableOp(LinearAllocator& allocator,
240 BakedOpState* op, batchid_t batchId) {
241 OpBatch* targetBatch = mBatchLookup[batchId];
242
243 size_t insertBatchIndex = mBatches.size();
244 if (targetBatch) {
245 locateInsertIndex(batchId, op->computedState.clippedBounds,
246 (BatchBase**)(&targetBatch), &insertBatchIndex);
247 }
248
249 if (targetBatch) {
250 targetBatch->batchOp(op);
251 } else {
252 // new non-merging batch
253 targetBatch = new (allocator) OpBatch(batchId, op);
254 mBatchLookup[batchId] = targetBatch;
255 mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
256 }
257}
258
259// insertion point of a new batch, will hopefully be immediately after similar batch
260// (generally, should be similar shader)
261void OpReorderer::LayerReorderer::deferMergeableOp(LinearAllocator& allocator,
262 BakedOpState* op, batchid_t batchId, mergeid_t mergeId) {
263 MergingOpBatch* targetBatch = nullptr;
264
265 // Try to merge with any existing batch with same mergeId
266 auto getResult = mMergingBatchLookup[batchId].find(mergeId);
267 if (getResult != mMergingBatchLookup[batchId].end()) {
268 targetBatch = getResult->second;
269 if (!targetBatch->canMergeWith(op)) {
270 targetBatch = nullptr;
271 }
272 }
273
274 size_t insertBatchIndex = mBatches.size();
275 locateInsertIndex(batchId, op->computedState.clippedBounds,
276 (BatchBase**)(&targetBatch), &insertBatchIndex);
277
278 if (targetBatch) {
279 targetBatch->mergeOp(op);
280 } else {
281 // new merging batch
282 targetBatch = new (allocator) MergingOpBatch(batchId, op);
283 mMergingBatchLookup[batchId].insert(std::make_pair(mergeId, targetBatch));
284
285 mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
286 }
287}
288
Chris Craik5854b342015-10-26 15:49:56 -0700289void OpReorderer::LayerReorderer::replayBakedOpsImpl(void* arg, BakedOpDispatcher* receivers) const {
290 ATRACE_NAME("flush drawing commands");
Chris Craik6fe991e52015-10-20 09:39:42 -0700291 for (const BatchBase* batch : mBatches) {
292 // TODO: different behavior based on batch->isMerging()
293 for (const BakedOpState* op : batch->getOps()) {
294 receivers[op->op->opId](arg, *op->op, *op);
295 }
296 }
297}
298
299void OpReorderer::LayerReorderer::dump() const {
Chris Craik0b7e8242015-10-28 16:50:44 -0700300 ALOGD("LayerReorderer %p, %ux%u buffer %p, blo %p, rn %p",
301 this, width, height, offscreenBuffer, beginLayerOp, renderNode);
Chris Craik6fe991e52015-10-20 09:39:42 -0700302 for (const BatchBase* batch : mBatches) {
303 batch->dump();
304 }
305}
Chris Craikb565df12015-10-05 13:00:52 -0700306
// Frame constructor: defers every enqueued dirty layer, then the Fbo0 content from the
// given render nodes.
OpReorderer::OpReorderer(const LayerUpdateQueue& layers, const SkRect& clip,
        uint32_t viewportWidth, uint32_t viewportHeight,
        const std::vector< sp<RenderNode> >& nodes)
        : mCanvasState(*this) {
    ATRACE_NAME("prepare drawing commands");
    // layer index 0 represents Fbo0 itself, and starts as the current layer on the stack
    mLayerReorderers.emplace_back(viewportWidth, viewportHeight);
    mLayerStack.push_back(0);

    mCanvasState.initializeSaveStack(viewportWidth, viewportHeight,
            clip.fLeft, clip.fTop, clip.fRight, clip.fBottom,
            Vector3());

    // Render all layers to be updated, in order. Defer in reverse order, so that they'll be
    // updated in the order they're passed in (mLayerReorderers are issued to Renderer in reverse)
    for (int i = layers.entries().size() - 1; i >= 0; i--) {
        RenderNode* layerNode = layers.entries()[i].renderNode;
        const Rect& layerDamage = layers.entries()[i].damage;

        // restrict each layer's deferred ops to its damage rect
        saveForLayer(layerNode->getWidth(), layerNode->getHeight(), nullptr, layerNode);
        mCanvasState.writableSnapshot()->setClip(
                layerDamage.left, layerDamage.top, layerDamage.right, layerDamage.bottom);

        if (layerNode->getDisplayList()) {
            deferImpl(*(layerNode->getDisplayList()));
        }
        restoreForLayer();
    }

    // Defer Fbo0
    for (const sp<RenderNode>& node : nodes) {
        if (node->nothingToDraw()) continue;

        int count = mCanvasState.save(SkCanvas::kClip_SaveFlag | SkCanvas::kMatrix_SaveFlag);
        deferNodePropsAndOps(*node);
        mCanvasState.restoreToCount(count);
    }
}
344
// DisplayList constructor: defers a single display list directly into the Fbo0 layer,
// with the clip covering the full viewport.
OpReorderer::OpReorderer(int viewportWidth, int viewportHeight, const DisplayList& displayList)
        : mCanvasState(*this) {
    ATRACE_NAME("prepare drawing commands");

    // layer index 0 represents Fbo0 itself, and starts as the current layer on the stack
    mLayerReorderers.emplace_back(viewportWidth, viewportHeight);
    mLayerStack.push_back(0);

    mCanvasState.initializeSaveStack(viewportWidth, viewportHeight,
            0, 0, viewportWidth, viewportHeight, Vector3());
    deferImpl(displayList);
}
356
// No-op: the constructors fully initialize viewport state before deferring any ops.
void OpReorderer::onViewportInitialized() {}
358
// No-op: no work is needed when a canvas snapshot is restored during deferral.
void OpReorderer::onSnapshotRestored(const Snapshot& removed, const Snapshot& restored) {}
360
// Applies the node's view properties to the canvas state; if not rejected, defers either
// a LayerOp drawing the node's HW layer, or the node's display list contents directly.
void OpReorderer::deferNodePropsAndOps(RenderNode& node) {
    if (node.applyViewProperties(mCanvasState, mAllocator)) {
        // not rejected so render
        if (node.getLayer()) {
            // HW layer - draw the node's layer instead of deferring its content
            LayerOp* drawLayerOp = new (mAllocator) LayerOp(node);
            BakedOpState* bakedOpState = tryBakeOpState(*drawLayerOp);
            if (bakedOpState) {
                // the LayerOp draws into whichever layer is currently active
                currentLayer().deferUnmergeableOp(mAllocator, bakedOpState, OpBatchType::Bitmap);
            }
            // else: quick rejected, nothing to defer
        } else {
            deferImpl(*(node.getDisplayList()));
        }
    }
}
377
/**
 * Used to define a list of lambdas referencing private OpReorderer::onXXXXOp() methods.
 *
 * This allows opIds embedded in the RecordedOps to be used for dispatching to these lambdas. E.g. a
 * BitmapOp op then would be dispatched to OpReorderer::onBitmapOp(const BitmapOp&)
 */
#define OP_RECEIVER(Type) \
        [](OpReorderer& reorderer, const RecordedOp& op) { reorderer.on##Type(static_cast<const Type&>(op)); },
void OpReorderer::deferImpl(const DisplayList& displayList) {
    // dispatch table indexed by RecordedOp::opId, built once via MAP_OPS
    static std::function<void(OpReorderer& reorderer, const RecordedOp&)> receivers[] = {
        MAP_OPS(OP_RECEIVER)
    };
    // defer ops in recorded order, chunk by chunk
    for (const DisplayList::Chunk& chunk : displayList.getChunks()) {
        for (size_t opIndex = chunk.beginOpIndex; opIndex < chunk.endOpIndex; opIndex++) {
            const RecordedOp* op = displayList.getOps()[opIndex];
            receivers[op->opId](*this, *op);
        }
    }
}
397
Chris Craikb565df12015-10-05 13:00:52 -0700398void OpReorderer::onRenderNodeOp(const RenderNodeOp& op) {
399 if (op.renderNode->nothingToDraw()) {
400 return;
401 }
Chris Craik6fe991e52015-10-20 09:39:42 -0700402 int count = mCanvasState.save(SkCanvas::kClip_SaveFlag | SkCanvas::kMatrix_SaveFlag);
Chris Craikb565df12015-10-05 13:00:52 -0700403
404 // apply state from RecordedOp
405 mCanvasState.concatMatrix(op.localMatrix);
406 mCanvasState.clipRect(op.localClipRect.left, op.localClipRect.top,
407 op.localClipRect.right, op.localClipRect.bottom, SkRegion::kIntersect_Op);
408
Chris Craik0b7e8242015-10-28 16:50:44 -0700409 // then apply state from node properties, and defer ops
410 deferNodePropsAndOps(*op.renderNode);
411
Chris Craik6fe991e52015-10-20 09:39:42 -0700412 mCanvasState.restoreToCount(count);
Chris Craikb565df12015-10-05 13:00:52 -0700413}
414
415static batchid_t tessellatedBatchId(const SkPaint& paint) {
416 return paint.getPathEffect()
417 ? OpBatchType::AlphaMaskTexture
418 : (paint.isAntiAlias() ? OpBatchType::AlphaVertices : OpBatchType::Vertices);
419}
420
421void OpReorderer::onBitmapOp(const BitmapOp& op) {
Chris Craik6fe991e52015-10-20 09:39:42 -0700422 BakedOpState* bakedStateOp = tryBakeOpState(op);
Chris Craikb565df12015-10-05 13:00:52 -0700423 if (!bakedStateOp) return; // quick rejected
424
425 mergeid_t mergeId = (mergeid_t) op.bitmap->getGenerationID();
426 // TODO: AssetAtlas
Chris Craik6fe991e52015-10-20 09:39:42 -0700427 currentLayer().deferMergeableOp(mAllocator, bakedStateOp, OpBatchType::Bitmap, mergeId);
Chris Craikb565df12015-10-05 13:00:52 -0700428}
429
430void OpReorderer::onRectOp(const RectOp& op) {
Chris Craik6fe991e52015-10-20 09:39:42 -0700431 BakedOpState* bakedStateOp = tryBakeOpState(op);
Chris Craikb565df12015-10-05 13:00:52 -0700432 if (!bakedStateOp) return; // quick rejected
Chris Craik6fe991e52015-10-20 09:39:42 -0700433 currentLayer().deferUnmergeableOp(mAllocator, bakedStateOp, tessellatedBatchId(*op.paint));
Chris Craikb565df12015-10-05 13:00:52 -0700434}
435
436void OpReorderer::onSimpleRectsOp(const SimpleRectsOp& op) {
Chris Craik6fe991e52015-10-20 09:39:42 -0700437 BakedOpState* bakedStateOp = tryBakeOpState(op);
Chris Craikb565df12015-10-05 13:00:52 -0700438 if (!bakedStateOp) return; // quick rejected
Chris Craik6fe991e52015-10-20 09:39:42 -0700439 currentLayer().deferUnmergeableOp(mAllocator, bakedStateOp, OpBatchType::Vertices);
Chris Craikb565df12015-10-05 13:00:52 -0700440}
441
// Begins deferring into a new layer: saves canvas state, resets the snapshot to an
// identity transform with a viewport sized to the layer, and pushes a new LayerReorderer
// as the current target. Exactly one of beginLayerOp (saveLayer-style layers) or
// renderNode (node-backed layers) is expected to be non-null.
void OpReorderer::saveForLayer(uint32_t layerWidth, uint32_t layerHeight,
        const BeginLayerOp* beginLayerOp, RenderNode* renderNode) {

    mCanvasState.save(SkCanvas::kClip_SaveFlag | SkCanvas::kMatrix_SaveFlag);
    mCanvasState.writableSnapshot()->transform->loadIdentity();
    mCanvasState.writableSnapshot()->initializeViewport(layerWidth, layerHeight);
    mCanvasState.writableSnapshot()->roundRectClipState = nullptr;

    // create a new layer, and push its index on the stack
    mLayerStack.push_back(mLayerReorderers.size());
    mLayerReorderers.emplace_back(layerWidth, layerHeight, beginLayerOp, renderNode);
}
454
// Ends the layer begun by the matching saveForLayer() call.
void OpReorderer::restoreForLayer() {
    // restore canvas, and pop finished layer off of the stack
    mCanvasState.restore();
    mLayerStack.pop_back();
}
460
461// TODO: test rejection at defer time, where the bounds become empty
462void OpReorderer::onBeginLayerOp(const BeginLayerOp& op) {
463 const uint32_t layerWidth = (uint32_t) op.unmappedBounds.getWidth();
464 const uint32_t layerHeight = (uint32_t) op.unmappedBounds.getHeight();
465 saveForLayer(layerWidth, layerHeight, &op, nullptr);
Chris Craik6fe991e52015-10-20 09:39:42 -0700466}
Chris Craikb565df12015-10-05 13:00:52 -0700467
// Finishes the current layer and defers a LayerOp that draws its offscreen buffer into the
// parent layer.
// NOTE: beginLayerOp and the finished layer's index must be captured *before*
// restoreForLayer() pops mLayerStack and changes what currentLayer() refers to.
void OpReorderer::onEndLayerOp(const EndLayerOp& /* ignored */) {
    const BeginLayerOp& beginLayerOp = *currentLayer().beginLayerOp;
    int finishedLayerIndex = mLayerStack.back();

    restoreForLayer();

    // record the draw operation into the previous layer's list of draw commands
    // uses state from the associated beginLayerOp, since it has all the state needed for drawing
    LayerOp* drawLayerOp = new (mAllocator) LayerOp(
            beginLayerOp.unmappedBounds,
            beginLayerOp.localMatrix,
            beginLayerOp.localClipRect,
            beginLayerOp.paint,
            &mLayerReorderers[finishedLayerIndex].offscreenBuffer);
    BakedOpState* bakedOpState = tryBakeOpState(*drawLayerOp);

    if (bakedOpState) {
        // Layer will be drawn into parent layer (which is now current, since we popped mLayerStack)
        currentLayer().deferUnmergeableOp(mAllocator, bakedOpState, OpBatchType::Bitmap);
    } else {
        // Layer won't be drawn - delete its drawing batches to prevent it from doing any work
        mLayerReorderers[finishedLayerIndex].clear();
        return;
    }
}
493
// LayerOps are synthesized by the reorderer itself (see onEndLayerOp / deferNodePropsAndOps)
// rather than recorded, so encountering one during deferral is a programming error.
void OpReorderer::onLayerOp(const LayerOp& op) {
    LOG_ALWAYS_FATAL("unsupported");
}
497
498} // namespace uirenderer
499} // namespace android