/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "LayerReorderer.h"

#include "BakedOpState.h"
#include "RenderNode.h"
#include "utils/PaintUtils.h"
#include "utils/TraceUtils.h"

#include <utils/TypeHelpers.h>

namespace android {
namespace uirenderer {
28
29class BatchBase {
30public:
31 BatchBase(batchid_t batchId, BakedOpState* op, bool merging)
32 : mBatchId(batchId)
33 , mMerging(merging) {
34 mBounds = op->computedState.clippedBounds;
35 mOps.push_back(op);
36 }
37
38 bool intersects(const Rect& rect) const {
39 if (!rect.intersects(mBounds)) return false;
40
41 for (const BakedOpState* op : mOps) {
42 if (rect.intersects(op->computedState.clippedBounds)) {
43 return true;
44 }
45 }
46 return false;
47 }
48
49 batchid_t getBatchId() const { return mBatchId; }
50 bool isMerging() const { return mMerging; }
51
52 const std::vector<BakedOpState*>& getOps() const { return mOps; }
53
54 void dump() const {
55 ALOGD(" Batch %p, id %d, merging %d, count %d, bounds " RECT_STRING,
Chris Craikb250a832016-01-11 19:28:17 -080056 this, mBatchId, mMerging, (int) mOps.size(), RECT_ARGS(mBounds));
Chris Craik5ea17242016-01-11 14:07:59 -080057 }
58protected:
59 batchid_t mBatchId;
60 Rect mBounds;
61 std::vector<BakedOpState*> mOps;
62 bool mMerging;
63};
64
65class OpBatch : public BatchBase {
66public:
67 static void* operator new(size_t size, LinearAllocator& allocator) {
68 return allocator.alloc(size);
69 }
70
71 OpBatch(batchid_t batchId, BakedOpState* op)
72 : BatchBase(batchId, op, false) {
73 }
74
75 void batchOp(BakedOpState* op) {
76 mBounds.unionWith(op->computedState.clippedBounds);
77 mOps.push_back(op);
78 }
79};
80
// A batch of ops that may be drawn together with a single multiDraw call.
// Also accumulates the union of the ops' clip side flags, so the merged draw
// can use the batch bounds as its clip rect.
class MergingOpBatch : public BatchBase {
public:
    // Batches live in the frame's LinearAllocator, which frees everything
    // wholesale, so no matching operator delete is required.
    static void* operator new(size_t size, LinearAllocator& allocator) {
        return allocator.alloc(size);
    }

    MergingOpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, true)
            , mClipSideFlags(op->computedState.clipSideFlags) {
    }

    /*
     * Helper for determining if a new op can merge with a MergingDrawBatch based on their bounds
     * and clip side flags. Positive bounds delta means new bounds fit in old.
     *
     * A side's clip is only compatible if whichever rect is clipped on that side fully
     * contains the other rect's extent on that side; otherwise merging would change
     * what gets clipped.
     */
    static inline bool checkSide(const int currentFlags, const int newFlags, const int side,
            float boundsDelta) {
        bool currentClipExists = currentFlags & side;
        bool newClipExists = newFlags & side;

        // if current is clipped, we must be able to fit new bounds in current
        if (boundsDelta > 0 && currentClipExists) return false;

        // if new is clipped, we must be able to fit current bounds in new
        if (boundsDelta < 0 && newClipExists) return false;

        return true;
    }

    // True when the paint carries none of the attributes the merged draw would
    // have to honor: full alpha, no color filter, no shader.
    static bool paintIsDefault(const SkPaint& paint) {
        return paint.getAlpha() == 255
                && paint.getColorFilter() == nullptr
                && paint.getShader() == nullptr;
    }

    // Paint equivalence for merging purposes — compares only the attributes
    // considered by paintIsDefault().
    static bool paintsAreEquivalent(const SkPaint& a, const SkPaint& b) {
        // Note: don't check color, since all currently mergeable ops can merge across colors
        return a.getAlpha() == b.getAlpha()
                && a.getColorFilter() == b.getColorFilter()
                && a.getShader() == b.getShader();
    }

    /*
     * Checks if a (mergeable) op can be merged into this batch
     *
     * If true, the op's multiDraw must be guaranteed to handle both ops simultaneously, so it is
     * important to consider all paint attributes used in the draw calls in deciding both a) if an
     * op tries to merge at all, and b) if the op can merge with another set of ops
     *
     * False positives can lead to information from the paints of subsequent merged operations being
     * dropped, so we make simplifying qualifications on the ops that can merge, per op type.
     */
    bool canMergeWith(BakedOpState* op) const {
        bool isTextBatch = getBatchId() == OpBatchType::Text
                || getBatchId() == OpBatchType::ColorText;

        // Overlapping other operations is only allowed for text without shadow. For other ops,
        // multiDraw isn't guaranteed to overdraw correctly
        if (!isTextBatch || PaintUtils::hasTextShadow(op->op->paint)) {
            if (intersects(op->computedState.clippedBounds)) return false;
        }

        const BakedOpState* lhs = op;
        const BakedOpState* rhs = mOps[0];

        if (!MathUtils::areEqual(lhs->alpha, rhs->alpha)) return false;

        // Identical round rect clip state means both ops will clip in the same way, or not at all.
        // As the state objects are const, we can compare their pointers to determine mergeability
        if (lhs->roundRectClipState != rhs->roundRectClipState) return false;
        if (lhs->projectionPathMask != rhs->projectionPathMask) return false;

        /* Clipping compatibility check
         *
         * Exploits the fact that if a op or batch is clipped on a side, its bounds will equal its
         * clip for that side.
         */
        const int currentFlags = mClipSideFlags;
        const int newFlags = op->computedState.clipSideFlags;
        if (currentFlags != OpClipSideFlags::None || newFlags != OpClipSideFlags::None) {
            const Rect& opBounds = op->computedState.clippedBounds;
            float boundsDelta = mBounds.left - opBounds.left;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Left, boundsDelta)) return false;
            boundsDelta = mBounds.top - opBounds.top;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Top, boundsDelta)) return false;

            // right and bottom delta calculation reversed to account for direction
            boundsDelta = opBounds.right - mBounds.right;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Right, boundsDelta)) return false;
            boundsDelta = opBounds.bottom - mBounds.bottom;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Bottom, boundsDelta)) return false;
        }

        const SkPaint* newPaint = op->op->paint;
        const SkPaint* oldPaint = mOps[0]->op->paint;

        if (newPaint == oldPaint) {
            // if paints are equal, then modifiers + paint attribs don't need to be compared
            // (this also covers the both-null case)
            return true;
        } else if (newPaint && !oldPaint) {
            return paintIsDefault(*newPaint);
        } else if (!newPaint && oldPaint) {
            return paintIsDefault(*oldPaint);
        }
        // both non-null and distinct: compare the attributes multiDraw consumes
        return paintsAreEquivalent(*newPaint, *oldPaint);
    }

    void mergeOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);

        // Because a new op must have passed canMergeWith(), we know it's passed the clipping compat
        // check, and doesn't extend past a side of the clip that's in use by the merged batch.
        // Therefore it's safe to simply always merge flags, and use the bounds as the clip rect.
        mClipSideFlags |= op->computedState.clipSideFlags;
    }

    int getClipSideFlags() const { return mClipSideFlags; }
    const Rect& getClipRect() const { return mBounds; }

private:
    // Union of clip side flags of all merged ops; see mergeOp().
    int mClipSideFlags;
};
204
// Collects and reorders the deferred draw ops for a single layer.
// renderNode may be null (the backing buffer is then also null); viewportClip
// covers the full layer extent.
LayerReorderer::LayerReorderer(uint32_t width, uint32_t height,
        const Rect& repaintRect, const BeginLayerOp* beginLayerOp, RenderNode* renderNode)
        : width(width)
        , height(height)
        , repaintRect(repaintRect)
        , offscreenBuffer(renderNode ? renderNode->getLayer() : nullptr)
        , beginLayerOp(beginLayerOp)
        , renderNode(renderNode)
        , viewportClip(Rect(width, height)) {}
214
215// iterate back toward target to see if anything drawn since should overlap the new op
216// if no target, merging ops still iterate to find similar batch to insert after
217void LayerReorderer::locateInsertIndex(int batchId, const Rect& clippedBounds,
218 BatchBase** targetBatch, size_t* insertBatchIndex) const {
219 for (int i = mBatches.size() - 1; i >= 0; i--) {
220 BatchBase* overBatch = mBatches[i];
221
222 if (overBatch == *targetBatch) break;
223
224 // TODO: also consider shader shared between batch types
225 if (batchId == overBatch->getBatchId()) {
226 *insertBatchIndex = i + 1;
227 if (!*targetBatch) break; // found insert position, quit
228 }
229
230 if (overBatch->intersects(clippedBounds)) {
231 // NOTE: it may be possible to optimize for special cases where two operations
232 // of the same batch/paint could swap order, such as with a non-mergeable
233 // (clipped) and a mergeable text operation
234 *targetBatch = nullptr;
235 break;
236 }
237 }
238}
239
// Records a rect that must be cleared in this layer before subsequent ops
// draw; the actual clear draw is emitted lazily by flushLayerClears().
void LayerReorderer::deferLayerClear(const Rect& rect) {
    mClearRects.push_back(rect);
}
243
244void LayerReorderer::flushLayerClears(LinearAllocator& allocator) {
245 if (CC_UNLIKELY(!mClearRects.empty())) {
246 const int vertCount = mClearRects.size() * 4;
247 // put the verts in the frame allocator, since
248 // 1) SimpleRectsOps needs verts, not rects
249 // 2) even if mClearRects stored verts, std::vectors will move their contents
250 Vertex* const verts = (Vertex*) allocator.alloc(vertCount * sizeof(Vertex));
251
252 Vertex* currentVert = verts;
253 Rect bounds = mClearRects[0];
254 for (auto&& rect : mClearRects) {
255 bounds.unionWith(rect);
256 Vertex::set(currentVert++, rect.left, rect.top);
257 Vertex::set(currentVert++, rect.right, rect.top);
258 Vertex::set(currentVert++, rect.left, rect.bottom);
259 Vertex::set(currentVert++, rect.right, rect.bottom);
260 }
261 mClearRects.clear(); // discard rects before drawing so this method isn't reentrant
262
263 // One or more unclipped saveLayers have been enqueued, with deferred clears.
264 // Flush all of these clears with a single draw
265 SkPaint* paint = allocator.create<SkPaint>();
266 paint->setXfermodeMode(SkXfermode::kClear_Mode);
267 SimpleRectsOp* op = new (allocator) SimpleRectsOp(bounds,
268 Matrix4::identity(), nullptr, paint,
269 verts, vertCount);
270 BakedOpState* bakedState = BakedOpState::directConstruct(allocator,
271 &viewportClip, bounds, *op);
272
273
274 deferUnmergeableOp(allocator, bakedState, OpBatchType::Vertices);
275 }
276}
277
278void LayerReorderer::deferUnmergeableOp(LinearAllocator& allocator,
279 BakedOpState* op, batchid_t batchId) {
280 if (batchId != OpBatchType::CopyToLayer) {
281 // if first op after one or more unclipped saveLayers, flush the layer clears
282 flushLayerClears(allocator);
283 }
284
285 OpBatch* targetBatch = mBatchLookup[batchId];
286
287 size_t insertBatchIndex = mBatches.size();
288 if (targetBatch) {
289 locateInsertIndex(batchId, op->computedState.clippedBounds,
290 (BatchBase**)(&targetBatch), &insertBatchIndex);
291 }
292
293 if (targetBatch) {
294 targetBatch->batchOp(op);
295 } else {
296 // new non-merging batch
297 targetBatch = new (allocator) OpBatch(batchId, op);
298 mBatchLookup[batchId] = targetBatch;
299 mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
300 }
301}
302
303void LayerReorderer::deferMergeableOp(LinearAllocator& allocator,
304 BakedOpState* op, batchid_t batchId, mergeid_t mergeId) {
305 if (batchId != OpBatchType::CopyToLayer) {
306 // if first op after one or more unclipped saveLayers, flush the layer clears
307 flushLayerClears(allocator);
308 }
309 MergingOpBatch* targetBatch = nullptr;
310
311 // Try to merge with any existing batch with same mergeId
312 auto getResult = mMergingBatchLookup[batchId].find(mergeId);
313 if (getResult != mMergingBatchLookup[batchId].end()) {
314 targetBatch = getResult->second;
315 if (!targetBatch->canMergeWith(op)) {
316 targetBatch = nullptr;
317 }
318 }
319
320 size_t insertBatchIndex = mBatches.size();
321 locateInsertIndex(batchId, op->computedState.clippedBounds,
322 (BatchBase**)(&targetBatch), &insertBatchIndex);
323
324 if (targetBatch) {
325 targetBatch->mergeOp(op);
326 } else {
327 // new merging batch
328 targetBatch = new (allocator) MergingOpBatch(batchId, op);
329 mMergingBatchLookup[batchId].insert(std::make_pair(mergeId, targetBatch));
330
331 mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
332 }
333}
334
335void LayerReorderer::replayBakedOpsImpl(void* arg,
336 BakedOpReceiver* unmergedReceivers, MergedOpReceiver* mergedReceivers) const {
337 ATRACE_NAME("flush drawing commands");
338 for (const BatchBase* batch : mBatches) {
339 size_t size = batch->getOps().size();
340 if (size > 1 && batch->isMerging()) {
341 int opId = batch->getOps()[0]->op->opId;
342 const MergingOpBatch* mergingBatch = static_cast<const MergingOpBatch*>(batch);
343 MergedBakedOpList data = {
344 batch->getOps().data(),
345 size,
346 mergingBatch->getClipSideFlags(),
347 mergingBatch->getClipRect()
348 };
349 mergedReceivers[opId](arg, data);
350 } else {
351 for (const BakedOpState* op : batch->getOps()) {
352 unmergedReceivers[op->op->opId](arg, *op);
353 }
354 }
355 }
356}
357
// Logs this layer's identity/dimensions plus a line per deferred batch, for
// debugging.
void LayerReorderer::dump() const {
    ALOGD("LayerReorderer %p, %ux%u buffer %p, blo %p, rn %p",
            this, width, height, offscreenBuffer, beginLayerOp, renderNode);
    for (const BatchBase* batch : mBatches) {
        batch->dump();
    }
}

} // namespace uirenderer
} // namespace android