/*
    Copyright 2010 Google Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
 */
16
17
18#include "GrContext.h"
19#include "GrTextureCache.h"
20#include "GrTextStrike.h"
21#include "GrMemory.h"
22#include "GrPathIter.h"
23#include "GrClipIterator.h"
24#include "GrIndexBuffer.h"
25
// When set, text draws are recorded into fTextDrawBuffer and played back
// on flushText() rather than issued immediately (see getTextTarget()).
#define DEFER_TEXT_RENDERING 1

// Texture cache budget: entry count and total byte limits.
static const size_t MAX_TEXTURE_CACHE_COUNT = 128;
static const size_t MAX_TEXTURE_CACHE_BYTES = 8 * 1024 * 1024;

#if DEFER_TEXT_RENDERING
    // Each pool VB holds 2048 vertices in the text vertex format.
    static const uint32_t POOL_VB_SIZE = 2048 *
                 GrDrawTarget::VertexSize(GrDrawTarget::kTextFormat_VertexLayoutBit);
    static const uint32_t NUM_POOL_VBS = 8;
#else
    // No deferral: the pool is unused.
    static const uint32_t POOL_VB_SIZE = 0;
    static const uint32_t NUM_POOL_VBS = 0;

#endif
40
41GrContext* GrContext::Create(GrGpu::Engine engine,
42 GrGpu::Platform3DContext context3D) {
43 GrContext* ctx = NULL;
44 GrGpu* fGpu = GrGpu::Create(engine, context3D);
45 if (NULL != fGpu) {
46 ctx = new GrContext(fGpu);
47 fGpu->unref();
48 }
49 return ctx;
50}
51
GrContext::~GrContext() {
    // Release the ref taken in the constructor, then free the caches we own.
    // NOTE(review): fFontCache was built around fGpu — presumably it does not
    // touch the gpu in its destructor after this unref; confirm.
    fGpu->unref();
    delete fTextureCache;
    delete fFontCache;
}
57
// Drop every cached texture (and the font cache's strikes) without freeing
// the underlying GPU objects — used when the 3D context itself is lost.
void GrContext::abandonAllTextures() {
    fTextureCache->deleteAll(GrTextureCache::kAbandonTexture_DeleteMode);
    fFontCache->abandonAll();
}
62
// Looks up a cached texture. The key is finalized first (folding in
// sampler-dependent bits — see finalizeTextureKey) so that "special"
// stretched textures get distinct cache entries. Returns a locked entry
// or NULL; callers must unlockTexture() when done.
GrTextureEntry* GrContext::findAndLockTexture(GrTextureKey* key,
                                              const GrSamplerState& sampler) {
    finalizeTextureKey(key, sampler);
    return fTextureCache->findAndLock(*key);
}
68
69static void stretchImage(void* dst,
70 int dstW,
71 int dstH,
72 void* src,
73 int srcW,
74 int srcH,
75 int bpp) {
76 GrFixed dx = (srcW << 16) / dstW;
77 GrFixed dy = (srcH << 16) / dstH;
78
79 GrFixed y = dy >> 1;
80
81 int dstXLimit = dstW*bpp;
82 for (int j = 0; j < dstH; ++j) {
83 GrFixed x = dx >> 1;
84 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
85 void* dstRow = (uint8_t*)dst + j*dstW*bpp;
86 for (int i = 0; i < dstXLimit; i += bpp) {
87 memcpy((uint8_t*) dstRow + i,
88 (uint8_t*) srcRow + (x>>16)*bpp,
89 bpp);
90 x += dx;
91 }
92 y += dy;
93 }
94}
95
/**
 *  Creates a texture for the given key/desc, uploads srcData, and returns a
 *  locked cache entry (or NULL on failure).
 *
 *  If finalizeTextureKey() marks the key "special" (NPOT texture that needs
 *  repeat wrapping on hardware without full NPOT support), the texture is
 *  stretched up to power-of-two dimensions. Two stretch paths exist:
 *    1) GPU path: render the clamp-wrapped original into a POT render-target
 *       texture.
 *    2) CPU fallback: nearest-neighbor stretchImage() then a normal upload.
 */
GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
                                                const GrSamplerState& sampler,
                                                const GrGpu::TextureDesc& desc,
                                                void* srcData, size_t rowBytes) {
    GrAssert(key->width() == desc.fWidth);
    GrAssert(key->height() == desc.fHeight);

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    GrTextureEntry* entry = NULL;
    bool special = finalizeTextureKey(key, sampler);
    if (special) {
        // First get (or create) the unstretched, clamp-wrapped version; it is
        // the source for the stretch. clampKey differs from key only in the
        // sampler-dependent bits folded in by finalizeTextureKey.
        GrTextureEntry* clampEntry;
        GrTextureKey clampKey(*key);
        clampEntry = findAndLockTexture(&clampKey, GrSamplerState::ClampNoFilter());

        if (NULL == clampEntry) {
            clampEntry = createAndLockTexture(&clampKey,
                                              GrSamplerState::ClampNoFilter(),
                                              desc, srcData, rowBytes);
            GrAssert(NULL != clampEntry);
            if (NULL == clampEntry) {
                return NULL;
            }
        }
        GrTexture* clampTexture = clampEntry->texture();
        // Describe the POT destination; it must also be a render target so we
        // can draw the stretch on the GPU.
        GrGpu::TextureDesc rtDesc = desc;
        rtDesc.fFlags |= GrGpu::kRenderTarget_TextureFlag |
                         GrGpu::kNoPathRendering_TextureFlag;
        rtDesc.fWidth = GrNextPow2(GrMax<int>(desc.fWidth,
                                              fGpu->minRenderTargetWidth()));
        rtDesc.fHeight = GrNextPow2(GrMax<int>(desc.fHeight,
                                               fGpu->minRenderTargetHeight()));

        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

        if (NULL != texture) {
            // GPU path: draw clampTexture into the new POT render target with
            // identity matrices and src-over-nothing blending.
            GrGpu::AutoStateRestore asr(fGpu);
            fGpu->setRenderTarget(texture->asRenderTarget());
            fGpu->setTexture(clampEntry->texture());
            fGpu->setStencilPass(GrGpu::kNone_StencilPass);
            fGpu->setTextureMatrix(GrMatrix::I());
            fGpu->setViewMatrix(GrMatrix::I());
            fGpu->setAlpha(0xff);
            fGpu->setBlendFunc(GrGpu::kOne_BlendCoeff, GrGpu::kZero_BlendCoeff);
            fGpu->disableState(GrGpu::kDither_StateBit |
                               GrGpu::kClip_StateBit |
                               GrGpu::kAntialias_StateBit);
            GrSamplerState stretchSampler(GrSamplerState::kClamp_WrapMode,
                                          GrSamplerState::kClamp_WrapMode,
                                          sampler.isFilter());
            fGpu->setSamplerState(stretchSampler);

            static const GrVertexLayout layout =
                                GrDrawTarget::kSeparateTexCoord_VertexLayoutBit;
            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);

            if (arg.succeeded()) {
                // Interleaved layout: position fan in verts[0,2,4,6], texcoord
                // fan in verts[1,3,5,7] (hence the 2*sizeof(GrPoint) stride).
                GrPoint* verts = (GrPoint*) arg.vertices();
                verts[0].setIRectFan(0, 0,
                                     texture->contentWidth(),
                                     texture->contentHeight(),
                                     2*sizeof(GrPoint));
                // Texcoords cover only the content portion of the (possibly
                // padded) clamp texture.
                GrScalar tw = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentWidth() /
                                              clampTexture->allocWidth());
                GrScalar th = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentHeight() /
                                              clampTexture->allocHeight());
                verts[1].setRectFan(0, 0, tw, th, 2*sizeof(GrPoint));
                fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                     0, 4);
                entry = fTextureCache->createAndLock(*key, texture);
            }
            // The RT capability was only needed for the stretch draw.
            texture->removeRenderTarget();
        } else {
            // TODO: Our CPU stretch doesn't filter. But we create separate
            // stretched textures when the sampler state is either filtered or
            // not. Either implement filtered stretch blit on CPU or just create
            // one when FBO case fails.

            rtDesc.fFlags = 0;
            // no longer need to clamp at min RT size.
            rtDesc.fWidth  = GrNextPow2(desc.fWidth);
            rtDesc.fHeight = GrNextPow2(desc.fHeight);
            int bpp = GrTexture::BytesPerPixel(desc.fFormat);
            GrAutoSMalloc<128*128*4> stretchedPixels(bpp *
                                                     rtDesc.fWidth *
                                                     rtDesc.fHeight);
            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                         srcData, desc.fWidth, desc.fHeight, bpp);

            size_t stretchedRowBytes = rtDesc.fWidth * bpp;

            GrTexture* texture = fGpu->createTexture(rtDesc,
                                                     stretchedPixels.get(),
                                                     stretchedRowBytes);
            GrAssert(NULL != texture);
            entry = fTextureCache->createAndLock(*key, texture);
        }
        fTextureCache->unlock(clampEntry);

    } else {
        // Ordinary case: upload directly and cache.
        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
        if (NULL != texture) {
            entry = fTextureCache->createAndLock(*key, texture);
        } else {
            entry = NULL;
        }
    }
    return entry;
}
210
// Releases a lock taken by findAndLockTexture/createAndLockTexture.
void GrContext::unlockTexture(GrTextureEntry* entry) {
    fTextureCache->unlock(entry);
}
214
// Removes the entry from the cache's bookkeeping (caller keeps it alive
// until reattachAndUnlockCachedTexture).
void GrContext::detachCachedTexture(GrTextureEntry* entry) {
    fTextureCache->detach(entry);
}
218
// Returns a previously detached entry to the cache and drops the lock.
void GrContext::reattachAndUnlockCachedTexture(GrTextureEntry* entry) {
    fTextureCache->reattachAndUnlock(entry);
}
222
// Creates a texture that bypasses the cache entirely; ownership is the
// caller's (ref-counted GrTexture).
GrTexture* GrContext::createUncachedTexture(const GrGpu::TextureDesc& desc,
                                            void* srcData,
                                            size_t rowBytes) {
    return fGpu->createTexture(desc, srcData, rowBytes);
}
228
// Wraps a platform (e.g. FBO handle) render target in a GrRenderTarget.
GrRenderTarget* GrContext::createPlatformRenderTarget(intptr_t platformRenderTarget,
                                                      int width, int height) {
    return fGpu->createPlatformRenderTarget(platformRenderTarget,
                                            width, height);
}
234
235bool GrContext::supportsIndex8PixelConfig(const GrSamplerState& sampler,
236 int width, int height) {
237 if (!fGpu->supports8BitPalette()) {
238 return false;
239 }
240
241 bool needsRepeat = sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
242 sampler.getWrapY() != GrSamplerState::kClamp_WrapMode;
243 bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
244
245 switch (fGpu->npotTextureSupport()) {
246 case GrGpu::kNone_NPOTTextureType:
247 return isPow2;
248 case GrGpu::kNoRepeat_NPOTTextureType:
249 return isPow2 || !needsRepeat;
250 case GrGpu::kNonRendertarget_NPOTTextureType:
251 case GrGpu::kFull_NPOTTextureType:
252 return true;
253 }
254 // should never get here
255 GrAssert(!"Bad enum from fGpu->npotTextureSupport");
256 return false;
257}
258
259////////////////////////////////////////////////////////////////////////////////
260
// Clears the current render target to the given color.
void GrContext::eraseColor(GrColor color) {
    fGpu->eraseColor(color);
}
264
// Fills the entire clip with the current paint state by mapping the clip
// bounds back through the inverse view matrix and drawing that rect.
void GrContext::drawFull(bool useTexture) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r(fGpu->getClip().getBounds());
    GrMatrix inverse;
    if (fGpu->getViewInverse(&inverse)) {
        inverse.mapRect(&r);
    } else {
        // Non-invertible view matrix: fall back to the unmapped clip bounds.
        GrPrintf("---- fGpu->getViewInverse failed\n");
    }

    this->fillRect(r, useTexture);
}
278
/* create a triangle strip that strokes the specified triangle. There are 8
 unique vertices, but we repreat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], const GrRect& rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);

    // Even indices trace the inner rect (inset by rad), odd indices the
    // outer rect (outset by rad); together they form a closed strip that
    // covers the stroke band.
    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}
299
/**
 *  Draws a rectangle.
 *    width <  0 : filled (triangle fan)
 *    width == 0 : hairline (line strip)
 *    width >  0 : stroked with the given stroke width (triangle strip)
 */
void GrContext::drawRect(const GrRect& rect, bool useTexture, GrScalar width) {
    GrVertexLayout layout = useTexture ?
                            GrDrawTarget::kPositionAsTexCoord_VertexLayoutBit :
                            0;

    // Reserve enough for the stroked case even if fewer verts are used.
    static const int worstCaseVertCount = 10;
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, worstCaseVertCount, 0);
    if (!geo.succeeded()) {
        return;
    }

    this->flushText();

    int vertCount;
    GrGpu::PrimitiveType primType;
    GrPoint* vertex = geo.positions();

    if (width >= 0) {
        if (width > 0) {
            vertCount = 10;
            primType = GrGpu::kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = GrGpu::kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }
    } else {
        vertCount = 4;
        primType = GrGpu::kTriangleFan_PrimitiveType;
        vertex->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
    }

    fGpu->drawNonIndexed(primType, 0, vertCount);
}
340
341////////////////////////////////////////////////////////////////////////////////
342
343#define NEW_EVAL 1 // Use adaptive path tesselation
344#define STENCIL_OFF 0 // Always disable stencil (even when needed)
345#define CPU_TRANSFORM 0 // Transform path verts on CPU
346
347#if NEW_EVAL
348
349#define EVAL_TOL GR_Scalar1
350
351static uint32_t quadratic_point_count(const GrPoint points[], GrScalar tol) {
352 GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
353 // TODO: fixed points sqrt
354 if (d < tol) {
355 return 1;
356 } else {
357 // Each time we subdivide, d should be cut in 4. So we need to
358 // subdivide x = log4(d/tol) times. x subdivisions creates 2^(x)
359 // points.
360 // 2^(log4(x)) = sqrt(x);
361 d = ceilf(sqrtf(d/tol));
362 return GrNextPow2((uint32_t)d);
363 }
364}
365
// Recursively flattens a quadratic (p0,p1,p2) into line-segment endpoints
// written at *points (advanced as it writes). Subdivision stops when the
// control point is within sqrt(tolSqd) of the chord or the point budget
// runs out. Returns the number of points emitted.
static uint32_t generate_quadratic_points(const GrPoint& p0,
                                          const GrPoint& p1,
                                          const GrPoint& p2,
                                          GrScalar tolSqd,
                                          GrPoint** points,
                                          uint32_t pointsLeft) {
    if (pointsLeft < 2 ||
        (p1.distanceToLineSegmentBetweenSqd(p0, p2)) < tolSqd) {
        // Flat enough (or out of budget): emit just the endpoint.
        (*points)[0] = p2;
        *points += 1;
        return 1;
    }

    // de Casteljau split at t = 1/2: q are the edge midpoints, r the curve
    // point where the two halves meet.
    GrPoint q[] = {
        GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
        GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
    };
    GrPoint r(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY));

    pointsLeft >>= 1;
    uint32_t a = generate_quadratic_points(p0, q[0], r, tolSqd, points, pointsLeft);
    uint32_t b = generate_quadratic_points(r, q[1], p2, tolSqd, points, pointsLeft);
    return a + b;
}
390
391static uint32_t cubic_point_count(const GrPoint points[], GrScalar tol) {
392 GrScalar d = GrMax(points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
393 points[2].distanceToLineSegmentBetweenSqd(points[0], points[3]));
394 d = sqrtf(d);
395 if (d < tol) {
396 return 1;
397 } else {
398 d = ceilf(sqrtf(d/tol));
399 return GrNextPow2((uint32_t)d);
400 }
401}
402
// Cubic analogue of generate_quadratic_points: flattens (p0,p1,p2,p3) into
// segment endpoints at *points, splitting at t = 1/2 until both control
// points are within tolerance of the chord or the budget is exhausted.
static uint32_t generate_cubic_points(const GrPoint& p0,
                                      const GrPoint& p1,
                                      const GrPoint& p2,
                                      const GrPoint& p3,
                                      GrScalar tolSqd,
                                      GrPoint** points,
                                      uint32_t pointsLeft) {
    if (pointsLeft < 2 ||
        (p1.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd &&
         p2.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd)) {
        (*points)[0] = p3;
        *points += 1;
        return 1;
    }
    // de Casteljau midpoint subdivision: q, r, s are successive levels of
    // edge midpoints; s is the on-curve split point.
    GrPoint q[] = {
        GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
        GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
        GrPoint(GrScalarAve(p2.fX, p3.fX), GrScalarAve(p2.fY, p3.fY))
    };
    GrPoint r[] = {
        GrPoint(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY)),
        GrPoint(GrScalarAve(q[1].fX, q[2].fX), GrScalarAve(q[1].fY, q[2].fY))
    };
    GrPoint s(GrScalarAve(r[0].fX, r[1].fX), GrScalarAve(r[0].fY, r[1].fY));
    pointsLeft >>= 1;
    uint32_t a = generate_cubic_points(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
    uint32_t b = generate_cubic_points(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
    return a + b;
}
432
433#else // !NEW_EVAL
434
// (compiled only when NEW_EVAL is 0) Evaluates one coordinate of a
// quadratic Bezier at t. 'coord' strides by 2 (x or y of a GrPoint[3]).
static GrScalar gr_eval_quad(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[0] - 2 * coord[2] + coord[4];
    GrScalar B = 2 * (coord[2] - coord[0]);
    GrScalar C = coord[0];

    return GrMul(GrMul(A, t) + B, t) + C;
}
442
// (compiled only when NEW_EVAL is 0) Evaluates a quadratic Bezier point
// at t in [0,1], writing into *pt.
static void gr_eval_quad_at(const GrPoint src[3], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);
    pt->set(gr_eval_quad(&src[0].fX, t), gr_eval_quad(&src[0].fY, t));
}
449
// (compiled only when NEW_EVAL is 0) Evaluates one coordinate of a cubic
// Bezier at t. 'coord' strides by 2 (x or y of a GrPoint[4]).
static GrScalar gr_eval_cubic(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[6] - coord[0] + 3 * (coord[2] - coord[4]);
    GrScalar B = 3 * (coord[0] - 2 * coord[2] + coord[4]);
    GrScalar C = 3 * (coord[2] - coord[0]);
    GrScalar D = coord[0];

    return GrMul(GrMul(GrMul(A, t) + B, t) + C, t) + D;
}
458
// (compiled only when NEW_EVAL is 0) Evaluates a cubic Bezier point at
// t in [0,1], writing into *pt.
static void gr_eval_cubic_at(const GrPoint src[4], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);

    pt->set(gr_eval_cubic(&src[0].fX, t), gr_eval_cubic(&src[0].fY, t));
}
466
467#endif // !NEW_EVAL
468
// Walks the path once to compute (a) an upper bound on the number of
// tessellated points drawPath will emit and (b) the number of subpaths
// (each kMove after the first starts a new one). Consumes the iterator;
// the caller must rewind() before re-walking. With NEW_EVAL the curve
// counts are tolerance-based; otherwise fixed counts (9/17) are used.
static int worst_case_point_count(GrPathIter* path,
                                  int* subpaths,
                                  const GrMatrix& matrix,
                                  GrScalar tol) {
    int pointCount = 0;
    *subpaths = 1;

    bool first = true;

    GrPathIter::Command cmd;

    GrPoint pts[4];
    while ((cmd = path->next(pts)) != GrPathIter::kEnd_Command) {

        switch (cmd) {
            case GrPathIter::kLine_Command:
                pointCount += 1;
                break;
            case GrPathIter::kQuadratic_Command:
#if NEW_EVAL
                // Map to the space the tolerance is defined in before
                // measuring flatness.
                matrix.mapPoints(pts, pts, 3);
                pointCount += quadratic_point_count(pts, tol);
#else
                pointCount += 9;
#endif
                break;
            case GrPathIter::kCubic_Command:
#if NEW_EVAL
                matrix.mapPoints(pts, pts, 4);
                pointCount += cubic_point_count(pts, tol);
#else
                pointCount += 17;
#endif
                break;
            case GrPathIter::kMove_Command:
                pointCount += 1;
                if (!first) {
                    ++(*subpaths);
                }
                break;
            default:
                break;
        }
        first = false;
    }
    return pointCount;
}
516
// Decides whether the path can be filled without stencil passes, based on
// the iterator's convexity hint. Winding fill can additionally accept
// same-winding convex pieces when overdraw is harmless (no blending, no
// dither). With STENCIL_OFF everything is (incorrectly) single-pass.
static inline bool single_pass_path(const GrPathIter& path,
                                    GrContext::PathFills fill,
                                    bool useTex,
                                    const GrGpu& gpu) {
#if STENCIL_OFF
    return true;
#else
    if (GrContext::kEvenOdd_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint;
    } else if (GrContext::kWinding_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint ||
               (hint == GrPathIter::kSameWindingConvexPieces_ConvexHint &&
                gpu.canDisableBlend() && !gpu.isDitherState());

    }
    // Hairline and inverse fills always need their multi-pass setup.
    return false;
#endif
}
539
/**
 *  Tessellates and draws a path.
 *
 *  Pipeline: (1) size the vertex reservation via worst_case_point_count,
 *  (2) pick stencil passes from the fill rule and convexity hints,
 *  (3) flatten the path into per-subpath fans/strips, (4) optionally
 *  append a screen- or path-bounds rect used by the color passes, and
 *  (5) issue one draw per subpath per pass.
 *
 *  'translate', when non-NULL, is a post-tessellation offset applied to
 *  every generated vertex.
 */
void GrContext::drawPath(GrPathIter* path, PathFills fill,
                         bool useTexture, const GrPoint* translate) {

    flushText();

    GrGpu::AutoStateRestore asr(fGpu);

#if NEW_EVAL
    GrMatrix viewM;
    fGpu->getViewMatrix(&viewM);
    // In order to tesselate the path we get a bound on how much the matrix can
    // stretch when mapping to screen coordinates.
    GrScalar stretch = viewM.getMaxStretch();
    bool useStretch = stretch > 0;
    GrScalar tol = EVAL_TOL;
    if (!useStretch) {
        // TODO: deal with perspective in some better way.
        tol /= 10;
    } else {
        // TODO: fixed point divide
        GrScalar sinv = 1 / stretch;
        tol = GrMul(tol, sinv);
        viewM = GrMatrix::I();
    }
    GrScalar tolSqd = GrMul(tol, tol);
#else
    // pass to worst_case... but won't be used.
    static const GrScalar tol = -1;
#endif

    int subpathCnt;
    int maxPts = worst_case_point_count(path,
                                        &subpathCnt,
#if CPU_TRANSFORM
                                        cpuMatrix,
#else
                                        GrMatrix::I(),
#endif
                                        tol);
    GrVertexLayout layout = 0;
    if (useTexture) {
        layout = GrDrawTarget::kPositionAsTexCoord_VertexLayoutBit;
    }
    // add 4 to hold the bounding rect
    GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, maxPts + 4, 0);

    GrPoint* base = (GrPoint*) arg.vertices();
    GrPoint* vert = base;
    GrPoint* subpathBase = base;

    GrAutoSTMalloc<8, uint16_t> subpathVertCount(subpathCnt);

    // worst_case_point_count consumed the iterator; start over for the
    // tessellation walk.
    path->rewind();

    // TODO: use primitve restart if available rather than multiple draws
    GrGpu::PrimitiveType type;
    int passCount = 0;
    GrGpu::StencilPass passes[3];
    bool reverse = false;

    if (kHairLine_PathFill == fill) {
        type = GrGpu::kLineStrip_PrimitiveType;
        passCount = 1;
        passes[0] = GrGpu::kNone_StencilPass;
    } else {
        type = GrGpu::kTriangleFan_PrimitiveType;
        if (single_pass_path(*path, fill, useTexture, *fGpu)) {
            passCount = 1;
            passes[0] = GrGpu::kNone_StencilPass;
        } else {
            // Inverse fills use the same stencil passes as their
            // non-inverse counterparts, plus reversed-fill coloring.
            switch (fill) {
                case kInverseEvenOdd_PathFill:
                    reverse = true;
                    // fallthrough
                case kEvenOdd_PathFill:
                    passCount = 2;
                    passes[0] = GrGpu::kEvenOddStencil_StencilPass;
                    passes[1] = GrGpu::kEvenOddColor_StencilPass;
                    break;

                case kInverseWinding_PathFill:
                    reverse = true;
                    // fallthrough
                case kWinding_PathFill:
                    passes[0] = GrGpu::kWindingStencil1_StencilPass;
                    if (fGpu->supportsSingleStencilPassWinding()) {
                        passes[1] = GrGpu::kWindingColor_StencilPass;
                        passCount = 2;
                    } else {
                        passes[1] = GrGpu::kWindingStencil2_StencilPass;
                        passes[2] = GrGpu::kWindingColor_StencilPass;
                        passCount = 3;
                    }
                    break;
                default:
                    GrAssert(!"Unknown path fill!");
                    return;
            }
        }
    }
    fGpu->setReverseFill(reverse);
#if CPU_TRANSFORM
    GrMatrix cpuMatrix;
    fGpu->getViewMatrix(&cpuMatrix);
    fGpu->setViewMatrix(GrMatrix::I());
#endif

    GrPoint pts[4];

    bool first = true;
    int subpath = 0;

    // Tessellation walk: fill the reserved vertex space, recording the
    // vertex count of each subpath as we go.
    for (;;) {
        GrPathIter::Command cmd = path->next(pts);
#if CPU_TRANSFORM
        int numPts = GrPathIter::NumCommandPoints(cmd);
        cpuMatrix.mapPoints(pts, pts, numPts);
#endif
        switch (cmd) {
            case GrPathIter::kMove_Command:
                if (!first) {
                    // close out the previous subpath
                    subpathVertCount[subpath] = vert-subpathBase;
                    subpathBase = vert;
                    ++subpath;
                }
                *vert = pts[0];
                vert++;
                break;
            case GrPathIter::kLine_Command:
                *vert = pts[1];
                vert++;
                break;
            case GrPathIter::kQuadratic_Command: {
#if NEW_EVAL

                generate_quadratic_points(pts[0], pts[1], pts[2],
                                          tolSqd, &vert,
                                          quadratic_point_count(pts, tol));
#else
                const int n = 8;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_quad_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[2].fX, pts[2].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kCubic_Command: {
#if NEW_EVAL
                generate_cubic_points(pts[0], pts[1], pts[2], pts[3],
                                      tolSqd, &vert,
                                      cubic_point_count(pts, tol));
#else
                const int n = 16;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_cubic_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[3].fX, pts[3].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kClose_Command:
                break;
            case GrPathIter::kEnd_Command:
                subpathVertCount[subpath] = vert-subpathBase;
                ++subpath; // this could be only in debug
                goto FINISHED;
        }
        first = false;
    }
FINISHED:
    GrAssert(subpath == subpathCnt);
    GrAssert((vert - base) <= maxPts);

    if (translate) {
        int count = vert - base;
        for (int i = 0; i < count; i++) {
            base[i].offset(translate->fX, translate->fY);
        }
    }

    // arbitrary path complexity cutoff
    bool useBounds = fill != kHairLine_PathFill &&
                     (reverse || (vert - base) > 8);
    // The extra 4 vertices reserved above live at base + maxPts.
    GrPoint* boundsVerts = base + maxPts;
    if (useBounds) {
        GrRect bounds;
        if (reverse) {
            GrAssert(NULL != fGpu->currentRenderTarget());
            // draw over the whole world.
            bounds.setLTRB(0, 0,
                           GrIntToScalar(fGpu->currentRenderTarget()->width()),
                           GrIntToScalar(fGpu->currentRenderTarget()->height()));
        } else {
            bounds.setBounds((GrPoint*)base, vert - base);
        }
        boundsVerts[0].setRectFan(bounds.fLeft, bounds.fTop, bounds.fRight,
                                  bounds.fBottom);
    }

    for (int p = 0; p < passCount; ++p) {
        fGpu->setStencilPass(passes[p]);
        if (useBounds && (GrGpu::kEvenOddColor_StencilPass == passes[p] ||
                          GrGpu::kWindingColor_StencilPass == passes[p])) {
            // Color pass: one quad covering the stenciled region.
            fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                 maxPts, 4);
        } else {
            // Stencil (or single) pass: one draw per subpath.
            int baseVertex = 0;
            for (int sp = 0; sp < subpathCnt; ++sp) {
                fGpu->drawNonIndexed(type,
                                     baseVertex,
                                     subpathVertCount[sp]);
                baseVertex += subpathVertCount[sp];
            }
        }
    }
}
767
// Plays back any deferred text and, optionally, forces the GPU to flush
// its render target.
void GrContext::flush(bool flushRenderTarget) {
    flushText();
    if (flushRenderTarget) {
        fGpu->forceRenderTargetFlush();
    }
}
774
// Replays the deferred text draw-buffer onto the GPU and clears it.
void GrContext::flushText() {
    fTextDrawBuffer.playback(fGpu);
    fTextDrawBuffer.reset();
}
779
// Reads back pixels from the current render target. Flushes (including
// the render target) first so pending draws are visible in the readback.
bool GrContext::readPixels(int left, int top, int width, int height,
                           GrTexture::PixelConfig config, void* buffer) {
    this->flush(true);
    return fGpu->readPixels(left, top, width, height, config, buffer);
}
785
// Writes pixels into the current render target by uploading them to a
// temporary texture and drawing it 1:1 at (left, top) with src-copy
// blending, identity view aside from the translate, and clipping off.
// Silently does nothing if the temp texture can't be created.
void GrContext::writePixels(int left, int top, int width, int height,
                            GrTexture::PixelConfig config, const void* buffer,
                            size_t stride) {
    const GrGpu::TextureDesc desc = {
        0, GrGpu::kNone_AALevel, width, height, config
    };
    GrTexture* texture = fGpu->createTexture(desc, buffer, stride);
    if (NULL == texture) {
        return;
    }

    this->flush(true);

    GrAutoUnref aur(texture);
    GrDrawTarget::AutoStateRestore asr(fGpu);

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    fGpu->setViewMatrix(matrix);
    // Texture matrix maps the dst rect onto the content portion of the
    // (possibly padded) allocation.
    matrix.setScale(GR_Scalar1 / texture->allocWidth(),
                    GR_Scalar1 / texture->allocHeight());
    fGpu->setTextureMatrix(matrix);

    fGpu->disableState(GrDrawTarget::kClip_StateBit);
    fGpu->setAlpha(0xFF);
    fGpu->setBlendFunc(GrDrawTarget::kOne_BlendCoeff,
                       GrDrawTarget::kZero_BlendCoeff);
    fGpu->setTexture(texture);
    fGpu->setSamplerState(GrSamplerState::ClampNoFilter());

    this->fillRect(GrRect(0, 0, GrIntToScalar(width), GrIntToScalar(height)),
                   true);
}
819
820////////////////////////////////////////////////////////////////////////////////
821
822
823/* -------------------------------------------------------
824 * Mimicking the GrGpu interface for now
825 * TODO: define appropriate higher-level API for context
826 */
827
// Pass-through to GrGpu (see the temporary GrGpu-mimicking API note above).
void GrContext::resetContext() {
    fGpu->resetContext();
}
831
// Pass-through to GrGpu: allocates a vertex buffer of 'size' bytes.
GrVertexBuffer* GrContext::createVertexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createVertexBuffer(size, dynamic);
}
835
// Pass-through to GrGpu: allocates an index buffer of 'size' bytes.
GrIndexBuffer* GrContext::createIndexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createIndexBuffer(size, dynamic);
}
839
// Pass-through to GrGpu: sets the active texture.
void GrContext::setTexture(GrTexture* texture) {
    fGpu->setTexture(texture);
}
843
// Switches render targets. Deferred text must be played back first so it
// lands on the target it was recorded against.
void GrContext::setRenderTarget(GrRenderTarget* target) {
    flushText();
    fGpu->setRenderTarget(target);
}
848
// Pass-through to GrGpu: the render target draws currently go to.
GrRenderTarget* GrContext::currentRenderTarget() const {
    return fGpu->currentRenderTarget();
}
852
// Pass-through to GrGpu: records the dimensions of the default (window)
// render target.
void GrContext::setDefaultRenderTargetSize(uint32_t width, uint32_t height) {
    fGpu->setDefaultRenderTargetSize(width, height);
}
856
// Pass-through to GrGpu: sets the active sampler state.
void GrContext::setSamplerState(const GrSamplerState& samplerState) {
    fGpu->setSamplerState(samplerState);
}
860
// Pass-through to GrGpu: sets the texture-coordinate matrix.
void GrContext::setTextureMatrix(const GrMatrix& m) {
    fGpu->setTextureMatrix(m);
}
864
// Pass-through to GrGpu: copies the current view matrix into *m.
void GrContext::getViewMatrix(GrMatrix* m) const {
    fGpu->getViewMatrix(m);
}
868
// Pass-through to GrGpu: sets the view matrix.
void GrContext::setViewMatrix(const GrMatrix& m) {
    fGpu->setViewMatrix(m);
}
872
// Pass-through to GrGpu: reserves locked vertex/index storage. On success
// *vertices/*indices point at writable memory; pair with
// releaseReservedGeometry().
bool GrContext::reserveAndLockGeometry(GrVertexLayout vertexLayout,
                                       uint32_t vertexCount,
                                       uint32_t indexCount,
                                       void** vertices,
                                       void** indices) {
    return fGpu->reserveAndLockGeometry(vertexLayout,
                                        vertexCount,
                                        indexCount,
                                        vertices,
                                        indices);
}
884
// Issues an indexed draw; deferred text is flushed first to preserve
// draw ordering.
void GrContext::drawIndexed(GrGpu::PrimitiveType type,
                            uint32_t startVertex,
                            uint32_t startIndex,
                            uint32_t vertexCount,
                            uint32_t indexCount) {
    flushText();
    fGpu->drawIndexed(type,
                      startVertex,
                      startIndex,
                      vertexCount,
                      indexCount);
}
897
// Issues a non-indexed draw; deferred text is flushed first to preserve
// draw ordering.
void GrContext::drawNonIndexed(GrGpu::PrimitiveType type,
                               uint32_t startVertex,
                               uint32_t vertexCount) {
    flushText();
    fGpu->drawNonIndexed(type,
                         startVertex,
                         vertexCount);
}
906
// Pass-through to GrGpu: draws will source vertices from a client array.
void GrContext::setVertexSourceToArray(const void* array,
                                       GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToArray(array, vertexLayout);
}
911
// Pass-through to GrGpu: draws will source indices from a client array.
void GrContext::setIndexSourceToArray(const void* array) {
    fGpu->setIndexSourceToArray(array);
}
915
// Pass-through to GrGpu: draws will source vertices from a GrVertexBuffer.
void GrContext::setVertexSourceToBuffer(GrVertexBuffer* buffer,
                                        GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToBuffer(buffer, vertexLayout);
}
920
// Pass-through to GrGpu: draws will source indices from a GrIndexBuffer.
void GrContext::setIndexSourceToBuffer(GrIndexBuffer* buffer) {
    fGpu->setIndexSourceToBuffer(buffer);
}
924
// Releases storage obtained via reserveAndLockGeometry().
void GrContext::releaseReservedGeometry() {
    fGpu->releaseReservedGeometry();
}
928
// Installs a clip and enables clipping in the same call (the GPU's clip
// state bit is off until someone sets a clip).
void GrContext::setClip(const GrClip& clip) {
    fGpu->setClip(clip);
    fGpu->enableState(GrDrawTarget::kClip_StateBit);
}
933
// Pass-through to GrGpu: sets the paint alpha.
void GrContext::setAlpha(uint8_t alpha) {
    fGpu->setAlpha(alpha);
}
937
// Pass-through to GrGpu: sets the paint color.
void GrContext::setColor(GrColor color) {
    fGpu->setColor(color);
}
941
/**
 *  Returns 'bits' with bit 'shift' set when 'pred' is non-zero, cleared
 *  otherwise. (Currently unused in this file — kept for callers elsewhere /
 *  future use.)
 *
 *  Fix: the mask was built as '1 << shift' — an int shift — which is UB
 *  for shift >= 31 and silently truncates on 64-bit intptr_t. Widen the
 *  shifted operand to intptr_t first.
 */
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = static_cast<intptr_t>(1) << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}
951
// Toggles the GPU's antialias state bit.
void GrContext::setAntiAlias(bool aa) {
    if (aa) {
        fGpu->enableState(GrGpu::kAntialias_StateBit);
    } else {
        fGpu->disableState(GrGpu::kAntialias_StateBit);
    }
}
959
// Toggles the GPU's dither state bit. NOTE: currently force-disabled —
// the requested value is ignored (see the iPad comment below).
void GrContext::setDither(bool dither) {
    // hack for now, since iPad dither is hella-slow
    dither = false;

    if (dither) {
        fGpu->enableState(GrGpu::kDither_StateBit);
    } else {
        fGpu->disableState(GrGpu::kDither_StateBit);
    }
}
970
// Pass-through to GrGpu: size used for point primitives.
void GrContext::setPointSize(float size) {
    fGpu->setPointSize(size);
}
974
// Pass-through to GrGpu: sets source/destination blend coefficients.
void GrContext::setBlendFunc(GrGpu::BlendCoeff srcCoef,
                             GrGpu::BlendCoeff dstCoef) {
    fGpu->setBlendFunc(srcCoef, dstCoef);
}
979
// Pass-through to GrGpu: zeroes the draw statistics counters.
void GrContext::resetStats() {
    fGpu->resetStats();
}
983
// Pass-through to GrGpu: read-only access to the draw statistics.
const GrGpu::Stats& GrContext::getStats() const {
    return fGpu->getStats();
}
987
// Pass-through to GrGpu: dumps the draw statistics.
void GrContext::printStats() const {
    fGpu->printStats();
}
991
// Takes a ref on 'gpu' (released in the destructor) and builds the texture
// and font caches. The text VB pool and draw buffer are only given real
// storage when the GPU supports buffer locking; otherwise they are
// zero-sized / unpooled.
GrContext::GrContext(GrGpu* gpu) :
    fVBAllocPool(gpu,
                 gpu->supportsBufferLocking() ? POOL_VB_SIZE : 0,
                 gpu->supportsBufferLocking() ? NUM_POOL_VBS : 0),
    fTextDrawBuffer(gpu->supportsBufferLocking() ? &fVBAllocPool : NULL) {
    fGpu = gpu;
    fGpu->ref();
    fTextureCache = new GrTextureCache(MAX_TEXTURE_CACHE_COUNT,
                                       MAX_TEXTURE_CACHE_BYTES);
    fFontCache = new GrFontCache(fGpu);
}
1003
1004bool GrContext::finalizeTextureKey(GrTextureKey* key,
1005 const GrSamplerState& sampler) const {
1006 uint32_t bits = 0;
1007 uint16_t width = key->width();
1008 uint16_t height = key->height();
1009 if (fGpu->npotTextureSupport() < GrGpu::kNonRendertarget_NPOTTextureType) {
1010 if ((sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
1011 sampler.getWrapY() != GrSamplerState::kClamp_WrapMode) &&
1012 (!GrIsPow2(width) || !GrIsPow2(height))) {
1013 bits |= 1;
1014 bits |= sampler.isFilter() ? 2 : 0;
1015 }
1016 }
1017 key->finalize(bits);
1018 return 0 != bits;
1019}
1020
// Target that text rendering should draw into: the deferral buffer (with
// current state/clip captured) when DEFER_TEXT_RENDERING, else the GPU.
GrDrawTarget* GrContext::getTextTarget() {
#if DEFER_TEXT_RENDERING
    fTextDrawBuffer.initializeDrawStateAndClip(*fGpu);
    return &fTextDrawBuffer;
#else
    return fGpu;
#endif
}
1029
// Pass-through to GrGpu: shared index buffer for drawing batched quads.
const GrIndexBuffer* GrContext::quadIndexBuffer() const {
    return fGpu->quadIndexBuffer();
}
1033
// Pass-through to GrGpu: capacity (in quads) of quadIndexBuffer().
int GrContext::maxQuadsInIndexBuffer() const {
    return fGpu->maxQuadsInIndexBuffer();
}
1037
1038
1039
1040