/*
    Copyright 2010 Google Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
 */

#include "GrContext.h"
#include "GrTextureCache.h"
#include "GrTextStrike.h"
#include "GrMemory.h"
#include "GrPathIter.h"
#include "GrClipIterator.h"
#include "GrIndexBuffer.h"

#define DEFER_TEXT_RENDERING 1

static const size_t MAX_TEXTURE_CACHE_COUNT = 128;
static const size_t MAX_TEXTURE_CACHE_BYTES = 8 * 1024 * 1024;

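// When DEFER_TEXT_RENDERING is on, text draws are recorded into
// fTextDrawBuffer and played back in flushText(). The pool below backs that
// buffer: each vertex buffer has room for 2048 vertices in the text format
// (position plus stage-0 texture coordinate).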
#if DEFER_TEXT_RENDERING
    static const uint32_t POOL_VB_SIZE = 2048 *
                        GrDrawTarget::VertexSize(
                            GrDrawTarget::kTextFormat_VertexLayoutBit |
                            GrDrawTarget::StageTexCoordVertexLayoutBit(0,0));
    static const uint32_t NUM_POOL_VBS = 8;
#else
    static const uint32_t POOL_VB_SIZE = 0;
    static const uint32_t NUM_POOL_VBS = 0;

#endif

GrContext* GrContext::Create(GrGpu::Engine engine,
                             GrGpu::Platform3DContext context3D) {
    GrContext* ctx = NULL;
    GrGpu* fGpu = GrGpu::Create(engine, context3D);
    if (NULL != fGpu) {
        ctx = new GrContext(fGpu);
        fGpu->unref();
    }
    return ctx;
}

GrContext* GrContext::CreateGLShaderContext() {
    return GrContext::Create(GrGpu::kOpenGL_Shaders_Engine, NULL);
}

GrContext::~GrContext() {
    fGpu->unref();
    delete fTextureCache;
    delete fFontCache;
}

void GrContext::abandonAllTextures() {
    fTextureCache->deleteAll(GrTextureCache::kAbandonTexture_DeleteMode);
    fFontCache->abandonAll();
}

GrTextureEntry* GrContext::findAndLockTexture(GrTextureKey* key,
                                              const GrSamplerState& sampler) {
    finalizeTextureKey(key, sampler);
    return fTextureCache->findAndLock(*key);
}

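// Point-sampled (unfiltered) stretch of src (srcW x srcH) into dst
// (dstW x dstH), bpp bytes per pixel. Source coordinates are stepped in
// 16.16 fixed point, starting at half a step so we sample texel centers.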
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    GrFixed y = dy >> 1;

    int dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

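// Creates a texture for key/desc and locks it in the cache. If
// finalizeTextureKey() marks the key "special" (the sampler needs a
// non-clamp wrap mode but the texture is non-power-of-two and the GPU lacks
// sufficient NPOT support), a power-of-two stretched copy is built instead:
// preferably by drawing the clamp-only texture into a POT render target,
// otherwise by a CPU stretch of the source pixels.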
GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
                                                const GrSamplerState& sampler,
                                                const GrGpu::TextureDesc& desc,
                                                void* srcData, size_t rowBytes) {
    GrAssert(key->width() == desc.fWidth);
    GrAssert(key->height() == desc.fHeight);

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    GrTextureEntry* entry = NULL;
    bool special = finalizeTextureKey(key, sampler);
    if (special) {
        GrTextureEntry* clampEntry;
        GrTextureKey clampKey(*key);
        clampEntry = findAndLockTexture(&clampKey, GrSamplerState::ClampNoFilter());

        if (NULL == clampEntry) {
            clampEntry = createAndLockTexture(&clampKey,
                                              GrSamplerState::ClampNoFilter(),
                                              desc, srcData, rowBytes);
            GrAssert(NULL != clampEntry);
            if (NULL == clampEntry) {
                return NULL;
            }
        }
        GrTexture* clampTexture = clampEntry->texture();
        GrGpu::TextureDesc rtDesc = desc;
        rtDesc.fFlags |= GrGpu::kRenderTarget_TextureFlag |
                         GrGpu::kNoPathRendering_TextureFlag;
        rtDesc.fWidth  = GrNextPow2(GrMax<int>(desc.fWidth,
                                               fGpu->minRenderTargetWidth()));
        rtDesc.fHeight = GrNextPow2(GrMax<int>(desc.fHeight,
                                               fGpu->minRenderTargetHeight()));

        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

        if (NULL != texture) {
            GrGpu::AutoStateRestore asr(fGpu);
            fGpu->setRenderTarget(texture->asRenderTarget());
            fGpu->setTexture(0, clampEntry->texture());
            fGpu->setStencilPass(GrGpu::kNone_StencilPass);
            fGpu->setTextureMatrix(0, GrMatrix::I());
            fGpu->setViewMatrix(GrMatrix::I());
            fGpu->setAlpha(0xff);
            fGpu->setBlendFunc(GrGpu::kOne_BlendCoeff, GrGpu::kZero_BlendCoeff);
            fGpu->disableState(GrGpu::kDither_StateBit |
                               GrGpu::kClip_StateBit |
                               GrGpu::kAntialias_StateBit);
            GrSamplerState stretchSampler(GrSamplerState::kClamp_WrapMode,
                                          GrSamplerState::kClamp_WrapMode,
                                          sampler.isFilter());
            fGpu->setSamplerState(0, stretchSampler);

            static const GrVertexLayout layout =
                            GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);

            if (arg.succeeded()) {
                GrPoint* verts = (GrPoint*) arg.vertices();
                verts[0].setIRectFan(0, 0,
                                     texture->contentWidth(),
                                     texture->contentHeight(),
                                     2*sizeof(GrPoint));
                GrScalar tw = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentWidth() /
                                              clampTexture->allocWidth());
                GrScalar th = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentHeight() /
                                              clampTexture->allocHeight());
                verts[1].setRectFan(0, 0, tw, th, 2*sizeof(GrPoint));
                fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                     0, 4);
                entry = fTextureCache->createAndLock(*key, texture);
            }
            texture->removeRenderTarget();
        } else {
            // TODO: Our CPU stretch doesn't filter. But we create separate
            // stretched textures when the sampler state is either filtered or
            // not. Either implement filtered stretch blit on CPU or just create
            // one when FBO case fails.

            rtDesc.fFlags = 0;
            // no longer need to clamp at min RT size.
            rtDesc.fWidth  = GrNextPow2(desc.fWidth);
            rtDesc.fHeight = GrNextPow2(desc.fHeight);
            int bpp = GrTexture::BytesPerPixel(desc.fFormat);
            GrAutoSMalloc<128*128*4> stretchedPixels(bpp *
                                                     rtDesc.fWidth *
                                                     rtDesc.fHeight);
            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                         srcData, desc.fWidth, desc.fHeight, bpp);

            size_t stretchedRowBytes = rtDesc.fWidth * bpp;

            GrTexture* texture = fGpu->createTexture(rtDesc,
                                                     stretchedPixels.get(),
                                                     stretchedRowBytes);
            GrAssert(NULL != texture);
            entry = fTextureCache->createAndLock(*key, texture);
        }
        fTextureCache->unlock(clampEntry);

    } else {
        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
        if (NULL != texture) {
            entry = fTextureCache->createAndLock(*key, texture);
        } else {
            entry = NULL;
        }
    }
    return entry;
}

void GrContext::unlockTexture(GrTextureEntry* entry) {
    fTextureCache->unlock(entry);
}

void GrContext::detachCachedTexture(GrTextureEntry* entry) {
    fTextureCache->detach(entry);
}

void GrContext::reattachAndUnlockCachedTexture(GrTextureEntry* entry) {
    fTextureCache->reattachAndUnlock(entry);
}

GrTexture* GrContext::createUncachedTexture(const GrGpu::TextureDesc& desc,
                                            void* srcData,
                                            size_t rowBytes) {
    return fGpu->createTexture(desc, srcData, rowBytes);
}

void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

///////////////////////////////////////////////////////////////////////////////

GrRenderTarget* GrContext::createPlatformRenderTarget(intptr_t platformRenderTarget,
                                                      int width, int height) {
    return fGpu->createPlatformRenderTarget(platformRenderTarget,
                                            width, height);
}

bool GrContext::supportsIndex8PixelConfig(const GrSamplerState& sampler,
                                          int width, int height) {
    if (!fGpu->supports8BitPalette()) {
        return false;
    }

    bool needsRepeat = sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
                       sampler.getWrapY() != GrSamplerState::kClamp_WrapMode;
    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    switch (fGpu->npotTextureSupport()) {
        case GrGpu::kNone_NPOTTextureType:
            return isPow2;
        case GrGpu::kNoRepeat_NPOTTextureType:
            return isPow2 || !needsRepeat;
        case GrGpu::kNonRendertarget_NPOTTextureType:
        case GrGpu::kFull_NPOTTextureType:
            return true;
    }
    // should never get here
    GrAssert(!"Bad enum from fGpu->npotTextureSupport");
    return false;
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::eraseColor(GrColor color) {
    fGpu->eraseColor(color);
}

void GrContext::drawFull(bool useTexture) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r(fGpu->getClip().getBounds());
    GrMatrix inverse;
    if (fGpu->getViewInverse(&inverse)) {
        inverse.mapRect(&r);
    } else {
        GrPrintf("---- fGpu->getViewInverse failed\n");
    }

    this->fillRect(r, useTexture);
}

/* Create a triangle strip that strokes the specified rect. There are 8
   unique vertices, but we repeat the last 2 to close up. Alternatively we
   could use an index array and send only 8 verts, but it is not clear that
   would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], const GrRect& rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);

    verts[0].set(rect.fLeft + rad,  rect.fTop + rad);
    verts[1].set(rect.fLeft - rad,  rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad,  rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad,  rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

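// Draws a rect in the current view space. width < 0 fills the rect,
// width == 0 draws a hairline outline, and width > 0 strokes the outline
// with the given stroke width (see setStrokeRectStrip above).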
void GrContext::drawRect(const GrRect& rect, bool useTexture, GrScalar width) {
    GrVertexLayout layout = useTexture ?
                            GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0) :
                            0;

    static const int worstCaseVertCount = 10;
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, worstCaseVertCount, 0);
    if (!geo.succeeded()) {
        return;
    }

    this->flushText();

    int vertCount;
    GrGpu::PrimitiveType primType;
    GrPoint* vertex = geo.positions();

    if (width >= 0) {
        if (width > 0) {
            vertCount = 10;
            primType = GrGpu::kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = GrGpu::kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }
    } else {
        vertCount = 4;
        primType = GrGpu::kTriangleFan_PrimitiveType;
        vertex->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
    }

    fGpu->drawNonIndexed(primType, 0, vertCount);
}

////////////////////////////////////////////////////////////////////////////////

#define NEW_EVAL        1   // Use adaptive path tessellation
#define STENCIL_OFF     0   // Always disable stencil (even when needed)
#define CPU_TRANSFORM   0   // Transform path verts on CPU

#if NEW_EVAL

#define EVAL_TOL GR_Scalar1

static uint32_t quadratic_point_count(const GrPoint points[], GrScalar tol) {
    GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
    // TODO: fixed-point sqrt
    if (d < tol) {
        return 1;
    } else {
        // Each time we subdivide, d should be cut in 4. So we need to
        // subdivide x = log4(d/tol) times. x subdivisions creates 2^(x)
        // points.
        // 2^(log4(x)) = sqrt(x);
        d = ceilf(sqrtf(d/tol));
        return GrNextPow2((uint32_t)d);
    }
}

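// Recursively subdivides the quadratic at its midpoint (q[0] and q[1] are the
// edge midpoints, r is the on-curve midpoint), appending points to *points
// until the off-curve control point is within tolerance of the chord or the
// point budget from quadratic_point_count() is exhausted. Returns the number
// of points emitted.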
static uint32_t generate_quadratic_points(const GrPoint& p0,
                                          const GrPoint& p1,
                                          const GrPoint& p2,
                                          GrScalar tolSqd,
                                          GrPoint** points,
                                          uint32_t pointsLeft) {
    if (pointsLeft < 2 ||
        (p1.distanceToLineSegmentBetweenSqd(p0, p2)) < tolSqd) {
        (*points)[0] = p2;
        *points += 1;
        return 1;
    }

    GrPoint q[] = {
        GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
        GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
    };
    GrPoint r(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY));

    pointsLeft >>= 1;
    uint32_t a = generate_quadratic_points(p0, q[0], r, tolSqd, points, pointsLeft);
    uint32_t b = generate_quadratic_points(r, q[1], p2, tolSqd, points, pointsLeft);
    return a + b;
}

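// Same point-count estimate as quadratic_point_count(), but for cubics: the
// flatness measure is the larger of the two control points' distances from
// the p0-p3 chord.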
static uint32_t cubic_point_count(const GrPoint points[], GrScalar tol) {
    GrScalar d = GrMax(points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
                       points[2].distanceToLineSegmentBetweenSqd(points[0], points[3]));
    d = sqrtf(d);
    if (d < tol) {
        return 1;
    } else {
        d = ceilf(sqrtf(d/tol));
        return GrNextPow2((uint32_t)d);
    }
}

static uint32_t generate_cubic_points(const GrPoint& p0,
                                      const GrPoint& p1,
                                      const GrPoint& p2,
                                      const GrPoint& p3,
                                      GrScalar tolSqd,
                                      GrPoint** points,
                                      uint32_t pointsLeft) {
    if (pointsLeft < 2 ||
        (p1.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd &&
         p2.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd)) {
        (*points)[0] = p3;
        *points += 1;
        return 1;
    }
    GrPoint q[] = {
        GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
        GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
        GrPoint(GrScalarAve(p2.fX, p3.fX), GrScalarAve(p2.fY, p3.fY))
    };
    GrPoint r[] = {
        GrPoint(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY)),
        GrPoint(GrScalarAve(q[1].fX, q[2].fX), GrScalarAve(q[1].fY, q[2].fY))
    };
    GrPoint s(GrScalarAve(r[0].fX, r[1].fX), GrScalarAve(r[0].fY, r[1].fY));
    pointsLeft >>= 1;
    uint32_t a = generate_cubic_points(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
    uint32_t b = generate_cubic_points(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
    return a + b;
}

#else // !NEW_EVAL

static GrScalar gr_eval_quad(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[0] - 2 * coord[2] + coord[4];
    GrScalar B = 2 * (coord[2] - coord[0]);
    GrScalar C = coord[0];

    return GrMul(GrMul(A, t) + B, t) + C;
}

static void gr_eval_quad_at(const GrPoint src[3], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);
    pt->set(gr_eval_quad(&src[0].fX, t), gr_eval_quad(&src[0].fY, t));
}

static GrScalar gr_eval_cubic(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[6] - coord[0] + 3 * (coord[2] - coord[4]);
    GrScalar B = 3 * (coord[0] - 2 * coord[2] + coord[4]);
    GrScalar C = 3 * (coord[2] - coord[0]);
    GrScalar D = coord[0];

    return GrMul(GrMul(GrMul(A, t) + B, t) + C, t) + D;
}

static void gr_eval_cubic_at(const GrPoint src[4], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);

    pt->set(gr_eval_cubic(&src[0].fX, t), gr_eval_cubic(&src[0].fY, t));
}

#endif // !NEW_EVAL

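// Walks the path once and returns an upper bound on the number of vertices
// the tessellation can produce; *subpaths receives the number of contours.
// The matrix is applied to curve control points before estimating, so the
// estimate is made in the same space as the tolerance.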
static int worst_case_point_count(GrPathIter* path,
                                  int* subpaths,
                                  const GrMatrix& matrix,
                                  GrScalar tol) {
    int pointCount = 0;
    *subpaths = 1;

    bool first = true;

    GrPathIter::Command cmd;

    GrPoint pts[4];
    while ((cmd = path->next(pts)) != GrPathIter::kEnd_Command) {

        switch (cmd) {
            case GrPathIter::kLine_Command:
                pointCount += 1;
                break;
            case GrPathIter::kQuadratic_Command:
#if NEW_EVAL
                matrix.mapPoints(pts, pts, 3);
                pointCount += quadratic_point_count(pts, tol);
#else
                pointCount += 9;
#endif
                break;
            case GrPathIter::kCubic_Command:
#if NEW_EVAL
                matrix.mapPoints(pts, pts, 4);
                pointCount += cubic_point_count(pts, tol);
#else
                pointCount += 17;
#endif
                break;
            case GrPathIter::kMove_Command:
                pointCount += 1;
                if (!first) {
                    ++(*subpaths);
                }
                break;
            default:
                break;
        }
        first = false;
    }
    return pointCount;
}

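// Returns true if the path's convexity hint allows drawing it with a single
// color pass and no stencil passes. Same-winding convex pieces additionally
// require that blending can be disabled and dithering is off, since
// overlapping pieces may hit a pixel more than once.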
static inline bool single_pass_path(const GrPathIter& path,
                                    GrContext::PathFills fill,
                                    bool useTex,
                                    const GrGpu& gpu) {
#if STENCIL_OFF
    return true;
#else
    if (GrContext::kEvenOdd_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint;
    } else if (GrContext::kWinding_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint ||
               (hint == GrPathIter::kSameWindingConvexPieces_ConvexHint &&
                gpu.canDisableBlend() && !gpu.isDitherState());

    }
    return false;
#endif
}

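// Tessellates the path on the CPU into one fan (or line strip for hairlines)
// per subpath and draws it. Hairlines and paths approved by
// single_pass_path() take a single color pass; other fills are first resolved
// in the stencil buffer and then covered with a color pass over the path's
// bounds (or the whole render target for inverse fills).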
void GrContext::drawPath(GrPathIter* path, PathFills fill,
                         bool useTexture, const GrPoint* translate) {

    flushText();

    GrGpu::AutoStateRestore asr(fGpu);

#if NEW_EVAL
    GrMatrix viewM;
    fGpu->getViewMatrix(&viewM);
    // In order to tessellate the path we get a bound on how much the matrix
    // can stretch when mapping to screen coordinates.
    GrScalar stretch = viewM.getMaxStretch();
    bool useStretch = stretch > 0;
    GrScalar tol = EVAL_TOL;
    if (!useStretch) {
        // TODO: deal with perspective in some better way.
        tol /= 10;
    } else {
        // TODO: fixed point divide
        GrScalar sinv = 1 / stretch;
        tol = GrMul(tol, sinv);
        viewM = GrMatrix::I();
    }
    GrScalar tolSqd = GrMul(tol, tol);
#else
    // pass to worst_case... but won't be used.
    static const GrScalar tol = -1;
#endif

    int subpathCnt;
    int maxPts = worst_case_point_count(path,
                                        &subpathCnt,
#if CPU_TRANSFORM
                                        cpuMatrix,
#else
                                        GrMatrix::I(),
#endif
                                        tol);
    GrVertexLayout layout = 0;
    if (useTexture) {
        layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    }
    // add 4 to hold the bounding rect
    GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, maxPts + 4, 0);

    GrPoint* base = (GrPoint*) arg.vertices();
    GrPoint* vert = base;
    GrPoint* subpathBase = base;

    GrAutoSTMalloc<8, uint16_t> subpathVertCount(subpathCnt);

    path->rewind();

    // TODO: use primitive restart if available rather than multiple draws
    GrGpu::PrimitiveType type;
    int passCount = 0;
    GrGpu::StencilPass passes[3];
    bool reverse = false;

    if (kHairLine_PathFill == fill) {
        type = GrGpu::kLineStrip_PrimitiveType;
        passCount = 1;
        passes[0] = GrGpu::kNone_StencilPass;
    } else {
        type = GrGpu::kTriangleFan_PrimitiveType;
        if (single_pass_path(*path, fill, useTexture, *fGpu)) {
            passCount = 1;
            passes[0] = GrGpu::kNone_StencilPass;
        } else {
            switch (fill) {
                case kInverseEvenOdd_PathFill:
                    reverse = true;
                    // fallthrough
                case kEvenOdd_PathFill:
                    passCount = 2;
                    passes[0] = GrGpu::kEvenOddStencil_StencilPass;
                    passes[1] = GrGpu::kEvenOddColor_StencilPass;
                    break;

                case kInverseWinding_PathFill:
                    reverse = true;
                    // fallthrough
                case kWinding_PathFill:
                    passes[0] = GrGpu::kWindingStencil1_StencilPass;
                    if (fGpu->supportsSingleStencilPassWinding()) {
                        passes[1] = GrGpu::kWindingColor_StencilPass;
                        passCount = 2;
                    } else {
                        passes[1] = GrGpu::kWindingStencil2_StencilPass;
                        passes[2] = GrGpu::kWindingColor_StencilPass;
                        passCount = 3;
                    }
                    break;
                default:
                    GrAssert(!"Unknown path fill!");
                    return;
            }
        }
    }
    fGpu->setReverseFill(reverse);
#if CPU_TRANSFORM
    GrMatrix cpuMatrix;
    fGpu->getViewMatrix(&cpuMatrix);
    fGpu->setViewMatrix(GrMatrix::I());
#endif

    GrPoint pts[4];

    bool first = true;
    int subpath = 0;

    for (;;) {
        GrPathIter::Command cmd = path->next(pts);
#if CPU_TRANSFORM
        int numPts = GrPathIter::NumCommandPoints(cmd);
        cpuMatrix.mapPoints(pts, pts, numPts);
#endif
        switch (cmd) {
            case GrPathIter::kMove_Command:
                if (!first) {
                    subpathVertCount[subpath] = vert-subpathBase;
                    subpathBase = vert;
                    ++subpath;
                }
                *vert = pts[0];
                vert++;
                break;
            case GrPathIter::kLine_Command:
                *vert = pts[1];
                vert++;
                break;
            case GrPathIter::kQuadratic_Command: {
#if NEW_EVAL

                generate_quadratic_points(pts[0], pts[1], pts[2],
                                          tolSqd, &vert,
                                          quadratic_point_count(pts, tol));
#else
                const int n = 8;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_quad_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[2].fX, pts[2].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kCubic_Command: {
#if NEW_EVAL
                generate_cubic_points(pts[0], pts[1], pts[2], pts[3],
                                      tolSqd, &vert,
                                      cubic_point_count(pts, tol));
#else
                const int n = 16;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_cubic_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[3].fX, pts[3].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kClose_Command:
                break;
            case GrPathIter::kEnd_Command:
                subpathVertCount[subpath] = vert-subpathBase;
                ++subpath; // this could be only in debug
                goto FINISHED;
        }
        first = false;
    }
FINISHED:
    GrAssert(subpath == subpathCnt);
    GrAssert((vert - base) <= maxPts);

    if (translate) {
        int count = vert - base;
        for (int i = 0; i < count; i++) {
            base[i].offset(translate->fX, translate->fY);
        }
    }

    // arbitrary path complexity cutoff
    bool useBounds = fill != kHairLine_PathFill &&
                     (reverse || (vert - base) > 8);
    GrPoint* boundsVerts = base + maxPts;
    if (useBounds) {
        GrRect bounds;
        if (reverse) {
            GrAssert(NULL != fGpu->currentRenderTarget());
            // draw over the whole world.
            bounds.setLTRB(0, 0,
                           GrIntToScalar(fGpu->currentRenderTarget()->width()),
                           GrIntToScalar(fGpu->currentRenderTarget()->height()));
        } else {
            bounds.setBounds((GrPoint*)base, vert - base);
        }
        boundsVerts[0].setRectFan(bounds.fLeft, bounds.fTop, bounds.fRight,
                                  bounds.fBottom);
    }

    for (int p = 0; p < passCount; ++p) {
        fGpu->setStencilPass(passes[p]);
        if (useBounds && (GrGpu::kEvenOddColor_StencilPass == passes[p] ||
                          GrGpu::kWindingColor_StencilPass == passes[p])) {
            fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                 maxPts, 4);
        } else {
            int baseVertex = 0;
            for (int sp = 0; sp < subpathCnt; ++sp) {
                fGpu->drawNonIndexed(type,
                                     baseVertex,
                                     subpathVertCount[sp]);
                baseVertex += subpathVertCount[sp];
            }
        }
    }
}

void GrContext::flush(bool flushRenderTarget) {
    flushText();
    if (flushRenderTarget) {
        fGpu->forceRenderTargetFlush();
    }
}

void GrContext::flushText() {
    fTextDrawBuffer.playback(fGpu);
    fTextDrawBuffer.reset();
}

bool GrContext::readPixels(int left, int top, int width, int height,
                           GrTexture::PixelConfig config, void* buffer) {
    this->flush(true);
    return fGpu->readPixels(left, top, width, height, config, buffer);
}

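// Writes pixels to the current render target by uploading them into a
// temporary texture and drawing that texture as a textured rect at
// (left, top). Clipping is disabled and blending is set to (One, Zero) for
// the copy; the previous draw state is restored by the AutoStateRestore.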
void GrContext::writePixels(int left, int top, int width, int height,
                            GrTexture::PixelConfig config, const void* buffer,
                            size_t stride) {
    const GrGpu::TextureDesc desc = {
        0, GrGpu::kNone_AALevel, width, height, config
    };
    GrTexture* texture = fGpu->createTexture(desc, buffer, stride);
    if (NULL == texture) {
        return;
    }

    this->flush(true);

    GrAutoUnref aur(texture);
    GrDrawTarget::AutoStateRestore asr(fGpu);

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    fGpu->setViewMatrix(matrix);
    matrix.setScale(GR_Scalar1 / texture->allocWidth(),
                    GR_Scalar1 / texture->allocHeight());
    fGpu->setTextureMatrix(0, matrix);

    fGpu->disableState(GrDrawTarget::kClip_StateBit);
    fGpu->setAlpha(0xFF);
    fGpu->setBlendFunc(GrDrawTarget::kOne_BlendCoeff,
                       GrDrawTarget::kZero_BlendCoeff);
    fGpu->setTexture(0, texture);
    fGpu->setSamplerState(0, GrSamplerState::ClampNoFilter());

    this->fillRect(GrRect(0, 0, GrIntToScalar(width), GrIntToScalar(height)),
                   true);
}

////////////////////////////////////////////////////////////////////////////////


/* -------------------------------------------------------
 * Mimicking the GrGpu interface for now
 * TODO: define appropriate higher-level API for context
 */

void GrContext::resetContext() {
    fGpu->resetContext();
}

GrVertexBuffer* GrContext::createVertexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createVertexBuffer(size, dynamic);
}

GrIndexBuffer* GrContext::createIndexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createIndexBuffer(size, dynamic);
}

void GrContext::setTexture(int stage, GrTexture* texture) {
    fGpu->setTexture(stage, texture);
}

void GrContext::setRenderTarget(GrRenderTarget* target) {
    flushText();
    fGpu->setRenderTarget(target);
}

GrRenderTarget* GrContext::currentRenderTarget() const {
    return fGpu->currentRenderTarget();
}

void GrContext::setSamplerState(int stage, const GrSamplerState& samplerState) {
    fGpu->setSamplerState(stage, samplerState);
}

void GrContext::setTextureMatrix(int stage, const GrMatrix& m) {
    fGpu->setTextureMatrix(stage, m);
}

void GrContext::getViewMatrix(GrMatrix* m) const {
    fGpu->getViewMatrix(m);
}

void GrContext::setViewMatrix(const GrMatrix& m) {
    fGpu->setViewMatrix(m);
}

bool GrContext::reserveAndLockGeometry(GrVertexLayout vertexLayout,
                                       uint32_t vertexCount,
                                       uint32_t indexCount,
                                       void** vertices,
                                       void** indices) {
    return fGpu->reserveAndLockGeometry(vertexLayout,
                                        vertexCount,
                                        indexCount,
                                        vertices,
                                        indices);
}

void GrContext::drawIndexed(GrGpu::PrimitiveType type,
                            uint32_t startVertex,
                            uint32_t startIndex,
                            uint32_t vertexCount,
                            uint32_t indexCount) {
    flushText();
    fGpu->drawIndexed(type,
                      startVertex,
                      startIndex,
                      vertexCount,
                      indexCount);
}

void GrContext::drawNonIndexed(GrGpu::PrimitiveType type,
                               uint32_t startVertex,
                               uint32_t vertexCount) {
    flushText();
    fGpu->drawNonIndexed(type,
                         startVertex,
                         vertexCount);
}

void GrContext::setVertexSourceToArray(const void* array,
                                       GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToArray(array, vertexLayout);
}

void GrContext::setIndexSourceToArray(const void* array) {
    fGpu->setIndexSourceToArray(array);
}

void GrContext::setVertexSourceToBuffer(GrVertexBuffer* buffer,
                                        GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToBuffer(buffer, vertexLayout);
}

void GrContext::setIndexSourceToBuffer(GrIndexBuffer* buffer) {
    fGpu->setIndexSourceToBuffer(buffer);
}

void GrContext::releaseReservedGeometry() {
    fGpu->releaseReservedGeometry();
}

void GrContext::setClip(const GrClip& clip) {
    fGpu->setClip(clip);
    fGpu->enableState(GrDrawTarget::kClip_StateBit);
}

void GrContext::setAlpha(uint8_t alpha) {
    fGpu->setAlpha(alpha);
}

void GrContext::setColor(GrColor color) {
    fGpu->setColor(color);
}

static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = 1 << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}

void GrContext::setAntiAlias(bool aa) {
    if (aa) {
        fGpu->enableState(GrGpu::kAntialias_StateBit);
    } else {
        fGpu->disableState(GrGpu::kAntialias_StateBit);
    }
}

void GrContext::setDither(bool dither) {
    // hack for now, since iPad dither is hella-slow
    dither = false;

    if (dither) {
        fGpu->enableState(GrGpu::kDither_StateBit);
    } else {
        fGpu->disableState(GrGpu::kDither_StateBit);
    }
}

void GrContext::setPointSize(float size) {
    fGpu->setPointSize(size);
}

void GrContext::setBlendFunc(GrGpu::BlendCoeff srcCoef,
                             GrGpu::BlendCoeff dstCoef) {
    fGpu->setBlendFunc(srcCoef, dstCoef);
}

void GrContext::resetStats() {
    fGpu->resetStats();
}

const GrGpu::Stats& GrContext::getStats() const {
    return fGpu->getStats();
}

void GrContext::printStats() const {
    fGpu->printStats();
}

GrContext::GrContext(GrGpu* gpu) :
    fVBAllocPool(gpu,
                 gpu->supportsBufferLocking() ? POOL_VB_SIZE : 0,
                 gpu->supportsBufferLocking() ? NUM_POOL_VBS : 0),
    fTextDrawBuffer(gpu->supportsBufferLocking() ? &fVBAllocPool : NULL) {
    fGpu = gpu;
    fGpu->ref();
    fTextureCache = new GrTextureCache(MAX_TEXTURE_CACHE_COUNT,
                                       MAX_TEXTURE_CACHE_BYTES);
    fFontCache = new GrFontCache(fGpu);
}

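// Folds sampler-dependent bits into the texture key. Returns true ("special")
// when the key describes a non-power-of-two texture that needs a non-clamp
// wrap mode on hardware without sufficient NPOT support, in which case
// createAndLockTexture() builds a stretched power-of-two copy. A filtered
// sampler gets an extra key bit so filtered and unfiltered copies are cached
// separately.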
bool GrContext::finalizeTextureKey(GrTextureKey* key,
                                   const GrSamplerState& sampler) const {
    uint32_t bits = 0;
    uint16_t width = key->width();
    uint16_t height = key->height();
    if (fGpu->npotTextureSupport() < GrGpu::kNonRendertarget_NPOTTextureType) {
        if ((sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
             sampler.getWrapY() != GrSamplerState::kClamp_WrapMode) &&
            (!GrIsPow2(width) || !GrIsPow2(height))) {
            bits |= 1;
            bits |= sampler.isFilter() ? 2 : 0;
        }
    }
    key->finalize(bits);
    return 0 != bits;
}

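// Target that text draws are issued against. With DEFER_TEXT_RENDERING the
// draw buffer records them against the current GPU state and clip, and they
// are replayed in flushText(); otherwise text goes directly to the GPU.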
GrDrawTarget* GrContext::getTextTarget() {
#if DEFER_TEXT_RENDERING
    fTextDrawBuffer.initializeDrawStateAndClip(*fGpu);
    return &fTextDrawBuffer;
#else
    return fGpu;
#endif
}

const GrIndexBuffer* GrContext::quadIndexBuffer() const {
    return fGpu->quadIndexBuffer();
}

int GrContext::maxQuadsInIndexBuffer() const {
    return fGpu->maxQuadsInIndexBuffer();
}
