fix slow performance on desktops by upping our max-texture-dim from 512 to
whatever the gpu itself reports.
git-svn-id: http://skia.googlecode.com/svn/trunk@742 2bbb7eff-a529-9590-31e7-b0007b416f81
diff --git a/gpu/include/GrContext.h b/gpu/include/GrContext.h
index c2838b8..db53b1a 100644
--- a/gpu/include/GrContext.h
+++ b/gpu/include/GrContext.h
@@ -134,6 +134,11 @@
*/
void setTextureCacheLimits(int maxTextures, size_t maxTextureBytes);
+ /**
+ * Return the max width or height of a texture supported by the current gpu
+ */
+ int getMaxTextureDimension();
+
///////////////////////////////////////////////////////////////////////////
// Render targets
diff --git a/gpu/include/GrGpu.h b/gpu/include/GrGpu.h
index 6cbe53e..62f68ca 100644
--- a/gpu/include/GrGpu.h
+++ b/gpu/include/GrGpu.h
@@ -287,6 +287,8 @@
*/
NPOTTextureTypes npotTextureSupport() const { return fNPOTTextureSupport; }
+ int maxTextureDimension() const { return fMaxTextureDimension; }
+
// GrDrawTarget overrides
virtual void drawIndexed(PrimitiveType type,
uint32_t startVertex,
@@ -381,6 +383,7 @@
// set by subclass
int fMinRenderTargetWidth;
int fMinRenderTargetHeight;
+ int fMaxTextureDimension;
// overridden by API specific GrGpu-derived class to perform the draw call.
virtual void drawIndexedHelper(PrimitiveType type,
diff --git a/gpu/src/GrContext.cpp b/gpu/src/GrContext.cpp
index 5e6b824..9b94b7a 100644
--- a/gpu/src/GrContext.cpp
+++ b/gpu/src/GrContext.cpp
@@ -241,6 +241,10 @@
fTextureCache->setLimits(maxTextures, maxTextureBytes);
}
+int GrContext::getMaxTextureDimension() {
+ return fGpu->maxTextureDimension();
+}
+
///////////////////////////////////////////////////////////////////////////////
GrRenderTarget* GrContext::createPlatformRenderTarget(intptr_t platformRenderTarget,
diff --git a/gpu/src/GrGpuGL.cpp b/gpu/src/GrGpuGL.cpp
index ae27189..fb23152 100644
--- a/gpu/src/GrGpuGL.cpp
+++ b/gpu/src/GrGpuGL.cpp
@@ -380,6 +380,8 @@
fMinRenderTargetHeight = GrMax<GLuint>(fMinRenderTargetHeight, 16);
#endif
+ GR_GL_GetIntegerv(GL_MAX_TEXTURE_SIZE, &fMaxTextureDimension);
+
#if GR_COLLECT_STATS
++fStats.fRenderTargetChngCnt;
#endif
diff --git a/src/gpu/SkGpuDevice.cpp b/src/gpu/SkGpuDevice.cpp
index 4042006..dca146e 100644
--- a/src/gpu/SkGpuDevice.cpp
+++ b/src/gpu/SkGpuDevice.cpp
@@ -709,13 +709,6 @@
fContext->drawPath(grPaint, &iter, fill);
}
-/*
- * This value must not exceed the GPU's texture dimension limit, but it can
- * be smaller, if that helps avoid very large single textures hurting the
- * cache.
- */
-#define MAX_TEXTURE_DIM 512
-
void SkGpuDevice::drawBitmap(const SkDraw& draw,
const SkBitmap& bitmap,
const SkIRect* srcRectPtr,
@@ -736,8 +729,9 @@
}
grPaint.fSampler.setFilter(paint.isFilterBitmap());
- if (bitmap.getTexture() || (bitmap.width() <= MAX_TEXTURE_DIM &&
- bitmap.height() <= MAX_TEXTURE_DIM)) {
+ const int maxTextureDim = fContext->getMaxTextureDimension();
+ if (bitmap.getTexture() || (bitmap.width() <= maxTextureDim &&
+ bitmap.height() <= maxTextureDim)) {
// take the fast case
this->internalDrawBitmap(draw, bitmap, srcRect, m, &grPaint);
return;
@@ -762,13 +756,13 @@
clipRect.offset(DX, DY);
}
- int nx = bitmap.width() / MAX_TEXTURE_DIM;
- int ny = bitmap.height() / MAX_TEXTURE_DIM;
+ int nx = bitmap.width() / maxTextureDim;
+ int ny = bitmap.height() / maxTextureDim;
for (int x = 0; x <= nx; x++) {
for (int y = 0; y <= ny; y++) {
SkIRect tileR;
- tileR.set(x * MAX_TEXTURE_DIM, y * MAX_TEXTURE_DIM,
- (x + 1) * MAX_TEXTURE_DIM, (y + 1) * MAX_TEXTURE_DIM);
+ tileR.set(x * maxTextureDim, y * maxTextureDim,
+ (x + 1) * maxTextureDim, (y + 1) * maxTextureDim);
if (!SkIRect::Intersects(tileR, clipRect)) {
continue;
}
@@ -807,8 +801,8 @@
const SkIRect& srcRect,
const SkMatrix& m,
GrPaint* grPaint) {
- SkASSERT(bitmap.width() <= MAX_TEXTURE_DIM &&
- bitmap.height() <= MAX_TEXTURE_DIM);
+ SkASSERT(bitmap.width() <= fContext->getMaxTextureDimension() &&
+ bitmap.height() <= fContext->getMaxTextureDimension());
SkAutoLockPixels alp(bitmap);
if (!bitmap.getTexture() && !bitmap.readyToDraw()) {