Let SkBenchmark classes specify that they do no rendering.
Doing this gives us a 15-20% speedup in bench cycle time.
As in previous changes of this kind, this only converts the benchmarks that are straightforward to update; the rest can follow later.
http://codereview.appspot.com/6500115/
git-svn-id: http://skia.googlecode.com/svn/trunk@5525 2bbb7eff-a529-9590-31e7-b0007b416f81
diff --git a/bench/GrMemoryPoolBench.cpp b/bench/GrMemoryPoolBench.cpp
index d9dc49d..d6c4b77 100644
--- a/bench/GrMemoryPoolBench.cpp
+++ b/bench/GrMemoryPoolBench.cpp
@@ -39,6 +39,7 @@
};
public:
GrMemoryPoolBenchStack(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
@@ -94,6 +95,7 @@
};
public:
GrMemoryPoolBenchRandom(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
@@ -131,6 +133,7 @@
};
public:
GrMemoryPoolBenchQueue(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
diff --git a/bench/InterpBench.cpp b/bench/InterpBench.cpp
index f8f463b..b689c72 100644
--- a/bench/InterpBench.cpp
+++ b/bench/InterpBench.cpp
@@ -20,6 +20,7 @@
fName.printf("interp_%s", name);
fFx = 3.3f;
fDx = 0.1257f;
+ fIsRendering = false;
}
virtual void performTest(int16_t dst[], float x, float dx, int count) = 0;
diff --git a/bench/MathBench.cpp b/bench/MathBench.cpp
index 414036b..df3d3a5 100644
--- a/bench/MathBench.cpp
+++ b/bench/MathBench.cpp
@@ -30,6 +30,8 @@
for (int i = 0; i < kBuffer; ++i) {
fSrc[i] = rand.nextSScalar1();
}
+
+ fIsRendering = false;
}
virtual void performTest(float* SK_RESTRICT dst,
@@ -265,6 +267,7 @@
fProc = gRec[index].fProc;
fName = gRec[index].fName;
}
+ fIsRendering = false;
}
protected:
@@ -330,6 +333,7 @@
} else {
fName = "floor_std";
}
+ fIsRendering = false;
}
virtual void process(float) {}
diff --git a/bench/MatrixBench.cpp b/bench/MatrixBench.cpp
index d8e2137..1a85172 100644
--- a/bench/MatrixBench.cpp
+++ b/bench/MatrixBench.cpp
@@ -16,6 +16,7 @@
public:
MatrixBench(void* param, const char name[]) : INHERITED(param) {
fName.printf("matrix_%s", name);
+ fIsRendering = false;
}
virtual void performTest() = 0;
diff --git a/bench/MemoryBench.cpp b/bench/MemoryBench.cpp
index d8d6052..30b9ea3 100644
--- a/bench/MemoryBench.cpp
+++ b/bench/MemoryBench.cpp
@@ -23,6 +23,7 @@
ChunkAllocBench(void* param, size_t minSize) : INHERITED(param) {
fMinSize = minSize;
fName.printf("chunkalloc_" SK_SIZE_T_SPECIFIER, minSize);
+ fIsRendering = false;
}
protected:
diff --git a/bench/RTreeBench.cpp b/bench/RTreeBench.cpp
index d13d887..e1885c3 100644
--- a/bench/RTreeBench.cpp
+++ b/bench/RTreeBench.cpp
@@ -35,6 +35,7 @@
if (fBulkLoad) {
fName.append("_bulk");
}
+ fIsRendering = false;
}
virtual ~BBoxBuildBench() {
fTree->unref();
@@ -91,6 +92,7 @@
SkBENCHLOOP(NUM_QUERY_RECTS)), fBulkLoad);
}
fTree->flushDeferredInserts();
+ fIsRendering = false;
}
virtual ~BBoxQueryBench() {
fTree->unref();
diff --git a/bench/RefCntBench.cpp b/bench/RefCntBench.cpp
index cdc81eb..c8a98e4 100644
--- a/bench/RefCntBench.cpp
+++ b/bench/RefCntBench.cpp
@@ -18,6 +18,7 @@
class RefCntBench_Stack : public SkBenchmark {
public:
RefCntBench_Stack(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
@@ -54,6 +55,7 @@
class RefCntBench_Heap : public SkBenchmark {
public:
RefCntBench_Heap(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
@@ -79,6 +81,7 @@
class RefCntBench_New : public SkBenchmark {
public:
RefCntBench_New(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
@@ -105,6 +108,7 @@
class WeakRefCntBench_Stack : public SkBenchmark {
public:
WeakRefCntBench_Stack(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
@@ -134,6 +138,7 @@
class WeakRefCntBench_Heap : public SkBenchmark {
public:
WeakRefCntBench_Heap(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
@@ -159,6 +164,7 @@
class WeakRefCntBench_New : public SkBenchmark {
public:
WeakRefCntBench_New(void* param) : INHERITED(param) {
+ fIsRendering = false;
}
protected:
virtual const char* onGetName() {
diff --git a/bench/RegionBench.cpp b/bench/RegionBench.cpp
index 7b9e2d4..7a306e9 100644
--- a/bench/RegionBench.cpp
+++ b/bench/RegionBench.cpp
@@ -100,6 +100,7 @@
fA.op(randrect(rand), SkRegion::kXOR_Op);
fB.op(randrect(rand), SkRegion::kXOR_Op);
}
+ fIsRendering = false;
}
protected:
diff --git a/bench/SkBenchmark.cpp b/bench/SkBenchmark.cpp
index c8552d5..6afcd8e 100644
--- a/bench/SkBenchmark.cpp
+++ b/bench/SkBenchmark.cpp
@@ -19,6 +19,7 @@
fForceAA = true;
fDither = SkTriState::kDefault;
fHasStrokeWidth = false;
+ fIsRendering = true;
}
const char* SkBenchmark::getName() {
diff --git a/bench/SkBenchmark.h b/bench/SkBenchmark.h
index a77cb83..5cfbdab2 100644
--- a/bench/SkBenchmark.h
+++ b/bench/SkBenchmark.h
@@ -81,6 +81,13 @@
return fHasStrokeWidth;
}
+ /** If true, the benchmark does rendering; if false, the benchmark
+ doesn't, and so need not be re-run in every different rendering
+ mode. */
+ bool isRendering() {
+ return fIsRendering;
+ }
+
const char* findDefine(const char* key) const;
bool findDefine32(const char* key, int32_t* value) const;
bool findDefineScalar(const char* key, SkScalar* value) const;
@@ -94,6 +101,8 @@
virtual void onPostDraw() {}
virtual SkIPoint onGetSize();
+ /// Defaults to true.
+ bool fIsRendering;
private:
const SkTDict<const char*>* fDict;
diff --git a/bench/benchmain.cpp b/bench/benchmain.cpp
index 9edb8cf..692b0bd 100644
--- a/bench/benchmain.cpp
+++ b/bench/benchmain.cpp
@@ -738,7 +738,13 @@
AutoPrePostDraw appd(bench);
+ bool runOnce = false;
for (int x = 0; x < configs.count(); ++x) {
+ if (!bench->isRendering() && runOnce) {
+ continue;
+ }
+ runOnce = true;
+
int configIndex = configs[x];
outConfig = gConfigs[configIndex].fConfig;