Merge "Add more runtime options." into dalvik-dev
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 8321ff6..d7a4136 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -151,10 +151,10 @@
   AN_BRANCH,
 
   // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  AN_NONE,
+  AN_SWITCH,
 
   // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  AN_NONE,
+  AN_SWITCH,
 
   // 2D CMPL_FLOAT vAA, vBB, vCC
   AN_MATH | AN_FP | AN_SINGLE,
@@ -841,6 +841,7 @@
   int branch_ops;
   int heavyweight_ops;
   bool has_computational_loop;
+  bool has_switch;
   float math_ratio;
   float fp_ratio;
   float array_ratio;
@@ -914,6 +915,9 @@
       if ((flags & AN_HEAVYWEIGHT) != 0) {
         stats->heavyweight_ops += loop_scale_factor;
       }
+      if ((flags & AN_SWITCH) != 0) {
+        stats->has_switch = true;
+      }
     }
     if (tbb == ending_bb) {
       done = true;
@@ -939,7 +943,7 @@
               << stats->math_ratio << ", fp:"
               << stats->fp_ratio << ", br:"
               << stats->branch_ratio << ", hw:"
-              << stats-> heavyweight_ratio << ", arr:"
+              << stats->heavyweight_ratio << ", arr:"
               << stats->array_ratio << ", hot:"
               << stats->has_computational_loop << ", "
               << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -971,8 +975,14 @@
     return false;
   }
 
-  // If high proportion of expensive operations, skip.
-  if (stats->heavyweight_ratio > 0.3) {
+  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
+  if (stats->has_switch) {
+    return false;
+  }
+
+  // If significant in size and high proportion of expensive operations, skip.
+  if ((GetNumDalvikInsns() > Runtime::Current()->GetSmallMethodThreshold()) &&
+      (stats->heavyweight_ratio > 0.3)) {
     return true;
   }
 
@@ -984,8 +994,7 @@
   * Ultimate goal is to drive with profile data.
   */
 bool MIRGraph::SkipCompilation(Runtime::CompilerFilter compiler_filter) {
-  if (compiler_filter == Runtime::kSpeed) {
-    // If going for speed, compile everything.
+  if (compiler_filter == Runtime::kEverything) {
     return false;
   }
 
@@ -994,10 +1003,38 @@
     return true;
   }
 
-  // Filter 1: Skip huge methods (generally machine-generated initialization methods).
+  // Set up compilation cutoffs based on current filter mode.
+  size_t small_cutoff = 0;
+  size_t default_cutoff = 0;
+  switch (compiler_filter) {
+    case Runtime::kBalanced:
+      small_cutoff = Runtime::Current()->GetSmallMethodThreshold();
+      default_cutoff = Runtime::Current()->GetLargeMethodThreshold();
+      break;
+    case Runtime::kSpace:
+      small_cutoff = Runtime::Current()->GetTinyMethodThreshold();
+      default_cutoff = Runtime::Current()->GetSmallMethodThreshold();
+      break;
+    case Runtime::kSpeed:
+      small_cutoff = Runtime::Current()->GetHugeMethodThreshold();
+      default_cutoff = Runtime::Current()->GetHugeMethodThreshold();
+      break;
+    default:
+      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
+  }
+
+  // If size < cutoff, assume we'll compile - but allow removal.
+  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
+
+  /*
+   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
+   * If huge, assume we won't compile, but allow further analysis to turn it back on.
+   */
   if (GetNumDalvikInsns() > Runtime::Current()->GetHugeMethodThreshold()) {
-    // Ain't nobody got time for that.
-    return true;
+    skip_compilation = true;
+  } else if (compiler_filter == Runtime::kSpeed) {
+    // If not huge, compile.
+    return false;
   }
 
   // Filter 2: Skip class initializers.
@@ -1010,28 +1047,7 @@
     return false;
   }
 
-  /* In balanced mode, we generally assume that we'll be compiling, and then detect
-   * methods that won't benefit and remove them.  In space or deferred mode, we do the
-   * opposite: assume no compilation and then add back presumed hot methods.
-   */
-  bool skip_compilation = (compiler_filter == Runtime::kBalanced) ? false : true;
-
-
-  // Filter 4: go ahead and compile the small ones.
-  size_t small_cutoff = 0;
-  switch (compiler_filter) {
-    case Runtime::kBalanced:
-      small_cutoff = Runtime::Current()->GetSmallMethodThreshold();
-      break;
-    case Runtime::kSpace:
-      small_cutoff = Runtime::Current()->GetTinyMethodThreshold();
-      break;
-    case Runtime::kDeferCompilation:
-      small_cutoff = 0;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
-  }
+  // Filter 4: if small, just compile.
   if (GetNumDalvikInsns() < small_cutoff) {
     return false;
   }
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index af1ae44..c02deab 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -38,7 +38,8 @@
   kArrayOp,
   kHeavyweightOp,
   kSimpleConstOp,
-  kMoveOp
+  kMoveOp,
+  kSwitch
 };
 
 #define AN_NONE (1 << kUninterestingOp)
@@ -55,6 +56,7 @@
 #define AN_HEAVYWEIGHT (1 << kHeavyweightOp)
 #define AN_SIMPLECONST (1 << kSimpleConstOp)
 #define AN_MOVE (1 << kMoveOp)
+#define AN_SWITCH (1 << kSwitch)
 #define AN_COMPUTATIONAL (AN_MATH | AN_ARRAYOP | AN_MOVE | AN_SIMPLECONST)
 
 enum DataFlowAttributePos {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 511788b..1d7f68d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -867,9 +867,10 @@
   // give it away now and then switch to a more managable ScopedObjectAccess.
   Thread::Current()->TransitionFromRunnableToSuspended(kNative);
   // If we're doing the image, override the compiler filter to force full compilation. Must be
-  // done ahead of WellKnownClasses::Init that causes verification.
-  if (image && Runtime::Current()->GetCompilerFilter() == Runtime::kInterpretOnly) {
-    Runtime::Current()->SetCompilerFilter(Runtime::kSpeed);
+  // done ahead of WellKnownClasses::Init that causes verification.  Note: doesn't force
+  // compilation of class initializers.
+  if (image) {
+    Runtime::Current()->SetCompilerFilter(Runtime::kEverything);
   }
   // Whilst we're in native take the opportunity to initialize well known classes.
   WellKnownClasses::Init(Thread::Current()->GetJniEnv());
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0110b36..69f004d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -575,9 +575,6 @@
     case Runtime::kInterpretOnly:
       oat_compiler_filter_string += "interpret-only";
       break;
-    case Runtime::kDeferCompilation:
-      oat_compiler_filter_string += "defer-compilation";
-      break;
     case Runtime::kSpace:
       oat_compiler_filter_string += "space";
       break;
@@ -587,6 +584,9 @@
     case Runtime::kSpeed:
       oat_compiler_filter_string += "speed";
       break;
+    case Runtime::kEverything:
+      oat_compiler_filter_string += "everything";
+      break;
     default:
       LOG(FATAL) << "Unexpected case.";
   }
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 79bf39b..cedea61 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -963,7 +963,7 @@
 
 void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
   JavaVMExt* vm = Runtime::Current()->GetJavaVM();
-  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
+  WriterMutexLock mu(Thread::Current(), vm->weak_globals_lock);
   for (const Object** entry : vm->weak_globals) {
     if (!is_marked(*entry, arg)) {
       *entry = kClearedJniWeakGlobal;
@@ -1046,7 +1046,7 @@
   runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
 
   JavaVMExt* vm = runtime->GetJavaVM();
-  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
+  ReaderMutexLock mu(Thread::Current(), vm->weak_globals_lock);
   for (const Object** entry : vm->weak_globals) {
     VerifyIsLive(*entry);
   }
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 55c0765..a18a261 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -91,7 +91,7 @@
   }
   JavaVMExt* vm = soa.Vm();
   IndirectReferenceTable& weak_globals = vm->weak_globals;
-  MutexLock mu(soa.Self(), vm->weak_globals_lock);
+  WriterMutexLock mu(soa.Self(), vm->weak_globals_lock);
   IndirectRef ref = weak_globals.Add(IRT_FIRST_SEGMENT, obj);
   return reinterpret_cast<jweak>(ref);
 }
@@ -823,7 +823,7 @@
     JavaVMExt* vm = soa.Vm();
     IndirectReferenceTable& globals = vm->globals;
     Object* decoded_obj = soa.Decode<Object*>(obj);
-    MutexLock mu(soa.Self(), vm->globals_lock);
+    WriterMutexLock mu(soa.Self(), vm->globals_lock);
     IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, decoded_obj);
     return reinterpret_cast<jobject>(ref);
   }
@@ -835,7 +835,7 @@
     JavaVMExt* vm = reinterpret_cast<JNIEnvExt*>(env)->vm;
     IndirectReferenceTable& globals = vm->globals;
     Thread* self = reinterpret_cast<JNIEnvExt*>(env)->self;
-    MutexLock mu(self, vm->globals_lock);
+    WriterMutexLock mu(self, vm->globals_lock);
 
     if (!globals.Remove(IRT_FIRST_SEGMENT, obj)) {
       LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
@@ -855,7 +855,7 @@
     ScopedObjectAccess soa(env);
     JavaVMExt* vm = soa.Vm();
     IndirectReferenceTable& weak_globals = vm->weak_globals;
-    MutexLock mu(soa.Self(), vm->weak_globals_lock);
+    WriterMutexLock mu(soa.Self(), vm->weak_globals_lock);
 
     if (!weak_globals.Remove(IRT_FIRST_SEGMENT, obj)) {
       LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
@@ -2333,7 +2333,7 @@
   static jint RegisterNativeMethods(JNIEnv* env, jclass java_class, const JNINativeMethod* methods,
                                     jint method_count, bool return_errors) {
     if (UNLIKELY(method_count < 0)) {
-      JniAbortF("RegisterNatives", "method_cound == %d", method_count);
+      JniAbortF("RegisterNatives", "negative method count: %d", method_count);
       return JNI_ERR;  // Not reached.
     }
     CHECK_NON_NULL_ARGUMENT(RegisterNatives, java_class);
@@ -2512,7 +2512,7 @@
   static jint EnsureLocalCapacity(JNIEnv* env, jint desired_capacity,
                                   const char* caller) {
     // TODO: we should try to expand the table if necessary.
-    if (desired_capacity < 1 || desired_capacity > static_cast<jint>(kLocalsMax)) {
+    if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
       LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
       return JNI_ERR;
     }
@@ -3019,11 +3019,11 @@
     os << "; pins=" << pin_table.Size();
   }
   {
-    MutexLock mu(self, globals_lock);
+    ReaderMutexLock mu(self, globals_lock);
     os << "; globals=" << globals.Capacity();
   }
   {
-    MutexLock mu(self, weak_globals_lock);
+    ReaderMutexLock mu(self, weak_globals_lock);
     if (weak_globals.Capacity() > 0) {
       os << " (plus " << weak_globals.Capacity() << " weak)";
     }
@@ -3039,11 +3039,11 @@
 void JavaVMExt::DumpReferenceTables(std::ostream& os) {
   Thread* self = Thread::Current();
   {
-    MutexLock mu(self, globals_lock);
+    ReaderMutexLock mu(self, globals_lock);
     globals.Dump(os);
   }
   {
-    MutexLock mu(self, weak_globals_lock);
+    ReaderMutexLock mu(self, weak_globals_lock);
     weak_globals.Dump(os);
   }
   {
@@ -3191,7 +3191,7 @@
       return NULL;
     }
   } else {
-    CHECK(c->GetStatus() >= Class::kStatusInitializing) << c->GetStatus() << " " << PrettyMethod(m);
+    CHECK(c->IsInitializing()) << c->GetStatus() << " " << PrettyMethod(m);
   }
 
   std::string detail;
@@ -3212,7 +3212,7 @@
 void JavaVMExt::VisitRoots(RootVisitor* visitor, void* arg) {
   Thread* self = Thread::Current();
   {
-    MutexLock mu(self, globals_lock);
+    ReaderMutexLock mu(self, globals_lock);
     globals.VisitRoots(visitor, arg);
   }
   {
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index f7caa0f..bad3841 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -112,11 +112,11 @@
   ReferenceTable pin_table GUARDED_BY(pins_lock);
 
   // JNI global references.
-  Mutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
+  ReaderWriterMutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
   IndirectReferenceTable globals GUARDED_BY(globals_lock);
 
   // JNI weak global references.
-  Mutex weak_globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
+  ReaderWriterMutex weak_globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
   IndirectReferenceTable weak_globals GUARDED_BY(weak_globals_lock);
 
   Mutex libraries_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 234e40a..aea2ed3 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1488,6 +1488,21 @@
   env_->DeleteLocalRef(o);
 }
 
+TEST_F(JniInternalTest, PushLocalFrame_10395422) {
+  // The JNI specification is ambiguous about whether the given capacity is to be interpreted as a
+  // maximum or as a minimum, but it seems like it's supposed to be a minimum, and that's how
+  // Android historically treated it, and it's how the RI treats it. It's also the more useful
+  // interpretation!
+  ASSERT_EQ(JNI_OK, env_->PushLocalFrame(0));
+  env_->PopLocalFrame(NULL);
+
+  // Negative capacities are not allowed.
+  ASSERT_EQ(JNI_ERR, env_->PushLocalFrame(-1));
+
+  // And it's okay to have an upper limit. Ours is currently 512.
+  ASSERT_EQ(JNI_ERR, env_->PushLocalFrame(8192));
+}
+
 TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) {
   jobject original = env_->NewStringUTF("");
   ASSERT_TRUE(original != NULL);
@@ -1497,11 +1512,11 @@
   ScopedObjectAccess soa(env_);
   mirror::Object* inner2_direct_pointer;
   {
-    env_->PushLocalFrame(4);
+    ASSERT_EQ(JNI_OK, env_->PushLocalFrame(4));
     outer = env_->NewLocalRef(original);
 
     {
-      env_->PushLocalFrame(4);
+      ASSERT_EQ(JNI_OK, env_->PushLocalFrame(4));
       inner1 = env_->NewLocalRef(outer);
       inner2 = env_->NewStringUTF("survivor");
       inner2_direct_pointer = soa.Decode<mirror::Object*>(inner2);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 31800ce..51a67c1 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -600,14 +600,14 @@
       Trace::SetDefaultClockSource(kProfilerClockSourceDual);
     } else if (option == "-compiler-filter:interpret-only") {
       parsed->compiler_filter_ = kInterpretOnly;
-    } else if (option == "-compiler-filter:defer-compilation") {
-      parsed->compiler_filter_ = kDeferCompilation;
     } else if (option == "-compiler-filter:space") {
       parsed->compiler_filter_ = kSpace;
     } else if (option == "-compiler-filter:balanced") {
       parsed->compiler_filter_ = kBalanced;
     } else if (option == "-compiler-filter:speed") {
       parsed->compiler_filter_ = kSpeed;
+    } else if (option == "-compiler-filter:everything") {
+      parsed->compiler_filter_ = kEverything;
     } else if (option == "-sea_ir") {
       parsed->sea_ir_mode_ = true;
     } else if (StartsWith(option, "-huge-method-max:")) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6b78ce4..50108ac 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -65,10 +65,10 @@
 
   enum CompilerFilter {
     kInterpretOnly,       // Compile nothing.
-    kDeferCompilation,    // Temporary minimal compilation, will redo during device idle time.
     kSpace,               // Maximize space savings.
     kBalanced,            // Try to get the best performance return on compilation investment.
-    kSpeed                // Compile all methods.
+    kSpeed,               // Maximize runtime performance.
+    kEverything           // Force compilation (Note: excludes compilation of class initializers).
   };
 
   // Guide heuristics to determine whether to compile method if profile data not available.
@@ -77,10 +77,10 @@
 #else
   static const CompilerFilter kDefaultCompilerFilter = kSpeed;
 #endif
-  static const size_t kDefaultHugeMethodThreshold = 6000;
-  static const size_t kDefaultLargeMethodThreshold = 1000;
-  static const size_t kDefaultSmallMethodThreshold = 200;
-  static const size_t kDefaultTinyMethodThreshold = 10;
+  static const size_t kDefaultHugeMethodThreshold = 10000;
+  static const size_t kDefaultLargeMethodThreshold = 600;
+  static const size_t kDefaultSmallMethodThreshold = 60;
+  static const size_t kDefaultTinyMethodThreshold = 20;
   static const size_t kDefaultNumDexMethodsThreshold = 900;
 
   class ParsedOptions {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 505e368..3178bf1 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1184,13 +1184,13 @@
   } else if (kind == kGlobal) {
     JavaVMExt* vm = Runtime::Current()->GetJavaVM();
     IndirectReferenceTable& globals = vm->globals;
-    MutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
+    ReaderMutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
     result = const_cast<mirror::Object*>(globals.Get(ref));
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
     JavaVMExt* vm = Runtime::Current()->GetJavaVM();
     IndirectReferenceTable& weak_globals = vm->weak_globals;
-    MutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
+    ReaderMutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
     result = const_cast<mirror::Object*>(weak_globals.Get(ref));
     if (result == kClearedJniWeakGlobal) {
       // This is a special case where it's okay to return NULL.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 4d2f36f..34a0f73 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -4202,7 +4202,7 @@
 ReaderWriterMutex* MethodVerifier::devirt_maps_lock_ = NULL;
 MethodVerifier::DevirtualizationMapTable* MethodVerifier::devirt_maps_ = NULL;
 
-Mutex* MethodVerifier::rejected_classes_lock_ = NULL;
+ReaderWriterMutex* MethodVerifier::rejected_classes_lock_ = NULL;
 MethodVerifier::RejectedClassesTable* MethodVerifier::rejected_classes_ = NULL;
 
 void MethodVerifier::Init() {
@@ -4227,9 +4227,9 @@
       devirt_maps_ = new MethodVerifier::DevirtualizationMapTable();
     }
 
-    rejected_classes_lock_ = new Mutex("verifier rejected classes lock");
+    rejected_classes_lock_ = new ReaderWriterMutex("verifier rejected classes lock");
     {
-      MutexLock mu(self, *rejected_classes_lock_);
+      WriterMutexLock mu(self, *rejected_classes_lock_);
       rejected_classes_ = new MethodVerifier::RejectedClassesTable;
     }
   }
@@ -4267,7 +4267,7 @@
     devirt_maps_lock_ = NULL;
 
     {
-      MutexLock mu(self, *rejected_classes_lock_);
+      WriterMutexLock mu(self, *rejected_classes_lock_);
       delete rejected_classes_;
       rejected_classes_ = NULL;
     }
@@ -4280,7 +4280,7 @@
 void MethodVerifier::AddRejectedClass(ClassReference ref) {
   DCHECK(Runtime::Current()->IsCompiler());
   {
-    MutexLock mu(Thread::Current(), *rejected_classes_lock_);
+    WriterMutexLock mu(Thread::Current(), *rejected_classes_lock_);
     rejected_classes_->insert(ref);
   }
   CHECK(IsClassRejected(ref));
@@ -4288,7 +4288,7 @@
 
 bool MethodVerifier::IsClassRejected(ClassReference ref) {
   DCHECK(Runtime::Current()->IsCompiler());
-  MutexLock mu(Thread::Current(), *rejected_classes_lock_);
+  ReaderMutexLock mu(Thread::Current(), *rejected_classes_lock_);
   return (rejected_classes_->find(ref) != rejected_classes_->end());
 }
 
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index d6bebc6..70442fb 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -661,7 +661,7 @@
                            const PcToConcreteMethodMap* pc_method_map)
         LOCKS_EXCLUDED(devirt_maps_lock_);
   typedef std::set<ClassReference> RejectedClassesTable;
-  static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  static ReaderWriterMutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   static RejectedClassesTable* rejected_classes_ GUARDED_BY(rejected_classes_lock_);
 
   static void AddRejectedClass(ClassReference ref)
diff --git a/test/run-test b/test/run-test
index 4744f19..11dcfc5 100755
--- a/test/run-test
+++ b/test/run-test
@@ -268,6 +268,8 @@
             echo "BUILD FAILED For ${TEST_NAME}"
         fi
     fi
+    # Clean up extraneous files that are not used by tests.
+    find $tmp_dir -mindepth 1  ! -regex ".*/\(.*jar\|$build_output\|$expected\)" | xargs rm -rf
     exit 0
 else
     "./${build}" >"$build_output" 2>&1