Move thread flags and state into 32 bits.
We need to ensure that transitions to Runnable are atomic with respect
to a thread modifying the suspend count. Currently this is achieved by
holding the thread_suspend_count_lock_. This change creates a set of bit
flags that summarize whether the suspend_count_ is raised, along with
other flags that signify that managed code should take a slow path.
The effects of this change are two-fold:
1) transitions from suspended to runnable can CAS the thread state
rather than holding the suspend_count_lock_. This will make JNI
transitions cheaper.
2) the exception/suspend/interpreter poll needed for shadow frames can
be rolled into a single compare of the bit fields against 0.
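As a rough illustration (not the exact ART declarations; the type and
function names below are assumptions for this sketch), the packed word
and the two resulting fast paths might look like:

  #include <atomic>
  #include <cstdint>

  // Illustrative values only; the real enums live in the ART sources.
  enum ThreadState : uint16_t { kRunnable = 0, kSuspended = 1 };
  enum ThreadFlag : uint16_t { kSuspendRequest = 1 };  // suspend_count_ > 0

  // State and flags packed into one 32-bit word so both can be read,
  // tested and CASed together.
  union StateAndFlags {
    struct {
      uint16_t flags;  // slow-path request bits
      uint16_t state;  // ThreadState
    } as_struct;
    int32_t as_int;
  };

  // Effect 1: suspended -> runnable without taking the suspend count lock.
  // Returns false (caller takes the slow path) if any flag is set or a
  // suspending thread raced with us.
  bool TryTransitionToRunnable(std::atomic<int32_t>& state_and_flags) {
    StateAndFlags old_val;
    old_val.as_int = state_and_flags.load(std::memory_order_relaxed);
    if (old_val.as_struct.flags != 0) {
      return false;
    }
    StateAndFlags new_val = old_val;
    new_val.as_struct.state = kRunnable;
    return state_and_flags.compare_exchange_strong(old_val.as_int,
                                                   new_val.as_int,
                                                   std::memory_order_acquire);
  }

  // Effect 2: the exception/suspend/interpreter poll for shadow frames
  // collapses to a single comparison of the flag bits against zero.
  bool NeedsSlowPath(const std::atomic<int32_t>& state_and_flags) {
    StateAndFlags val;
    val.as_int = state_and_flags.load(std::memory_order_relaxed);
    return val.as_struct.flags != 0;
  }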
Change-Id: I589f84e3dca396c3db448bf32d814565acf3d11f
diff --git a/src/asm_support.h b/src/asm_support.h
index 90ff709..b2f8126 100644
--- a/src/asm_support.h
+++ b/src/asm_support.h
@@ -29,21 +29,21 @@
#define rSELF r9
#define rLR r14
// Offset of field Thread::suspend_count_ verified in InitCpu
-#define THREAD_SUSPEND_COUNT_OFFSET 0
+#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 8
+#define THREAD_EXCEPTION_OFFSET 12
#elif defined(__mips__)
#define rSUSPEND $s0
#define rSELF $s1
// Offset of field Thread::suspend_count_ verified in InitCpu
-#define THREAD_SUSPEND_COUNT_OFFSET 0
+#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 8
+#define THREAD_EXCEPTION_OFFSET 12
#elif defined(__i386__)
// Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 36
+#define THREAD_SELF_OFFSET 40
// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 8
+#define THREAD_EXCEPTION_OFFSET 12
#endif
#endif // ART_SRC_ASM_SUPPORT_H_