Add forwarding address checks for X86, arm, arm64

Added a fast-path check to READ_BARRIER_MARK_REG: if the lock word is
unmarked but its state bits encode a forwarding address, return the
forwarded reference directly instead of falling through to the slow path.
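
For reference, a minimal C sketch of the fast path the arm assembly below
implements (not ART source; the constant values are assumed from the
asm_support.h definitions of this era):

    #include <stdint.h>

    /* Assumed values: mark bit at bit 29, state in the top 2 bits,
     * kStateForwardingAddress == 3, forwarding addresses stored
     * right-shifted by 3 (8-byte object alignment). */
    #define LOCK_WORD_MARK_BIT_MASK_SHIFTED          (1u << 29)
    #define LOCK_WORD_STATE_SHIFT                    30
    #define LOCK_WORD_STATE_FORWARDING_ADDRESS       0x3u
    #define LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT 3

    /* Returns the reference to use on the fast path, or 0 to signal
     * that the slow path (runtime mark) must run instead. */
    uint32_t read_barrier_mark_fast_path(uint32_t ref, uint32_t lock_word) {
      if (lock_word & LOCK_WORD_MARK_BIT_MASK_SHIFTED) {
        return ref;  /* Already marked: return the reference unchanged. */
      }
      uint32_t state_mask =
          LOCK_WORD_STATE_FORWARDING_ADDRESS << LOCK_WORD_STATE_SHIFT;
      if ((~lock_word & state_mask) == 0) {
        /* Both state bits set: the lock word holds a forwarding address,
         * stored right-shifted by 3. Shifting left recovers it and drops
         * the state bits off the top of the 32-bit word. */
        return lock_word << LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT;
      }
      return 0;  /* Not marked, not forwarded: take the slow path. */
    }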

Bug: 30162165

Test: test-art-host, test-art-target

Change-Id: I15cf0d51ed3d22fa401e80ffac3877d61593527c
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index bf70c55..0135260 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1999,11 +1999,17 @@
     // Check lock word for mark bit, if marked return. Use IP for scratch since it is blocked.
     ldr ip, [\reg, MIRROR_OBJECT_LOCK_WORD_OFFSET]
     tst ip, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
-    beq .Lslow_rb_\name
+    beq .Lnot_marked_rb_\name
     // Already marked, return right away.
 .Lret_rb_\name:
     bx lr
 
+.Lnot_marked_rb_\name:
+    // Test that both forwarding address state bits are set (after mvn, tst sees them as zero).
+    mvn ip, ip
+    tst ip, #(LOCK_WORD_STATE_FORWARDING_ADDRESS << LOCK_WORD_STATE_SHIFT)
+    beq .Lret_forwarding_address\name
+
 .Lslow_rb_\name:
     // Save IP: the kSaveEverything entrypoint art_quick_resolve_string makes a tail call here.
     push  {r0-r4, r9, ip, lr}           @ save return address, core caller-save registers and ip
@@ -2064,6 +2070,12 @@
     .cfi_restore ip
     .cfi_restore lr
     bx lr
+.Lret_forwarding_address\name:
+    // Shift left by the forwarding address shift. This clears the state bits out of the top 2
+    // bits of the lock word and turns the stored (address >> shift) back into the address.
+    mvn ip, ip
+    lsl \reg, ip, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
+    bx lr
 END \name
 .endm
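
As a sanity check on the encoding the comments above describe, a
hypothetical stand-alone test (not part of this change; constants assumed
from asm_support.h) showing that the mvn+tst check and the lsl decode
round-trip an 8-byte-aligned forwarding address:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint32_t addr = 0x12345678u;        /* already 8-byte aligned */
      /* Encode: state bits 0b11 in the top 2 bits, address stored >> 3. */
      uint32_t lw = (0x3u << 30) | (addr >> 3);
      /* The mvn+tst check: both state bits set <=> inverted bits zero. */
      assert((~lw & (0x3u << 30)) == 0);
      /* The lsl decode: shifting left by 3 drops the state bits and
       * restores the address. */
      assert((lw << 3) == addr);
      return 0;
    }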