ART: SHARED_REQUIRES to REQUIRES_SHARED
This matches the actual Clang thread-safety attribute name and upstream usage.
Preparation for deferring to libbase.
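
For context, a minimal sketch (not the real ART header) of what the renamed
macro is assumed to expand to; the actual definition lives in ART's
base/mutex.h and, per the note above, is expected to come from libbase
eventually. FakeMutex, g_lock and ReadCounter below are hypothetical
stand-ins for ART's lock types.

    // Thread-safety attributes are Clang-only; other compilers see no-ops.
    #if defined(__clang__)
    #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
    #else
    #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
    #endif

    // The old spelling (SHARED_REQUIRES) reversed the word order of the
    // underlying attribute; the new spelling matches Clang's
    // requires_shared_capability attribute and its documented macro name.
    #define REQUIRES_SHARED(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

    // Minimal capability type and a global lock so the example is
    // self-contained.
    struct THREAD_ANNOTATION_ATTRIBUTE__(capability("mutex")) FakeMutex {};
    extern FakeMutex g_lock;

    // Caller must hold g_lock in shared (reader) mode -- the same shape as
    // the REQUIRES_SHARED(Locks::mutator_lock_) annotations in this diff.
    int ReadCounter() REQUIRES_SHARED(g_lock);
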
Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 451aa38..1e5f0b6 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -109,7 +109,7 @@
}
static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
}
@@ -119,7 +119,7 @@
* Return nullptr if the runtime target cannot be proven.
*/
static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsMethodOrDeclaringClassFinal(resolved_method)) {
// No need to lookup further, the resolved method will be the target.
return resolved_method;
@@ -189,7 +189,7 @@
static uint32_t FindMethodIndexIn(ArtMethod* method,
const DexFile& dex_file,
uint32_t name_and_signature_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsSameDexFile(*method->GetDexFile(), dex_file)) {
return method->GetDexMethodIndex();
} else {
@@ -200,7 +200,7 @@
static uint32_t FindClassIndexIn(mirror::Class* cls,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t index = DexFile::kDexNoIndex;
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << PrettyClass(cls);
@@ -894,7 +894,7 @@
static HInstruction* GetInvokeInputForArgVRegIndex(HInvoke* invoke_instruction,
size_t arg_vreg_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t input_index = 0;
for (size_t i = 0; i < arg_vreg_index; ++i, ++input_index) {
DCHECK_LT(input_index, invoke_instruction->GetNumberOfArguments());
@@ -1030,7 +1030,7 @@
HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
uint32_t field_index,
HInstruction* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -1058,7 +1058,7 @@
uint32_t field_index,
HInstruction* obj,
HInstruction* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -1374,7 +1374,7 @@
static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
bool declared_can_be_null,
HInstruction* actual_obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (declared_can_be_null && !actual_obj->CanBeNull()) {
return true;
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 02d3a5f..486626b 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -64,12 +64,12 @@
// reference type propagation can run after the inlining. If the inlining is successful, this
// method will replace and remove the `invoke_instruction`.
bool TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* resolved_method, bool do_rtp)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
HInstruction** return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
@@ -86,7 +86,7 @@
bool TryPatternSubstitution(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
HInstruction** return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a new HInstanceFieldGet.
HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
@@ -105,38 +105,38 @@
bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to inline targets of a polymorphic call.
bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
HInstruction* receiver,
uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixUpReturnReferenceType(ArtMethod* resolved_method, HInstruction* return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Creates an instance of ReferenceTypeInfo from `klass` if `klass` is
// admissible (see ReferenceTypePropagation::IsAdmissible for details).
// Otherwise returns inexact Object RTI.
- ReferenceTypeInfo GetClassRTI(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ ReferenceTypeInfo GetClassRTI(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
bool ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool ReturnTypeMoreSpecific(HInvoke* invoke_instruction, HInstruction* return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add a type guard on the given `receiver`. This will add to the graph:
// i0 = HFieldGet(receiver, klass)
@@ -154,7 +154,7 @@
bool is_referrer,
HInstruction* invoke_instruction,
bool with_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Ad-hoc implementation for implementing a diamond pattern in the graph for
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e5dab56..453068b 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -957,7 +957,7 @@
}
static bool IsSubClass(mirror::Class* to_test, mirror::Class* super_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return to_test != nullptr && !to_test->IsInterface() && to_test->IsSubClass(super_class);
}
@@ -1607,7 +1607,7 @@
}
static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (cls.Get() == nullptr) {
return TypeCheckKind::kUnresolvedCheck;
} else if (cls->IsInterface()) {
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 517cf76..aa34ddd 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -103,7 +103,7 @@
bool NeedsAccessCheck(uint32_t type_index,
Handle<mirror::DexCache> dex_cache,
/*out*/bool* finalizable) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool NeedsAccessCheck(uint32_t type_index, /*out*/bool* finalizable) const;
template<typename T>
@@ -255,14 +255,14 @@
ArtMethod* method,
uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Build a HNewInstance instruction.
bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc);
// Return whether the compiler can assume `cls` is initialized.
bool IsInitialized(Handle<mirror::Class> cls) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to resolve a method using the class linker. Return null if a method could
// not be resolved.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 2808e1b..8f37236 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2242,7 +2242,7 @@
}
static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (rti.IsValid()) {
DCHECK(upper_bound_rti.IsSupertypeOf(rti))
<< " upper_bound_rti: " << upper_bound_rti
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index dfa8276..62e39f5 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,7 +171,7 @@
static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact);
- static ReferenceTypeInfo Create(TypeHandle type_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ReferenceTypeInfo Create(TypeHandle type_handle) REQUIRES_SHARED(Locks::mutator_lock_) {
return Create(type_handle, type_handle->CannotBeAssignedFromOtherTypes());
}
@@ -191,49 +191,49 @@
bool IsExact() const { return is_exact_; }
- bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsObjectClass();
}
- bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStringClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsStringClass();
}
- bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectArray() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
}
- bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInterface() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsInterface();
}
- bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass();
}
- bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsPrimitiveArray();
}
- bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsNonPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
}
- bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanArrayHold(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
- bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
@@ -244,13 +244,13 @@
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
- bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
DCHECK(rti.IsValid());
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
- bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
DCHECK(rti.IsValid());
return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
@@ -260,7 +260,7 @@
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
- bool IsEqual(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsEqual(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsValid() && !rti.IsValid()) {
// Invalid types are equal.
return true;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6e98b4d..c5d7611 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -294,7 +294,7 @@
}
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
@@ -311,7 +311,7 @@
bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
void RunOptimizations(HGraph* graph,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index e96ab19..4289cf7 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -27,7 +27,7 @@
static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
const DexFile& dex_file,
Handle<mirror::DexCache> hint_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(hint_dex_cache->GetDexFile() == &dex_file)) {
return hint_dex_cache.Get();
} else {
@@ -85,7 +85,7 @@
void VisitParameterValue(HParameterValue* instr) OVERRIDE;
void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
@@ -194,7 +194,7 @@
ReferenceTypeInfo upper_bound,
HInstruction* dominator_instr,
HBasicBlock* dominator_block)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// If the position where we should insert the bound type is not already a
// a bound type then we need to create one.
if (position == nullptr || !position->IsBoundType()) {
@@ -487,7 +487,7 @@
const DexFile& dex_file,
uint16_t type_idx,
Handle<mirror::DexCache> hint_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
// Get type from dex cache assuming it was populated by the verifier.
return dex_cache->GetResolvedType(type_idx);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index edd83bf..1fa6624 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -44,7 +44,7 @@
// Returns true if klass is admissible to the propagation: non-null and resolved.
// For an array type, we also check if the component type is admissible.
- static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool IsAdmissible(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
return klass != nullptr &&
klass->IsResolved() &&
(!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType()));
@@ -58,7 +58,7 @@
explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
template <typename T>
- MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+ MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
return handles_->NewHandle(object);
}
@@ -80,8 +80,8 @@
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
- void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
- void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
+ void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
void BoundTypeForIfNotNull(HBasicBlock* block);
void BoundTypeForIfInstanceOf(HBasicBlock* block);
void ProcessWorklist();
@@ -92,10 +92,10 @@
bool UpdateReferenceTypeInfo(HInstruction* instr);
static void UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ValidateTypes();
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 7649b50..75a4eac 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -46,7 +46,7 @@
// Relay method to merge type in reference type propagation.
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a,
- const ReferenceTypeInfo& b) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ReferenceTypeInfo& b) REQUIRES_SHARED(Locks::mutator_lock_) {
return propagation_->MergeTypes(a, b);
}
@@ -56,12 +56,12 @@
}
// Helper method to construct the Object type.
- ReferenceTypeInfo ObjectType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo ObjectType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetObjectClassHandle(), is_exact);
}
// Helper method to construct the String type.
- ReferenceTypeInfo StringType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo StringType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetStringClassHandle(), is_exact);
}
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5a574d9..f7dc112 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -303,7 +303,7 @@
}
static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ReferenceTypeInfo array_type = array->GetReferenceTypeInfo();
DCHECK(array_type.IsPrimitiveArrayClass());
return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType();