Revert "libandroidfw: Improve performance of AssetManager2"
This reverts commit 392132748416719e3df427e6ac8dc11af194342c.
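
For context, the code this revert restores packs each type spec and its
per-configuration types into a single malloc'd block: a TypeSpec header
immediately followed by a flexible array of Type structs, as built by
TypeSpecBuilder::Build() below. A minimal, illustrative sketch of that
allocation pattern (simplified names, not part of the patch itself):

    #include <cstdlib>
    #include <cstring>
    #include <vector>

    struct Config {};  // stands in for the host-endian ResTable_config
    struct Type {
      Config configuration;
      const void* type;  // stands in for the mmapped ResTable_type*
    };

    struct TypeSpec {
      size_t type_count;
      Type types[0];  // flexible array: Type structs live right after the header
    };

    // Allocate the header and the trailing Type array in one block,
    // mirroring what TypeSpecBuilder::Build() does in LoadedArsc.cpp.
    TypeSpec* Build(const std::vector<Type>& types) {
      TypeSpec* spec = static_cast<TypeSpec*>(
          ::malloc(sizeof(TypeSpec) + types.size() * sizeof(Type)));
      if (spec == nullptr) {
        return nullptr;
      }
      spec->type_count = types.size();
      std::memcpy(spec + 1, types.data(), types.size() * sizeof(Type));
      return spec;  // caller releases the whole block with ::free()
    }
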
diff --git a/libs/androidfw/LoadedArsc.cpp b/libs/androidfw/LoadedArsc.cpp
index 1d2c597..e08848f 100644
--- a/libs/androidfw/LoadedArsc.cpp
+++ b/libs/androidfw/LoadedArsc.cpp
@@ -44,6 +44,44 @@
constexpr const static int kAppPackageId = 0x7f;
+// Element of a TypeSpec array. See TypeSpec.
+struct Type {
+ // The configuration for which this type defines entries.
+ // This is already converted to host endianness.
+ ResTable_config configuration;
+
+ // Pointer to the mmapped data where entry definitions are kept.
+ const ResTable_type* type;
+};
+
+// A TypeSpec is immediately followed by
+// an array of Type structs, all in the same block of memory.
+struct TypeSpec {
+ // Pointer to the mmapped data where flags are kept.
+ // Flags denote whether the resource entry is public
+ // and under which configurations it varies.
+ const ResTable_typeSpec* type_spec;
+
+ // Pointer to the mmapped data where the IDMAP mappings for this type
+ // exist. May be nullptr if no IDMAP exists.
+ const IdmapEntry_header* idmap_entries;
+
+ // The number of types that follow this struct.
+ // There is a type for each configuration
+ // that entries are defined for.
+ size_t type_count;
+
+ // Trick to easily access a variable number of Type structs
+ // following this struct, and to ensure their alignment.
+ const Type types[0];
+};
+
+// TypeSpecPtr points to the block of memory that holds
+// a TypeSpec struct, followed by an array of Type structs.
+// TypeSpecPtr is a managed pointer that knows how to delete
+// itself.
+using TypeSpecPtr = util::unique_cptr<TypeSpec>;
+
namespace {
// Builder that helps accumulate Type structs and then create a single
@@ -57,22 +95,21 @@
}
void AddType(const ResTable_type* type) {
- types_.push_back(type);
+ ResTable_config config;
+ config.copyFromDtoH(type->config);
+ types_.push_back(Type{config, type});
}
TypeSpecPtr Build() {
// Check for overflow.
- using ElementType = const ResTable_type*;
- if ((std::numeric_limits<size_t>::max() - sizeof(TypeSpec)) / sizeof(ElementType) <
- types_.size()) {
+ if ((std::numeric_limits<size_t>::max() - sizeof(TypeSpec)) / sizeof(Type) < types_.size()) {
return {};
}
- TypeSpec* type_spec =
- (TypeSpec*)::malloc(sizeof(TypeSpec) + (types_.size() * sizeof(ElementType)));
+ TypeSpec* type_spec = (TypeSpec*)::malloc(sizeof(TypeSpec) + (types_.size() * sizeof(Type)));
type_spec->type_spec = header_;
type_spec->idmap_entries = idmap_header_;
type_spec->type_count = types_.size();
- memcpy(type_spec + 1, types_.data(), types_.size() * sizeof(ElementType));
+ memcpy(type_spec + 1, types_.data(), types_.size() * sizeof(Type));
return TypeSpecPtr(type_spec);
}
@@ -81,7 +118,7 @@
const ResTable_typeSpec* header_;
const IdmapEntry_header* idmap_header_;
- std::vector<const ResTable_type*> types_;
+ std::vector<Type> types_;
};
} // namespace
@@ -125,17 +162,18 @@
return true;
}
-static bool VerifyResTableEntry(const ResTable_type* type, uint32_t entry_offset) {
+static bool VerifyResTableEntry(const ResTable_type* type, uint32_t entry_offset,
+ size_t entry_idx) {
// Check that the offset is aligned.
if (entry_offset & 0x03) {
- LOG(ERROR) << "Entry at offset " << entry_offset << " is not 4-byte aligned.";
+ LOG(ERROR) << "Entry offset at index " << entry_idx << " is not 4-byte aligned.";
return false;
}
// Check that the offset doesn't overflow.
if (entry_offset > std::numeric_limits<uint32_t>::max() - dtohl(type->entriesStart)) {
// Overflow in offset.
- LOG(ERROR) << "Entry at offset " << entry_offset << " is too large.";
+ LOG(ERROR) << "Entry offset at index " << entry_idx << " is too large.";
return false;
}
@@ -143,7 +181,7 @@
entry_offset += dtohl(type->entriesStart);
if (entry_offset > chunk_size - sizeof(ResTable_entry)) {
- LOG(ERROR) << "Entry at offset " << entry_offset
+ LOG(ERROR) << "Entry offset at index " << entry_idx
<< " is too large. No room for ResTable_entry.";
return false;
}
@@ -153,13 +191,13 @@
const size_t entry_size = dtohs(entry->size);
if (entry_size < sizeof(*entry)) {
- LOG(ERROR) << "ResTable_entry size " << entry_size << " at offset " << entry_offset
+ LOG(ERROR) << "ResTable_entry size " << entry_size << " at index " << entry_idx
<< " is too small.";
return false;
}
if (entry_size > chunk_size || entry_offset > chunk_size - entry_size) {
- LOG(ERROR) << "ResTable_entry size " << entry_size << " at offset " << entry_offset
+ LOG(ERROR) << "ResTable_entry size " << entry_size << " at index " << entry_idx
<< " is too large.";
return false;
}
@@ -167,7 +205,7 @@
if (entry_size < sizeof(ResTable_map_entry)) {
// There needs to be room for one Res_value struct.
if (entry_offset + entry_size > chunk_size - sizeof(Res_value)) {
- LOG(ERROR) << "No room for Res_value after ResTable_entry at offset " << entry_offset
+ LOG(ERROR) << "No room for Res_value after ResTable_entry at index " << entry_idx
<< " for type " << (int)type->id << ".";
return false;
}
@@ -176,12 +214,12 @@
reinterpret_cast<const Res_value*>(reinterpret_cast<const uint8_t*>(entry) + entry_size);
const size_t value_size = dtohs(value->size);
if (value_size < sizeof(Res_value)) {
- LOG(ERROR) << "Res_value at offset " << entry_offset << " is too small.";
+ LOG(ERROR) << "Res_value at index " << entry_idx << " is too small.";
return false;
}
if (value_size > chunk_size || entry_offset + entry_size > chunk_size - value_size) {
- LOG(ERROR) << "Res_value size " << value_size << " at offset " << entry_offset
+ LOG(ERROR) << "Res_value size " << value_size << " at index " << entry_idx
<< " is too large.";
return false;
}
@@ -190,76 +228,117 @@
const size_t map_entry_count = dtohl(map->count);
size_t map_entries_start = entry_offset + entry_size;
if (map_entries_start & 0x03) {
- LOG(ERROR) << "Map entries at offset " << entry_offset << " start at unaligned offset.";
+ LOG(ERROR) << "Map entries at index " << entry_idx << " start at unaligned offset.";
return false;
}
// Each entry is sizeof(ResTable_map) big.
if (map_entry_count > ((chunk_size - map_entries_start) / sizeof(ResTable_map))) {
- LOG(ERROR) << "Too many map entries in ResTable_map_entry at offset " << entry_offset << ".";
+ LOG(ERROR) << "Too many map entries in ResTable_map_entry at index " << entry_idx << ".";
return false;
}
}
return true;
}
-const ResTable_entry* LoadedPackage::GetEntry(const ResTable_type* type_chunk,
- uint16_t entry_index) {
- uint32_t entry_offset = GetEntryOffset(type_chunk, entry_index);
- if (entry_offset == ResTable_type::NO_ENTRY) {
- return nullptr;
- }
- return GetEntryFromOffset(type_chunk, entry_offset);
-}
+bool LoadedPackage::FindEntry(const TypeSpecPtr& type_spec_ptr, uint16_t entry_idx,
+ const ResTable_config& config, FindEntryResult* out_entry) const {
+ const ResTable_config* best_config = nullptr;
+ const ResTable_type* best_type = nullptr;
+ uint32_t best_offset = 0;
-uint32_t LoadedPackage::GetEntryOffset(const ResTable_type* type_chunk, uint16_t entry_index) {
- // The configuration matches and is better than the previous selection.
- // Find the entry value if it exists for this configuration.
- const size_t entry_count = dtohl(type_chunk->entryCount);
- const size_t offsets_offset = dtohs(type_chunk->header.headerSize);
+ for (uint32_t i = 0; i < type_spec_ptr->type_count; i++) {
+ const Type* type = &type_spec_ptr->types[i];
+ const ResTable_type* type_chunk = type->type;
- // Check if there is the desired entry in this type.
+ if (type->configuration.match(config) &&
+ (best_config == nullptr || type->configuration.isBetterThan(*best_config, &config))) {
+ // The configuration matches and is better than the previous selection.
+ // Find the entry value if it exists for this configuration.
+ const size_t entry_count = dtohl(type_chunk->entryCount);
+ const size_t offsets_offset = dtohs(type_chunk->header.headerSize);
- if (type_chunk->flags & ResTable_type::FLAG_SPARSE) {
- // This is encoded as a sparse map, so perform a binary search.
- const ResTable_sparseTypeEntry* sparse_indices =
- reinterpret_cast<const ResTable_sparseTypeEntry*>(
+ // Check whether the desired entry exists in this type.
+
+ if (type_chunk->flags & ResTable_type::FLAG_SPARSE) {
+ // This is encoded as a sparse map, so perform a binary search.
+ const ResTable_sparseTypeEntry* sparse_indices =
+ reinterpret_cast<const ResTable_sparseTypeEntry*>(
+ reinterpret_cast<const uint8_t*>(type_chunk) + offsets_offset);
+ const ResTable_sparseTypeEntry* sparse_indices_end = sparse_indices + entry_count;
+ const ResTable_sparseTypeEntry* result =
+ std::lower_bound(sparse_indices, sparse_indices_end, entry_idx,
+ [](const ResTable_sparseTypeEntry& entry, uint16_t entry_idx) {
+ return dtohs(entry.idx) < entry_idx;
+ });
+
+ if (result == sparse_indices_end || dtohs(result->idx) != entry_idx) {
+ // No entry found.
+ continue;
+ }
+
+ // Extract the offset from the entry. Each offset must be a multiple of 4 so we store it as
+ // the real offset divided by 4.
+ best_offset = uint32_t{dtohs(result->offset)} * 4u;
+ } else {
+ if (entry_idx >= entry_count) {
+ // This entry cannot be here.
+ continue;
+ }
+
+ const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
reinterpret_cast<const uint8_t*>(type_chunk) + offsets_offset);
- const ResTable_sparseTypeEntry* sparse_indices_end = sparse_indices + entry_count;
- const ResTable_sparseTypeEntry* result =
- std::lower_bound(sparse_indices, sparse_indices_end, entry_index,
- [](const ResTable_sparseTypeEntry& entry, uint16_t entry_idx) {
- return dtohs(entry.idx) < entry_idx;
- });
+ const uint32_t offset = dtohl(entry_offsets[entry_idx]);
+ if (offset == ResTable_type::NO_ENTRY) {
+ continue;
+ }
- if (result == sparse_indices_end || dtohs(result->idx) != entry_index) {
- // No entry found.
- return ResTable_type::NO_ENTRY;
+ // There is an entry for this resource, record it.
+ best_offset = offset;
+ }
+
+ best_config = &type->configuration;
+ best_type = type_chunk;
}
-
- // Extract the offset from the entry. Each offset must be a multiple of 4 so we store it as
- // the real offset divided by 4.
- return uint32_t{dtohs(result->offset)} * 4u;
}
- // This type is encoded as a dense array.
- if (entry_index >= entry_count) {
- // This entry cannot be here.
- return ResTable_type::NO_ENTRY;
+ if (best_type == nullptr) {
+ return false;
}
- const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
- reinterpret_cast<const uint8_t*>(type_chunk) + offsets_offset);
- return dtohl(entry_offsets[entry_index]);
+ if (UNLIKELY(!VerifyResTableEntry(best_type, best_offset, entry_idx))) {
+ return false;
+ }
+
+ const ResTable_entry* best_entry = reinterpret_cast<const ResTable_entry*>(
+ reinterpret_cast<const uint8_t*>(best_type) + best_offset + dtohl(best_type->entriesStart));
+
+ const uint32_t* flags = reinterpret_cast<const uint32_t*>(type_spec_ptr->type_spec + 1);
+ out_entry->type_flags = dtohl(flags[entry_idx]);
+ out_entry->entry = best_entry;
+ out_entry->config = best_config;
+ out_entry->type_string_ref = StringPoolRef(&type_string_pool_, best_type->id - 1);
+ out_entry->entry_string_ref = StringPoolRef(&key_string_pool_, dtohl(best_entry->key.index));
+ return true;
}
-const ResTable_entry* LoadedPackage::GetEntryFromOffset(const ResTable_type* type_chunk,
- uint32_t offset) {
- if (UNLIKELY(!VerifyResTableEntry(type_chunk, offset))) {
- return nullptr;
+bool LoadedPackage::FindEntry(uint8_t type_idx, uint16_t entry_idx, const ResTable_config& config,
+ FindEntryResult* out_entry) const {
+ // If the type IDs are offset in this package, we need to take that into account when searching
+ // for a type.
+ const TypeSpecPtr& ptr = type_specs_[type_idx - type_id_offset_];
+ if (UNLIKELY(ptr == nullptr)) {
+ return false;
}
- return reinterpret_cast<const ResTable_entry*>(reinterpret_cast<const uint8_t*>(type_chunk) +
- offset + dtohl(type_chunk->entriesStart));
+
+ // If there is an IDMAP supplied with this package, translate the entry ID.
+ if (ptr->idmap_entries != nullptr) {
+ if (!LoadedIdmap::Lookup(ptr->idmap_entries, entry_idx, &entry_idx)) {
+ // There is no mapping, so the resource is not meant to be in this overlay package.
+ return false;
+ }
+ }
+ return FindEntry(ptr, entry_idx, config, out_entry);
}
void LoadedPackage::CollectConfigurations(bool exclude_mipmap,
@@ -267,7 +346,7 @@
const static std::u16string kMipMap = u"mipmap";
const size_t type_count = type_specs_.size();
for (size_t i = 0; i < type_count; i++) {
- const TypeSpecPtr& type_spec = type_specs_[i];
+ const util::unique_cptr<TypeSpec>& type_spec = type_specs_[i];
if (type_spec != nullptr) {
if (exclude_mipmap) {
const int type_idx = type_spec->type_spec->id - 1;
@@ -288,11 +367,8 @@
}
}
- const auto iter_end = type_spec->types + type_spec->type_count;
- for (auto iter = type_spec->types; iter != iter_end; ++iter) {
- ResTable_config config;
- config.copyFromDtoH((*iter)->config);
- out_configs->insert(config);
+ for (size_t j = 0; j < type_spec->type_count; j++) {
+ out_configs->insert(type_spec->types[j].configuration);
}
}
}
@@ -302,12 +378,10 @@
char temp_locale[RESTABLE_MAX_LOCALE_LEN];
const size_t type_count = type_specs_.size();
for (size_t i = 0; i < type_count; i++) {
- const TypeSpecPtr& type_spec = type_specs_[i];
+ const util::unique_cptr<TypeSpec>& type_spec = type_specs_[i];
if (type_spec != nullptr) {
- const auto iter_end = type_spec->types + type_spec->type_count;
- for (auto iter = type_spec->types; iter != iter_end; ++iter) {
- ResTable_config configuration;
- configuration.copyFromDtoH((*iter)->config);
+ for (size_t j = 0; j < type_spec->type_count; j++) {
+ const ResTable_config& configuration = type_spec->types[j].configuration;
if (configuration.locale != 0) {
configuration.getBcp47Locale(temp_locale, canonicalize);
std::string locale(temp_locale);
@@ -335,17 +409,17 @@
return 0u;
}
- const auto iter_end = type_spec->types + type_spec->type_count;
- for (auto iter = type_spec->types; iter != iter_end; ++iter) {
- const ResTable_type* type = *iter;
- size_t entry_count = dtohl(type->entryCount);
+ for (size_t ti = 0; ti < type_spec->type_count; ti++) {
+ const Type* type = &type_spec->types[ti];
+ size_t entry_count = dtohl(type->type->entryCount);
for (size_t entry_idx = 0; entry_idx < entry_count; entry_idx++) {
const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
- reinterpret_cast<const uint8_t*>(type) + dtohs(type->header.headerSize));
+ reinterpret_cast<const uint8_t*>(type->type) + dtohs(type->type->header.headerSize));
const uint32_t offset = dtohl(entry_offsets[entry_idx]);
if (offset != ResTable_type::NO_ENTRY) {
- const ResTable_entry* entry = reinterpret_cast<const ResTable_entry*>(
- reinterpret_cast<const uint8_t*>(type) + dtohl(type->entriesStart) + offset);
+ const ResTable_entry* entry =
+ reinterpret_cast<const ResTable_entry*>(reinterpret_cast<const uint8_t*>(type->type) +
+ dtohl(type->type->entriesStart) + offset);
if (dtohl(entry->key.index) == static_cast<uint32_t>(key_idx)) {
// The package ID will be overridden by the caller (due to runtime assignment of package
// IDs for shared libraries).
@@ -357,7 +431,8 @@
return 0u;
}
-const LoadedPackage* LoadedArsc::GetPackageById(uint8_t package_id) const {
+const LoadedPackage* LoadedArsc::GetPackageForId(uint32_t resid) const {
+ const uint8_t package_id = get_package_id(resid);
for (const auto& loaded_package : packages_) {
if (loaded_package->GetPackageId() == package_id) {
return loaded_package.get();
@@ -605,6 +680,26 @@
return std::move(loaded_package);
}
+bool LoadedArsc::FindEntry(uint32_t resid, const ResTable_config& config,
+ FindEntryResult* out_entry) const {
+ ATRACE_CALL();
+
+ const uint8_t package_id = get_package_id(resid);
+ const uint8_t type_id = get_type_id(resid);
+ const uint16_t entry_id = get_entry_id(resid);
+
+ if (UNLIKELY(type_id == 0)) {
+ LOG(ERROR) << base::StringPrintf("Invalid ID 0x%08x.", resid);
+ return false;
+ }
+
+ for (const auto& loaded_package : packages_) {
+ if (loaded_package->GetPackageId() == package_id) {
+ return loaded_package->FindEntry(type_id - 1, entry_id, config, out_entry);
+ }
+ }
+ return false;
+}
bool LoadedArsc::LoadTable(const Chunk& chunk, const LoadedIdmap* loaded_idmap,
bool load_as_shared_library) {