blob: 70bb441f94cb5ad573815e54e4108f21cb81876b [file] [log] [blame]
Adam Lesinski7ad11102016-10-28 16:39:15 -07001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_RESOURCES
18
19#include "androidfw/LoadedArsc.h"
20
Adam Lesinski73f6f9d2017-11-14 10:18:05 -080021#include <algorithm>
Adam Lesinski7ad11102016-10-28 16:39:15 -070022#include <cstddef>
23#include <limits>
24
25#include "android-base/logging.h"
26#include "android-base/stringprintf.h"
27#include "utils/ByteOrder.h"
28#include "utils/Trace.h"
29
30#ifdef _WIN32
31#ifdef ERROR
32#undef ERROR
33#endif
34#endif
35
Adam Lesinski7ad11102016-10-28 16:39:15 -070036#include "androidfw/ByteBucketArray.h"
Adam Lesinskida431a22016-12-29 16:08:16 -050037#include "androidfw/Chunk.h"
Adam Lesinski929d6512017-01-16 19:11:19 -080038#include "androidfw/ResourceUtils.h"
Adam Lesinski7ad11102016-10-28 16:39:15 -070039#include "androidfw/Util.h"
40
Ryan Mitchell55ef6162020-11-13 23:55:20 +000041using ::android::base::StringPrintf;
Adam Lesinski7ad11102016-10-28 16:39:15 -070042
43namespace android {
44
// Package ID reserved for regular application resources (the PP byte in a
// resource ID 0xPPTTEEEE).
constexpr const static int kAppPackageId = 0x7f;
Adam Lesinski7ad11102016-10-28 16:39:15 -070046
Adam Lesinskida431a22016-12-29 16:08:16 -050047namespace {
48
Adam Lesinski7ad11102016-10-28 16:39:15 -070049// Builder that helps accumulate Type structs and then create a single
50// contiguous block of memory to store both the TypeSpec struct and
51// the Type structs.
52class TypeSpecPtrBuilder {
53 public:
Ryan Mitchell55ef6162020-11-13 23:55:20 +000054 explicit TypeSpecPtrBuilder(const ResTable_typeSpec* header)
Ryan Mitchell8a891d82019-07-01 09:48:23 -070055 : header_(header) {
Adam Lesinski970bd8d2017-09-25 13:21:55 -070056 }
Adam Lesinski7ad11102016-10-28 16:39:15 -070057
Ryan Mitchell55ef6162020-11-13 23:55:20 +000058 void AddType(const ResTable_type* type) {
Adam Lesinskibebfcc42018-02-12 14:27:46 -080059 types_.push_back(type);
Adam Lesinski7ad11102016-10-28 16:39:15 -070060 }
61
62 TypeSpecPtr Build() {
63 // Check for overflow.
Ryan Mitchell55ef6162020-11-13 23:55:20 +000064 using ElementType = const ResTable_type*;
Adam Lesinskibebfcc42018-02-12 14:27:46 -080065 if ((std::numeric_limits<size_t>::max() - sizeof(TypeSpec)) / sizeof(ElementType) <
66 types_.size()) {
Adam Lesinski7ad11102016-10-28 16:39:15 -070067 return {};
68 }
Adam Lesinskibebfcc42018-02-12 14:27:46 -080069 TypeSpec* type_spec =
70 (TypeSpec*)::malloc(sizeof(TypeSpec) + (types_.size() * sizeof(ElementType)));
Adam Lesinski7ad11102016-10-28 16:39:15 -070071 type_spec->type_spec = header_;
72 type_spec->type_count = types_.size();
Adam Lesinskibebfcc42018-02-12 14:27:46 -080073 memcpy(type_spec + 1, types_.data(), types_.size() * sizeof(ElementType));
Adam Lesinski7ad11102016-10-28 16:39:15 -070074 return TypeSpecPtr(type_spec);
75 }
76
77 private:
78 DISALLOW_COPY_AND_ASSIGN(TypeSpecPtrBuilder);
79
Ryan Mitchell55ef6162020-11-13 23:55:20 +000080 const ResTable_typeSpec* header_;
81 std::vector<const ResTable_type*> types_;
Adam Lesinski7ad11102016-10-28 16:39:15 -070082};
83
84} // namespace
85
// Special members are defaulted out-of-line; presumably so the header can get
// away with forward declarations of member types -- NOTE(review): confirm
// against LoadedArsc.h.
LoadedPackage::LoadedPackage() = default;
LoadedPackage::~LoadedPackage() = default;
88
// Precondition: The header passed in has already been verified, so reading any fields and trusting
// the ResChunk_header is safe.
//
// Validates the self-describing layout fields of a RES_TABLE_TYPE_TYPE chunk:
// non-zero type ID, a sane entry count, and entry offsets that fit between the
// header and the entry data. Returns false (after logging) on any violation.
static bool VerifyResTableType(const ResTable_type* header) {
  // Type ID 0 is invalid; type IDs are 1-based (TT byte of 0xPPTTEEEE).
  if (header->id == 0) {
    LOG(ERROR) << "RES_TABLE_TYPE_TYPE has invalid ID 0.";
    return false;
  }

  // The entry ID space (EEEE in 0xPPTTEEEE) is 16 bits, so more than 2^16
  // entries can never be addressed.
  const size_t entry_count = dtohl(header->entryCount);
  if (entry_count > std::numeric_limits<uint16_t>::max()) {
    LOG(ERROR) << "RES_TABLE_TYPE_TYPE has too many entries (" << entry_count << ").";
    return false;
  }

  // Make sure that there is enough room for the entry offsets.
  const size_t offsets_offset = dtohs(header->header.headerSize);
  const size_t entries_offset = dtohl(header->entriesStart);
  const size_t offsets_length = sizeof(uint32_t) * entry_count;

  // Written as a subtraction so the comparison cannot overflow: the offset
  // table (entry_count uint32_t values) must fit entirely before entriesStart.
  if (offsets_offset > entries_offset || entries_offset - offsets_offset < offsets_length) {
    LOG(ERROR) << "RES_TABLE_TYPE_TYPE entry offsets overlap actual entry data.";
    return false;
  }

  // The entry data must start within the chunk itself.
  if (entries_offset > dtohl(header->header.size)) {
    LOG(ERROR) << "RES_TABLE_TYPE_TYPE entry offsets extend beyond chunk.";
    return false;
  }

  // Entries are read as 4-byte-aligned structs, so entriesStart must be a
  // multiple of 4.
  if (entries_offset & 0x03) {
    LOG(ERROR) << "RES_TABLE_TYPE_TYPE entries start at unaligned address.";
    return false;
  }
  return true;
}
124
// Validates that the entry at `entry_offset` (relative to entriesStart) lies
// fully inside the chunk `type`, is aligned, and that the data following it
// (either a single Res_value or a ResTable_map array) also fits. Every
// comparison is arranged as a subtraction from chunk_size so the arithmetic
// cannot overflow. Returns false (after logging) on any violation.
static bool VerifyResTableEntry(const ResTable_type* type, uint32_t entry_offset) {
  // Check that the offset is aligned.
  if (entry_offset & 0x03) {
    LOG(ERROR) << "Entry at offset " << entry_offset << " is not 4-byte aligned.";
    return false;
  }

  // Check that the offset doesn't overflow.
  if (entry_offset > std::numeric_limits<uint32_t>::max() - dtohl(type->entriesStart)) {
    // Overflow in offset.
    LOG(ERROR) << "Entry at offset " << entry_offset << " is too large.";
    return false;
  }

  const size_t chunk_size = dtohl(type->header.size);

  // From here on entry_offset is absolute within the chunk.
  entry_offset += dtohl(type->entriesStart);
  if (entry_offset > chunk_size - sizeof(ResTable_entry)) {
    LOG(ERROR) << "Entry at offset " << entry_offset
               << " is too large. No room for ResTable_entry.";
    return false;
  }

  const ResTable_entry* entry = reinterpret_cast<const ResTable_entry*>(
      reinterpret_cast<const uint8_t*>(type) + entry_offset);

  // The entry's self-declared size must cover at least the fixed struct...
  const size_t entry_size = dtohs(entry->size);
  if (entry_size < sizeof(*entry)) {
    LOG(ERROR) << "ResTable_entry size " << entry_size << " at offset " << entry_offset
               << " is too small.";
    return false;
  }

  // ...and must not run past the end of the chunk.
  if (entry_size > chunk_size || entry_offset > chunk_size - entry_size) {
    LOG(ERROR) << "ResTable_entry size " << entry_size << " at offset " << entry_offset
               << " is too large.";
    return false;
  }

  // An entry smaller than ResTable_map_entry is a plain value entry; otherwise
  // it is treated as a map (bag) entry below.
  if (entry_size < sizeof(ResTable_map_entry)) {
    // There needs to be room for one Res_value struct.
    if (entry_offset + entry_size > chunk_size - sizeof(Res_value)) {
      LOG(ERROR) << "No room for Res_value after ResTable_entry at offset " << entry_offset
                 << " for type " << (int)type->id << ".";
      return false;
    }

    // The Res_value immediately follows the (possibly extended) entry struct.
    const Res_value* value =
        reinterpret_cast<const Res_value*>(reinterpret_cast<const uint8_t*>(entry) + entry_size);
    const size_t value_size = dtohs(value->size);
    if (value_size < sizeof(Res_value)) {
      LOG(ERROR) << "Res_value at offset " << entry_offset << " is too small.";
      return false;
    }

    if (value_size > chunk_size || entry_offset + entry_size > chunk_size - value_size) {
      LOG(ERROR) << "Res_value size " << value_size << " at offset " << entry_offset
                 << " is too large.";
      return false;
    }
  } else {
    const ResTable_map_entry* map = reinterpret_cast<const ResTable_map_entry*>(entry);
    const size_t map_entry_count = dtohl(map->count);
    size_t map_entries_start = entry_offset + entry_size;
    // Map entries are read as 4-byte-aligned structs.
    if (map_entries_start & 0x03) {
      LOG(ERROR) << "Map entries at offset " << entry_offset << " start at unaligned offset.";
      return false;
    }

    // Each entry is sizeof(ResTable_map) big.
    if (map_entry_count > ((chunk_size - map_entries_start) / sizeof(ResTable_map))) {
      LOG(ERROR) << "Too many map entries in ResTable_map_entry at offset " << entry_offset << ".";
      return false;
    }
  }
  return true;
}
202
MÃ¥rten Kongstad3f1f4fc2018-03-02 09:34:18 +0100203LoadedPackage::iterator::iterator(const LoadedPackage* lp, size_t ti, size_t ei)
204 : loadedPackage_(lp),
205 typeIndex_(ti),
206 entryIndex_(ei),
207 typeIndexEnd_(lp->resource_ids_.size() + 1) {
208 while (typeIndex_ < typeIndexEnd_ && loadedPackage_->resource_ids_[typeIndex_] == 0) {
209 typeIndex_++;
210 }
211}
212
// Advances to the next (type, entry) pair, skipping type indices whose entry
// count in resource_ids_ is zero. Leaves the iterator at typeIndexEnd_ (the
// end sentinel) when exhausted.
LoadedPackage::iterator& LoadedPackage::iterator::operator++() {
  while (typeIndex_ < typeIndexEnd_) {
    // More entries remain in the current type: step to the next entry.
    if (entryIndex_ + 1 < loadedPackage_->resource_ids_[typeIndex_]) {
      entryIndex_++;
      break;
    }
    // Current type exhausted: move to the next type, restarting at entry 0.
    entryIndex_ = 0;
    typeIndex_++;
    // Stop at the next type that actually has entries; keep looping past
    // empty ones.
    if (typeIndex_ < typeIndexEnd_ && loadedPackage_->resource_ids_[typeIndex_] != 0) {
      break;
    }
  }
  return *this;
}
227
228uint32_t LoadedPackage::iterator::operator*() const {
229 if (typeIndex_ >= typeIndexEnd_) {
230 return 0;
231 }
232 return make_resid(loadedPackage_->package_id_, typeIndex_ + loadedPackage_->type_id_offset_,
233 entryIndex_);
234}
235
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000236const ResTable_entry* LoadedPackage::GetEntry(const ResTable_type* type_chunk,
237 uint16_t entry_index) {
238 uint32_t entry_offset = GetEntryOffset(type_chunk, entry_index);
239 if (entry_offset == ResTable_type::NO_ENTRY) {
240 return nullptr;
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800241 }
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000242 return GetEntryFromOffset(type_chunk, entry_offset);
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800243}
Adam Lesinski1a1e9c22017-10-13 15:45:34 -0700244
// Looks up the offset (relative to entriesStart) of `entry_index` within
// `type_chunk`, handling both the sparse and dense entry-offset encodings.
// Returns ResTable_type::NO_ENTRY when the entry is not present.
uint32_t LoadedPackage::GetEntryOffset(const ResTable_type* type_chunk, uint16_t entry_index) {
  // The offset table begins right after the chunk header.
  const size_t entry_count = dtohl(type_chunk->entryCount);
  const size_t offsets_offset = dtohs(type_chunk->header.headerSize);

  // Check if there is the desired entry in this type.

  if (type_chunk->flags & ResTable_type::FLAG_SPARSE) {
    // This is encoded as a sparse map, so perform a binary search.
    // The table holds (idx, offset) pairs sorted by idx.
    const ResTable_sparseTypeEntry* sparse_indices =
        reinterpret_cast<const ResTable_sparseTypeEntry*>(
            reinterpret_cast<const uint8_t*>(type_chunk) + offsets_offset);
    const ResTable_sparseTypeEntry* sparse_indices_end = sparse_indices + entry_count;
    const ResTable_sparseTypeEntry* result =
        std::lower_bound(sparse_indices, sparse_indices_end, entry_index,
                         [](const ResTable_sparseTypeEntry& entry, uint16_t entry_idx) {
                           return dtohs(entry.idx) < entry_idx;
                         });

    if (result == sparse_indices_end || dtohs(result->idx) != entry_index) {
      // No entry found.
      return ResTable_type::NO_ENTRY;
    }

    // Extract the offset from the entry. Each offset must be a multiple of 4 so we store it as
    // the real offset divided by 4.
    return uint32_t{dtohs(result->offset)} * 4u;
  }

  // This type is encoded as a dense array.
  if (entry_index >= entry_count) {
    // This entry cannot be here.
    return ResTable_type::NO_ENTRY;
  }

  // Dense encoding: one uint32_t offset per entry index.
  const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
      reinterpret_cast<const uint8_t*>(type_chunk) + offsets_offset);
  return dtohl(entry_offsets[entry_index]);
}
285
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000286const ResTable_entry* LoadedPackage::GetEntryFromOffset(const ResTable_type* type_chunk,
287 uint32_t offset) {
288 if (UNLIKELY(!VerifyResTableEntry(type_chunk, offset))) {
289 return nullptr;
Ryan Mitchellc75c2e02020-08-17 08:42:48 -0700290 }
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000291 return reinterpret_cast<const ResTable_entry*>(reinterpret_cast<const uint8_t*>(type_chunk) +
292 offset + dtohl(type_chunk->entriesStart));
Ryan Mitchellc75c2e02020-08-17 08:42:48 -0700293}
294
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000295void LoadedPackage::CollectConfigurations(bool exclude_mipmap,
296 std::set<ResTable_config>* out_configs) const {
297 const static std::u16string kMipMap = u"mipmap";
Adam Lesinski0c405242017-01-13 20:47:26 -0800298 const size_t type_count = type_specs_.size();
299 for (size_t i = 0; i < type_count; i++) {
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800300 const TypeSpecPtr& type_spec = type_specs_[i];
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000301 if (type_spec != nullptr) {
302 if (exclude_mipmap) {
303 const int type_idx = type_spec->type_spec->id - 1;
304 size_t type_name_len;
305 const char16_t* type_name16 = type_string_pool_.stringAt(type_idx, &type_name_len);
306 if (type_name16 != nullptr) {
307 if (kMipMap.compare(0, std::u16string::npos, type_name16, type_name_len) == 0) {
308 // This is a mipmap type, skip collection.
309 continue;
310 }
311 }
312 const char* type_name = type_string_pool_.string8At(type_idx, &type_name_len);
313 if (type_name != nullptr) {
314 if (strncmp(type_name, "mipmap", type_name_len) == 0) {
315 // This is a mipmap type, skip collection.
316 continue;
317 }
Adam Lesinski0c405242017-01-13 20:47:26 -0800318 }
319 }
320
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000321 const auto iter_end = type_spec->types + type_spec->type_count;
322 for (auto iter = type_spec->types; iter != iter_end; ++iter) {
323 ResTable_config config;
324 config.copyFromDtoH((*iter)->config);
325 out_configs->insert(config);
Ryan Mitchellc75c2e02020-08-17 08:42:48 -0700326 }
Ryan Mitchellc75c2e02020-08-17 08:42:48 -0700327 }
Adam Lesinski0c405242017-01-13 20:47:26 -0800328 }
329}
330
331void LoadedPackage::CollectLocales(bool canonicalize, std::set<std::string>* out_locales) const {
332 char temp_locale[RESTABLE_MAX_LOCALE_LEN];
333 const size_t type_count = type_specs_.size();
334 for (size_t i = 0; i < type_count; i++) {
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800335 const TypeSpecPtr& type_spec = type_specs_[i];
Adam Lesinski0c405242017-01-13 20:47:26 -0800336 if (type_spec != nullptr) {
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800337 const auto iter_end = type_spec->types + type_spec->type_count;
338 for (auto iter = type_spec->types; iter != iter_end; ++iter) {
339 ResTable_config configuration;
340 configuration.copyFromDtoH((*iter)->config);
Adam Lesinski0c405242017-01-13 20:47:26 -0800341 if (configuration.locale != 0) {
342 configuration.getBcp47Locale(temp_locale, canonicalize);
343 std::string locale(temp_locale);
344 out_locales->insert(std::move(locale));
345 }
346 }
347 }
348 }
349}
350
// Finds the resource ID for the resource named `type_name`/`entry_name` by a
// linear scan of every configuration of the matching type. Returns 0 when not
// found. The returned ID has package 0x00; the caller supplies the real
// package ID.
// NOTE(review): the inner loop reads the offset table as a dense uint32_t
// array; presumably callers never hit FLAG_SPARSE types here -- confirm.
uint32_t LoadedPackage::FindEntryByName(const std::u16string& type_name,
                                        const std::u16string& entry_name) const {
  // Resolve both names to their string-pool indices first; a missing name
  // means the resource cannot exist.
  ssize_t type_idx = type_string_pool_.indexOfString(type_name.data(), type_name.size());
  if (type_idx < 0) {
    return 0u;
  }

  ssize_t key_idx = key_string_pool_.indexOfString(entry_name.data(), entry_name.size());
  if (key_idx < 0) {
    return 0u;
  }

  const TypeSpec* type_spec = type_specs_[type_idx].get();
  if (type_spec == nullptr) {
    return 0u;
  }

  // Scan every configuration variant of this type.
  const auto iter_end = type_spec->types + type_spec->type_count;
  for (auto iter = type_spec->types; iter != iter_end; ++iter) {
    const ResTable_type* type = *iter;
    size_t entry_count = dtohl(type->entryCount);
    for (size_t entry_idx = 0; entry_idx < entry_count; entry_idx++) {
      // The offset table sits right after the chunk header.
      const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
          reinterpret_cast<const uint8_t*>(type) + dtohs(type->header.headerSize));
      const uint32_t offset = dtohl(entry_offsets[entry_idx]);
      if (offset != ResTable_type::NO_ENTRY) {
        const ResTable_entry* entry = reinterpret_cast<const ResTable_entry*>(
            reinterpret_cast<const uint8_t*>(type) + dtohl(type->entriesStart) + offset);
        // Match on the entry's key-pool index.
        if (dtohl(entry->key.index) == static_cast<uint32_t>(key_idx)) {
          // The package ID will be overridden by the caller (due to runtime assignment of package
          // IDs for shared libraries).
          return make_resid(0x00, type_idx + type_id_offset_ + 1, entry_idx);
        }
      }
    }
  }
  return 0u;
}
389
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800390const LoadedPackage* LoadedArsc::GetPackageById(uint8_t package_id) const {
Adam Lesinski1a1e9c22017-10-13 15:45:34 -0700391 for (const auto& loaded_package : packages_) {
392 if (loaded_package->GetPackageId() == package_id) {
393 return loaded_package.get();
394 }
395 }
396 return nullptr;
397}
398
399std::unique_ptr<const LoadedPackage> LoadedPackage::Load(const Chunk& chunk,
Ryan Mitchell73bfe412019-11-12 16:22:04 -0800400 package_property_t property_flags) {
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800401 ATRACE_NAME("LoadedPackage::Load");
Adam Lesinski1a1e9c22017-10-13 15:45:34 -0700402 std::unique_ptr<LoadedPackage> loaded_package(new LoadedPackage());
Adam Lesinskida431a22016-12-29 16:08:16 -0500403
Adam Lesinski970bd8d2017-09-25 13:21:55 -0700404 // typeIdOffset was added at some point, but we still must recognize apps built before this
405 // was added.
Adam Lesinski33af6c72017-03-29 13:00:35 -0700406 constexpr size_t kMinPackageSize =
407 sizeof(ResTable_package) - sizeof(ResTable_package::typeIdOffset);
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000408 const ResTable_package* header = chunk.header<ResTable_package, kMinPackageSize>();
409 if (header == nullptr) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800410 LOG(ERROR) << "RES_TABLE_PACKAGE_TYPE too small.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500411 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700412 }
413
Ryan Mitchell73bfe412019-11-12 16:22:04 -0800414 if ((property_flags & PROPERTY_SYSTEM) != 0) {
415 loaded_package->property_flags_ |= PROPERTY_SYSTEM;
416 }
417
418 if ((property_flags & PROPERTY_LOADER) != 0) {
419 loaded_package->property_flags_ |= PROPERTY_LOADER;
420 }
421
422 if ((property_flags & PROPERTY_OVERLAY) != 0) {
423 // Overlay resources must have an exclusive resource id space for referencing internal
424 // resources.
425 loaded_package->property_flags_ |= PROPERTY_OVERLAY | PROPERTY_DYNAMIC;
426 }
Adam Lesinski1a1e9c22017-10-13 15:45:34 -0700427
Adam Lesinski7ad11102016-10-28 16:39:15 -0700428 loaded_package->package_id_ = dtohl(header->id);
Adam Lesinski1a1e9c22017-10-13 15:45:34 -0700429 if (loaded_package->package_id_ == 0 ||
Ryan Mitchell73bfe412019-11-12 16:22:04 -0800430 (loaded_package->package_id_ == kAppPackageId && (property_flags & PROPERTY_DYNAMIC) != 0)) {
431 loaded_package->property_flags_ |= PROPERTY_DYNAMIC;
Winson9947f1e2019-08-16 10:20:39 -0700432 }
433
Adam Lesinskic6aada92017-01-13 15:34:14 -0800434 if (header->header.headerSize >= sizeof(ResTable_package)) {
435 uint32_t type_id_offset = dtohl(header->typeIdOffset);
436 if (type_id_offset > std::numeric_limits<uint8_t>::max()) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800437 LOG(ERROR) << "RES_TABLE_PACKAGE_TYPE type ID offset too large.";
Adam Lesinskic6aada92017-01-13 15:34:14 -0800438 return {};
439 }
440 loaded_package->type_id_offset_ = static_cast<int>(type_id_offset);
441 }
442
Adam Lesinskida431a22016-12-29 16:08:16 -0500443 util::ReadUtf16StringFromDevice(header->name, arraysize(header->name),
444 &loaded_package->package_name_);
Adam Lesinski7ad11102016-10-28 16:39:15 -0700445
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800446 // A map of TypeSpec builders, each associated with an type index.
447 // We use these to accumulate the set of Types available for a TypeSpec, and later build a single,
448 // contiguous block of memory that holds all the Types together with the TypeSpec.
449 std::unordered_map<int, std::unique_ptr<TypeSpecPtrBuilder>> type_builder_map;
Adam Lesinski7ad11102016-10-28 16:39:15 -0700450
451 ChunkIterator iter(chunk.data_ptr(), chunk.data_size());
452 while (iter.HasNext()) {
453 const Chunk child_chunk = iter.Next();
454 switch (child_chunk.type()) {
455 case RES_STRING_POOL_TYPE: {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000456 const uintptr_t pool_address =
457 reinterpret_cast<uintptr_t>(child_chunk.header<ResChunk_header>());
458 const uintptr_t header_address = reinterpret_cast<uintptr_t>(header);
459 if (pool_address == header_address + dtohl(header->typeStrings)) {
Adam Lesinski7ad11102016-10-28 16:39:15 -0700460 // This string pool is the type string pool.
461 status_t err = loaded_package->type_string_pool_.setTo(
462 child_chunk.header<ResStringPool_header>(), child_chunk.size());
463 if (err != NO_ERROR) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800464 LOG(ERROR) << "RES_STRING_POOL_TYPE for types corrupt.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500465 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700466 }
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000467 } else if (pool_address == header_address + dtohl(header->keyStrings)) {
Adam Lesinski7ad11102016-10-28 16:39:15 -0700468 // This string pool is the key string pool.
469 status_t err = loaded_package->key_string_pool_.setTo(
470 child_chunk.header<ResStringPool_header>(), child_chunk.size());
471 if (err != NO_ERROR) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800472 LOG(ERROR) << "RES_STRING_POOL_TYPE for keys corrupt.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500473 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700474 }
475 } else {
Adam Lesinski498f6052017-11-29 13:24:29 -0800476 LOG(WARNING) << "Too many RES_STRING_POOL_TYPEs found in RES_TABLE_PACKAGE_TYPE.";
Adam Lesinski7ad11102016-10-28 16:39:15 -0700477 }
478 } break;
479
480 case RES_TABLE_TYPE_SPEC_TYPE: {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000481 const ResTable_typeSpec* type_spec = child_chunk.header<ResTable_typeSpec>();
482 if (type_spec == nullptr) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800483 LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE too small.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500484 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700485 }
486
487 if (type_spec->id == 0) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800488 LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE has invalid ID 0.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500489 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700490 }
491
Adam Lesinskic6aada92017-01-13 15:34:14 -0800492 if (loaded_package->type_id_offset_ + static_cast<int>(type_spec->id) >
493 std::numeric_limits<uint8_t>::max()) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800494 LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE has out of range ID.";
Adam Lesinskic6aada92017-01-13 15:34:14 -0800495 return {};
496 }
497
Adam Lesinski7ad11102016-10-28 16:39:15 -0700498 // The data portion of this chunk contains entry_count 32bit entries,
499 // each one representing a set of flags.
500 // Here we only validate that the chunk is well formed.
501 const size_t entry_count = dtohl(type_spec->entryCount);
502
503 // There can only be 2^16 entries in a type, because that is the ID
504 // space for entries (EEEE) in the resource ID 0xPPTTEEEE.
505 if (entry_count > std::numeric_limits<uint16_t>::max()) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800506 LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE has too many entries (" << entry_count << ").";
Adam Lesinskida431a22016-12-29 16:08:16 -0500507 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700508 }
509
510 if (entry_count * sizeof(uint32_t) > chunk.data_size()) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800511 LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE too small to hold entries.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500512 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700513 }
514
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800515 std::unique_ptr<TypeSpecPtrBuilder>& builder_ptr = type_builder_map[type_spec->id - 1];
516 if (builder_ptr == nullptr) {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000517 builder_ptr = util::make_unique<TypeSpecPtrBuilder>(type_spec);
MÃ¥rten Kongstad3f1f4fc2018-03-02 09:34:18 +0100518 loaded_package->resource_ids_.set(type_spec->id, entry_count);
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800519 } else {
520 LOG(WARNING) << StringPrintf("RES_TABLE_TYPE_SPEC_TYPE already defined for ID %02x",
521 type_spec->id);
522 }
Adam Lesinski7ad11102016-10-28 16:39:15 -0700523 } break;
524
525 case RES_TABLE_TYPE_TYPE: {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000526 const ResTable_type* type = child_chunk.header<ResTable_type, kResTableTypeMinSize>();
527 if (type == nullptr) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800528 LOG(ERROR) << "RES_TABLE_TYPE_TYPE too small.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500529 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700530 }
531
Adam Lesinski498f6052017-11-29 13:24:29 -0800532 if (!VerifyResTableType(type)) {
Adam Lesinskida431a22016-12-29 16:08:16 -0500533 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700534 }
535
536 // Type chunks must be preceded by their TypeSpec chunks.
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800537 std::unique_ptr<TypeSpecPtrBuilder>& builder_ptr = type_builder_map[type->id - 1];
538 if (builder_ptr != nullptr) {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000539 builder_ptr->AddType(type);
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800540 } else {
541 LOG(ERROR) << StringPrintf(
542 "RES_TABLE_TYPE_TYPE with ID %02x found without preceding RES_TABLE_TYPE_SPEC_TYPE.",
543 type->id);
Adam Lesinskida431a22016-12-29 16:08:16 -0500544 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700545 }
Adam Lesinski7ad11102016-10-28 16:39:15 -0700546 } break;
547
Adam Lesinskida431a22016-12-29 16:08:16 -0500548 case RES_TABLE_LIBRARY_TYPE: {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000549 const ResTable_lib_header* lib = child_chunk.header<ResTable_lib_header>();
550 if (lib == nullptr) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800551 LOG(ERROR) << "RES_TABLE_LIBRARY_TYPE too small.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500552 return {};
553 }
554
555 if (child_chunk.data_size() / sizeof(ResTable_lib_entry) < dtohl(lib->count)) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800556 LOG(ERROR) << "RES_TABLE_LIBRARY_TYPE too small to hold entries.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500557 return {};
558 }
559
560 loaded_package->dynamic_package_map_.reserve(dtohl(lib->count));
561
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000562 const ResTable_lib_entry* const entry_begin =
563 reinterpret_cast<const ResTable_lib_entry*>(child_chunk.data_ptr());
564 const ResTable_lib_entry* const entry_end = entry_begin + dtohl(lib->count);
Adam Lesinskida431a22016-12-29 16:08:16 -0500565 for (auto entry_iter = entry_begin; entry_iter != entry_end; ++entry_iter) {
566 std::string package_name;
567 util::ReadUtf16StringFromDevice(entry_iter->packageName,
568 arraysize(entry_iter->packageName), &package_name);
569
570 if (dtohl(entry_iter->packageId) >= std::numeric_limits<uint8_t>::max()) {
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800571 LOG(ERROR) << StringPrintf(
Adam Lesinskida431a22016-12-29 16:08:16 -0500572 "Package ID %02x in RES_TABLE_LIBRARY_TYPE too large for package '%s'.",
573 dtohl(entry_iter->packageId), package_name.c_str());
574 return {};
575 }
576
577 loaded_package->dynamic_package_map_.emplace_back(std::move(package_name),
578 dtohl(entry_iter->packageId));
579 }
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800580 } break;
Adam Lesinskida431a22016-12-29 16:08:16 -0500581
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800582 case RES_TABLE_OVERLAYABLE_TYPE: {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000583 const ResTable_overlayable_header* header =
584 child_chunk.header<ResTable_overlayable_header>();
585 if (header == nullptr) {
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800586 LOG(ERROR) << "RES_TABLE_OVERLAYABLE_TYPE too small.";
587 return {};
588 }
589
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800590 std::string name;
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000591 util::ReadUtf16StringFromDevice(header->name, arraysize(header->name), &name);
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800592 std::string actor;
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000593 util::ReadUtf16StringFromDevice(header->actor, arraysize(header->actor), &actor);
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800594
MÃ¥rten Kongstadc92c4dd2019-02-05 01:29:59 +0100595 if (loaded_package->overlayable_map_.find(name) !=
596 loaded_package->overlayable_map_.end()) {
597 LOG(ERROR) << "Multiple <overlayable> blocks with the same name '" << name << "'.";
598 return {};
599 }
600 loaded_package->overlayable_map_.emplace(name, actor);
601
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800602 // Iterate over the overlayable policy chunks contained within the overlayable chunk data
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800603 ChunkIterator overlayable_iter(child_chunk.data_ptr(), child_chunk.data_size());
604 while (overlayable_iter.HasNext()) {
605 const Chunk overlayable_child_chunk = overlayable_iter.Next();
606
607 switch (overlayable_child_chunk.type()) {
608 case RES_TABLE_OVERLAYABLE_POLICY_TYPE: {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000609 const ResTable_overlayable_policy_header* policy_header =
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800610 overlayable_child_chunk.header<ResTable_overlayable_policy_header>();
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000611 if (policy_header == nullptr) {
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800612 LOG(ERROR) << "RES_TABLE_OVERLAYABLE_POLICY_TYPE too small.";
613 return {};
614 }
615
616 if ((overlayable_child_chunk.data_size() / sizeof(ResTable_ref))
617 < dtohl(policy_header->entry_count)) {
618 LOG(ERROR) << "RES_TABLE_OVERLAYABLE_POLICY_TYPE too small to hold entries.";
619 return {};
620 }
621
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800622 // Retrieve all the resource ids belonging to this policy chunk
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800623 std::unordered_set<uint32_t> ids;
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000624 const auto ids_begin =
625 reinterpret_cast<const ResTable_ref*>(overlayable_child_chunk.data_ptr());
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800626 const auto ids_end = ids_begin + dtohl(policy_header->entry_count);
627 for (auto id_iter = ids_begin; id_iter != ids_end; ++id_iter) {
628 ids.insert(dtohl(id_iter->ident));
629 }
630
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800631 // Add the pairing of overlayable properties and resource ids to the package
Ryan Mitchell54237ff2018-12-13 15:44:29 -0800632 OverlayableInfo overlayable_info{};
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800633 overlayable_info.name = name;
634 overlayable_info.actor = actor;
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800635 overlayable_info.policy_flags = policy_header->policy_flags;
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000636 loaded_package->overlayable_infos_.push_back(std::make_pair(overlayable_info, ids));
Ryan Mitchell19823452019-01-29 12:01:24 -0800637 loaded_package->defines_overlayable_ = true;
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800638 break;
639 }
640
641 default:
642 LOG(WARNING) << StringPrintf("Unknown chunk type '%02x'.", chunk.type());
643 break;
644 }
645 }
646
647 if (overlayable_iter.HadError()) {
Ryan Mitchellef5673a2018-12-12 18:45:34 -0800648 LOG(ERROR) << StringPrintf("Error parsing RES_TABLE_OVERLAYABLE_TYPE: %s",
Ryan Mitchell75e20dd2018-11-06 16:39:36 -0800649 overlayable_iter.GetLastError().c_str());
650 if (overlayable_iter.HadFatalError()) {
651 return {};
652 }
653 }
Adam Lesinskida431a22016-12-29 16:08:16 -0500654 } break;
655
Adam Lesinski7ad11102016-10-28 16:39:15 -0700656 default:
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800657 LOG(WARNING) << StringPrintf("Unknown chunk type '%02x'.", chunk.type());
Adam Lesinski7ad11102016-10-28 16:39:15 -0700658 break;
659 }
660 }
661
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800662 if (iter.HadError()) {
663 LOG(ERROR) << iter.GetLastError();
Todd Kennedy28e663c2018-07-12 13:15:54 -0700664 if (iter.HadFatalError()) {
665 return {};
666 }
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800667 }
668
669 // Flatten and construct the TypeSpecs.
670 for (auto& entry : type_builder_map) {
671 uint8_t type_idx = static_cast<uint8_t>(entry.first);
672 TypeSpecPtr type_spec_ptr = entry.second->Build();
Adam Lesinski7ad11102016-10-28 16:39:15 -0700673 if (type_spec_ptr == nullptr) {
674 LOG(ERROR) << "Too many type configurations, overflow detected.";
Adam Lesinskida431a22016-12-29 16:08:16 -0500675 return {};
Adam Lesinski7ad11102016-10-28 16:39:15 -0700676 }
Adam Lesinski970bd8d2017-09-25 13:21:55 -0700677
Ryan Mitchell8a891d82019-07-01 09:48:23 -0700678 loaded_package->type_specs_.editItemAt(type_idx) = std::move(type_spec_ptr);
Adam Lesinski7ad11102016-10-28 16:39:15 -0700679 }
680
Adam Lesinski1a1e9c22017-10-13 15:45:34 -0700681 return std::move(loaded_package);
682}
683
Adam Lesinski970bd8d2017-09-25 13:21:55 -0700684bool LoadedArsc::LoadTable(const Chunk& chunk, const LoadedIdmap* loaded_idmap,
Ryan Mitchell73bfe412019-11-12 16:22:04 -0800685 package_property_t property_flags) {
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000686 const ResTable_header* header = chunk.header<ResTable_header>();
687 if (header == nullptr) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800688 LOG(ERROR) << "RES_TABLE_TYPE too small.";
Adam Lesinski7ad11102016-10-28 16:39:15 -0700689 return false;
690 }
691
Ryan Mitchell8a891d82019-07-01 09:48:23 -0700692 if (loaded_idmap != nullptr) {
693 global_string_pool_ = util::make_unique<OverlayStringPool>(loaded_idmap);
694 }
695
Adam Lesinski7ad11102016-10-28 16:39:15 -0700696 const size_t package_count = dtohl(header->packageCount);
697 size_t packages_seen = 0;
698
699 packages_.reserve(package_count);
700
701 ChunkIterator iter(chunk.data_ptr(), chunk.data_size());
702 while (iter.HasNext()) {
703 const Chunk child_chunk = iter.Next();
704 switch (child_chunk.type()) {
705 case RES_STRING_POOL_TYPE:
706 // Only use the first string pool. Ignore others.
Ryan Mitchell8a891d82019-07-01 09:48:23 -0700707 if (global_string_pool_->getError() == NO_INIT) {
708 status_t err = global_string_pool_->setTo(child_chunk.header<ResStringPool_header>(),
709 child_chunk.size());
Adam Lesinski7ad11102016-10-28 16:39:15 -0700710 if (err != NO_ERROR) {
Adam Lesinski498f6052017-11-29 13:24:29 -0800711 LOG(ERROR) << "RES_STRING_POOL_TYPE corrupt.";
Adam Lesinski7ad11102016-10-28 16:39:15 -0700712 return false;
713 }
714 } else {
Adam Lesinski498f6052017-11-29 13:24:29 -0800715 LOG(WARNING) << "Multiple RES_STRING_POOL_TYPEs found in RES_TABLE_TYPE.";
Adam Lesinski7ad11102016-10-28 16:39:15 -0700716 }
717 break;
718
719 case RES_TABLE_PACKAGE_TYPE: {
720 if (packages_seen + 1 > package_count) {
721 LOG(ERROR) << "More package chunks were found than the " << package_count
Adam Lesinski970bd8d2017-09-25 13:21:55 -0700722 << " declared in the header.";
Adam Lesinski7ad11102016-10-28 16:39:15 -0700723 return false;
724 }
725 packages_seen++;
726
Adam Lesinski1a1e9c22017-10-13 15:45:34 -0700727 std::unique_ptr<const LoadedPackage> loaded_package =
Ryan Mitchell73bfe412019-11-12 16:22:04 -0800728 LoadedPackage::Load(child_chunk, property_flags);
Adam Lesinskida431a22016-12-29 16:08:16 -0500729 if (!loaded_package) {
Adam Lesinski7ad11102016-10-28 16:39:15 -0700730 return false;
731 }
732 packages_.push_back(std::move(loaded_package));
733 } break;
734
735 default:
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800736 LOG(WARNING) << StringPrintf("Unknown chunk type '%02x'.", chunk.type());
Adam Lesinski7ad11102016-10-28 16:39:15 -0700737 break;
738 }
739 }
740
741 if (iter.HadError()) {
742 LOG(ERROR) << iter.GetLastError();
Todd Kennedy28e663c2018-07-12 13:15:54 -0700743 if (iter.HadFatalError()) {
744 return false;
745 }
Adam Lesinski7ad11102016-10-28 16:39:15 -0700746 }
747 return true;
748}
749
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000750std::unique_ptr<const LoadedArsc> LoadedArsc::Load(const StringPiece& data,
Winson9947f1e2019-08-16 10:20:39 -0700751 const LoadedIdmap* loaded_idmap,
Ryan Mitchellef40d2e2020-03-11 10:26:08 -0700752 const package_property_t property_flags) {
Winson9947f1e2019-08-16 10:20:39 -0700753 ATRACE_NAME("LoadedArsc::Load");
Adam Lesinski7ad11102016-10-28 16:39:15 -0700754
755 // Not using make_unique because the constructor is private.
756 std::unique_ptr<LoadedArsc> loaded_arsc(new LoadedArsc());
757
Ryan Mitchell55ef6162020-11-13 23:55:20 +0000758 ChunkIterator iter(data.data(), data.size());
Adam Lesinski7ad11102016-10-28 16:39:15 -0700759 while (iter.HasNext()) {
760 const Chunk chunk = iter.Next();
761 switch (chunk.type()) {
762 case RES_TABLE_TYPE:
Ryan Mitchell73bfe412019-11-12 16:22:04 -0800763 if (!loaded_arsc->LoadTable(chunk, loaded_idmap, property_flags)) {
Adam Lesinski7ad11102016-10-28 16:39:15 -0700764 return {};
765 }
766 break;
767
768 default:
Adam Lesinskibebfcc42018-02-12 14:27:46 -0800769 LOG(WARNING) << StringPrintf("Unknown chunk type '%02x'.", chunk.type());
Adam Lesinski7ad11102016-10-28 16:39:15 -0700770 break;
771 }
772 }
773
774 if (iter.HadError()) {
775 LOG(ERROR) << iter.GetLastError();
Todd Kennedy28e663c2018-07-12 13:15:54 -0700776 if (iter.HadFatalError()) {
777 return {};
778 }
Adam Lesinski7ad11102016-10-28 16:39:15 -0700779 }
Adam Lesinski0c405242017-01-13 20:47:26 -0800780
781 // Need to force a move for mingw32.
782 return std::move(loaded_arsc);
Adam Lesinski7ad11102016-10-28 16:39:15 -0700783}
784
Adam Lesinski970bd8d2017-09-25 13:21:55 -0700785std::unique_ptr<const LoadedArsc> LoadedArsc::CreateEmpty() {
786 return std::unique_ptr<LoadedArsc>(new LoadedArsc());
787}
788
Adam Lesinski7ad11102016-10-28 16:39:15 -0700789} // namespace android