/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_RESOURCES

#include "androidfw/LoadedArsc.h"

#include <cstddef>
#include <cstring>
#include <limits>

#include "android-base/logging.h"
#include "android-base/stringprintf.h"
#include "utils/ByteOrder.h"
#include "utils/Trace.h"

#ifdef _WIN32
#ifdef ERROR
#undef ERROR
#endif
#endif

#include "androidfw/ByteBucketArray.h"
#include "androidfw/Chunk.h"
#include "androidfw/Util.h"

using android::base::StringPrintf;

namespace android {

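// Standard package ID assigned to an application's own resources
// (the 0xPP byte of a resource ID of the form 0xPPTTEEEE).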
static constexpr int kAppPackageId = 0x7f;

// Element of a TypeSpec array. See TypeSpec.
struct Type {
  // The configuration for which this type defines entries.
  // This is already converted to host endianness.
  ResTable_config configuration;

  // Pointer to the mmapped data where entry definitions are kept.
  const ResTable_type* type;
};

// A TypeSpec is immediately followed by an array of Type structs,
// all in the same block of memory.
struct TypeSpec {
  // Pointer to the mmapped data where flags are kept.
  // Flags denote whether the resource entry is public
  // and under which configurations it varies.
  const ResTable_typeSpec* type_spec;

  // The number of types that follow this struct.
  // There is a type for each configuration in which entries are defined.
  size_t type_count;

  // Trick to easily access a variable number of Type structs
  // following this struct, and to ensure their alignment.
  const Type types[0];
};

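// The block of memory backing a TypeSpec is therefore laid out as
// [TypeSpec][Type 0][Type 1]...[Type N-1]; see TypeSpecPtrBuilder below.
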
// TypeSpecPtr points to the block of memory that holds a TypeSpec struct,
// followed by an array of Type structs.
// TypeSpecPtr is a managed pointer that knows how to delete itself.
using TypeSpecPtr = util::unique_cptr<TypeSpec>;

namespace {

// Builder that helps accumulate Type structs and then create a single
// contiguous block of memory to store both the TypeSpec struct and
// the Type structs.
class TypeSpecPtrBuilder {
 public:
  explicit TypeSpecPtrBuilder(const ResTable_typeSpec* header) : header_(header) {}

  void AddType(const ResTable_type* type) {
    ResTable_config config;
    config.copyFromDtoH(type->config);
    types_.push_back(Type{config, type});
  }

  TypeSpecPtr Build() {
    // Check for overflow.
    if ((std::numeric_limits<size_t>::max() - sizeof(TypeSpec)) / sizeof(Type) < types_.size()) {
      return {};
    }
    TypeSpec* type_spec = (TypeSpec*)::malloc(sizeof(TypeSpec) + (types_.size() * sizeof(Type)));
    if (type_spec == nullptr) {
      // Allocation failed.
      return {};
    }
    type_spec->type_spec = header_;
    type_spec->type_count = types_.size();
    memcpy(type_spec + 1, types_.data(), types_.size() * sizeof(Type));
    return TypeSpecPtr(type_spec);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(TypeSpecPtrBuilder);

  const ResTable_typeSpec* header_;
  std::vector<Type> types_;
};
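
// Typical use (illustrative sketch; variable names are hypothetical): wrap the
// ResTable_typeSpec header in a builder, add each ResTable_type chunk seen for
// that type ID, then build the contiguous block:
//
//   TypeSpecPtrBuilder builder(type_spec_header);
//   builder.AddType(type_chunk);        // repeat once per configuration
//   TypeSpecPtr ptr = builder.Build();  // null on overflow or allocation failure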

}  // namespace

bool LoadedPackage::FindEntry(uint8_t type_idx, uint16_t entry_idx, const ResTable_config& config,
                              LoadedArscEntry* out_entry, ResTable_config* out_selected_config,
                              uint32_t* out_flags) const {
  ATRACE_CALL();
  const TypeSpecPtr& ptr = type_specs_[type_idx];
  if (ptr == nullptr) {
    return false;
  }

  // If the entry index is beyond the number of entries declared for this type,
  // the entry cannot exist in any configuration, so bail out early.
  if (entry_idx >= dtohl(ptr->type_spec->entryCount)) {
    return false;
  }

  const ResTable_config* best_config = nullptr;
  const ResTable_type* best_type = nullptr;
  uint32_t best_offset = 0;

  for (uint32_t i = 0; i < ptr->type_count; i++) {
    const Type* type = &ptr->types[i];

    if (type->configuration.match(config) &&
        (best_config == nullptr || type->configuration.isBetterThan(*best_config, &config))) {
      // The configuration matches and is better than the previous selection.
      // Find the entry value if it exists for this configuration.
      size_t entry_count = dtohl(type->type->entryCount);
      if (entry_idx < entry_count) {
        const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
            reinterpret_cast<const uint8_t*>(type->type) + dtohs(type->type->header.headerSize));
        const uint32_t offset = dtohl(entry_offsets[entry_idx]);
        if (offset != ResTable_type::NO_ENTRY) {
          // There is an entry for this resource, record it.
          best_config = &type->configuration;
          best_type = type->type;
          best_offset = offset + dtohl(type->type->entriesStart);
        }
      }
    }
  }

  if (best_type == nullptr) {
    return false;
  }

  // The per-entry flags immediately follow the ResTable_typeSpec header.
  const uint32_t* flags = reinterpret_cast<const uint32_t*>(ptr->type_spec + 1);
  *out_flags = dtohl(flags[entry_idx]);
  *out_selected_config = *best_config;

  const ResTable_entry* best_entry = reinterpret_cast<const ResTable_entry*>(
      reinterpret_cast<const uint8_t*>(best_type) + best_offset);
  out_entry->entry = best_entry;
  out_entry->type_string_ref = StringPoolRef(&type_string_pool_, best_type->id - 1);
  out_entry->entry_string_ref = StringPoolRef(&key_string_pool_, dtohl(best_entry->key.index));
  return true;
}

// The destructor gets generated into arbitrary translation units
// if left implicit, which causes the compiler to complain about
// forward declarations and incomplete types.
LoadedArsc::~LoadedArsc() {}

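// Example (illustrative): for resource ID 0x7f010002, package_id is 0x7f,
// type_id is 0x01, and entry_id is 0x0002. The matching package is searched
// using the zero-based type index (type_id - 1).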
bool LoadedArsc::FindEntry(uint32_t resid, const ResTable_config& config,
                           LoadedArscEntry* out_entry, ResTable_config* out_selected_config,
                           uint32_t* out_flags) const {
  ATRACE_CALL();
  const uint8_t package_id = util::get_package_id(resid);
  const uint8_t type_id = util::get_type_id(resid);
  const uint16_t entry_id = util::get_entry_id(resid);

  if (type_id == 0) {
    LOG(ERROR) << "Invalid ID 0x" << std::hex << resid << std::dec << ".";
    return false;
  }

  for (const auto& loaded_package : packages_) {
    if (loaded_package->package_id_ == package_id) {
      return loaded_package->FindEntry(type_id - 1, entry_id, config, out_entry,
                                       out_selected_config, out_flags);
    }
  }
  return false;
}

const LoadedPackage* LoadedArsc::GetPackageForId(uint32_t resid) const {
  const uint8_t package_id = util::get_package_id(resid);
  for (const auto& loaded_package : packages_) {
    if (loaded_package->package_id_ == package_id) {
      return loaded_package.get();
    }
  }
  return nullptr;
}

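// Layout of a RES_TABLE_TYPE_TYPE chunk that VerifyType() validates:
//
//   [ResTable_type header]          headerSize bytes
//   [uint32_t offsets[entryCount]]  one offset per entry, or NO_ENTRY
//   [entry data]                    begins at entriesStart, 4-byte aligned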
static bool VerifyType(const Chunk& chunk) {
  ATRACE_CALL();
  const ResTable_type* header = chunk.header<ResTable_type>();

  const size_t entry_count = dtohl(header->entryCount);
  if (entry_count > std::numeric_limits<uint16_t>::max()) {
    LOG(ERROR) << "Too many entries in RES_TABLE_TYPE_TYPE.";
    return false;
  }

  // Make sure that there is enough room for the entry offsets.
  const size_t offsets_offset = chunk.header_size();
  const size_t entries_offset = dtohl(header->entriesStart);
  const size_t offsets_length = sizeof(uint32_t) * entry_count;

  if (offsets_offset + offsets_length > entries_offset) {
    LOG(ERROR) << "Entry offsets overlap actual entry data.";
    return false;
  }

  if (entries_offset > chunk.size()) {
    LOG(ERROR) << "Entry offsets extend beyond chunk.";
    return false;
  }

  if (entries_offset & 0x03) {
    LOG(ERROR) << "Entries start at unaligned address.";
    return false;
  }

  // Check each entry offset.
  const uint32_t* offsets =
      reinterpret_cast<const uint32_t*>(reinterpret_cast<const uint8_t*>(header) + offsets_offset);
  for (size_t i = 0; i < entry_count; i++) {
    uint32_t offset = dtohl(offsets[i]);
    if (offset != ResTable_type::NO_ENTRY) {
      // Check that the offset is aligned.
      if (offset & 0x03) {
        LOG(ERROR) << "Entry offset at index " << i << " is not 4-byte aligned.";
        return false;
      }

      // Check that the offset doesn't overflow.
      if (offset > std::numeric_limits<uint32_t>::max() - entries_offset) {
        // Overflow in offset.
        LOG(ERROR) << "Entry offset at index " << i << " is too large.";
        return false;
      }

      offset += entries_offset;
      if (offset > chunk.size() - sizeof(ResTable_entry)) {
        LOG(ERROR) << "Entry offset at index " << i << " is too large. No room for ResTable_entry.";
        return false;
      }

      const ResTable_entry* entry = reinterpret_cast<const ResTable_entry*>(
          reinterpret_cast<const uint8_t*>(header) + offset);
      const size_t entry_size = dtohs(entry->size);
      if (entry_size < sizeof(*entry)) {
        LOG(ERROR) << "ResTable_entry size " << entry_size << " is too small.";
        return false;
      }

      // Check the declared entrySize.
      if (entry_size > chunk.size() || offset > chunk.size() - entry_size) {
        LOG(ERROR) << "ResTable_entry size " << entry_size << " is too large.";
        return false;
      }

      // If this is a map entry, then keep validating.
      if (entry_size >= sizeof(ResTable_map_entry)) {
        const ResTable_map_entry* map = reinterpret_cast<const ResTable_map_entry*>(entry);
        const size_t map_entry_count = dtohl(map->count);

        size_t map_entries_start = offset + entry_size;
        if (map_entries_start & 0x03) {
          LOG(ERROR) << "Map entries start at unaligned offset.";
          return false;
        }

        // Each entry is sizeof(ResTable_map) big.
        if (map_entry_count > ((chunk.size() - map_entries_start) / sizeof(ResTable_map))) {
          LOG(ERROR) << "Too many map entries in ResTable_map_entry.";
          return false;
        }

        // Great, all the map entries fit!
      } else {
        // There needs to be room for one Res_value struct.
        if (offset + entry_size > chunk.size() - sizeof(Res_value)) {
          LOG(ERROR) << "No room for Res_value after ResTable_entry.";
          return false;
        }

        const Res_value* value = reinterpret_cast<const Res_value*>(
            reinterpret_cast<const uint8_t*>(entry) + entry_size);
        const size_t value_size = dtohs(value->size);
        if (value_size < sizeof(Res_value)) {
          LOG(ERROR) << "Res_value is too small.";
          return false;
        }

        if (value_size > chunk.size() || offset + entry_size > chunk.size() - value_size) {
          LOG(ERROR) << "Res_value size is too large.";
          return false;
        }
      }
    }
  }
  return true;
}

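// Parses a single RES_TABLE_PACKAGE_TYPE chunk. Its children are the type and
// key string pools, one RES_TABLE_TYPE_SPEC_TYPE chunk per type ID followed by
// that type's RES_TABLE_TYPE_TYPE chunks (one per configuration), and an
// optional RES_TABLE_LIBRARY_TYPE chunk mapping shared library package names
// to package IDs.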
std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
  ATRACE_CALL();
  std::unique_ptr<LoadedPackage> loaded_package{new LoadedPackage()};

  const ResTable_package* header = chunk.header<ResTable_package>();
  if (header == nullptr) {
    LOG(ERROR) << "Chunk RES_TABLE_PACKAGE_TYPE is too small.";
    return {};
  }

  loaded_package->package_id_ = dtohl(header->id);
  if (loaded_package->package_id_ == 0) {
    // Package ID of 0 means this is a shared library.
    loaded_package->dynamic_ = true;
  }

  util::ReadUtf16StringFromDevice(header->name, arraysize(header->name),
                                  &loaded_package->package_name_);

  // A TypeSpec builder. We use this to accumulate the set of Types
  // available for a TypeSpec, and later build a single, contiguous block
  // of memory that holds all the Types together with the TypeSpec.
  std::unique_ptr<TypeSpecPtrBuilder> types_builder;

  // Keep track of the last seen type index. Since type IDs are 1-based,
  // this records their index, which is 0-based (type ID - 1).
  uint8_t last_type_idx = 0;

  ChunkIterator iter(chunk.data_ptr(), chunk.data_size());
  while (iter.HasNext()) {
    const Chunk child_chunk = iter.Next();
    switch (child_chunk.type()) {
      case RES_STRING_POOL_TYPE: {
        const uintptr_t pool_address =
            reinterpret_cast<uintptr_t>(child_chunk.header<ResChunk_header>());
        const uintptr_t header_address = reinterpret_cast<uintptr_t>(header);
        if (pool_address == header_address + dtohl(header->typeStrings)) {
          // This string pool is the type string pool.
          status_t err = loaded_package->type_string_pool_.setTo(
              child_chunk.header<ResStringPool_header>(), child_chunk.size());
          if (err != NO_ERROR) {
            LOG(ERROR) << "Corrupt package type string pool.";
            return {};
          }
        } else if (pool_address == header_address + dtohl(header->keyStrings)) {
          // This string pool is the key string pool.
          status_t err = loaded_package->key_string_pool_.setTo(
              child_chunk.header<ResStringPool_header>(), child_chunk.size());
          if (err != NO_ERROR) {
            LOG(ERROR) << "Corrupt package key string pool.";
            return {};
          }
        } else {
          LOG(WARNING) << "Too many string pool chunks found in package.";
        }
      } break;

      case RES_TABLE_TYPE_SPEC_TYPE: {
        ATRACE_NAME("LoadTableTypeSpec");

        // Starting a new TypeSpec, so finish the old one if there was one.
        if (types_builder) {
          TypeSpecPtr type_spec_ptr = types_builder->Build();
          if (type_spec_ptr == nullptr) {
            LOG(ERROR) << "Too many type configurations, overflow detected.";
            return {};
          }

          loaded_package->type_specs_.editItemAt(last_type_idx) = std::move(type_spec_ptr);

          types_builder = {};
          last_type_idx = 0;
        }

        const ResTable_typeSpec* type_spec = child_chunk.header<ResTable_typeSpec>();
        if (type_spec == nullptr) {
          LOG(ERROR) << "Chunk RES_TABLE_TYPE_SPEC_TYPE is too small.";
          return {};
        }

        if (type_spec->id == 0) {
          LOG(ERROR) << "Chunk RES_TABLE_TYPE_SPEC_TYPE has invalid ID 0.";
          return {};
        }

        // The data portion of this chunk contains entry_count 32-bit entries,
        // each one representing a set of flags.
        // Here we only validate that the chunk is well formed.
        const size_t entry_count = dtohl(type_spec->entryCount);

        // There can only be 2^16 entries in a type, because that is the ID
        // space for entries (EEEE) in the resource ID 0xPPTTEEEE.
        if (entry_count > std::numeric_limits<uint16_t>::max()) {
          LOG(ERROR) << "Too many entries in RES_TABLE_TYPE_SPEC_TYPE: " << entry_count << ".";
          return {};
        }

        if (entry_count * sizeof(uint32_t) > child_chunk.data_size()) {
          LOG(ERROR) << "Chunk too small to hold entries in RES_TABLE_TYPE_SPEC_TYPE.";
          return {};
        }

        last_type_idx = type_spec->id - 1;
        types_builder = util::make_unique<TypeSpecPtrBuilder>(type_spec);
      } break;

      case RES_TABLE_TYPE_TYPE: {
        const ResTable_type* type = child_chunk.header<ResTable_type>();
        if (type == nullptr) {
          LOG(ERROR) << "Chunk RES_TABLE_TYPE_TYPE is too small.";
          return {};
        }

        if (type->id == 0) {
          LOG(ERROR) << "Chunk RES_TABLE_TYPE_TYPE has invalid ID 0.";
          return {};
        }

        // Type chunks must be preceded by their TypeSpec chunks.
        if (!types_builder || type->id - 1 != last_type_idx) {
          LOG(ERROR) << "Found RES_TABLE_TYPE_TYPE chunk without RES_TABLE_TYPE_SPEC_TYPE.";
          return {};
        }

        if (!VerifyType(child_chunk)) {
          return {};
        }

        types_builder->AddType(type);
      } break;

      case RES_TABLE_LIBRARY_TYPE: {
        const ResTable_lib_header* lib = child_chunk.header<ResTable_lib_header>();
        if (lib == nullptr) {
          LOG(ERROR) << "Chunk RES_TABLE_LIBRARY_TYPE is too small.";
          return {};
        }

        if (child_chunk.data_size() / sizeof(ResTable_lib_entry) < dtohl(lib->count)) {
          LOG(ERROR) << "Chunk too small to hold entries in RES_TABLE_LIBRARY_TYPE.";
          return {};
        }

        loaded_package->dynamic_package_map_.reserve(dtohl(lib->count));

        const ResTable_lib_entry* const entry_begin =
            reinterpret_cast<const ResTable_lib_entry*>(child_chunk.data_ptr());
        const ResTable_lib_entry* const entry_end = entry_begin + dtohl(lib->count);
        for (auto entry_iter = entry_begin; entry_iter != entry_end; ++entry_iter) {
          std::string package_name;
          util::ReadUtf16StringFromDevice(entry_iter->packageName,
                                          arraysize(entry_iter->packageName), &package_name);

          if (dtohl(entry_iter->packageId) >= std::numeric_limits<uint8_t>::max()) {
            LOG(ERROR) << base::StringPrintf(
                "Package ID %02x in RES_TABLE_LIBRARY_TYPE too large for package '%s'.",
                dtohl(entry_iter->packageId), package_name.c_str());
            return {};
          }

          loaded_package->dynamic_package_map_.emplace_back(std::move(package_name),
                                                            dtohl(entry_iter->packageId));
        }
      } break;

      default:
        LOG(WARNING) << base::StringPrintf("Unknown chunk type '%02x'.", child_chunk.type());
        break;
    }
  }

  // Finish the last TypeSpec.
  if (types_builder) {
    TypeSpecPtr type_spec_ptr = types_builder->Build();
    if (type_spec_ptr == nullptr) {
      LOG(ERROR) << "Too many type configurations, overflow detected.";
      return {};
    }
    loaded_package->type_specs_.editItemAt(last_type_idx) = std::move(type_spec_ptr);
  }

  if (iter.HadError()) {
    LOG(ERROR) << iter.GetLastError();
    return {};
  }
  return loaded_package;
}

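// Parses a RES_TABLE_TYPE chunk: the global string pool followed by one
// RES_TABLE_PACKAGE_TYPE chunk for each package declared in the ResTable_header.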
bool LoadedArsc::LoadTable(const Chunk& chunk, bool load_as_shared_library) {
  ATRACE_CALL();
  const ResTable_header* header = chunk.header<ResTable_header>();
  if (header == nullptr) {
    LOG(ERROR) << "Chunk RES_TABLE_TYPE is too small.";
    return false;
  }

  const size_t package_count = dtohl(header->packageCount);
  size_t packages_seen = 0;

  packages_.reserve(package_count);

  ChunkIterator iter(chunk.data_ptr(), chunk.data_size());
  while (iter.HasNext()) {
    const Chunk child_chunk = iter.Next();
    switch (child_chunk.type()) {
      case RES_STRING_POOL_TYPE:
        // Only use the first string pool. Ignore others.
        if (global_string_pool_.getError() == NO_INIT) {
          status_t err = global_string_pool_.setTo(child_chunk.header<ResStringPool_header>(),
                                                   child_chunk.size());
          if (err != NO_ERROR) {
            LOG(ERROR) << "Corrupt string pool.";
            return false;
          }
        } else {
          LOG(WARNING) << "Multiple string pool chunks found in resource table.";
        }
        break;

      case RES_TABLE_PACKAGE_TYPE: {
        if (packages_seen + 1 > package_count) {
          LOG(ERROR) << "More package chunks were found than the " << package_count
                     << " declared in the header.";
          return false;
        }
        packages_seen++;

        std::unique_ptr<LoadedPackage> loaded_package = LoadedPackage::Load(child_chunk);
        if (!loaded_package) {
          return false;
        }

        // Mark the package as dynamic if we are forcefully loading the APK as a shared library.
        if (loaded_package->package_id_ == kAppPackageId) {
          loaded_package->dynamic_ = load_as_shared_library;
        }
        packages_.push_back(std::move(loaded_package));
      } break;

      default:
        LOG(WARNING) << base::StringPrintf("Unknown chunk type '%02x'.", child_chunk.type());
        break;
    }
  }

  if (iter.HadError()) {
    LOG(ERROR) << iter.GetLastError();
    return false;
  }
  return true;
}

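// Example (illustrative; data and size are hypothetical variables): loading a
// resources.arsc blob that has already been mapped into memory:
//
//   std::unique_ptr<LoadedArsc> arsc =
//       LoadedArsc::Load(data, size, /*load_as_shared_library=*/false);
//   if (arsc == nullptr) {
//     // Parsing failed; the reason was logged.
//   }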
std::unique_ptr<LoadedArsc> LoadedArsc::Load(const void* data, size_t len,
                                             bool load_as_shared_library) {
  ATRACE_CALL();

  // Not using make_unique because the constructor is private.
  std::unique_ptr<LoadedArsc> loaded_arsc(new LoadedArsc());

  ChunkIterator iter(data, len);
  while (iter.HasNext()) {
    const Chunk chunk = iter.Next();
    switch (chunk.type()) {
      case RES_TABLE_TYPE:
        if (!loaded_arsc->LoadTable(chunk, load_as_shared_library)) {
          return {};
        }
        break;

      default:
        LOG(WARNING) << base::StringPrintf("Unknown chunk type '%02x'.", chunk.type());
        break;
    }
  }

  if (iter.HadError()) {
    LOG(ERROR) << iter.GetLastError();
    return {};
  }
  return loaded_arsc;
}

}  // namespace android