blob: f42e6b7b126f4c602a36bd83c736dbb54980e81d [file] [log] [blame]
Adam Lesinski1ab598f2015-08-14 14:26:04 -07001/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "ResourceTable.h"
18#include "ResourceValues.h"
19#include "ValueVisitor.h"
20
21#include "flatten/ChunkWriter.h"
22#include "flatten/ResourceTypeExtensions.h"
23#include "flatten/TableFlattener.h"
24#include "util/BigBuffer.h"
25
Adam Lesinski9ba47d82015-10-13 11:37:10 -070026#include <base/macros.h>
Adam Lesinski1ab598f2015-08-14 14:26:04 -070027#include <type_traits>
28#include <numeric>
Adam Lesinski1ab598f2015-08-14 14:26:04 -070029
30using namespace android;
31
32namespace aapt {
33
34namespace {
35
// Orders two objects by their (already-assigned) resource IDs.
// Both arguments must have `id` set; `id.value()` is undefined otherwise.
template <typename T>
static bool cmpIds(const T* lhs, const T* rhs) {
    const auto leftId = lhs->id.value();
    const auto rightId = rhs->id.value();
    return leftId < rightId;
}
40
41static void strcpy16_htod(uint16_t* dst, size_t len, const StringPiece16& src) {
42 if (len == 0) {
43 return;
44 }
45
46 size_t i;
47 const char16_t* srcData = src.data();
48 for (i = 0; i < len - 1 && i < src.size(); i++) {
49 dst[i] = util::hostToDevice16((uint16_t) srcData[i]);
50 }
51 dst[i] = 0;
52}
53
// A resource entry paired with one concrete configuration-specific value,
// plus the string pool indices for the metadata serialized alongside it.
struct FlatEntry {
    ResourceEntry* entry;
    Value* value;

    // The entry string pool index to the entry's name.
    uint32_t entryKey;

    // The source string pool index to the source file path.
    uint32_t sourcePathKey;
    uint32_t sourceLine;

    // The source string pool index to the comment.
    uint32_t commentKey;
};
68
Adam Lesinski9ba47d82015-10-13 11:37:10 -070069class SymbolWriter {
70public:
Adam Lesinski1ab598f2015-08-14 14:26:04 -070071 struct Entry {
72 StringPool::Ref name;
73 size_t offset;
74 };
75
Adam Lesinski1ab598f2015-08-14 14:26:04 -070076 std::vector<Entry> symbols;
77
Adam Lesinski9ba47d82015-10-13 11:37:10 -070078 explicit SymbolWriter(StringPool* pool) : mPool(pool) {
Adam Lesinski1ab598f2015-08-14 14:26:04 -070079 }
Adam Lesinski9ba47d82015-10-13 11:37:10 -070080
Adam Lesinski467f1712015-11-16 17:35:44 -080081 void addSymbol(const Reference& ref, size_t offset) {
82 const ResourceName& name = ref.name.value();
83 std::u16string fullName;
84 if (ref.privateReference) {
85 fullName += u"*";
86 }
87
88 if (!name.package.empty()) {
89 fullName += name.package + u":";
90 }
91 fullName += toString(name.type).toString() + u"/" + name.entry;
92 symbols.push_back(Entry{ mPool->makeRef(fullName), offset });
Adam Lesinski9ba47d82015-10-13 11:37:10 -070093 }
94
95 void shiftAllOffsets(size_t offset) {
96 for (Entry& entry : symbols) {
97 entry.offset += offset;
98 }
99 }
100
101private:
102 StringPool* mPool;
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700103};
104
// Flattens a compound (map-like) Value -- Attribute, Style, Styleable, Array,
// or Plural -- by appending one ResTable_map record per entry to mBuffer.
// Tracks the number of records written (mEntryCount) and, for styles, the
// parent reference (mParentIdent when resolved, else mParentName).
struct MapFlattenVisitor : public RawValueVisitor {
    using RawValueVisitor::visit;

    SymbolWriter* mSymbols;      // collects unresolved/private references for later patching
    FlatEntry* mEntry;
    BigBuffer* mBuffer;
    StringPool* mSourcePool;     // pool for source file paths
    StringPool* mCommentPool;    // pool for comments
    bool mUseExtendedChunks;     // enables non-runtime (aapt-only) metadata records

    size_t mEntryCount = 0;
    Maybe<uint32_t> mParentIdent;
    Maybe<ResourceNameRef> mParentName;

    MapFlattenVisitor(SymbolWriter* symbols, FlatEntry* entry, BigBuffer* buffer,
                      StringPool* sourcePool, StringPool* commentPool,
                      bool useExtendedChunks) :
            mSymbols(symbols), mEntry(entry), mBuffer(buffer), mSourcePool(sourcePool),
            mCommentPool(commentPool), mUseExtendedChunks(useExtendedChunks) {
    }

    // Writes the map entry's key (attribute) ID. If the key has no ID, or is a
    // private reference while extended chunks are allowed, a 0 is written and
    // the key's name is recorded in the symbol table at the offset of the
    // `name` field just written into mBuffer.
    void flattenKey(Reference* key, ResTable_map* outEntry) {
        if (!key->id || (key->privateReference && mUseExtendedChunks)) {
            assert(key->name && "reference must have a name");

            outEntry->name.ident = util::hostToDevice32(0);
            mSymbols->addSymbol(*key, (mBuffer->size() - sizeof(ResTable_map)) +
                                    offsetof(ResTable_map, name));
        } else {
            outEntry->name.ident = util::hostToDevice32(key->id.value().id);
        }
    }

    // Flattens the map entry's value. Unresolved/private references get a
    // symbol-table record pointing at the Res_value `data` field, and private
    // references are forced to 0 so they are looked up at unflatten time.
    void flattenValue(Item* value, ResTable_map* outEntry) {
        bool privateRef = false;
        if (Reference* ref = valueCast<Reference>(value)) {
            privateRef = ref->privateReference && mUseExtendedChunks;
            if (!ref->id || privateRef) {
                assert(ref->name && "reference must have a name");

                mSymbols->addSymbol(*ref, (mBuffer->size() - sizeof(ResTable_map)) +
                                        offsetof(ResTable_map, value) + offsetof(Res_value, data));
            }
        }

        bool result = value->flatten(&outEntry->value);
        if (privateRef) {
            outEntry->value.data = 0;
        }
        assert(result && "flatten failed");
    }

    // Appends one complete ResTable_map (key + value) and bumps mEntryCount.
    void flattenEntry(Reference* key, Item* value) {
        ResTable_map* outEntry = mBuffer->nextBlock<ResTable_map>();
        flattenKey(key, outEntry);
        flattenValue(value, outEntry);
        outEntry->value.size = util::hostToDevice16(sizeof(outEntry->value));
        mEntryCount++;
    }

    // Emits extra ResTable_map records carrying the value's source path,
    // source line, and comment, keyed by the extended (aapt-only) attribute
    // IDs. These are indices into mSourcePool/mCommentPool, not runtime data.
    // No-op unless extended chunks are enabled.
    void flattenMetaData(Value* value) {
        if (!mUseExtendedChunks) {
            return;
        }

        Reference key(ResourceId{ ExtendedResTableMapTypes::ATTR_SOURCE_PATH });
        StringPool::Ref sourcePathRef = mSourcePool->makeRef(
                util::utf8ToUtf16(value->getSource().path));
        BinaryPrimitive val(Res_value::TYPE_INT_DEC,
                            static_cast<uint32_t>(sourcePathRef.getIndex()));
        flattenEntry(&key, &val);

        if (value->getSource().line) {
            key.id = ResourceId(ExtendedResTableMapTypes::ATTR_SOURCE_LINE);
            val.value.data = static_cast<uint32_t>(value->getSource().line.value());
            flattenEntry(&key, &val);
        }

        if (!value->getComment().empty()) {
            key.id = ResourceId(ExtendedResTableMapTypes::ATTR_COMMENT);
            StringPool::Ref commentRef = mCommentPool->makeRef(value->getComment());
            val.value.data = static_cast<uint32_t>(commentRef.getIndex());
            flattenEntry(&key, &val);
        }
    }

    // <attr>: type mask, optional min/max bounds, then one entry per
    // enum/flag symbol.
    void visit(Attribute* attr) override {
        {
            Reference key(ResourceId{ ResTable_map::ATTR_TYPE });
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, attr->typeMask);
            flattenEntry(&key, &val);
        }

        // min/max are only serialized when they differ from the "unbounded"
        // sentinel values.
        if (attr->minInt != std::numeric_limits<int32_t>::min()) {
            Reference key(ResourceId{ ResTable_map::ATTR_MIN });
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, static_cast<uint32_t>(attr->minInt));
            flattenEntry(&key, &val);
        }

        if (attr->maxInt != std::numeric_limits<int32_t>::max()) {
            Reference key(ResourceId{ ResTable_map::ATTR_MAX });
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, static_cast<uint32_t>(attr->maxInt));
            flattenEntry(&key, &val);
        }

        for (Attribute::Symbol& s : attr->symbols) {
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, s.value);
            flattenEntry(&s.symbol, &val);
        }
    }

    // Orders style entries: entries with IDs first (by ID), then entries
    // without IDs (by name).
    static bool cmpStyleEntries(const Style::Entry& a, const Style::Entry& b) {
        if (a.key.id) {
            if (b.key.id) {
                return a.key.id.value() < b.key.id.value();
            }
            return true;
        } else if (!b.key.id) {
            return a.key.name.value() < b.key.name.value();
        }
        return false;
    }

    // <style>: records the parent (by ID, or by name when unresolved or
    // private), then each sorted entry.
    void visit(Style* style) override {
        if (style->parent) {
            bool privateRef = style->parent.value().privateReference && mUseExtendedChunks;
            if (!style->parent.value().id || privateRef) {
                assert(style->parent.value().name && "reference must have a name");
                mParentName = style->parent.value().name;
            } else {
                mParentIdent = style->parent.value().id.value().id;
            }
        }

        // Sort the style.
        std::sort(style->entries.begin(), style->entries.end(), cmpStyleEntries);

        for (Style::Entry& entry : style->entries) {
            flattenEntry(&entry.key, entry.value.get());
            // NOTE(review): metadata here comes from the *key* reference,
            // unlike Array/Plural which use the value -- confirm intentional.
            flattenMetaData(&entry.key);
        }
    }

    // <declare-styleable>: one zero-valued entry per referenced attribute.
    void visit(Styleable* styleable) override {
        for (auto& attrRef : styleable->entries) {
            BinaryPrimitive val(Res_value{});
            flattenEntry(&attrRef, &val);
            flattenMetaData(&attrRef);
        }
    }

    // <array>: items carry no key, so ResTable_map blocks are written
    // directly without flattenKey.
    void visit(Array* array) override {
        for (auto& item : array->items) {
            ResTable_map* outEntry = mBuffer->nextBlock<ResTable_map>();
            flattenValue(item.get(), outEntry);
            outEntry->value.size = util::hostToDevice16(sizeof(outEntry->value));
            mEntryCount++;
            flattenMetaData(item.get());
        }
    }

    // <plurals>: one entry per present quantity, keyed by the framework's
    // quantity attribute IDs.
    void visit(Plural* plural) override {
        const size_t count = plural->values.size();
        for (size_t i = 0; i < count; i++) {
            if (!plural->values[i]) {
                continue;
            }

            ResourceId q;
            switch (i) {
            case Plural::Zero:
                q.id = android::ResTable_map::ATTR_ZERO;
                break;

            case Plural::One:
                q.id = android::ResTable_map::ATTR_ONE;
                break;

            case Plural::Two:
                q.id = android::ResTable_map::ATTR_TWO;
                break;

            case Plural::Few:
                q.id = android::ResTable_map::ATTR_FEW;
                break;

            case Plural::Many:
                q.id = android::ResTable_map::ATTR_MANY;
                break;

            case Plural::Other:
                q.id = android::ResTable_map::ATTR_OTHER;
                break;

            default:
                assert(false);
                break;
            }

            Reference key(q);
            flattenEntry(&key, plural->values[i].get());
            flattenMetaData(plural->values[i].get());
        }
    }
};
310
Adam Lesinski9ba47d82015-10-13 11:37:10 -0700311class PackageFlattener {
312public:
313 PackageFlattener(IDiagnostics* diag, TableFlattenerOptions options,
314 ResourceTablePackage* package, SymbolWriter* symbolWriter,
315 StringPool* sourcePool) :
316 mDiag(diag), mOptions(options), mPackage(package), mSymbols(symbolWriter),
317 mSourcePool(sourcePool) {
318 }
319
320 bool flattenPackage(BigBuffer* buffer) {
321 ChunkWriter pkgWriter(buffer);
322 ResTable_package* pkgHeader = pkgWriter.startChunk<ResTable_package>(
323 RES_TABLE_PACKAGE_TYPE);
324 pkgHeader->id = util::hostToDevice32(mPackage->id.value());
325
326 if (mPackage->name.size() >= arraysize(pkgHeader->name)) {
327 mDiag->error(DiagMessage() <<
328 "package name '" << mPackage->name << "' is too long");
329 return false;
330 }
331
332 // Copy the package name in device endianness.
333 strcpy16_htod(pkgHeader->name, arraysize(pkgHeader->name), mPackage->name);
334
335 // Serialize the types. We do this now so that our type and key strings
336 // are populated. We write those first.
337 BigBuffer typeBuffer(1024);
338 flattenTypes(&typeBuffer);
339
340 pkgHeader->typeStrings = util::hostToDevice32(pkgWriter.size());
341 StringPool::flattenUtf16(pkgWriter.getBuffer(), mTypePool);
342
343 pkgHeader->keyStrings = util::hostToDevice32(pkgWriter.size());
344 StringPool::flattenUtf16(pkgWriter.getBuffer(), mKeyPool);
345
346 // Add the ResTable_package header/type/key strings to the offset.
347 mSymbols->shiftAllOffsets(pkgWriter.size());
348
349 // Append the types.
350 buffer->appendBuffer(std::move(typeBuffer));
351
352 pkgWriter.finish();
353 return true;
354 }
355
356private:
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700357 IDiagnostics* mDiag;
358 TableFlattenerOptions mOptions;
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700359 ResourceTablePackage* mPackage;
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700360 StringPool mTypePool;
361 StringPool mKeyPool;
Adam Lesinski9ba47d82015-10-13 11:37:10 -0700362 SymbolWriter* mSymbols;
363 StringPool* mSourcePool;
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700364
Adam Lesinskie78fd612015-10-22 12:48:43 -0700365 template <typename T, bool IsItem>
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700366 T* writeEntry(FlatEntry* entry, BigBuffer* buffer) {
367 static_assert(std::is_same<ResTable_entry, T>::value ||
368 std::is_same<ResTable_entry_ext, T>::value,
369 "T must be ResTable_entry or ResTable_entry_ext");
370
371 T* result = buffer->nextBlock<T>();
372 ResTable_entry* outEntry = (ResTable_entry*)(result);
Adam Lesinski9e10ac72015-10-16 14:37:48 -0700373 if (entry->entry->symbolStatus.state == SymbolState::kPublic) {
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700374 outEntry->flags |= ResTable_entry::FLAG_PUBLIC;
375 }
376
377 if (entry->value->isWeak()) {
378 outEntry->flags |= ResTable_entry::FLAG_WEAK;
379 }
380
Adam Lesinskie78fd612015-10-22 12:48:43 -0700381 if (!IsItem) {
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700382 outEntry->flags |= ResTable_entry::FLAG_COMPLEX;
383 }
384
385 outEntry->key.index = util::hostToDevice32(entry->entryKey);
386 outEntry->size = sizeof(T);
387
388 if (mOptions.useExtendedChunks) {
389 // Write the extra source block. This will be ignored by the Android runtime.
390 ResTable_entry_source* sourceBlock = buffer->nextBlock<ResTable_entry_source>();
Adam Lesinski3b4cd942015-10-30 16:31:42 -0700391 sourceBlock->path.index = util::hostToDevice32(entry->sourcePathKey);
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700392 sourceBlock->line = util::hostToDevice32(entry->sourceLine);
Adam Lesinski3b4cd942015-10-30 16:31:42 -0700393 sourceBlock->comment.index = util::hostToDevice32(entry->commentKey);
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700394 outEntry->size += sizeof(*sourceBlock);
395 }
396
397 outEntry->flags = util::hostToDevice16(outEntry->flags);
398 outEntry->size = util::hostToDevice16(outEntry->size);
399 return result;
400 }
401
402 bool flattenValue(FlatEntry* entry, BigBuffer* buffer) {
Adam Lesinskie78fd612015-10-22 12:48:43 -0700403 if (Item* item = valueCast<Item>(entry->value)) {
404 writeEntry<ResTable_entry, true>(entry, buffer);
Adam Lesinski467f1712015-11-16 17:35:44 -0800405 bool privateRef = false;
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700406 if (Reference* ref = valueCast<Reference>(entry->value)) {
Adam Lesinski467f1712015-11-16 17:35:44 -0800407 // If there is no ID or the reference is private and we allow extended chunks,
408 // write out a 0 and mark the symbol table with the name of the reference.
409 privateRef = (ref->privateReference && mOptions.useExtendedChunks);
410 if (!ref->id || privateRef) {
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700411 assert(ref->name && "reference must have at least a name");
Adam Lesinski467f1712015-11-16 17:35:44 -0800412 mSymbols->addSymbol(*ref, buffer->size() + offsetof(Res_value, data));
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700413 }
414 }
415 Res_value* outValue = buffer->nextBlock<Res_value>();
Adam Lesinskie78fd612015-10-22 12:48:43 -0700416 bool result = item->flatten(outValue);
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700417 assert(result && "flatten failed");
Adam Lesinski467f1712015-11-16 17:35:44 -0800418 if (privateRef) {
419 // Force the value of 0 so we look up the symbol at unflatten time.
420 outValue->data = 0;
421 }
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700422 outValue->size = util::hostToDevice16(sizeof(*outValue));
423 } else {
424 const size_t beforeEntry = buffer->size();
Adam Lesinskie78fd612015-10-22 12:48:43 -0700425 ResTable_entry_ext* outEntry = writeEntry<ResTable_entry_ext, false>(entry, buffer);
Adam Lesinski28cacf02015-11-23 14:22:47 -0800426 MapFlattenVisitor visitor(mSymbols, entry, buffer, mSourcePool, mSourcePool,
427 mOptions.useExtendedChunks);
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700428 entry->value->accept(&visitor);
429 outEntry->count = util::hostToDevice32(visitor.mEntryCount);
430 if (visitor.mParentName) {
Adam Lesinski9ba47d82015-10-13 11:37:10 -0700431 mSymbols->addSymbol(visitor.mParentName.value(),
432 beforeEntry + offsetof(ResTable_entry_ext, parent));
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700433 } else if (visitor.mParentIdent) {
434 outEntry->parent.ident = util::hostToDevice32(visitor.mParentIdent.value());
435 }
436 }
437 return true;
438 }
439
440 bool flattenConfig(const ResourceTableType* type, const ConfigDescription& config,
441 std::vector<FlatEntry>* entries, BigBuffer* buffer) {
442 ChunkWriter typeWriter(buffer);
443 ResTable_type* typeHeader = typeWriter.startChunk<ResTable_type>(RES_TABLE_TYPE_TYPE);
444 typeHeader->id = type->id.value();
445 typeHeader->config = config;
446 typeHeader->config.swapHtoD();
447
448 auto maxAccum = [](uint32_t max, const std::unique_ptr<ResourceEntry>& a) -> uint32_t {
449 return std::max(max, (uint32_t) a->id.value());
450 };
451
452 // Find the largest entry ID. That is how many entries we will have.
453 const uint32_t entryCount =
454 std::accumulate(type->entries.begin(), type->entries.end(), 0, maxAccum) + 1;
455
456 typeHeader->entryCount = util::hostToDevice32(entryCount);
457 uint32_t* indices = typeWriter.nextBlock<uint32_t>(entryCount);
458
459 assert((size_t) entryCount <= std::numeric_limits<uint16_t>::max() + 1);
460 memset(indices, 0xff, entryCount * sizeof(uint32_t));
461
462 typeHeader->entriesStart = util::hostToDevice32(typeWriter.size());
463
464 const size_t entryStart = typeWriter.getBuffer()->size();
465 for (FlatEntry& flatEntry : *entries) {
466 assert(flatEntry.entry->id.value() < entryCount);
467 indices[flatEntry.entry->id.value()] = util::hostToDevice32(
468 typeWriter.getBuffer()->size() - entryStart);
469 if (!flattenValue(&flatEntry, typeWriter.getBuffer())) {
470 mDiag->error(DiagMessage()
471 << "failed to flatten resource '"
472 << ResourceNameRef(mPackage->name, type->type, flatEntry.entry->name)
473 << "' for configuration '" << config << "'");
474 return false;
475 }
476 }
477 typeWriter.finish();
478 return true;
479 }
480
481 std::vector<ResourceTableType*> collectAndSortTypes() {
482 std::vector<ResourceTableType*> sortedTypes;
483 for (auto& type : mPackage->types) {
484 if (type->type == ResourceType::kStyleable && !mOptions.useExtendedChunks) {
485 // Styleables aren't real Resource Types, they are represented in the R.java
486 // file.
487 continue;
488 }
489
490 assert(type->id && "type must have an ID set");
491
492 sortedTypes.push_back(type.get());
493 }
494 std::sort(sortedTypes.begin(), sortedTypes.end(), cmpIds<ResourceTableType>);
495 return sortedTypes;
496 }
497
498 std::vector<ResourceEntry*> collectAndSortEntries(ResourceTableType* type) {
499 // Sort the entries by entry ID.
500 std::vector<ResourceEntry*> sortedEntries;
501 for (auto& entry : type->entries) {
502 assert(entry->id && "entry must have an ID set");
503 sortedEntries.push_back(entry.get());
504 }
505 std::sort(sortedEntries.begin(), sortedEntries.end(), cmpIds<ResourceEntry>);
506 return sortedEntries;
507 }
508
509 bool flattenTypeSpec(ResourceTableType* type, std::vector<ResourceEntry*>* sortedEntries,
510 BigBuffer* buffer) {
511 ChunkWriter typeSpecWriter(buffer);
512 ResTable_typeSpec* specHeader = typeSpecWriter.startChunk<ResTable_typeSpec>(
513 RES_TABLE_TYPE_SPEC_TYPE);
514 specHeader->id = type->id.value();
515
516 if (sortedEntries->empty()) {
517 typeSpecWriter.finish();
518 return true;
519 }
520
521 // We can't just take the size of the vector. There may be holes in the entry ID space.
522 // Since the entries are sorted by ID, the last one will be the biggest.
523 const size_t numEntries = sortedEntries->back()->id.value() + 1;
524
525 specHeader->entryCount = util::hostToDevice32(numEntries);
526
527 // Reserve space for the masks of each resource in this type. These
528 // show for which configuration axis the resource changes.
529 uint32_t* configMasks = typeSpecWriter.nextBlock<uint32_t>(numEntries);
530
531 const size_t actualNumEntries = sortedEntries->size();
532 for (size_t entryIndex = 0; entryIndex < actualNumEntries; entryIndex++) {
533 ResourceEntry* entry = sortedEntries->at(entryIndex);
534
535 // Populate the config masks for this entry.
536
Adam Lesinski9e10ac72015-10-16 14:37:48 -0700537 if (entry->symbolStatus.state == SymbolState::kPublic) {
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700538 configMasks[entry->id.value()] |=
539 util::hostToDevice32(ResTable_typeSpec::SPEC_PUBLIC);
540 }
541
542 const size_t configCount = entry->values.size();
543 for (size_t i = 0; i < configCount; i++) {
544 const ConfigDescription& config = entry->values[i].config;
545 for (size_t j = i + 1; j < configCount; j++) {
546 configMasks[entry->id.value()] |= util::hostToDevice32(
547 config.diff(entry->values[j].config));
548 }
549 }
550 }
551 typeSpecWriter.finish();
552 return true;
553 }
554
555 bool flattenPublic(ResourceTableType* type, std::vector<ResourceEntry*>* sortedEntries,
556 BigBuffer* buffer) {
557 ChunkWriter publicWriter(buffer);
558 Public_header* publicHeader = publicWriter.startChunk<Public_header>(RES_TABLE_PUBLIC_TYPE);
559 publicHeader->typeId = type->id.value();
560
561 for (ResourceEntry* entry : *sortedEntries) {
Adam Lesinski9e10ac72015-10-16 14:37:48 -0700562 if (entry->symbolStatus.state != SymbolState::kUndefined) {
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700563 // Write the public status of this entry.
564 Public_entry* publicEntry = publicWriter.nextBlock<Public_entry>();
565 publicEntry->entryId = util::hostToDevice32(entry->id.value());
566 publicEntry->key.index = util::hostToDevice32(mKeyPool.makeRef(
567 entry->name).getIndex());
Adam Lesinski3b4cd942015-10-30 16:31:42 -0700568 publicEntry->source.path.index = util::hostToDevice32(mSourcePool->makeRef(
Adam Lesinski9e10ac72015-10-16 14:37:48 -0700569 util::utf8ToUtf16(entry->symbolStatus.source.path)).getIndex());
570 if (entry->symbolStatus.source.line) {
Adam Lesinski3b4cd942015-10-30 16:31:42 -0700571 publicEntry->source.line = util::hostToDevice32(
Adam Lesinski9e10ac72015-10-16 14:37:48 -0700572 entry->symbolStatus.source.line.value());
573 }
Adam Lesinski3b4cd942015-10-30 16:31:42 -0700574 publicEntry->source.comment.index = util::hostToDevice32(mSourcePool->makeRef(
575 entry->symbolStatus.comment).getIndex());
Adam Lesinski9e10ac72015-10-16 14:37:48 -0700576
577 switch (entry->symbolStatus.state) {
578 case SymbolState::kPrivate:
579 publicEntry->state = Public_entry::kPrivate;
580 break;
581
582 case SymbolState::kPublic:
583 publicEntry->state = Public_entry::kPublic;
584 break;
585
586 default:
587 assert(false && "should not serialize any other state");
588 break;
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700589 }
590
591 // Don't hostToDevice until the last step.
592 publicHeader->count += 1;
593 }
594 }
595
596 publicHeader->count = util::hostToDevice32(publicHeader->count);
597 publicWriter.finish();
598 return true;
599 }
600
601 bool flattenTypes(BigBuffer* buffer) {
602 // Sort the types by their IDs. They will be inserted into the StringPool in this order.
603 std::vector<ResourceTableType*> sortedTypes = collectAndSortTypes();
604
605 size_t expectedTypeId = 1;
606 for (ResourceTableType* type : sortedTypes) {
607 // If there is a gap in the type IDs, fill in the StringPool
608 // with empty values until we reach the ID we expect.
609 while (type->id.value() > expectedTypeId) {
610 std::u16string typeName(u"?");
611 typeName += expectedTypeId;
612 mTypePool.makeRef(typeName);
613 expectedTypeId++;
614 }
615 expectedTypeId++;
616 mTypePool.makeRef(toString(type->type));
617
618 std::vector<ResourceEntry*> sortedEntries = collectAndSortEntries(type);
619
620 if (!flattenTypeSpec(type, &sortedEntries, buffer)) {
621 return false;
622 }
623
624 if (mOptions.useExtendedChunks) {
625 if (!flattenPublic(type, &sortedEntries, buffer)) {
626 return false;
627 }
628 }
629
630 // The binary resource table lists resource entries for each configuration.
631 // We store them inverted, where a resource entry lists the values for each
632 // configuration available. Here we reverse this to match the binary table.
633 std::map<ConfigDescription, std::vector<FlatEntry>> configToEntryListMap;
634 for (ResourceEntry* entry : sortedEntries) {
Adam Lesinskie78fd612015-10-22 12:48:43 -0700635 const uint32_t keyIndex = (uint32_t) mKeyPool.makeRef(entry->name).getIndex();
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700636
637 // Group values by configuration.
638 for (auto& configValue : entry->values) {
Adam Lesinskie78fd612015-10-22 12:48:43 -0700639 Value* value = configValue.value.get();
640
641 const StringPool::Ref sourceRef = mSourcePool->makeRef(
642 util::utf8ToUtf16(value->getSource().path));
643
644 uint32_t lineNumber = 0;
645 if (value->getSource().line) {
646 lineNumber = value->getSource().line.value();
647 }
648
Adam Lesinski3b4cd942015-10-30 16:31:42 -0700649 const StringPool::Ref commentRef = mSourcePool->makeRef(value->getComment());
650
Adam Lesinskie78fd612015-10-22 12:48:43 -0700651 configToEntryListMap[configValue.config]
652 .push_back(FlatEntry{
653 entry,
654 value,
655 keyIndex,
656 (uint32_t) sourceRef.getIndex(),
Adam Lesinski3b4cd942015-10-30 16:31:42 -0700657 lineNumber,
658 (uint32_t) commentRef.getIndex() });
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700659 }
660 }
661
662 // Flatten a configuration value.
663 for (auto& entry : configToEntryListMap) {
664 if (!flattenConfig(type, entry.first, &entry.second, buffer)) {
665 return false;
666 }
667 }
668 }
669 return true;
670 }
Adam Lesinski1ab598f2015-08-14 14:26:04 -0700671};
672
673} // namespace
674
// Flattens the whole ResourceTable into mBuffer as a binary ResTable chunk:
// header, global value string pool, optional symbol-table and source-pool
// chunks (extended only), then each flattened package. Symbol offsets are
// rebased to the final buffer layout at the end. Returns false on failure.
bool TableFlattener::consume(IAaptContext* context, ResourceTable* table) {
    // We must do this before writing the resources, since the string pool IDs may change.
    // Order: priority, then configuration, then value.
    table->stringPool.sort([](const StringPool::Entry& a, const StringPool::Entry& b) -> bool {
        int diff = a.context.priority - b.context.priority;
        if (diff < 0) return true;
        if (diff > 0) return false;
        diff = a.context.config.compare(b.context.config);
        if (diff < 0) return true;
        if (diff > 0) return false;
        return a.value < b.value;
    });
    table->stringPool.prune();

    // Write the ResTable header.
    ChunkWriter tableWriter(mBuffer);
    ResTable_header* tableHeader = tableWriter.startChunk<ResTable_header>(RES_TABLE_TYPE);
    tableHeader->packageCount = util::hostToDevice32(table->packages.size());

    // Flatten the values string pool.
    StringPool::flattenUtf8(tableWriter.getBuffer(), table->stringPool);

    // If we have a reference to a symbol that doesn't exist, we don't know its resource ID.
    // We encode the name of the symbol along with the offset of where to include the resource ID
    // once it is found.
    StringPool symbolPool;
    std::vector<SymbolWriter::Entry> symbolOffsets;

    // String pool holding the source paths of each value.
    StringPool sourcePool;

    BigBuffer packageBuffer(1024);

    // Flatten each package.
    for (auto& package : table->packages) {
        const size_t beforePackageSize = packageBuffer.size();

        // All packages will share a single global symbol pool.
        SymbolWriter packageSymbolWriter(&symbolPool);

        PackageFlattener flattener(context->getDiagnostics(), mOptions, package.get(),
                                   &packageSymbolWriter, &sourcePool);
        if (!flattener.flattenPackage(&packageBuffer)) {
            return false;
        }

        // The symbols are offset only from their own Package start. Offset them from the
        // start of the packageBuffer.
        packageSymbolWriter.shiftAllOffsets(beforePackageSize);

        // Extract all the symbols to offset
        symbolOffsets.insert(symbolOffsets.end(),
                             std::make_move_iterator(packageSymbolWriter.symbols.begin()),
                             std::make_move_iterator(packageSymbolWriter.symbols.end()));
    }

    SymbolTable_entry* symbolEntryData = nullptr;
    if (mOptions.useExtendedChunks) {
        if (!symbolOffsets.empty()) {
            // Sort the offsets so we can scan them linearly.
            std::sort(symbolOffsets.begin(), symbolOffsets.end(),
                      [](const SymbolWriter::Entry& a, const SymbolWriter::Entry& b) -> bool {
                          return a.offset < b.offset;
                      });

            // Write the Symbol header.
            ChunkWriter symbolWriter(tableWriter.getBuffer());
            SymbolTable_header* symbolHeader = symbolWriter.startChunk<SymbolTable_header>(
                    RES_TABLE_SYMBOL_TABLE_TYPE);
            symbolHeader->count = util::hostToDevice32(symbolOffsets.size());

            // Entries are filled in below, once the final package offsets are known.
            symbolEntryData = symbolWriter.nextBlock<SymbolTable_entry>(symbolOffsets.size());
            StringPool::flattenUtf8(symbolWriter.getBuffer(), symbolPool);
            symbolWriter.finish();
        }

        if (sourcePool.size() > 0) {
            // Write out source pool.
            ChunkWriter srcWriter(tableWriter.getBuffer());
            srcWriter.startChunk<ResChunk_header>(RES_TABLE_SOURCE_POOL_TYPE);
            StringPool::flattenUtf8(srcWriter.getBuffer(), sourcePool);
            srcWriter.finish();
        }
    }

    const size_t beforePackagesSize = tableWriter.size();

    // Finally merge all the packages into the main buffer.
    tableWriter.getBuffer()->appendBuffer(std::move(packageBuffer));

    // Update the offsets to their final values.
    if (symbolEntryData) {
        for (SymbolWriter::Entry& entry : symbolOffsets) {
            symbolEntryData->name.index = util::hostToDevice32(entry.name.getIndex());

            // The symbols were all calculated with the packageBuffer offset. We need to
            // add the beginning of the output buffer.
            symbolEntryData->offset = util::hostToDevice32(entry.offset + beforePackagesSize);
            symbolEntryData++;
        }
    }

    tableWriter.finish();
    return true;
}
779
780} // namespace aapt