/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ResourceTable.h"
#include "ResourceValues.h"
#include "ValueVisitor.h"

#include "flatten/ChunkWriter.h"
#include "flatten/ResourceTypeExtensions.h"
#include "flatten/TableFlattener.h"
#include "util/BigBuffer.h"

#include <base/macros.h>
#include <type_traits>
#include <numeric>

using namespace android;

namespace aapt {

namespace {

template <typename T>
static bool cmpIds(const T* a, const T* b) {
    return a->id.value() < b->id.value();
}

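// Copies the UTF-16 string in 'src' into 'dst' in device byte order, copying
// at most len - 1 characters and always writing a terminating null char16_t.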
static void strcpy16_htod(uint16_t* dst, size_t len, const StringPiece16& src) {
    if (len == 0) {
        return;
    }

    size_t i;
    const char16_t* srcData = src.data();
    for (i = 0; i < len - 1 && i < src.size(); i++) {
        dst[i] = util::hostToDevice16((uint16_t) srcData[i]);
    }
    dst[i] = 0;
}

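// A value paired with everything needed to write its ResTable_entry header:
// the owning ResourceEntry, the index of its key in the key string pool, and
// the index and line of its source in the source string pool.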
struct FlatEntry {
    ResourceEntry* entry;
    Value* value;
    uint32_t entryKey;
    uint32_t sourcePathKey;
    uint32_t sourceLine;
};

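// Records the name of each reference that does not yet have a resource ID,
// together with the offset in the output buffer where that ID belongs,
// presumably so a later linking step can patch the value in place.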
class SymbolWriter {
public:
    struct Entry {
        StringPool::Ref name;
        size_t offset;
    };

    std::vector<Entry> symbols;

    explicit SymbolWriter(StringPool* pool) : mPool(pool) {
    }

    void addSymbol(const ResourceNameRef& name, size_t offset) {
        symbols.push_back(Entry{ mPool->makeRef(name.package.toString() + u":" +
                                                toString(name.type).toString() + u"/" +
                                                name.entry.toString()), offset });
    }

    void shiftAllOffsets(size_t offset) {
        for (Entry& entry : symbols) {
            entry.offset += offset;
        }
    }

private:
    StringPool* mPool;
};

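// Flattens the complex (map-like) values -- attributes, styles, styleables,
// arrays, and plurals -- into a run of ResTable_map structs. References whose
// IDs are still unknown are recorded in the SymbolWriter.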
struct MapFlattenVisitor : public RawValueVisitor {
    using RawValueVisitor::visit;

    SymbolWriter* mSymbols;
    FlatEntry* mEntry;
    BigBuffer* mBuffer;
    size_t mEntryCount = 0;
    Maybe<uint32_t> mParentIdent;
    Maybe<ResourceNameRef> mParentName;

    MapFlattenVisitor(SymbolWriter* symbols, FlatEntry* entry, BigBuffer* buffer) :
            mSymbols(symbols), mEntry(entry), mBuffer(buffer) {
    }

    void flattenKey(Reference* key, ResTable_map* outEntry) {
        if (!key->id) {
            assert(key->name && "reference must have a name");

            outEntry->name.ident = util::hostToDevice32(0);
            mSymbols->addSymbol(key->name.value(), (mBuffer->size() - sizeof(ResTable_map)) +
                                offsetof(ResTable_map, name));
        } else {
            outEntry->name.ident = util::hostToDevice32(key->id.value().id);
        }
    }

    void flattenValue(Item* value, ResTable_map* outEntry) {
        if (Reference* ref = valueCast<Reference>(value)) {
            if (!ref->id) {
                assert(ref->name && "reference must have a name");

                mSymbols->addSymbol(ref->name.value(), (mBuffer->size() - sizeof(ResTable_map)) +
                                    offsetof(ResTable_map, value) + offsetof(Res_value, data));
            }
        }

        bool result = value->flatten(&outEntry->value);
        assert(result && "flatten failed");
    }

    void flattenEntry(Reference* key, Item* value) {
        ResTable_map* outEntry = mBuffer->nextBlock<ResTable_map>();
        flattenKey(key, outEntry);
        flattenValue(value, outEntry);
        outEntry->value.size = util::hostToDevice16(sizeof(outEntry->value));
        mEntryCount++;
    }

    void visit(Attribute* attr) override {
        {
            Reference key(ResourceId{ ResTable_map::ATTR_TYPE });
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, attr->typeMask);
            flattenEntry(&key, &val);
        }

        for (Attribute::Symbol& s : attr->symbols) {
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, s.value);
            flattenEntry(&s.symbol, &val);
        }
    }

    static bool cmpStyleEntries(const Style::Entry& a, const Style::Entry& b) {
        if (a.key.id) {
            if (b.key.id) {
                return a.key.id.value() < b.key.id.value();
            }
            return true;
        } else if (!b.key.id) {
            return a.key.name.value() < b.key.name.value();
        }
        return false;
    }

    void visit(Style* style) override {
        if (style->parent) {
            if (!style->parent.value().id) {
                assert(style->parent.value().name && "reference must have a name");
                mParentName = style->parent.value().name;
            } else {
                mParentIdent = style->parent.value().id.value().id;
            }
        }

        // Sort the style entries: keys with resource IDs first (by ID), then
        // keys that are still unresolved names (by name).
        std::sort(style->entries.begin(), style->entries.end(), cmpStyleEntries);

        for (Style::Entry& entry : style->entries) {
            flattenEntry(&entry.key, entry.value.get());
        }
    }

    void visit(Styleable* styleable) override {
        for (auto& attrRef : styleable->entries) {
            BinaryPrimitive val(Res_value{});
            flattenEntry(&attrRef, &val);
        }
    }

    void visit(Array* array) override {
        for (auto& item : array->items) {
            ResTable_map* outEntry = mBuffer->nextBlock<ResTable_map>();
            flattenValue(item.get(), outEntry);
            outEntry->value.size = util::hostToDevice16(sizeof(outEntry->value));
            mEntryCount++;
        }
    }

    void visit(Plural* plural) override {
        const size_t count = plural->values.size();
        for (size_t i = 0; i < count; i++) {
            if (!plural->values[i]) {
                continue;
            }

            ResourceId q;
            switch (i) {
            case Plural::Zero:
                q.id = android::ResTable_map::ATTR_ZERO;
                break;

            case Plural::One:
                q.id = android::ResTable_map::ATTR_ONE;
                break;

            case Plural::Two:
                q.id = android::ResTable_map::ATTR_TWO;
                break;

            case Plural::Few:
                q.id = android::ResTable_map::ATTR_FEW;
                break;

            case Plural::Many:
                q.id = android::ResTable_map::ATTR_MANY;
                break;

            case Plural::Other:
                q.id = android::ResTable_map::ATTR_OTHER;
                break;

            default:
                assert(false);
                break;
            }

            Reference key(q);
            flattenEntry(&key, plural->values[i].get());
        }
    }
};

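// Flattens a single ResourceTablePackage into a ResTable_package chunk: the
// package header, the type and key string pools, and the typeSpec/type chunks
// for every resource type in the package.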
class PackageFlattener {
public:
    PackageFlattener(IDiagnostics* diag, TableFlattenerOptions options,
                     ResourceTablePackage* package, SymbolWriter* symbolWriter,
                     StringPool* sourcePool) :
            mDiag(diag), mOptions(options), mPackage(package), mSymbols(symbolWriter),
            mSourcePool(sourcePool) {
    }

    bool flattenPackage(BigBuffer* buffer) {
        ChunkWriter pkgWriter(buffer);
        ResTable_package* pkgHeader = pkgWriter.startChunk<ResTable_package>(
                RES_TABLE_PACKAGE_TYPE);
        pkgHeader->id = util::hostToDevice32(mPackage->id.value());

        if (mPackage->name.size() >= arraysize(pkgHeader->name)) {
            mDiag->error(DiagMessage() <<
                         "package name '" << mPackage->name << "' is too long");
            return false;
        }

        // Copy the package name in device endianness.
        strcpy16_htod(pkgHeader->name, arraysize(pkgHeader->name), mPackage->name);

        // Serialize the types. We do this now so that our type and key strings
        // are populated. We write those first.
        BigBuffer typeBuffer(1024);
        flattenTypes(&typeBuffer);

        pkgHeader->typeStrings = util::hostToDevice32(pkgWriter.size());
        StringPool::flattenUtf16(pkgWriter.getBuffer(), mTypePool);

        pkgHeader->keyStrings = util::hostToDevice32(pkgWriter.size());
        StringPool::flattenUtf16(pkgWriter.getBuffer(), mKeyPool);

        // Add the ResTable_package header/type/key strings to the offset.
        mSymbols->shiftAllOffsets(pkgWriter.size());

        // Append the types.
        buffer->appendBuffer(std::move(typeBuffer));

        pkgWriter.finish();
        return true;
    }

private:
    IDiagnostics* mDiag;
    TableFlattenerOptions mOptions;
    ResourceTablePackage* mPackage;
    StringPool mTypePool;
    StringPool mKeyPool;
    SymbolWriter* mSymbols;
    StringPool* mSourcePool;

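    // Writes the ResTable_entry (or ResTable_entry_ext) header for a flat
    // entry, setting the PUBLIC, WEAK, and COMPLEX flags as appropriate and,
    // when extended chunks are enabled, appending a ResTable_entry_source block.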
    template <typename T>
    T* writeEntry(FlatEntry* entry, BigBuffer* buffer) {
        static_assert(std::is_same<ResTable_entry, T>::value ||
                      std::is_same<ResTable_entry_ext, T>::value,
                      "T must be ResTable_entry or ResTable_entry_ext");

        T* result = buffer->nextBlock<T>();
        ResTable_entry* outEntry = (ResTable_entry*)(result);
        if (entry->entry->symbolStatus.state == SymbolState::kPublic) {
            outEntry->flags |= ResTable_entry::FLAG_PUBLIC;
        }

        if (entry->value->isWeak()) {
            outEntry->flags |= ResTable_entry::FLAG_WEAK;
        }

        if (!entry->value->isItem()) {
            outEntry->flags |= ResTable_entry::FLAG_COMPLEX;
        }

        outEntry->key.index = util::hostToDevice32(entry->entryKey);
        outEntry->size = sizeof(T);

        if (mOptions.useExtendedChunks) {
            // Write the extra source block. This will be ignored by the Android runtime.
            ResTable_entry_source* sourceBlock = buffer->nextBlock<ResTable_entry_source>();
            sourceBlock->pathIndex = util::hostToDevice32(entry->sourcePathKey);
            sourceBlock->line = util::hostToDevice32(entry->sourceLine);
            outEntry->size += sizeof(*sourceBlock);
        }

        outEntry->flags = util::hostToDevice16(outEntry->flags);
        outEntry->size = util::hostToDevice16(outEntry->size);
        return result;
    }

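    // Flattens a single entry's value. Simple items become a ResTable_entry
    // followed by a Res_value; complex values become a ResTable_entry_ext
    // followed by the ResTable_map entries written by MapFlattenVisitor.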
    bool flattenValue(FlatEntry* entry, BigBuffer* buffer) {
        if (entry->value->isItem()) {
            writeEntry<ResTable_entry>(entry, buffer);
            if (Reference* ref = valueCast<Reference>(entry->value)) {
                if (!ref->id) {
                    assert(ref->name && "reference must have at least a name");
                    mSymbols->addSymbol(ref->name.value(),
                                        buffer->size() + offsetof(Res_value, data));
                }
            }
            Res_value* outValue = buffer->nextBlock<Res_value>();
            bool result = static_cast<Item*>(entry->value)->flatten(outValue);
            assert(result && "flatten failed");
            outValue->size = util::hostToDevice16(sizeof(*outValue));
        } else {
            const size_t beforeEntry = buffer->size();
            ResTable_entry_ext* outEntry = writeEntry<ResTable_entry_ext>(entry, buffer);
            MapFlattenVisitor visitor(mSymbols, entry, buffer);
            entry->value->accept(&visitor);
            outEntry->count = util::hostToDevice32(visitor.mEntryCount);
            if (visitor.mParentName) {
                mSymbols->addSymbol(visitor.mParentName.value(),
                                    beforeEntry + offsetof(ResTable_entry_ext, parent));
            } else if (visitor.mParentIdent) {
                outEntry->parent.ident = util::hostToDevice32(visitor.mParentIdent.value());
            }
        }
        return true;
    }

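    // Writes one ResTable_type chunk for the given configuration: an offset
    // array indexed by entry ID (0xffffffff marks a missing entry), followed
    // by the flattened entries themselves.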
    bool flattenConfig(const ResourceTableType* type, const ConfigDescription& config,
                       std::vector<FlatEntry>* entries, BigBuffer* buffer) {
        ChunkWriter typeWriter(buffer);
        ResTable_type* typeHeader = typeWriter.startChunk<ResTable_type>(RES_TABLE_TYPE_TYPE);
        typeHeader->id = type->id.value();
        typeHeader->config = config;
        typeHeader->config.swapHtoD();

        auto maxAccum = [](uint32_t max, const std::unique_ptr<ResourceEntry>& a) -> uint32_t {
            return std::max(max, (uint32_t) a->id.value());
        };

        // Find the largest entry ID. That is how many entries we will have.
        const uint32_t entryCount =
                std::accumulate(type->entries.begin(), type->entries.end(), 0, maxAccum) + 1;

        typeHeader->entryCount = util::hostToDevice32(entryCount);
        uint32_t* indices = typeWriter.nextBlock<uint32_t>(entryCount);

        assert((size_t) entryCount <= std::numeric_limits<uint16_t>::max() + 1);
        memset(indices, 0xff, entryCount * sizeof(uint32_t));

        typeHeader->entriesStart = util::hostToDevice32(typeWriter.size());

        const size_t entryStart = typeWriter.getBuffer()->size();
        for (FlatEntry& flatEntry : *entries) {
            assert(flatEntry.entry->id.value() < entryCount);
            indices[flatEntry.entry->id.value()] = util::hostToDevice32(
                    typeWriter.getBuffer()->size() - entryStart);
            if (!flattenValue(&flatEntry, typeWriter.getBuffer())) {
                mDiag->error(DiagMessage()
                             << "failed to flatten resource '"
                             << ResourceNameRef(mPackage->name, type->type, flatEntry.entry->name)
                             << "' for configuration '" << config << "'");
                return false;
            }
        }
        typeWriter.finish();
        return true;
    }

    std::vector<ResourceTableType*> collectAndSortTypes() {
        std::vector<ResourceTableType*> sortedTypes;
        for (auto& type : mPackage->types) {
            if (type->type == ResourceType::kStyleable && !mOptions.useExtendedChunks) {
                // Styleables aren't real Resource Types, they are represented in the R.java file.
                continue;
            }

            assert(type->id && "type must have an ID set");

            sortedTypes.push_back(type.get());
        }
        std::sort(sortedTypes.begin(), sortedTypes.end(), cmpIds<ResourceTableType>);
        return sortedTypes;
    }

    std::vector<ResourceEntry*> collectAndSortEntries(ResourceTableType* type) {
        // Sort the entries by entry ID.
        std::vector<ResourceEntry*> sortedEntries;
        for (auto& entry : type->entries) {
            assert(entry->id && "entry must have an ID set");
            sortedEntries.push_back(entry.get());
        }
        std::sort(sortedEntries.begin(), sortedEntries.end(), cmpIds<ResourceEntry>);
        return sortedEntries;
    }

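    // Writes the ResTable_typeSpec chunk for a type: one configuration mask per
    // entry ID, where each mask is the union of the configuration differences
    // between every pair of configurations the entry is defined in, plus
    // SPEC_PUBLIC for public entries.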
    bool flattenTypeSpec(ResourceTableType* type, std::vector<ResourceEntry*>* sortedEntries,
                         BigBuffer* buffer) {
        ChunkWriter typeSpecWriter(buffer);
        ResTable_typeSpec* specHeader = typeSpecWriter.startChunk<ResTable_typeSpec>(
                RES_TABLE_TYPE_SPEC_TYPE);
        specHeader->id = type->id.value();

        if (sortedEntries->empty()) {
            typeSpecWriter.finish();
            return true;
        }

        // We can't just take the size of the vector. There may be holes in the entry ID space.
        // Since the entries are sorted by ID, the last one will be the biggest.
        const size_t numEntries = sortedEntries->back()->id.value() + 1;

        specHeader->entryCount = util::hostToDevice32(numEntries);

        // Reserve space for the masks of each resource in this type. These
        // show for which configuration axis the resource changes.
        uint32_t* configMasks = typeSpecWriter.nextBlock<uint32_t>(numEntries);

        const size_t actualNumEntries = sortedEntries->size();
        for (size_t entryIndex = 0; entryIndex < actualNumEntries; entryIndex++) {
            ResourceEntry* entry = sortedEntries->at(entryIndex);

            // Populate the config masks for this entry.

            if (entry->symbolStatus.state == SymbolState::kPublic) {
                configMasks[entry->id.value()] |=
                        util::hostToDevice32(ResTable_typeSpec::SPEC_PUBLIC);
            }

            const size_t configCount = entry->values.size();
            for (size_t i = 0; i < configCount; i++) {
                const ConfigDescription& config = entry->values[i].config;
                for (size_t j = i + 1; j < configCount; j++) {
                    configMasks[entry->id.value()] |= util::hostToDevice32(
                            config.diff(entry->values[j].config));
                }
            }
        }
        typeSpecWriter.finish();
        return true;
    }

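    // Writes the Public_header chunk (an extended chunk, only emitted when
    // useExtendedChunks is set) listing every entry explicitly marked public or
    // private, along with the source file and line where that was declared.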
    bool flattenPublic(ResourceTableType* type, std::vector<ResourceEntry*>* sortedEntries,
                       BigBuffer* buffer) {
        ChunkWriter publicWriter(buffer);
        Public_header* publicHeader = publicWriter.startChunk<Public_header>(RES_TABLE_PUBLIC_TYPE);
        publicHeader->typeId = type->id.value();

        for (ResourceEntry* entry : *sortedEntries) {
            if (entry->symbolStatus.state != SymbolState::kUndefined) {
                // Write the public status of this entry.
                Public_entry* publicEntry = publicWriter.nextBlock<Public_entry>();
                publicEntry->entryId = util::hostToDevice32(entry->id.value());
                publicEntry->key.index = util::hostToDevice32(mKeyPool.makeRef(
                        entry->name).getIndex());
                publicEntry->source.index = util::hostToDevice32(mSourcePool->makeRef(
                        util::utf8ToUtf16(entry->symbolStatus.source.path)).getIndex());
                if (entry->symbolStatus.source.line) {
                    publicEntry->sourceLine = util::hostToDevice32(
                            entry->symbolStatus.source.line.value());
                }

                switch (entry->symbolStatus.state) {
                case SymbolState::kPrivate:
                    publicEntry->state = Public_entry::kPrivate;
                    break;

                case SymbolState::kPublic:
                    publicEntry->state = Public_entry::kPublic;
                    break;

                default:
                    assert(false && "should not serialize any other state");
                    break;
                }

                // Don't hostToDevice until the last step.
                publicHeader->count += 1;
            }
        }

        publicHeader->count = util::hostToDevice32(publicHeader->count);
        publicWriter.finish();
        return true;
    }

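    // Flattens every type in the package: for each type, a ResTable_typeSpec
    // chunk, an optional Public_header chunk, and one ResTable_type chunk per
    // configuration that has values.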
    bool flattenTypes(BigBuffer* buffer) {
        // Sort the types by their IDs. They will be inserted into the StringPool in this order.
        std::vector<ResourceTableType*> sortedTypes = collectAndSortTypes();

        size_t expectedTypeId = 1;
        for (ResourceTableType* type : sortedTypes) {
            // If there is a gap in the type IDs, fill in the StringPool
            // with empty values until we reach the ID we expect.
            while (type->id.value() > expectedTypeId) {
                std::u16string typeName(u"?");
                typeName += expectedTypeId;
                mTypePool.makeRef(typeName);
                expectedTypeId++;
            }
            expectedTypeId++;
            mTypePool.makeRef(toString(type->type));

            std::vector<ResourceEntry*> sortedEntries = collectAndSortEntries(type);

            if (!flattenTypeSpec(type, &sortedEntries, buffer)) {
                return false;
            }

            if (mOptions.useExtendedChunks) {
                if (!flattenPublic(type, &sortedEntries, buffer)) {
                    return false;
                }
            }

            // The binary resource table lists resource entries for each configuration.
            // We store them inverted, where a resource entry lists the values for each
            // configuration available. Here we reverse this to match the binary table.
            std::map<ConfigDescription, std::vector<FlatEntry>> configToEntryListMap;
            for (ResourceEntry* entry : sortedEntries) {
                const size_t keyIndex = mKeyPool.makeRef(entry->name).getIndex();

                // Group values by configuration.
                for (auto& configValue : entry->values) {
                    configToEntryListMap[configValue.config].push_back(FlatEntry{
                            entry, configValue.value.get(), (uint32_t) keyIndex,
                            (uint32_t)(mSourcePool->makeRef(util::utf8ToUtf16(
                                    configValue.source.path)).getIndex()),
                            (uint32_t)(configValue.source.line
                                    ? configValue.source.line.value() : 0)
                    });
                }
            }

            // Flatten the grouped entries for each configuration.
            for (auto& entry : configToEntryListMap) {
                if (!flattenConfig(type, entry.first, &entry.second, buffer)) {
                    return false;
                }
            }
        }
        return true;
    }
};

} // namespace

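// Flattens the whole ResourceTable. The output layout is: the ResTable_header,
// the global value string pool, the extended symbol-table and source-pool
// chunks when enabled, and finally one ResTable_package chunk per package.
// Offsets recorded in the symbol table are patched at the end, once the final
// position of the package data is known.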
bool TableFlattener::consume(IAaptContext* context, ResourceTable* table) {
    // We must do this before writing the resources, since the string pool IDs may change.
    table->stringPool.sort([](const StringPool::Entry& a, const StringPool::Entry& b) -> bool {
        int diff = a.context.priority - b.context.priority;
        if (diff < 0) return true;
        if (diff > 0) return false;
        diff = a.context.config.compare(b.context.config);
        if (diff < 0) return true;
        if (diff > 0) return false;
        return a.value < b.value;
    });
    table->stringPool.prune();

    // Write the ResTable header.
    ChunkWriter tableWriter(mBuffer);
    ResTable_header* tableHeader = tableWriter.startChunk<ResTable_header>(RES_TABLE_TYPE);
    tableHeader->packageCount = util::hostToDevice32(table->packages.size());

    // Flatten the values string pool.
    StringPool::flattenUtf8(tableWriter.getBuffer(), table->stringPool);

    // If we have a reference to a symbol that doesn't exist, we don't know its resource ID.
    // We encode the name of the symbol along with the offset of where to include the resource ID
    // once it is found.
    StringPool symbolPool;
    std::vector<SymbolWriter::Entry> symbolOffsets;

    // String pool holding the source paths of each value.
    StringPool sourcePool;

    BigBuffer packageBuffer(1024);

    // Flatten each package.
    for (auto& package : table->packages) {
        const size_t beforePackageSize = packageBuffer.size();

        // All packages will share a single global symbol pool.
        SymbolWriter packageSymbolWriter(&symbolPool);

        PackageFlattener flattener(context->getDiagnostics(), mOptions, package.get(),
                                   &packageSymbolWriter, &sourcePool);
        if (!flattener.flattenPackage(&packageBuffer)) {
            return false;
        }

        // The symbols are offset only from their own Package start. Offset them from the
        // start of the packageBuffer.
        packageSymbolWriter.shiftAllOffsets(beforePackageSize);

        // Move this package's symbol entries into the global list so their
        // offsets can be patched once the final layout is known.
        symbolOffsets.insert(symbolOffsets.end(),
                             std::make_move_iterator(packageSymbolWriter.symbols.begin()),
                             std::make_move_iterator(packageSymbolWriter.symbols.end()));
    }

    SymbolTable_entry* symbolEntryData = nullptr;
    if (mOptions.useExtendedChunks) {
        if (!symbolOffsets.empty()) {
            // Sort the offsets so we can scan them linearly.
            std::sort(symbolOffsets.begin(), symbolOffsets.end(),
                      [](const SymbolWriter::Entry& a, const SymbolWriter::Entry& b) -> bool {
                          return a.offset < b.offset;
                      });

            // Write the Symbol header.
            ChunkWriter symbolWriter(tableWriter.getBuffer());
            SymbolTable_header* symbolHeader = symbolWriter.startChunk<SymbolTable_header>(
                    RES_TABLE_SYMBOL_TABLE_TYPE);
            symbolHeader->count = util::hostToDevice32(symbolOffsets.size());

            symbolEntryData = symbolWriter.nextBlock<SymbolTable_entry>(symbolOffsets.size());
            StringPool::flattenUtf8(symbolWriter.getBuffer(), symbolPool);
            symbolWriter.finish();
        }

        if (sourcePool.size() > 0) {
            // Write out source pool.
            ChunkWriter srcWriter(tableWriter.getBuffer());
            srcWriter.startChunk<ResChunk_header>(RES_TABLE_SOURCE_POOL_TYPE);
            StringPool::flattenUtf8(srcWriter.getBuffer(), sourcePool);
            srcWriter.finish();
        }
    }

    const size_t beforePackagesSize = tableWriter.size();

    // Finally merge all the packages into the main buffer.
    tableWriter.getBuffer()->appendBuffer(std::move(packageBuffer));

    // Update the offsets to their final values.
    if (symbolEntryData) {
        for (SymbolWriter::Entry& entry : symbolOffsets) {
            symbolEntryData->stringIndex = util::hostToDevice32(entry.name.getIndex());

            // The symbols were all calculated with the packageBuffer offset. We need to
            // add the beginning of the output buffer.
            symbolEntryData->offset = util::hostToDevice32(entry.offset + beforePackagesSize);
            symbolEntryData++;
        }
    }

    tableWriter.finish();
    return true;
}

} // namespace aapt