The Android Open Source Project | 7c1b96a | 2008-10-21 07:00:00 -0700 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright (C) 2008 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #define LOG_TAG "SurfaceFlinger" |
| 18 | |
| 19 | #include <stdlib.h> |
| 20 | #include <stdio.h> |
| 21 | #include <stdint.h> |
| 22 | #include <unistd.h> |
| 23 | #include <fcntl.h> |
| 24 | #include <errno.h> |
| 25 | #include <math.h> |
| 26 | #include <sys/types.h> |
| 27 | #include <sys/stat.h> |
| 28 | #include <sys/ioctl.h> |
| 29 | |
| 30 | #include <cutils/log.h> |
| 31 | #include <cutils/properties.h> |
| 32 | |
| 33 | #include <utils/MemoryDealer.h> |
| 34 | #include <utils/MemoryBase.h> |
| 35 | #include <utils/MemoryHeapPmem.h> |
| 36 | #include <utils/MemoryHeapBase.h> |
| 37 | #include <utils/IPCThreadState.h> |
| 38 | #include <utils/StopWatch.h> |
| 39 | |
| 40 | #include <ui/ISurfaceComposer.h> |
| 41 | |
| 42 | #include "VRamHeap.h" |
| 43 | #include "GPUHardware.h" |
| 44 | |
| 45 | #if HAVE_ANDROID_OS |
| 46 | #include <linux/android_pmem.h> |
| 47 | #endif |
| 48 | |
| 49 | #include "GPUHardware/GPUHardware.h" |
| 50 | |
/*
 * This file manages the GPU if there is one. The intent is that this code
 * needs to be different for every device. Currently there is no abstraction,
 * but in the long term, this code needs to be refactored so that API and
 * implementation are separated.
 *
 * In this particular implementation, the GPU, its memory and registers are
 * managed here. Clients (such as OpenGL ES) request the GPU when they need
 * it and are given a revocable heap containing the registers and memory.
 *
 */
| 62 | |
| 63 | namespace android { |
| 64 | // --------------------------------------------------------------------------- |
| 65 | |
// size reserved for GPU surfaces
// 1200 KB fits exactly:
//  - two 320*480 16-bits double-buffered surfaces
//  - one 320*480 32-bits double-buffered surface
//  - one 320*240 16-bits double-buffered, 4x anti-aliased surface
static const int GPU_RESERVED_SIZE = 1200 * 1024;

// size of the GPU register heap mapping (/dev/hw3d, see GPURegisterHeap)
static const int GPUR_SIZE = 1 * 1024 * 1024;
| 74 | |
| 75 | // --------------------------------------------------------------------------- |
| 76 | |
/*
 * GPUHandle is a special IMemory given to the client. It represents their
 * handle to the GPU. Once they give it up, they lose GPU access, or if
 * they explicitly revoke their access through the binder code 1000.
 * In both cases, this triggers a callback to revoke()
 * first, and then actually powers down the chip.
 *
 * In the case of a misbehaving app, GPUHardware can ask for an immediate
 * release of the GPU from the target process, which should answer by calling
 * code 1000 on GPUHandle. If it doesn't in a timely manner, the GPU will
 * be revoked from under their feet.
 *
 * We should never hold a strong reference on GPUHandle. In practice this
 * shouldn't be a big issue though, because clients should use code 1000 and
 * not rely on the dtor being called.
 *
 */
| 94 | |
| 95 | class GPUHandle : public BnMemory |
| 96 | { |
| 97 | public: |
| 98 | GPUHandle(const sp<GPUHardware>& gpu, const sp<IMemoryHeap>& heap) |
| 99 | : mGPU(gpu), mClientHeap(heap) { |
| 100 | } |
| 101 | virtual ~GPUHandle(); |
| 102 | virtual sp<IMemoryHeap> getMemory(ssize_t* offset, size_t* size) const; |
| 103 | virtual status_t onTransact( |
| 104 | uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags); |
| 105 | void setOwner(int owner) { mOwner = owner; } |
| 106 | private: |
| 107 | void revokeNotification(); |
| 108 | wp<GPUHardware> mGPU; |
| 109 | sp<IMemoryHeap> mClientHeap; |
| 110 | int mOwner; |
| 111 | }; |
| 112 | |
GPUHandle::~GPUHandle() {
    // The client dropped its last reference: give the GPU back.
    //LOGD("GPUHandle %p released, revoking GPU", this);
    revokeNotification();
}
| 117 | |
| 118 | void GPUHandle::revokeNotification() { |
| 119 | sp<GPUHardware> hw(mGPU.promote()); |
| 120 | if (hw != 0) { |
| 121 | hw->revoke(mOwner); |
| 122 | } |
| 123 | } |
| 124 | sp<IMemoryHeap> GPUHandle::getMemory(ssize_t* offset, size_t* size) const |
| 125 | { |
| 126 | if (offset) *offset = 0; |
| 127 | if (size) *size = mClientHeap !=0 ? mClientHeap->virtualSize() : 0; |
| 128 | return mClientHeap; |
| 129 | } |
| 130 | status_t GPUHandle::onTransact( |
| 131 | uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) |
| 132 | { |
| 133 | status_t err = BnMemory::onTransact(code, data, reply, flags); |
| 134 | if (err == UNKNOWN_TRANSACTION && code == 1000) { |
| 135 | int callingPid = IPCThreadState::self()->getCallingPid(); |
| 136 | //LOGD("pid %d voluntarily revoking gpu", callingPid); |
| 137 | if (callingPid == mOwner) { |
| 138 | revokeNotification(); |
| 139 | // we've revoked the GPU, don't do it again later when we |
| 140 | // are destroyed. |
| 141 | mGPU.clear(); |
| 142 | } else { |
| 143 | LOGW("%d revoking someone else's gpu? (owner=%d)", |
| 144 | callingPid, mOwner); |
| 145 | } |
| 146 | err = NO_ERROR; |
| 147 | } |
| 148 | return err; |
| 149 | } |
| 150 | |
| 151 | // --------------------------------------------------------------------------- |
| 152 | |
| 153 | class MemoryHeapRegs : public MemoryHeapPmem |
| 154 | { |
| 155 | public: |
| 156 | MemoryHeapRegs(const wp<GPUHardware>& gpu, const sp<MemoryHeapBase>& heap); |
| 157 | virtual ~MemoryHeapRegs(); |
| 158 | sp<IMemory> mapMemory(size_t offset, size_t size); |
| 159 | virtual void revoke(); |
| 160 | private: |
| 161 | wp<GPUHardware> mGPU; |
| 162 | }; |
| 163 | |
// Constructing the client heap powers the GPU on and grants the register
// mapping to the client process (HW3D_GRANT_GPU on the /dev/hw3d fd).
MemoryHeapRegs::MemoryHeapRegs(const wp<GPUHardware>& gpu, const sp<MemoryHeapBase>& heap)
    : MemoryHeapPmem(heap), mGPU(gpu)
{
#if HAVE_ANDROID_OS
    if (heapID()>0) {
        /* this is where the GPU is powered on and the registers are mapped
         * in the client */
        //LOGD("ioctl(HW3D_GRANT_GPU)");
        int err = ioctl(heapID(), HW3D_GRANT_GPU, base());
        if (err) {
            // Deliberately ignored (best effort):
            // it can happen if the master heap has been closed already
            // in which case the GPU already is revoked (app crash for
            // instance).
            //LOGW("HW3D_GRANT_GPU failed (%s), mFD=%d, base=%p",
            //        strerror(errno), heapID(), base());
        }
    }
#endif
}
| 183 | |
MemoryHeapRegs::~MemoryHeapRegs()
{
    // Intentionally empty: the GPU is given back explicitly via revoke().
}
| 187 | |
| 188 | sp<IMemory> MemoryHeapRegs::mapMemory(size_t offset, size_t size) |
| 189 | { |
| 190 | sp<GPUHandle> memory; |
| 191 | sp<GPUHardware> gpu = mGPU.promote(); |
| 192 | if (heapID()>0 && gpu!=0) |
| 193 | memory = new GPUHandle(gpu, this); |
| 194 | return memory; |
| 195 | } |
| 196 | |
// Unmap the registers from the client first, then power the GPU down.
void MemoryHeapRegs::revoke()
{
    MemoryHeapPmem::revoke();
#if HAVE_ANDROID_OS
    if (heapID() > 0) {
        //LOGD("ioctl(HW3D_REVOKE_GPU)");
        int err = ioctl(heapID(), HW3D_REVOKE_GPU, base());
        LOGE_IF(err, "HW3D_REVOKE_GPU failed (%s), mFD=%d, base=%p",
                strerror(errno), heapID(), base());
    }
#endif
}
| 209 | |
| 210 | // --------------------------------------------------------------------------- |
| 211 | |
/*
 * GPURegisterHeap is the master pmem heap for the /dev/hw3d register
 * range; per-client mappings of it are MemoryHeapRegs objects.
 */
class GPURegisterHeap : public PMemHeapInterface
{
public:
    GPURegisterHeap(const sp<GPUHardware>& gpu)
        : PMemHeapInterface("/dev/hw3d", GPUR_SIZE), mGPU(gpu)
    {
    }
    virtual ~GPURegisterHeap() {
    }
    virtual sp<MemoryHeapPmem> createClientHeap() {
        // NOTE(review): constructing an sp<> from 'this' is only safe
        // because this object is already owned by an sp<> elsewhere
        // (GPUHardware::mHeapR) — confirm if ownership ever changes.
        sp<MemoryHeapBase> parentHeap(this);
        return new MemoryHeapRegs(mGPU, parentHeap);
    }
private:
    wp<GPUHardware> mGPU;   // weak: must not keep GPUHardware alive
};
| 228 | |
| 229 | /*****************************************************************************/ |
| 230 | |
// Nobody owns the GPU at startup; heaps are created lazily by requestLocked().
GPUHardware::GPUHardware()
    : mOwner(NO_OWNER)
{
}
| 235 | |
GPUHardware::~GPUHardware()
{
    // sp<> members (heaps, allocator, callback) release themselves.
}
| 239 | |
// Grants the GPU surface heap to 'pid', taking it back from the current
// owner first if needed. Returns 0 on failure, in which case mOwner is
// recorded as SURFACE_FAILED.
sp<MemoryDealer> GPUHardware::request(int pid)
{
    sp<MemoryDealer> dealer;

    LOGD("pid %d requesting gpu surface (current owner = %d)", pid, mOwner);

    const int self_pid = getpid();
    if (pid == self_pid) {
        // can't use GPU from surfaceflinger's process
        return dealer;
    }

    Mutex::Autolock _l(mLock);

    if (mOwner != pid) {
        // someone already has the gpu.
        takeBackGPULocked();

        // releaseLocked() should be a no-op most of the time
        releaseLocked();

        // (Re)build the heaps and the surface allocator.
        requestLocked();
    }

    dealer = mAllocator;
    mOwner = pid;
    if (dealer == 0) {
        mOwner = SURFACE_FAILED;
    }

    LOGD_IF(dealer!=0, "gpu surface granted to pid %d", mOwner);
    return dealer;
}
| 273 | |
// Grants the whole GPU (both pmem regions plus the register heap) to the
// calling process. 'callback' is kept so we can later ask that process to
// give the GPU back (takeBackGPULocked).
status_t GPUHardware::request(const sp<IGPUCallback>& callback,
        ISurfaceComposer::gpu_info_t* gpu)
{
    sp<IMemory> gpuHandle;
    IPCThreadState* ipc = IPCThreadState::self();
    const int pid = ipc->getCallingPid();
    const int self_pid = getpid();

    LOGD("pid %d requesting gpu core (owner = %d)", pid, mOwner);

    if (pid == self_pid) {
        // can't use GPU from surfaceflinger's process
        return PERMISSION_DENIED;
    }

    Mutex::Autolock _l(mLock);
    if (mOwner != pid) {
        // someone already has the gpu.
        takeBackGPULocked();

        // releaseLocked() should be a no-op most of the time
        releaseLocked();

        requestLocked();
    }

    if (mHeapR.isValid()) {
        gpu->count = 2;
        gpu->regions[0].region = mHeap0.map(true);
        gpu->regions[0].reserved = mHeap0.reserved;
        gpu->regions[1].region = mHeap1.map(true);
        gpu->regions[1].reserved = mHeap1.reserved;
        gpu->regs = mHeapR.map();
        if (gpu->regs != 0) {
            // mHeapR hands out GPUHandle objects (MemoryHeapRegs::mapMemory),
            // so this downcast is safe.
            static_cast< GPUHandle* >(gpu->regs.get())->setOwner(pid);
        }
        mCallback = callback;
        mOwner = pid;
        //LOGD("gpu core granted to pid %d, handle base=%p",
        //        mOwner, gpu->regs->pointer());
    } else {
        LOGW("couldn't grant gpu core to pid %d", pid);
        // NOTE(review): NO_ERROR is still returned in this failure case;
        // the caller must check gpu->regs itself — confirm intended.
    }

    return NO_ERROR;
}
| 320 | |
// Called when a client gives the GPU up (binder code 1000 or GPUHandle
// destruction). Only the current owner may actually revoke.
void GPUHardware::revoke(int pid)
{
    Mutex::Autolock _l(mLock);
    if (mOwner > 0) {
        if (pid != mOwner) {
            LOGW("GPU owned by %d, revoke from %d", mOwner, pid);
            return;
        }
        //LOGD("revoke pid=%d, owner=%d", pid, mOwner);
        // mOwner could be <0 if the same process acquired the GPU
        // several times without releasing it first.
        // NOTE(review): with the mOwner > 0 guard above, the <0 case
        // never reaches here — comment may be stale.
        // Wake takeBackGPULocked() if it is waiting for this answer.
        mCondition.signal();
        releaseLocked(true);
    }
}
| 336 | |
// Politely asks the current owner (via its callback, with a timeout) to
// give the GPU back, then forcibly releases it.
status_t GPUHardware::friendlyRevoke()
{
    Mutex::Autolock _l(mLock);
    takeBackGPULocked();
    //LOGD("friendlyRevoke owner=%d", mOwner);
    releaseLocked(true);
    return NO_ERROR;
}
| 345 | |
// Asks the current owner to release the GPU and gives it up to 250ms to
// answer (revoke() signals mCondition). Must be called with mLock held;
// waitRelative() releases the lock while waiting.
void GPUHardware::takeBackGPULocked()
{
    sp<IGPUCallback> callback = mCallback;
    mCallback.clear();
    if (callback != 0) {
        callback->gpuLost(); // one-way
        mCondition.waitRelative(mLock, ms2ns(250));
    }
}
| 355 | |
// Lazily (re)builds the heaps and the surface allocator. Strategy: if
// pmem_gpu1 is available, surfaces are carved out of it and pmem_gpu0 is
// opened whole; otherwise surfaces come from pmem_gpu0. In all cases the
// register heap (/dev/hw3d) must also be acquired or everything is
// abandoned. Must be called with mLock held.
void GPUHardware::requestLocked()
{
    if (mAllocator == 0) {
        GPUPart* part = 0;
        sp<PMemHeap> surfaceHeap;
        // promote() tries to reuse a previously released heap first.
        if (mHeap1.promote() == false) {
            //LOGD("requestLocked: (1) creating new heap");
            mHeap1.set(new PMemHeap("/dev/pmem_gpu1", 0, GPU_RESERVED_SIZE));
        }
        if (mHeap1.isValid()) {
            //LOGD("requestLocked: (1) heap is valid");
            // NOTE: if GPU1 is available we use it for our surfaces
            // this could be device specific, so we should do something more
            // generic
            surfaceHeap = static_cast< PMemHeap* >( mHeap1.getHeap().get() );
            part = &mHeap1;
            if (mHeap0.promote() == false) {
                //LOGD("requestLocked: (0) creating new heap");
                mHeap0.set(new PMemHeap("/dev/pmem_gpu0"));
            }
        } else {
            //LOGD("requestLocked: (1) heap is not valid");
            // No GPU1, use GPU0 only
            if (mHeap0.promote() == false) {
                //LOGD("requestLocked: (0) creating new heap");
                mHeap0.set(new PMemHeap("/dev/pmem_gpu0", 0, GPU_RESERVED_SIZE));
            }
            if (mHeap0.isValid()) {
                //LOGD("requestLocked: (0) heap is valid");
                surfaceHeap = static_cast< PMemHeap* >( mHeap0.getHeap().get() );
                part = &mHeap0;
            }
        }

        // The register heap is only worth acquiring if we got at least
        // one memory heap.
        if (mHeap0.isValid() || mHeap1.isValid()) {
            if (mHeapR.promote() == false) {
                //LOGD("requestLocked: (R) creating new register heap");
                mHeapR.set(new GPURegisterHeap(this));
            }
        } else {
            // we got nothing...
            mHeap0.clear();
            mHeap1.clear();
        }

        if (mHeapR.isValid() == false) {
            //LOGD("requestLocked: (R) register heap not valid!!!");
            // damn, couldn't get the gpu registers!
            mHeap0.clear();
            mHeap1.clear();
            surfaceHeap.clear();
            part = NULL;
        }

        // Success: publish the dealer that carves surfaces out of the
        // chosen part's reserved region.
        if (surfaceHeap != 0 && part && part->getClientHeap()!=0) {
            part->reserved = GPU_RESERVED_SIZE;
            part->surface = true;
            mAllocatorDebug = static_cast<SimpleBestFitAllocator*>(
                    surfaceHeap->getAllocator().get());
            mAllocator = new MemoryDealer(
                    part->getClientHeap(),
                    surfaceHeap->getAllocator());
        }
    }
}
| 421 | |
// Takes the GPU away from the current owner. Must be called with mLock
// held.
void GPUHardware::releaseLocked(bool dispose)
{
    /*
     * if dispose is set, we will force the destruction of the heap,
     * so it is given back to other systems, such as camera.
     * Otherwise, we'll keep a weak pointer to it, this way we might be able
     * to reuse it later if it's still around.
     */
    //LOGD("revoking gpu from pid %d", mOwner);
    mOwner = NO_OWNER;
    mAllocator.clear();
    mCallback.clear();

    /* if we're asked for a full revoke, dispose only of the heap
     * we're not using for surface (as we might need it while drawing) */
    mHeap0.release(mHeap0.surface ? false : dispose);
    mHeap1.release(mHeap1.surface ? false : dispose);
    mHeapR.release(false);
}
| 441 | |
| 442 | // ---------------------------------------------------------------------------- |
| 443 | // for debugging / testing ... |
| 444 | |
| 445 | sp<SimpleBestFitAllocator> GPUHardware::getAllocator() const { |
| 446 | Mutex::Autolock _l(mLock); |
| 447 | sp<SimpleBestFitAllocator> allocator = mAllocatorDebug.promote(); |
| 448 | return allocator; |
| 449 | } |
| 450 | |
// Takes the GPU back immediately, without asking the owner first.
void GPUHardware::unconditionalRevoke()
{
    Mutex::Autolock _l(mLock);
    releaseLocked();
}
| 456 | |
| 457 | // --------------------------------------------------------------------------- |
| 458 | |
| 459 | |
// A GPUPart starts empty: not used for surfaces, nothing reserved.
GPUHardware::GPUPart::GPUPart()
    : surface(false), reserved(0)
{
}
| 464 | |
GPUHardware::GPUPart::~GPUPart() {
    // sp<>/wp<> members release their references automatically.
}
| 467 | |
// Read-only access to the underlying pmem heap (may be 0).
const sp<PMemHeapInterface>& GPUHardware::GPUPart::getHeap() const {
    return mHeap;
}
| 471 | |
// Read-only access to the per-client heap created by set()/promote().
const sp<MemoryHeapPmem>& GPUHardware::GPUPart::getClientHeap() const {
    return mClientHeap;
}
| 475 | |
| 476 | bool GPUHardware::GPUPart::isValid() const { |
| 477 | return ((mHeap!=0) && (mHeap->base() != MAP_FAILED)); |
| 478 | } |
| 479 | |
// Drops every reference this part holds (strong, weak, client mapping)
// and resets the "used for surfaces" flag.
void GPUHardware::GPUPart::clear()
{
    mHeap.clear();
    mHeapWeak.clear();
    mClientHeap.clear();
    surface = false;
}
| 487 | |
| 488 | void GPUHardware::GPUPart::set(const sp<PMemHeapInterface>& heap) |
| 489 | { |
| 490 | mHeapWeak.clear(); |
| 491 | if (heap!=0 && heap->base() == MAP_FAILED) { |
| 492 | mHeap.clear(); |
| 493 | mClientHeap.clear(); |
| 494 | } else { |
| 495 | mHeap = heap; |
| 496 | mClientHeap = mHeap->createClientHeap(); |
| 497 | } |
| 498 | } |
| 499 | |
// Tries to resurrect the heap from the weak reference kept by
// release(false). On success a fresh client heap is created (after
// revoking any stale one); on failure the surface flag is reset.
// Returns whether a heap is now held.
bool GPUHardware::GPUPart::promote()
{
    //LOGD("mHeapWeak=%p, mHeap=%p", mHeapWeak.unsafe_get(), mHeap.get());
    if (mHeap == 0) {
        mHeap = mHeapWeak.promote();
    }
    if (mHeap != 0) {
        if (mClientHeap != 0) {
            mClientHeap->revoke();
        }
        mClientHeap = mHeap->createClientHeap();
    } else {
        surface = false;
    }
    return mHeap != 0;
}
| 516 | |
// Maps the whole part through the client heap, optionally zeroing it.
// NOTE(review): assumes mHeap != 0 whenever mClientHeap != 0 (true for
// set()/promote()) — mHeap->virtualSize() would crash otherwise.
sp<IMemory> GPUHardware::GPUPart::map(bool clear)
{
    sp<IMemory> memory;
    if (mClientHeap != NULL) {
        memory = mClientHeap->mapMemory(0, mHeap->virtualSize());
        if (clear && memory!=0) {
            //StopWatch sw("memset");
            memset(memory->pointer(), 0, memory->size());
        }
    }
    return memory;
}
| 529 | |
// Revokes the client mapping; then either destroys the heap outright
// (dispose) or demotes the strong reference to a weak one so the heap
// can be reused later if nothing else freed it in the meantime.
void GPUHardware::GPUPart::release(bool dispose)
{
    if (mClientHeap != 0) {
        mClientHeap->revoke();
        mClientHeap.clear();
    }
    if (dispose) {
        if (mHeapWeak!=0 && mHeap==0) {
            mHeap = mHeapWeak.promote();
        }
        if (mHeap != 0) {
            mHeap->dispose();
            mHeapWeak.clear();
            mHeap.clear();
        } else {
            // NOTE(review): resetting 'surface' only when the heap is
            // already gone looks asymmetric — confirm this is intended.
            surface = false;
        }
    } else {
        if (mHeap != 0) {
            mHeapWeak = mHeap;
            mHeap.clear();
        }
    }
}
| 554 | |
| 555 | // --------------------------------------------------------------------------- |
| 556 | }; // namespace android |
| 557 | |