Code drop from //branches/cupcake/...@124589
diff --git a/libs/surfaceflinger/GPUHardware/GPUHardware.cpp b/libs/surfaceflinger/GPUHardware/GPUHardware.cpp
index b24a0f2..eb75f99 100644
--- a/libs/surfaceflinger/GPUHardware/GPUHardware.cpp
+++ b/libs/surfaceflinger/GPUHardware/GPUHardware.cpp
@@ -30,6 +30,7 @@
 #include <cutils/log.h>
 #include <cutils/properties.h>
 
+#include <utils/IBinder.h>
 #include <utils/MemoryDealer.h>
 #include <utils/MemoryBase.h>
 #include <utils/MemoryHeapPmem.h>
@@ -48,36 +49,113 @@
 
 #include "GPUHardware/GPUHardware.h"
 
+
 /* 
- * This file manages the GPU if there is one. The intent is that this code
- * needs to be different for every devce. Currently there is no abstraction,
- * but in the long term, this code needs to be refactored so that API and
- * implementation are separated.
+ * Manage the GPU. This implementation is very specific to the G1.
+ * There is no abstraction here.
  * 
- * In this particular implementation, the GPU, its memory and register are
- * managed here. Clients (such as OpenGL ES) request the GPU when then need
- * it and are given a revokable heap containing the registers on memory. 
+ * All this code will soon go away and be replaced by a new architecture
+ * for managing graphics accelerators.
+ * 
+ * In the meantime, it is conceptually possible to instantiate a
+ * GPUHardwareInterface for another GPU (see GPUFactory at the bottom
+ * of this file); in practice, though, that is doubtful.
  * 
  */
 
 namespace android {
+
 // ---------------------------------------------------------------------------
 
+class GPUClientHeap;
+class GPUAreaHeap;
+
+class GPUHardware : public GPUHardwareInterface, public IBinder::DeathRecipient
+{
+public:
+    static const int GPU_RESERVED_SIZE;
+    static const int GPUR_SIZE;
+
+            GPUHardware();
+    virtual ~GPUHardware();
+    
+    virtual void revoke(int pid);
+    virtual sp<MemoryDealer> request(int pid);
+    virtual status_t request(int pid, 
+            const sp<IGPUCallback>& callback,
+            ISurfaceComposer::gpu_info_t* gpu);
+
+    virtual status_t friendlyRevoke();
+    virtual void unconditionalRevoke();
+    
+    virtual pid_t getOwner() const { return mOwner; }
+
+    // used for debugging only...
+    virtual sp<SimpleBestFitAllocator> getAllocator() const;
+
+private:
+    
+    
+    enum {
+        NO_OWNER = -1,
+    };
+        
+    struct GPUArea {
+        sp<GPUAreaHeap>     heap;
+        sp<MemoryHeapPmem>  clientHeap;
+        sp<IMemory> map();
+    };
+    
+    struct Client {
+        pid_t       pid;
+        GPUArea     smi;
+        GPUArea     ebi;
+        GPUArea     reg;
+        void createClientHeaps();
+        void revokeAllHeaps();
+    };
+    
+    Client& getClientLocked(pid_t pid);
+    status_t requestLocked(int pid);
+    void releaseLocked();
+    void takeBackGPULocked();
+    void registerCallbackLocked(const sp<IGPUCallback>& callback,
+            Client& client);
+
+    virtual void binderDied(const wp<IBinder>& who);
+
+    mutable Mutex           mLock;
+    sp<GPUAreaHeap>         mSMIHeap;
+    sp<GPUAreaHeap>         mEBIHeap;
+    sp<GPUAreaHeap>         mREGHeap;
+
+    KeyedVector<pid_t, Client> mClients;
+    DefaultKeyedVector< wp<IBinder>, pid_t > mRegisteredClients;
+    
+    pid_t                   mOwner;
+
+    sp<MemoryDealer>        mCurrentAllocator;
+    sp<IGPUCallback>        mCallback;
+    
+    sp<SimpleBestFitAllocator>  mAllocator;
+
+    Condition               mCondition;
+};
+
 // size reserved for GPU surfaces
 // 1200 KB fits exactly:
 //  - two 320*480 16-bits double-buffered surfaces
 //  - one 320*480 32-bits double-buffered surface
-//  - one 320*240 16-bits double-bufferd, 4x anti-aliased surface
-static const int GPU_RESERVED_SIZE  = 1200 * 1024;
-
-static const int GPUR_SIZE          = 1 * 1024 * 1024;
+//  - one 320*240 16-bits double-buffered, 4x anti-aliased surface
+const int GPUHardware::GPU_RESERVED_SIZE  = 1200 * 1024;
+const int GPUHardware::GPUR_SIZE          = 1 * 1024 * 1024;
 
 // ---------------------------------------------------------------------------
 
 /* 
  * GPUHandle is a special IMemory given to the client. It represents their
 * handle to the GPU. Once they give it up, they lose GPU access, or if
- * they explicitely revoke their acces through the binder code 1000.
+ * they explicitly revoke their access through the binder code 1000.
  * In both cases, this triggers a callback to revoke()
  * first, and then actually powers down the chip.
  * 
@@ -92,42 +170,99 @@
  * 
  */
 
-class GPUHandle : public BnMemory
+class GPUClientHeap : public MemoryHeapPmem
 {
 public:
-            GPUHandle(const sp<GPUHardware>& gpu, const sp<IMemoryHeap>& heap)
-                : mGPU(gpu), mClientHeap(heap) {
-            }
-    virtual ~GPUHandle();
-    virtual sp<IMemoryHeap> getMemory(ssize_t* offset, size_t* size) const;
-    virtual status_t onTransact(
-            uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
-    void setOwner(int owner) { mOwner = owner; }
-private:
-    void revokeNotification();
+    GPUClientHeap(const wp<GPUHardware>& gpu, 
+            const sp<MemoryHeapBase>& heap)
+        :  MemoryHeapPmem(heap), mGPU(gpu) { }
+protected:
     wp<GPUHardware> mGPU;
-    sp<IMemoryHeap> mClientHeap;
-    int mOwner;
 };
 
-GPUHandle::~GPUHandle() { 
+class GPUAreaHeap : public MemoryHeapBase
+{
+public:
+    GPUAreaHeap(const wp<GPUHardware>& gpu,
+            const char* const vram, size_t size=0, size_t reserved=0)
+    : MemoryHeapBase(vram, size), mGPU(gpu) { 
+        if (base() != MAP_FAILED) {
+            if (reserved == 0)
+                reserved = virtualSize();
+            mAllocator = new SimpleBestFitAllocator(reserved);
+        }
+    }
+    virtual sp<MemoryHeapPmem> createClientHeap() {
+        sp<MemoryHeapBase> parentHeap(this);
+        return new GPUClientHeap(mGPU, parentHeap);
+    }
+    virtual const sp<SimpleBestFitAllocator>& getAllocator() const {
+        return mAllocator; 
+    }
+private:
+    sp<SimpleBestFitAllocator>  mAllocator;
+protected:
+    wp<GPUHardware> mGPU;
+};
+
+class GPURegisterHeap : public GPUAreaHeap
+{
+public:
+    GPURegisterHeap(const sp<GPUHardware>& gpu)
+        : GPUAreaHeap(gpu, "/dev/hw3d", GPUHardware::GPUR_SIZE) { }
+    virtual sp<MemoryHeapPmem> createClientHeap() {
+        sp<MemoryHeapBase> parentHeap(this);
+        return new MemoryHeapRegs(mGPU, parentHeap);
+    }
+private:
+    class MemoryHeapRegs : public GPUClientHeap  {
+    public:
+        MemoryHeapRegs(const wp<GPUHardware>& gpu, 
+             const sp<MemoryHeapBase>& heap)
+            : GPUClientHeap(gpu, heap) { }
+        sp<MemoryHeapPmem::MemoryPmem> createMemory(size_t offset, size_t size);
+        virtual void revoke();
+    private:
+        class GPUHandle : public MemoryHeapPmem::MemoryPmem {
+        public:
+            GPUHandle(const sp<GPUHardware>& gpu,
+                    const sp<MemoryHeapPmem>& heap)
+                : MemoryHeapPmem::MemoryPmem(heap), 
+                  mGPU(gpu), mOwner(gpu->getOwner()) { }
+            virtual ~GPUHandle();
+            virtual sp<IMemoryHeap> getMemory(
+                    ssize_t* offset, size_t* size) const;
+            virtual void revoke() { };
+            virtual status_t onTransact(
+                    uint32_t code, const Parcel& data, 
+                    Parcel* reply, uint32_t flags);
+        private:
+            void revokeNotification();
+            wp<GPUHardware> mGPU;
+            pid_t mOwner;
+        };
+    };
+};
+
+GPURegisterHeap::MemoryHeapRegs::GPUHandle::~GPUHandle() { 
     //LOGD("GPUHandle %p released, revoking GPU", this);
     revokeNotification(); 
 }
-
-void GPUHandle::revokeNotification()  {
+void GPURegisterHeap::MemoryHeapRegs::GPUHandle::revokeNotification()  {
     sp<GPUHardware> hw(mGPU.promote());
     if (hw != 0) {
         hw->revoke(mOwner);
     }
 }
-sp<IMemoryHeap> GPUHandle::getMemory(ssize_t* offset, size_t* size) const
+sp<IMemoryHeap> GPURegisterHeap::MemoryHeapRegs::GPUHandle::getMemory(
+        ssize_t* offset, size_t* size) const
 {
+    sp<MemoryHeapPmem> heap = getHeap();
     if (offset) *offset = 0;
-    if (size)   *size = mClientHeap !=0 ? mClientHeap->virtualSize() : 0;
-    return mClientHeap;
+    if (size)   *size = heap !=0 ? heap->virtualSize() : 0;
+    return heap;
 }
-status_t GPUHandle::onTransact(
+status_t GPURegisterHeap::MemoryHeapRegs::GPUHandle::onTransact(
         uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
     status_t err = BnMemory::onTransact(code, data, reply, flags);
@@ -150,22 +285,14 @@
 
 // ---------------------------------------------------------------------------
 
-class MemoryHeapRegs : public MemoryHeapPmem 
-{
-public:
-            MemoryHeapRegs(const wp<GPUHardware>& gpu, const sp<MemoryHeapBase>& heap);
-    virtual ~MemoryHeapRegs();
-    sp<IMemory> mapMemory(size_t offset, size_t size);
-    virtual void revoke();
-private:
-    wp<GPUHardware> mGPU;
-};
 
-MemoryHeapRegs::MemoryHeapRegs(const wp<GPUHardware>& gpu, const sp<MemoryHeapBase>& heap)
-    :  MemoryHeapPmem(heap), mGPU(gpu)
+sp<MemoryHeapPmem::MemoryPmem> GPURegisterHeap::MemoryHeapRegs::createMemory(
+        size_t offset, size_t size)
 {
+    sp<GPUHandle> memory;
+    sp<GPUHardware> gpu = mGPU.promote();
+    if (heapID()>0 && gpu!=0) {
 #if HAVE_ANDROID_OS
-    if (heapID()>0) {
         /* this is where the GPU is powered on and the registers are mapped
          * in the client */
         //LOGD("ioctl(HW3D_GRANT_GPU)");
@@ -174,27 +301,16 @@
             // it can happen if the master heap has been closed already
             // in which case the GPU already is revoked (app crash for
             // instance).
-            //LOGW("HW3D_GRANT_GPU failed (%s), mFD=%d, base=%p",
-            //        strerror(errno), heapID(), base());
+            LOGW("HW3D_GRANT_GPU failed (%s), mFD=%d, base=%p",
+                    strerror(errno), heapID(), base());
         }
-    }
-#endif
-}
-
-MemoryHeapRegs::~MemoryHeapRegs() 
-{
-}
-
-sp<IMemory> MemoryHeapRegs::mapMemory(size_t offset, size_t size)
-{
-    sp<GPUHandle> memory;
-    sp<GPUHardware> gpu = mGPU.promote();
-    if (heapID()>0 && gpu!=0) 
         memory = new GPUHandle(gpu, this);
+#endif
+    }
     return memory;
 }
 
-void MemoryHeapRegs::revoke() 
+void GPURegisterHeap::MemoryHeapRegs::revoke() 
 {
     MemoryHeapPmem::revoke();
 #if HAVE_ANDROID_OS
@@ -207,25 +323,6 @@
 #endif
 }
 
-// ---------------------------------------------------------------------------
-
-class GPURegisterHeap : public PMemHeapInterface
-{
-public:
-    GPURegisterHeap(const sp<GPUHardware>& gpu)
-        : PMemHeapInterface("/dev/hw3d", GPUR_SIZE), mGPU(gpu)
-    {
-    }
-    virtual ~GPURegisterHeap() {
-    }
-    virtual sp<MemoryHeapPmem> createClientHeap() {
-        sp<MemoryHeapBase> parentHeap(this);
-        return new MemoryHeapRegs(mGPU, parentHeap);
-    }
-private:
-    wp<GPUHardware> mGPU;
-};
-
 /*****************************************************************************/
 
 GPUHardware::GPUHardware()
@@ -237,85 +334,87 @@
 {
 }
 
-sp<MemoryDealer> GPUHardware::request(int pid)
+status_t GPUHardware::requestLocked(int pid)
 {
-    sp<MemoryDealer> dealer;
-
-    LOGD("pid %d requesting gpu surface (current owner = %d)", pid, mOwner);
-
     const int self_pid = getpid();
     if (pid == self_pid) {
         // can't use GPU from surfaceflinger's process
-        return dealer;
-    }
-
-    Mutex::Autolock _l(mLock);
-    
-    if (mOwner != pid) {
-        // someone already has the gpu.
-        takeBackGPULocked();
-
-        // releaseLocked() should be a no-op most of the time
-        releaseLocked();
-
-        requestLocked(); 
-    }
-
-    dealer = mAllocator;
-    mOwner = pid;
-    if (dealer == 0) {
-        mOwner = SURFACE_FAILED;
-    }
-    
-    LOGD_IF(dealer!=0, "gpu surface granted to pid %d", mOwner);
-    return dealer;
-}
-
-status_t GPUHardware::request(const sp<IGPUCallback>& callback,
-        ISurfaceComposer::gpu_info_t* gpu)
-{
-    sp<IMemory> gpuHandle;
-    IPCThreadState* ipc = IPCThreadState::self();
-    const int pid = ipc->getCallingPid();
-    const int self_pid = getpid();
-
-    LOGD("pid %d requesting gpu core (owner = %d)", pid, mOwner);
-
-    if (pid == self_pid) {
-        // can't use GPU from surfaceflinger's process
         return PERMISSION_DENIED;
     }
 
-    Mutex::Autolock _l(mLock);
     if (mOwner != pid) {
-        // someone already has the gpu.
-        takeBackGPULocked();
-
-        // releaseLocked() should be a no-op most of the time
-        releaseLocked();
-
-        requestLocked(); 
+        if (mREGHeap != 0) {
+            if (mOwner != NO_OWNER) {
+                // someone already has the gpu.
+                takeBackGPULocked();
+                releaseLocked();
+            }
+        } else {
+            // first time through: initialize the heaps and the allocator.
+            if (mSMIHeap == 0)
+                mSMIHeap = new GPUAreaHeap(this, "/dev/pmem_gpu0");
+            if (mEBIHeap == 0)
+                mEBIHeap = new GPUAreaHeap(this, 
+                        "/dev/pmem_gpu1", 0, GPU_RESERVED_SIZE);
+            mREGHeap = new GPURegisterHeap(this);
+            mAllocator = mEBIHeap->getAllocator();
+            if (mAllocator == NULL) {
+                // something went terribly wrong.
+                mSMIHeap.clear();
+                mEBIHeap.clear();
+                mREGHeap.clear();
+                return INVALID_OPERATION;
+            }
+        }
+        Client& client = getClientLocked(pid);
+        mCurrentAllocator = new MemoryDealer(client.ebi.clientHeap, mAllocator);
+        mOwner = pid;
     }
+    return NO_ERROR;
+}
 
-    if (mHeapR.isValid()) {
+sp<MemoryDealer> GPUHardware::request(int pid)
+{
+    sp<MemoryDealer> dealer;
+    Mutex::Autolock _l(mLock);
+    Client* client;
+    LOGD("pid %d requesting gpu surface (current owner = %d)", pid, mOwner);
+    if (requestLocked(pid) == NO_ERROR) {
+        dealer = mCurrentAllocator;
+        LOGD_IF(dealer!=0, "gpu surface granted to pid %d", mOwner);
+    }
+    return dealer;
+}
+
+status_t GPUHardware::request(int pid, const sp<IGPUCallback>& callback,
+        ISurfaceComposer::gpu_info_t* gpu)
+{
+    if (callback == 0)
+        return BAD_VALUE;
+
+    sp<IMemory> gpuHandle;
+    LOGD("pid %d requesting gpu core (owner = %d)", pid, mOwner);
+    Mutex::Autolock _l(mLock);
+    status_t err = requestLocked(pid);
+    if (err == NO_ERROR) {
+        // it's guaranteed to be there, by construction
+        Client& client = mClients.editValueFor(pid);
+        registerCallbackLocked(callback, client);
         gpu->count = 2;
-        gpu->regions[0].region = mHeap0.map(true);
-        gpu->regions[0].reserved = mHeap0.reserved;
-        gpu->regions[1].region = mHeap1.map(true);
-        gpu->regions[1].reserved = mHeap1.reserved;
-        gpu->regs = mHeapR.map();
+        gpu->regions[0].region = client.smi.map();
+        gpu->regions[1].region = client.ebi.map();
+        gpu->regs              = client.reg.map();
+        gpu->regions[0].reserved = 0;
+        gpu->regions[1].reserved = GPU_RESERVED_SIZE;
         if (gpu->regs != 0) {
-            static_cast< GPUHandle* >(gpu->regs.get())->setOwner(pid);
+            //LOGD("gpu core granted to pid %d, handle base=%p",
+            //        mOwner, gpu->regs->pointer());
         }
         mCallback = callback;
-        mOwner = pid;
-        //LOGD("gpu core granted to pid %d, handle base=%p",
-        //        mOwner, gpu->regs->pointer());
     } else {
         LOGW("couldn't grant gpu core to pid %d", pid);
     }
-
-    return NO_ERROR;
+    return err;
 }
 
 void GPUHardware::revoke(int pid)
@@ -330,16 +429,16 @@
         // mOwner could be <0 if the same process acquired the GPU
         // several times without releasing it first.
         mCondition.signal();
-        releaseLocked(true);
+        releaseLocked();
     }
 }
 
 status_t GPUHardware::friendlyRevoke()
 {
     Mutex::Autolock _l(mLock);
-    takeBackGPULocked();
     //LOGD("friendlyRevoke owner=%d", mOwner);
-    releaseLocked(true);
+    takeBackGPULocked();
+    releaseLocked();
     return NO_ERROR;
 }
 
@@ -353,90 +452,37 @@
     }
 }
 
-void GPUHardware::requestLocked()
+void GPUHardware::releaseLocked()
 {
-    if (mAllocator == 0) {
-        GPUPart* part = 0;
-        sp<PMemHeap> surfaceHeap;
-        if (mHeap1.promote() == false) {
-            //LOGD("requestLocked: (1) creating new heap");
-            mHeap1.set(new PMemHeap("/dev/pmem_gpu1", 0, GPU_RESERVED_SIZE));
+    //LOGD("revoking gpu from pid %d", mOwner);
+    if (mOwner != NO_OWNER) {
+        // this may fail because the client might have died, and have
+        // been removed from the list.
+        ssize_t index = mClients.indexOfKey(mOwner);
+        if (index >= 0) {
+            Client& client(mClients.editValueAt(index));
+            client.revokeAllHeaps();
         }
-        if (mHeap1.isValid()) {
-            //LOGD("requestLocked: (1) heap is valid");
-            // NOTE: if GPU1 is available we use it for our surfaces
-            // this could be device specific, so we should do something more
-            // generic
-            surfaceHeap = static_cast< PMemHeap* >( mHeap1.getHeap().get() );
-            part = &mHeap1;
-            if (mHeap0.promote() == false) {
-                //LOGD("requestLocked: (0) creating new heap");
-                mHeap0.set(new PMemHeap("/dev/pmem_gpu0"));
-            }
-        } else {
-            //LOGD("requestLocked: (1) heap is not valid");
-            // No GPU1, use GPU0 only
-            if (mHeap0.promote() == false) {
-                //LOGD("requestLocked: (0) creating new heap");
-                mHeap0.set(new PMemHeap("/dev/pmem_gpu0", 0, GPU_RESERVED_SIZE));
-            }
-            if (mHeap0.isValid()) {
-                //LOGD("requestLocked: (0) heap is valid");
-                surfaceHeap = static_cast< PMemHeap* >( mHeap0.getHeap().get() );
-                part = &mHeap0;
-            }
-        }
-        
-        if (mHeap0.isValid() || mHeap1.isValid()) {
-            if (mHeapR.promote() == false) {
-                //LOGD("requestLocked: (R) creating new register heap");
-                mHeapR.set(new GPURegisterHeap(this));
-            }
-        } else {
-            // we got nothing...
-            mHeap0.clear();
-            mHeap1.clear();
-        }
-
-        if (mHeapR.isValid() == false) {
-            //LOGD("requestLocked: (R) register heap not valid!!!");
-            // damn, couldn't get the gpu registers!
-            mHeap0.clear();
-            mHeap1.clear();
-            surfaceHeap.clear();
-            part = NULL;
-        }
-
-        if (surfaceHeap != 0 && part && part->getClientHeap()!=0) {
-            part->reserved = GPU_RESERVED_SIZE;
-            part->surface = true;
-            mAllocatorDebug = static_cast<SimpleBestFitAllocator*>(
-                    surfaceHeap->getAllocator().get());
-            mAllocator = new MemoryDealer(
-                    part->getClientHeap(),
-                    surfaceHeap->getAllocator());
-        }
+        mOwner = NO_OWNER;
+        mCurrentAllocator.clear();
+        mCallback.clear();
     }
 }
 
-void GPUHardware::releaseLocked(bool dispose)
+GPUHardware::Client& GPUHardware::getClientLocked(pid_t pid)
 {
-    /* 
-     * if dispose is set, we will force the destruction of the heap,
-     * so it is given back to other systems, such as camera.
-     * Otherwise, we'll keep a weak pointer to it, this way we might be able
-     * to reuse it later if it's still around. 
-     */
-    //LOGD("revoking gpu from pid %d", mOwner);
-    mOwner = NO_OWNER;
-    mAllocator.clear();
-    mCallback.clear();
-
-    /* if we're asked for a full revoke, dispose only of the heap
-     * we're not using for surface (as we might need it while drawing) */
-    mHeap0.release(mHeap0.surface ? false : dispose);
-    mHeap1.release(mHeap1.surface ? false : dispose);
-    mHeapR.release(false);
+    ssize_t index = mClients.indexOfKey(pid);
+    if (index < 0) {
+        Client client;
+        client.pid = pid;
+        client.smi.heap = mSMIHeap;
+        client.ebi.heap = mEBIHeap;
+        client.reg.heap = mREGHeap;
+        index = mClients.add(pid, client);
+    }
+    Client& client(mClients.editValueAt(index));
+    client.createClientHeaps();
+    return client;
 }
 
 // ----------------------------------------------------------------------------
@@ -444,8 +490,7 @@
 
 sp<SimpleBestFitAllocator> GPUHardware::getAllocator() const {
     Mutex::Autolock _l(mLock);
-    sp<SimpleBestFitAllocator> allocator = mAllocatorDebug.promote();
-    return allocator;
+    return mAllocator;
 }
 
 void GPUHardware::unconditionalRevoke()
@@ -456,102 +501,81 @@
 
 // ---------------------------------------------------------------------------
 
-
-GPUHardware::GPUPart::GPUPart()
-    : surface(false), reserved(0)
-{
-}
-
-GPUHardware::GPUPart::~GPUPart() {
-}
-    
-const sp<PMemHeapInterface>& GPUHardware::GPUPart::getHeap() const {
-    return mHeap;
-}
-
-const sp<MemoryHeapPmem>& GPUHardware::GPUPart::getClientHeap() const {
-    return mClientHeap;
-}
-
-bool GPUHardware::GPUPart::isValid() const {
-    return ((mHeap!=0) && (mHeap->base() != MAP_FAILED));
-}
-
-void GPUHardware::GPUPart::clear() 
-{
-    mHeap.clear();
-    mHeapWeak.clear();
-    mClientHeap.clear();
-    surface = false;
-}
-
-void GPUHardware::GPUPart::set(const sp<PMemHeapInterface>& heap) 
-{
-    mHeapWeak.clear();
-    if (heap!=0 && heap->base() == MAP_FAILED) {
-        mHeap.clear();
-        mClientHeap.clear();
-    } else { 
-        mHeap = heap;
-        mClientHeap = mHeap->createClientHeap();
-    }
-}
-
-bool GPUHardware::GPUPart::promote() 
-{
-    //LOGD("mHeapWeak=%p, mHeap=%p", mHeapWeak.unsafe_get(), mHeap.get());
-    if (mHeap == 0) {
-        mHeap = mHeapWeak.promote();
-    }
-    if (mHeap != 0) {
-        if (mClientHeap != 0) {
-            mClientHeap->revoke();
-        }
-        mClientHeap = mHeap->createClientHeap();
-    }  else {
-        surface = false;
-    }
-    return mHeap != 0;
-}
-
-sp<IMemory> GPUHardware::GPUPart::map(bool clear) 
-{
+sp<IMemory> GPUHardware::GPUArea::map() {
     sp<IMemory> memory;
-    if (mClientHeap != NULL) {
-        memory = mClientHeap->mapMemory(0, mHeap->virtualSize());
-        if (clear && memory!=0) {
-            //StopWatch sw("memset");
-            memset(memory->pointer(), 0, memory->size());
-        }
+    if (clientHeap != 0 && heap != 0) {
+        memory = clientHeap->mapMemory(0, heap->virtualSize());
     }
     return memory;
 }
 
-void GPUHardware::GPUPart::release(bool dispose)
+void GPUHardware::Client::createClientHeaps() 
 {
-    if (mClientHeap != 0) {
-        mClientHeap->revoke();
-        mClientHeap.clear();
+    if (smi.clientHeap == 0)
+        smi.clientHeap = smi.heap->createClientHeap();
+    if (ebi.clientHeap == 0)
+        ebi.clientHeap = ebi.heap->createClientHeap();
+    if (reg.clientHeap == 0)
+        reg.clientHeap = reg.heap->createClientHeap();
+}
+
+void GPUHardware::Client::revokeAllHeaps() 
+{
+    if (smi.clientHeap != 0)
+        smi.clientHeap->revoke();
+    if (ebi.clientHeap != 0)
+        ebi.clientHeap->revoke();
+    if (reg.clientHeap != 0)
+        reg.clientHeap->revoke();
+}
+
+void GPUHardware::registerCallbackLocked(const sp<IGPUCallback>& callback,
+        Client& client)
+{
+    sp<IBinder> binder = callback->asBinder();
+    if (mRegisteredClients.add(binder, client.pid) >= 0) {
+        binder->linkToDeath(this);
     }
-    if (dispose) {
-        if (mHeapWeak!=0 && mHeap==0) {
-            mHeap = mHeapWeak.promote();
-        }
-        if (mHeap != 0) {
-            mHeap->dispose();
-            mHeapWeak.clear();
-            mHeap.clear();
-        } else {
-            surface = false;
-        }
-    } else {
-        if (mHeap != 0) {
-            mHeapWeak = mHeap;
-            mHeap.clear();
+}
+
+void GPUHardware::binderDied(const wp<IBinder>& who)
+{
+    Mutex::Autolock _l(mLock);
+    pid_t pid = mRegisteredClients.valueFor(who);
+    if (pid != 0) {
+        ssize_t index = mClients.indexOfKey(pid);
+        if (index >= 0) {
+            //LOGD("*** removing client at %d", index);
+            Client& client(mClients.editValueAt(index));
+            client.revokeAllHeaps(); // not really needed in theory
+            mClients.removeItemsAt(index);
+            if (mClients.size() == 0) {
+                //LOGD("*** was last client closing everything");
+                mCallback.clear();
+                mAllocator.clear();
+                mCurrentAllocator.clear();
+                mSMIHeap.clear();
+                mREGHeap.clear();
+                
+                // NOTE: we cannot clear the EBI heap because surfaceflinger
+                // itself may be using it, since this is where surfaces
+                // are allocated. If we're in the middle of compositing
+                // a surface (even if its process just died), we cannot
+                // rip the heap out from under our feet.
+                
+                mOwner = NO_OWNER;
+            }
         }
     }
 }
 
 // ---------------------------------------------------------------------------
+
+sp<GPUHardwareInterface> GPUFactory::getGPU()
+{
+    return new GPUHardware();
+}
+
+// ---------------------------------------------------------------------------
 }; // namespace android
 
diff --git a/libs/surfaceflinger/GPUHardware/GPUHardware.h b/libs/surfaceflinger/GPUHardware/GPUHardware.h
index 9a78b99..3354528 100644
--- a/libs/surfaceflinger/GPUHardware/GPUHardware.h
+++ b/libs/surfaceflinger/GPUHardware/GPUHardware.h
@@ -22,92 +22,39 @@
 
 #include <utils/RefBase.h>
 #include <utils/threads.h>
+#include <utils/KeyedVector.h>
+
+#include <ui/ISurfaceComposer.h>
 
 namespace android {
 
 // ---------------------------------------------------------------------------
 
-class GPUHardwareInterface : public RefBase
+class IGPUCallback;
+
+class GPUHardwareInterface : public virtual RefBase
 {
 public:
     virtual void                revoke(int pid) = 0;
     virtual sp<MemoryDealer>    request(int pid) = 0;
-    virtual status_t            request(const sp<IGPUCallback>& callback,
+    virtual status_t            request(int pid, const sp<IGPUCallback>& callback,
             ISurfaceComposer::gpu_info_t* gpu) = 0;
 
     virtual status_t            friendlyRevoke() = 0;
-    virtual void                unconditionalRevoke() = 0;
     
     // used for debugging only...
     virtual sp<SimpleBestFitAllocator> getAllocator() const  = 0;
     virtual pid_t getOwner() const = 0;
+    virtual void unconditionalRevoke() = 0;
 };
 
 // ---------------------------------------------------------------------------
 
-class IMemory;
-class MemoryHeapPmem;
-class PMemHeap;
-
-class GPUHardware : public GPUHardwareInterface
-{
+class GPUFactory
+{    
 public:
-            GPUHardware();
-    virtual ~GPUHardware();
-    
-    virtual void                revoke(int pid);
-    virtual sp<MemoryDealer>    request(int pid);
-    virtual status_t            request(const sp<IGPUCallback>& callback,
-            ISurfaceComposer::gpu_info_t* gpu);
-
-    virtual status_t            friendlyRevoke();
-    virtual void                unconditionalRevoke();
-    
-    // used for debugging only...
-    virtual sp<SimpleBestFitAllocator> getAllocator() const;
-    virtual pid_t getOwner() const { return mOwner; }
-    
-private:
-    enum {
-        NO_OWNER        = -1,
-        SURFACE_FAILED  = -2
-    };
-    
-    void requestLocked();
-    void releaseLocked(bool dispose = false);
-    void takeBackGPULocked();
-    
-    class GPUPart
-    {
-    public:
-        bool surface;
-        size_t reserved;
-        GPUPart();
-        ~GPUPart();
-        const sp<PMemHeapInterface>& getHeap() const;
-        const sp<MemoryHeapPmem>& getClientHeap() const;
-        bool isValid() const;
-        void clear();
-        void set(const sp<PMemHeapInterface>& heap);
-        bool promote();
-        sp<IMemory> map(bool clear = false);
-        void release(bool dispose);
-    private:
-        sp<PMemHeapInterface>   mHeap;
-        wp<PMemHeapInterface>   mHeapWeak;
-        sp<MemoryHeapPmem>      mClientHeap;
-    };
-    
-    mutable Mutex   mLock;
-    GPUPart         mHeap0; // SMI
-    GPUPart         mHeap1; // EBI1
-    GPUPart         mHeapR;
-    sp<MemoryDealer> mAllocator;
-    pid_t            mOwner;
-    sp<IGPUCallback> mCallback;
-    wp<SimpleBestFitAllocator> mAllocatorDebug;
-    
-    Condition       mCondition;
+    // the gpu factory
+    static sp<GPUHardwareInterface> getGPU();
 };
 
 // ---------------------------------------------------------------------------