/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of The Linux Foundation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cutils/log.h>
#include <fcntl.h>
#include <dlfcn.h>
#include "gralloc_priv.h"
#include "alloc_controller.h"
#include "memalloc.h"
#include "ionalloc.h"
#include "gr.h"
#include "comptype.h"

#ifdef VENUS_COLOR_FORMAT
#include <media/msm_media_info.h>
#else
#define VENUS_Y_STRIDE(args...) 0
#define VENUS_Y_SCANLINES(args...) 0
#define VENUS_BUFFER_SIZE(args...) 0
#endif

using namespace gralloc;
using namespace qdutils;

ANDROID_SINGLETON_STATIC_INSTANCE(AdrenoMemInfo);

//Common functions
static bool canFallback(int usage, bool triedSystem)
{
    // Fallback to system heap when alloc fails unless
    // 1. Composition type is MDP
    // 2. Alloc from system heap was already tried
    // 3. The heap type is requested explicitly
    // 4. The heap type is protected
    // 5. The buffer is meant for external display only

    if(QCCompositionType::getInstance().getCompositionType() &
       COMPOSITION_TYPE_MDP)
        return false;
    if(triedSystem)
        return false;
    if(usage & (GRALLOC_HEAP_MASK | GRALLOC_USAGE_PROTECTED))
        return false;
    if(usage & (GRALLOC_HEAP_MASK | GRALLOC_USAGE_PRIVATE_EXTERNAL_ONLY))
        return false;
    //Return true by default
    return true;
}

static bool useUncached(int usage)
{
    // System heaps cannot be uncached
    if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP)
        return false;
    if (usage & GRALLOC_USAGE_PRIVATE_UNCACHED)
        return true;
    return false;
}

//-------------- AdrenoMemInfo-----------------------//
AdrenoMemInfo::AdrenoMemInfo()
{
    libadreno_utils = ::dlopen("libadreno_utils.so", RTLD_NOW);
    if (libadreno_utils) {
        *(void **)&LINK_adreno_compute_padding = ::dlsym(libadreno_utils,
                                                 "compute_surface_padding");
    }
}

AdrenoMemInfo::~AdrenoMemInfo()
{
    if (libadreno_utils) {
        ::dlclose(libadreno_utils);
    }
}

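// Note on the runtime link above: LINK_adreno_compute_padding is resolved
// from libadreno_utils.so at load time. getStride() below only calls it for
// RGB formats, passing (stride, bpp, surface_tile_height, raster_mode,
// padding_threshold) and using the returned value as the padded stride; when
// the library or symbol is absent, the plain ALIGN(width, 32) stride is kept.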
int AdrenoMemInfo::getStride(int width, int format)
{
    int stride = ALIGN(width, 32);
    // Currently surface padding is only computed for RGB* surfaces.
    if (format < 0x7) {
        int bpp = 4;
        switch(format)
        {
            case HAL_PIXEL_FORMAT_RGB_888:
                bpp = 3;
                break;
            case HAL_PIXEL_FORMAT_RGB_565:
            case HAL_PIXEL_FORMAT_RGBA_5551:
            case HAL_PIXEL_FORMAT_RGBA_4444:
                bpp = 2;
                break;
            default: break;
        }
        if ((libadreno_utils) && (LINK_adreno_compute_padding)) {
            int surface_tile_height = 1;   // Linear surface
            int raster_mode = 1;           // Adreno TW raster mode.
            int padding_threshold = 512;   // Threshold for padding surfaces.
            // The function below expects the width to be a multiple of
            // 32 pixels, hence we pass stride instead of width.
            stride = LINK_adreno_compute_padding(stride, bpp,
                                                 surface_tile_height, raster_mode,
                                                 padding_threshold);
        }
    } else {
        switch (format)
        {
            case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:
                stride = ALIGN(width, 32);
                break;
            case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
                stride = ALIGN(width, 128);
                break;
            case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
            case HAL_PIXEL_FORMAT_YCbCr_420_SP:
            case HAL_PIXEL_FORMAT_YCrCb_420_SP:
            case HAL_PIXEL_FORMAT_YV12:
            case HAL_PIXEL_FORMAT_YCbCr_422_SP:
            case HAL_PIXEL_FORMAT_YCrCb_422_SP:
                stride = ALIGN(width, 16);
                break;
            case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
                stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, width);
                break;
            default: break;
        }
    }
    return stride;
}
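// Illustrative stride values implied by the alignments above:
//   getStride(1280, HAL_PIXEL_FORMAT_YCrCb_420_SP)       -> ALIGN(1280, 16)  = 1280
//   getStride(1080, HAL_PIXEL_FORMAT_YCrCb_420_SP)       -> ALIGN(1080, 16)  = 1088
//   getStride(1080, HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED) -> ALIGN(1080, 128) = 1152
// For RGB formats the initial ALIGN(width, 32) value may be padded further
// by the Adreno driver when libadreno_utils.so is loaded.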

//-------------- IAllocController-----------------------//
IAllocController* IAllocController::sController = NULL;
IAllocController* IAllocController::getInstance(void)
{
    if(sController == NULL) {
        sController = new IonController();
    }
    return sController;
}


//-------------- IonController-----------------------//
IonController::IonController()
{
    mIonAlloc = new IonAlloc();
}

int IonController::allocate(alloc_data& data, int usage)
{
    int ionFlags = 0;
    int ret;

    data.uncached = useUncached(usage);
    data.allocType = 0;

    if(usage & GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP)
        ionFlags |= ION_HEAP(ION_SF_HEAP_ID);

    if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP)
        ionFlags |= ION_HEAP(ION_SYSTEM_HEAP_ID);

    if(usage & GRALLOC_USAGE_PRIVATE_IOMMU_HEAP)
        ionFlags |= ION_HEAP(ION_IOMMU_HEAP_ID);

    //MM Heap is exclusively a secure heap.
    if(usage & GRALLOC_USAGE_PRIVATE_MM_HEAP) {
        //XXX: Right now the MM heap is the only secure heap we have. When we
        //have other secure heaps, we can change this.
        if(usage & GRALLOC_USAGE_PROTECTED) {
            ionFlags |= ION_HEAP(ION_CP_MM_HEAP_ID);
            ionFlags |= ION_SECURE;
        }
        else {
            ALOGW("GRALLOC_USAGE_PRIVATE_MM_HEAP \
                  cannot be used as an insecure heap!\
                  trying to use IOMMU instead !!");
            ionFlags |= ION_HEAP(ION_IOMMU_HEAP_ID);
        }
    }

    if(usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP)
        ionFlags |= ION_HEAP(ION_ADSP_HEAP_ID);

    if(usage & GRALLOC_USAGE_PROTECTED)
        data.allocType |= private_handle_t::PRIV_FLAGS_SECURE_BUFFER;

    // If no flags are set, default to SF + IOMMU heaps so that bypass
    // can work; we can fall back to the system heap if we run out.
    if(!ionFlags)
        ionFlags = ION_HEAP(ION_SF_HEAP_ID) | ION_HEAP(ION_IOMMU_HEAP_ID);

    data.flags = ionFlags;
    ret = mIonAlloc->alloc_buffer(data);

    // Fallback
    if(ret < 0 && canFallback(usage,
                              (ionFlags & ION_SYSTEM_HEAP_ID)))
    {
        ALOGW("Falling back to system heap");
        data.flags = ION_HEAP(ION_SYSTEM_HEAP_ID);
        ret = mIonAlloc->alloc_buffer(data);
    }

    if(ret >= 0) {
        data.allocType |= private_handle_t::PRIV_FLAGS_USES_ION;
    }

    return ret;
}
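// Examples of how the checks above combine usage bits into ion flags:
//   GRALLOC_USAGE_PRIVATE_MM_HEAP | GRALLOC_USAGE_PROTECTED
//       -> ION_HEAP(ION_CP_MM_HEAP_ID) | ION_SECURE          (secure MM heap)
//   GRALLOC_USAGE_PRIVATE_MM_HEAP without GRALLOC_USAGE_PROTECTED
//       -> ION_HEAP(ION_IOMMU_HEAP_ID)   (MM heap refused for insecure use)
//   no private heap bits set
//       -> ION_HEAP(ION_SF_HEAP_ID) | ION_HEAP(ION_IOMMU_HEAP_ID)  (default)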

IMemAlloc* IonController::getAllocator(int flags)
{
    IMemAlloc* memalloc = NULL;
    if (flags & private_handle_t::PRIV_FLAGS_USES_ION) {
        memalloc = mIonAlloc;
    } else {
        ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
    }

    return memalloc;
}

size_t getBufferSizeAndDimensions(int width, int height, int format,
                                  int& alignedw, int &alignedh)
{
    size_t size;

    alignedw = AdrenoMemInfo::getInstance().getStride(width, format);
    alignedh = ALIGN(height, 32);
    switch (format) {
        case HAL_PIXEL_FORMAT_RGBA_8888:
        case HAL_PIXEL_FORMAT_RGBX_8888:
        case HAL_PIXEL_FORMAT_BGRA_8888:
            size = alignedw * alignedh * 4;
            break;
        case HAL_PIXEL_FORMAT_RGB_888:
            size = alignedw * alignedh * 3;
            break;
        case HAL_PIXEL_FORMAT_RGB_565:
        case HAL_PIXEL_FORMAT_RGBA_5551:
        case HAL_PIXEL_FORMAT_RGBA_4444:
            size = alignedw * alignedh * 2;
            break;

        // adreno formats
        case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:  // NV21
            size = ALIGN(alignedw*alignedh, 4096);
            size += ALIGN(2 * ALIGN(width/2, 32) * ALIGN(height/2, 32), 4096);
            break;
        case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:   // NV12
            // The chroma plane is subsampled,
            // but the pitch in bytes is unchanged
            // The GPU needs 4K alignment, but the video decoder needs 8K
            size = ALIGN(alignedw * alignedh, 8192);
            size += ALIGN(alignedw * ALIGN(height/2, 32), 8192);
            break;
        case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
        case HAL_PIXEL_FORMAT_YCbCr_420_SP:
        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
        case HAL_PIXEL_FORMAT_YV12:
            if ((format == HAL_PIXEL_FORMAT_YV12) && ((width&1) || (height&1))) {
                ALOGE("w or h is odd for the YV12 format");
                return -EINVAL;
            }
            alignedh = height;
            if (HAL_PIXEL_FORMAT_NV12_ENCODEABLE == format) {
                // The encoder requires a 2K aligned chroma offset.
                size = ALIGN(alignedw*alignedh, 2048) +
                       (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
            } else {
                size = alignedw*alignedh +
                       (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
            }
            size = ALIGN(size, 4096);
            break;
        case HAL_PIXEL_FORMAT_YCbCr_422_SP:
        case HAL_PIXEL_FORMAT_YCrCb_422_SP:
            if(width & 1) {
                ALOGE("width is odd for the YUV422_SP format");
                return -EINVAL;
            }
            alignedh = height;
            size = ALIGN(alignedw * alignedh * 2, 4096);
            break;
        case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
            alignedh = VENUS_Y_SCANLINES(COLOR_FMT_NV12, height);
            size = VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
            break;
        default:
            ALOGE("unrecognized pixel format: 0x%x", format);
            return -EINVAL;
    }

    return size;
}
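// Worked example for the YUV math above: a 1280x720 YV12 buffer gives
//   alignedw = ALIGN(1280, 16) = 1280, alignedh = 720,
//   size     = 1280*720 + (ALIGN(640, 16) * 360) * 2 = 921600 + 460800 = 1382400,
//   rounded up to ALIGN(1382400, 4096) = 1384448 bytes.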

// Allocate buffer from width, height and format into a
// private_handle_t. It is the responsibility of the caller
// to free the buffer using the free_buffer function.
int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage)
{
    alloc_data data;
    int alignedw, alignedh;
    gralloc::IAllocController* sAlloc =
        gralloc::IAllocController::getInstance();
    data.base = 0;
    data.fd = -1;
    data.offset = 0;
    data.size = getBufferSizeAndDimensions(w, h, format, alignedw, alignedh);
    data.align = getpagesize();
    data.uncached = useUncached(usage);
    int allocFlags = usage;

    int err = sAlloc->allocate(data, allocFlags);
    if (0 != err) {
        ALOGE("%s: allocate failed", __FUNCTION__);
        return -ENOMEM;
    }

    private_handle_t* hnd = new private_handle_t(data.fd, data.size,
                                                 data.allocType, 0, format,
                                                 alignedw, alignedh);
    hnd->base = (int) data.base;
    hnd->offset = data.offset;
    hnd->gpuaddr = 0;
    *pHnd = hnd;
    return 0;
}

void free_buffer(private_handle_t *hnd)
{
    gralloc::IAllocController* sAlloc =
        gralloc::IAllocController::getInstance();
    if (hnd && hnd->fd > 0) {
        IMemAlloc* memalloc = sAlloc->getAllocator(hnd->flags);
        memalloc->free_buffer((void*)hnd->base, hnd->size, hnd->offset, hnd->fd);
    }
    if(hnd)
        delete hnd;
}
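// Minimal usage sketch for the helpers above (hypothetical caller; the
// format and usage bits are chosen only for illustration):
//
//   private_handle_t *hnd = NULL;
//   if (alloc_buffer(&hnd, 1280, 720, HAL_PIXEL_FORMAT_RGBA_8888,
//                    GRALLOC_USAGE_PRIVATE_IOMMU_HEAP) == 0) {
//       // ... use hnd->fd / hnd->base / hnd->size ...
//       free_buffer(hnd);
//   }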