/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/bitops.h>  /* for popcount() */
#include <audio_utils/primitives.h>
#include "private/private.h"

void ditherAndClamp(int32_t *out, const int32_t *sums, size_t pairs)
{
    for (; pairs > 0; --pairs) {
        const int32_t l = clamp16(*sums++ >> 12);
        const int32_t r = clamp16(*sums++ >> 12);
        *out++ = (r << 16) | (l & 0xFFFF);
    }
}
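
/*
 * ditherAndClamp() consumes 'pairs' stereo sample pairs of mixer sums with 12 fractional
 * bits (Q19.12) and packs each pair into one 32-bit word: left in the low 16 bits, right
 * in the high 16 bits. Note that despite the name, this implementation only clamps; no
 * dither is applied.
 *
 * Minimal usage sketch (kFrames and the buffer names are hypothetical):
 *
 *     enum { kFrames = 256 };
 *     int32_t mixerSums[kFrames * 2];   // interleaved L/R sums, 12 fractional bits
 *     int32_t packedOut[kFrames];       // one packed stereo pair per word
 *     ditherAndClamp(packedOut, mixerSums, kFrames);
 */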

void memcpy_to_i16_from_q4_27(int16_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp16(*src++ >> 12);
    }
}

void memcpy_to_i16_from_u8(int16_t *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = (int16_t)(*--src - 0x80) << 8;
    }
}

void memcpy_to_u8_from_i16(uint8_t *dst, const int16_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = (*src++ >> 8) + 0x80;
    }
}

void memcpy_to_u8_from_float(uint8_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp8_from_float(*src++);
    }
}

void memcpy_to_i16_from_i32(int16_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = *src++ >> 16;
    }
}

void memcpy_to_i16_from_float(int16_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp16_from_float(*src++);
    }
}

void memcpy_to_float_from_q4_27(float *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = float_from_q4_27(*src++);
    }
}

void memcpy_to_float_from_i16(float *dst, const int16_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = float_from_i16(*--src);
    }
}

void memcpy_to_float_from_u8(float *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = float_from_u8(*--src);
    }
}

void memcpy_to_float_from_p24(float *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count * 3;
    for (; count > 0; --count) {
        src -= 3;
        *--dst = float_from_p24(src);
    }
}

void memcpy_to_i16_from_p24(int16_t *dst, const uint8_t *src, size_t count)
{
    for (; count > 0; --count) {
#if HAVE_BIG_ENDIAN
        *dst++ = src[1] | (src[0] << 8);
#else
        *dst++ = src[1] | (src[2] << 8);
#endif
        src += 3;
    }
}

void memcpy_to_i32_from_p24(int32_t *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count * 3;
    for (; count > 0; --count) {
        src -= 3;
#if HAVE_BIG_ENDIAN
        *--dst = (src[2] << 8) | (src[1] << 16) | (src[0] << 24);
#else
        *--dst = (src[0] << 8) | (src[1] << 16) | (src[2] << 24);
#endif
    }
}
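
/*
 * Packed 24-bit ('p24') samples occupy 3 bytes each: least significant byte first on
 * little-endian hosts and most significant byte first on big-endian hosts, as the
 * HAVE_BIG_ENDIAN branches above show.
 *
 * Illustrative round trip through int32_t (buffer names and count are hypothetical):
 *
 *     uint8_t packed[3 * 100];                     // 100 packed 24-bit samples
 *     int32_t wide[100];                           // value in the top 24 bits, low byte 0
 *     memcpy_to_i32_from_p24(wide, packed, 100);
 *     memcpy_to_p24_from_i32(packed, wide, 100);   // defined below
 */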

void memcpy_to_p24_from_i16(uint8_t *dst, const int16_t *src, size_t count)
{
    dst += count * 3;
    src += count;
    for (; count > 0; --count) {
        dst -= 3;
        const int16_t sample = *--src;
#if HAVE_BIG_ENDIAN
        dst[0] = sample >> 8;
        dst[1] = sample;
        dst[2] = 0;
#else
        dst[0] = 0;
        dst[1] = sample;
        dst[2] = sample >> 8;
#endif
    }
}

void memcpy_to_p24_from_float(uint8_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        int32_t ival = clamp24_from_float(*src++);

#if HAVE_BIG_ENDIAN
        *dst++ = ival >> 16;
        *dst++ = ival >> 8;
        *dst++ = ival;
#else
        *dst++ = ival;
        *dst++ = ival >> 8;
        *dst++ = ival >> 16;
#endif
    }
}

void memcpy_to_p24_from_q8_23(uint8_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        int32_t ival = clamp24_from_q8_23(*src++);

#if HAVE_BIG_ENDIAN
        *dst++ = ival >> 16;
        *dst++ = ival >> 8;
        *dst++ = ival;
#else
        *dst++ = ival;
        *dst++ = ival >> 8;
        *dst++ = ival >> 16;
#endif
    }
}

void memcpy_to_p24_from_i32(uint8_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        int32_t ival = *src++ >> 8;

#if HAVE_BIG_ENDIAN
        *dst++ = ival >> 16;
        *dst++ = ival >> 8;
        *dst++ = ival;
#else
        *dst++ = ival;
        *dst++ = ival >> 8;
        *dst++ = ival >> 16;
#endif
    }
}

void memcpy_to_q8_23_from_i16(int32_t *dst, const int16_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = (int32_t)*--src << 8;
    }
}

void memcpy_to_q8_23_from_float_with_clamp(int32_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp24_from_float(*src++);
    }
}

void memcpy_to_q8_23_from_p24(int32_t *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count * 3;
    for (; count > 0; --count) {
        src -= 3;
#if HAVE_BIG_ENDIAN
        *--dst = (int8_t)src[0] << 16 | src[1] << 8 | src[2];
#else
        *--dst = (int8_t)src[2] << 16 | src[1] << 8 | src[0];
#endif
    }
}

void memcpy_to_q4_27_from_float(int32_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clampq4_27_from_float(*src++);
    }
}

void memcpy_to_i16_from_q8_23(int16_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp16(*src++ >> 8);
    }
}

void memcpy_to_float_from_q8_23(float *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = float_from_q8_23(*src++);
    }
}
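
/*
 * The qM_N helpers treat an int32_t as signed fixed point with N fractional bits:
 * q8_23 leaves headroom above a full-scale 16-bit sample (hence the << 8 in
 * memcpy_to_q8_23_from_i16), and q4_27 carries extra precision for intermediate
 * values; clamp24_from_q8_23() saturates back to the 24-bit range.
 *
 * Illustrative sketch of applying gain with headroom (gain and names are hypothetical):
 *
 *     int16_t in[100], out[100];
 *     int32_t tmp[100];                          // q8.23 working buffer
 *     memcpy_to_q8_23_from_i16(tmp, in, 100);
 *     for (size_t i = 0; i < 100; ++i) {
 *         tmp[i] = tmp[i] * 3 / 2;               // about +3.5 dB; may exceed 16-bit range
 *     }
 *     memcpy_to_i16_from_q8_23(out, tmp, 100);   // clamps back to 16 bits
 */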

void memcpy_to_i32_from_u8(int32_t *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = ((int32_t)(*--src) - 0x80) << 24;
    }
}

void memcpy_to_i32_from_i16(int32_t *dst, const int16_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = (int32_t)*--src << 16;
    }
}

void memcpy_to_i32_from_float(int32_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp32_from_float(*src++);
    }
}

void memcpy_to_float_from_i32(float *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = float_from_i32(*src++);
    }
}

void memcpy_to_float_from_float_with_clamping(float *dst, const float *src, size_t count,
        float absMax) {
    // Note: using NEON intrinsics (vminq_f32, vld1q_f32...) did NOT accelerate
    // the function when benchmarked. The compiler already vectorizes it using FMINNM
    // f32x4 and similar instructions.
    // Note: clamping induces a ~20% overhead compared to memcpy for count in [64, 512].
    // See primitives_benchmark.
    for (; count > 0; --count) {
        const float sample = *src++;
        *dst++ = fmax(-absMax, fmin(absMax, sample));
    }
}
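
/*
 * Illustrative sketch for the clamping copy above (the absMax value and names are
 * hypothetical): copy an effect-chain output while limiting overshoot to +/-1.0.
 *
 *     float processed[256], limited[256];
 *     memcpy_to_float_from_float_with_clamping(limited, processed, 256, 1.0f);
 */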

void downmix_to_mono_i16_from_stereo_i16(int16_t *dst, const int16_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = (int16_t)(((int32_t)src[0] + (int32_t)src[1]) >> 1);
        src += 2;
    }
}

void upmix_to_stereo_i16_from_mono_i16(int16_t *dst, const int16_t *src, size_t count)
{
    dst += count * 2;
    src += count;
    for (; count > 0; --count) {
        const int32_t temp = *--src;
        dst -= 2;
        dst[0] = temp;
        dst[1] = temp;
    }
}

void downmix_to_mono_float_from_stereo_float(float *dst, const float *src, size_t frames)
{
    for (; frames > 0; --frames) {
        *dst++ = (src[0] + src[1]) * 0.5;
        src += 2;
    }
}

void upmix_to_stereo_float_from_mono_float(float *dst, const float *src, size_t frames)
{
    dst += frames * 2;
    src += frames;
    for (; frames > 0; --frames) {
        const float temp = *--src;
        dst -= 2;
        dst[0] = temp;
        dst[1] = temp;
    }
}
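
/*
 * The upmix routines above walk both buffers backwards so that every source sample is
 * read before the growing destination overwrites it; this permits in-place expansion
 * where dst and src start at the same address (with the buffer sized for the stereo
 * result). Illustrative in-place sketch (name and frame count are hypothetical):
 *
 *     float buf[2 * 128];                              // mono data in buf[0..127]
 *     upmix_to_stereo_float_from_mono_float(buf, buf, 128);
 */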
339
Glenn Kasteneb247df2014-02-21 10:00:51 -0800340size_t nonZeroMono32(const int32_t *samples, size_t count)
341{
342 size_t nonZero = 0;
Andy Hung1307aa72017-11-17 15:46:55 -0800343 for (; count > 0; --count) {
Andy Hung0ebba4b2017-11-17 16:02:19 -0800344 nonZero += *samples++ != 0;
Glenn Kasteneb247df2014-02-21 10:00:51 -0800345 }
346 return nonZero;
347}
348
349size_t nonZeroMono16(const int16_t *samples, size_t count)
350{
351 size_t nonZero = 0;
Andy Hung1307aa72017-11-17 15:46:55 -0800352 for (; count > 0; --count) {
Andy Hung0ebba4b2017-11-17 16:02:19 -0800353 nonZero += *samples++ != 0;
Glenn Kasteneb247df2014-02-21 10:00:51 -0800354 }
355 return nonZero;
356}
357
358size_t nonZeroStereo32(const int32_t *frames, size_t count)
359{
360 size_t nonZero = 0;
Andy Hung1307aa72017-11-17 15:46:55 -0800361 for (; count > 0; --count) {
Andy Hung0ebba4b2017-11-17 16:02:19 -0800362 nonZero += frames[0] != 0 || frames[1] != 0;
Glenn Kasteneb247df2014-02-21 10:00:51 -0800363 frames += 2;
364 }
365 return nonZero;
366}
367
368size_t nonZeroStereo16(const int16_t *frames, size_t count)
369{
370 size_t nonZero = 0;
Andy Hung1307aa72017-11-17 15:46:55 -0800371 for (; count > 0; --count) {
Andy Hung0ebba4b2017-11-17 16:02:19 -0800372 nonZero += frames[0] != 0 || frames[1] != 0;
Glenn Kasteneb247df2014-02-21 10:00:51 -0800373 frames += 2;
374 }
375 return nonZero;
376}

/*
 * C macro to do channel mask copying independent of dst/src sample type.
 * Don't pass in any expressions for the macro arguments here.
 */
#define copy_frame_by_mask(dst, dmask, src, smask, count, zero) \
{ \
    uint32_t bit, ormask; \
    for (; (count) > 0; --(count)) { \
        ormask = (dmask) | (smask); \
        while (ormask) { \
            bit = ormask & -ormask; /* get lowest bit */ \
            ormask ^= bit;          /* remove lowest bit */ \
            if ((dmask) & bit) { \
                *(dst)++ = (smask) & bit ? *(src)++ : (zero); \
            } else { /* source channel only */ \
                ++(src); \
            } \
        } \
    } \
}

void memcpy_by_channel_mask(void *dst, uint32_t dst_mask,
        const void *src, uint32_t src_mask, size_t sample_size, size_t count)
{
#if 0
    /* alternate way of handling memcpy_by_channel_mask by using the index array */
    int8_t idxary[32];
    uint32_t src_channels = popcount(src_mask);
    uint32_t dst_channels =
            memcpy_by_index_array_initialization(idxary, 32, dst_mask, src_mask);

    memcpy_by_index_array(dst, dst_channels, src, src_channels, idxary, sample_size, count);
#else
    if (dst_mask == src_mask) {
        memcpy(dst, src, sample_size * popcount(dst_mask) * count);
        return;
    }
    switch (sample_size) {
    case 1: {
        uint8_t *udst = (uint8_t*)dst;
        const uint8_t *usrc = (const uint8_t*)src;

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, 0);
    } break;
    case 2: {
        uint16_t *udst = (uint16_t*)dst;
        const uint16_t *usrc = (const uint16_t*)src;

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, 0);
    } break;
    case 3: { /* could be slow; use a struct to represent 3 bytes of data */
        uint8x3_t *udst = (uint8x3_t*)dst;
        const uint8x3_t *usrc = (const uint8x3_t*)src;
        static const uint8x3_t zero; /* tricky - we use this to zero out a sample */

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, zero);
    } break;
    case 4: {
        uint32_t *udst = (uint32_t*)dst;
        const uint32_t *usrc = (const uint32_t*)src;

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, 0);
    } break;
    default:
        abort(); /* illegal value */
        break;
    }
#endif
}
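
/*
 * Usage sketch: extract the front-left/front-right pair from an interleaved 5.1 buffer
 * of 16-bit samples. Masks have one bit per channel; matching bit positions are copied,
 * destination-only channels are zero filled, and source-only channels are skipped.
 * (The literal masks and names below are illustrative; callers normally derive them
 * from the audio channel mask definitions.)
 *
 *     int16_t in51[6 * 96];        // 96 frames of interleaved 5.1 audio
 *     int16_t stereo[2 * 96];
 *     memcpy_by_channel_mask(stereo, 0x3, in51, 0x3f, sizeof(int16_t), 96);
 */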

/*
 * C macro to do copying by index array, to rearrange samples
 * within a frame. This is independent of src/dst sample type.
 * Don't pass in any expressions for the macro arguments here.
 */
#define copy_frame_by_idx(dst, dst_channels, src, src_channels, idxary, count, zero) \
{ \
    unsigned i; \
    int index; \
    for (; (count) > 0; --(count)) { \
        for (i = 0; i < (dst_channels); ++i) { \
            index = (idxary)[i]; \
            *(dst)++ = index < 0 ? (zero) : (src)[index]; \
        } \
        (src) += (src_channels); \
    } \
}

void memcpy_by_index_array(void *dst, uint32_t dst_channels,
        const void *src, uint32_t src_channels,
        const int8_t *idxary, size_t sample_size, size_t count)
{
    switch (sample_size) {
    case 1: {
        uint8_t *udst = (uint8_t*)dst;
        const uint8_t *usrc = (const uint8_t*)src;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, 0);
    } break;
    case 2: {
        uint16_t *udst = (uint16_t*)dst;
        const uint16_t *usrc = (const uint16_t*)src;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, 0);
    } break;
    case 3: { /* could be slow. use a struct to represent 3 bytes of data. */
        uint8x3_t *udst = (uint8x3_t*)dst;
        const uint8x3_t *usrc = (const uint8x3_t*)src;
        static const uint8x3_t zero;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, zero);
    } break;
    case 4: {
        uint32_t *udst = (uint32_t*)dst;
        const uint32_t *usrc = (const uint32_t*)src;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, 0);
    } break;
    default:
        abort(); /* illegal value */
        break;
    }
}

size_t memcpy_by_index_array_initialization(int8_t *idxary, size_t idxcount,
        uint32_t dst_mask, uint32_t src_mask)
{
    size_t n = 0;
    int srcidx = 0;
    uint32_t bit, ormask = src_mask | dst_mask;

    while (ormask && n < idxcount) {
        bit = ormask & -ormask;          /* get lowest bit */
        ormask ^= bit;                   /* remove lowest bit */
        if (src_mask & dst_mask & bit) { /* matching channel */
            idxary[n++] = srcidx++;
        } else if (src_mask & bit) {     /* source channel only */
            ++srcidx;
        } else {                         /* destination channel only */
            idxary[n++] = -1;
        }
    }
    return n + popcount(ormask & dst_mask);
}
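
/*
 * Worked example for the initialization above (masks and names are hypothetical):
 * with dst_mask = 0x5 (channels 0 and 2) and src_mask = 0x7 (channels 0, 1, 2),
 * the loop produces idxary = { 0, 2 } and returns 2. A destination channel with no
 * source counterpart would get index -1 and be zero filled by memcpy_by_index_array().
 *
 *     int8_t idxary[32];
 *     size_t dst_channels = memcpy_by_index_array_initialization(idxary, 32, 0x5, 0x7);
 *     memcpy_by_index_array(dstbuf, dst_channels, srcbuf, popcount(0x7),
 *             idxary, sizeof(int16_t), frames);
 */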

size_t memcpy_by_index_array_initialization_src_index(int8_t *idxary, size_t idxcount,
        uint32_t dst_mask, uint32_t src_mask) {
    size_t dst_count = popcount(dst_mask);
    if (idxcount == 0) {
        return dst_count;
    }
    if (dst_count > idxcount) {
        dst_count = idxcount;
    }

    size_t src_idx, dst_idx;
    for (src_idx = 0, dst_idx = 0; dst_idx < dst_count; ++dst_idx) {
        if (src_mask & 1) {
            idxary[dst_idx] = src_idx++;
        } else {
            idxary[dst_idx] = -1;
        }
        src_mask >>= 1;
    }
    return dst_idx;
}

size_t memcpy_by_index_array_initialization_dst_index(int8_t *idxary, size_t idxcount,
        uint32_t dst_mask, uint32_t src_mask) {
    size_t src_idx, dst_idx;
    size_t dst_count = __builtin_popcount(dst_mask);
    size_t src_count = __builtin_popcount(src_mask);
    if (idxcount == 0) {
        return dst_count;
    }
    if (dst_count > idxcount) {
        dst_count = idxcount;
    }
    for (src_idx = 0, dst_idx = 0; dst_idx < dst_count; ++src_idx) {
        if (dst_mask & 1) {
            idxary[dst_idx++] = src_idx < src_count ? (signed)src_idx : -1;
        }
        dst_mask >>= 1;
    }
    return dst_idx;
}

void accumulate_i16(int16_t *dst, const int16_t *src, size_t count) {
    while (count--) {
        *dst = clamp16((int32_t)*dst + *src++);
        ++dst;
    }
}

void accumulate_u8(uint8_t *dst, const uint8_t *src, size_t count) {
    int32_t sum;
    for (; count > 0; --count) {
        // 8-bit samples are centered around 0x80.
        sum = *dst + *src++ - 0x80;
        // Clamp to [0, 0xff]: bit 8 of sum is set only on overflow or underflow,
        // and (~sum >> 9), stored into the uint8_t, yields 0xff or 0x00 respectively.
        *dst++ = (sum & 0x100) ? (~sum >> 9) : sum;
    }
}

void accumulate_p24(uint8_t *dst, const uint8_t *src, size_t count) {
    for (; count > 0; --count) {
        // Unpack.
        int32_t dst_q8_23 = 0;
        int32_t src_q8_23 = 0;
        memcpy_to_q8_23_from_p24(&dst_q8_23, dst, 1);
        memcpy_to_q8_23_from_p24(&src_q8_23, src, 1);

        // Accumulate and overwrite.
        dst_q8_23 += src_q8_23;
        memcpy_to_p24_from_q8_23(dst, &dst_q8_23, 1);

        // Move on to next sample.
        dst += 3;
        src += 3;
    }
}

void accumulate_q8_23(int32_t *dst, const int32_t *src, size_t count) {
    for (; count > 0; --count) {
        *dst = clamp24_from_q8_23(*dst + *src++);
        ++dst;
    }
}

void accumulate_i32(int32_t *dst, const int32_t *src, size_t count) {
    for (; count > 0; --count) {
        *dst = clamp32((int64_t)*dst + *src++);
        ++dst;
    }
}

void accumulate_float(float *dst, const float *src, size_t count) {
    for (; count > 0; --count) {
        *dst++ += *src++;
    }
}
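
/*
 * The accumulate_* routines above add ('mix') src into dst in place; the integer
 * variants saturate, while the float variant relies on later clamping or limiting.
 * Illustrative mixing sketch (names and sizes are hypothetical):
 *
 *     float mixbus[512] = { 0 };           // silent mix bus
 *     float trackA[512], trackB[512];      // filled elsewhere
 *     accumulate_float(mixbus, trackA, 512);
 *     accumulate_float(mixbus, trackB, 512);
 */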