/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <arpa/inet.h>
#include <netinet/in.h>

#define LOG_TAG "AudioGroup"
#include <cutils/atomic.h>
#include <utils/Log.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <utils/threads.h>
#include <utils/SystemClock.h>
#include <media/AudioSystem.h>
#include <media/AudioRecord.h>
#include <media/AudioTrack.h>
#include <media/mediarecorder.h>

#include "jni.h"
#include "JNIHelp.h"

#include "AudioCodec.h"

extern int parse(JNIEnv *env, jstring jAddress, int port, sockaddr_storage *ss);

namespace {

using namespace android;

int gRandom = -1;

// We use a circular array to implement the jitter buffer. The simplest way is
// to do a modulo operation on the index while accessing the array. However,
// modulo can be expensive on some platforms, such as ARM. Thus we round up the
// size of the array to the nearest power of 2 and then use bitwise-and instead
// of modulo. Currently we make it 256ms long and assume the packet interval is
// 32ms or less. The first 64ms is where samples get mixed. The remaining 192ms
// is the real jitter buffer. For a stream at 8000Hz it takes 4096 bytes. These
// numbers are chosen by experiments and each of them can be adjusted as needed.
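//
// Worked example with the numbers above: at 8000Hz the allocation in
// AudioStream::set() rounds the buffer up to 2048 samples (4096 bytes, or
// 256ms), so mBufferMask becomes 2047 and an index is wrapped with
// (i & 2047) instead of (i % 2048).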

// Other notes:
// + We use elapsedRealtime() to get the time. Since we use 32bit variables
//   instead of 64bit ones, comparison must be done by subtraction.
// + Sampling rate must be a multiple of 1000Hz, and packet length must be in
//   milliseconds. No floating points.
// + If we cannot get enough CPU, we drop samples and simulate packet loss.
// + Resampling is not done yet, so streams in one group must use the same rate.
//   For the first release we might only support 8kHz and 16kHz.

class AudioStream
{
public:
    AudioStream();
    ~AudioStream();
    bool set(int mode, int socket, sockaddr_storage *remote,
        const char *codecName, int sampleRate, int sampleCount,
        int codecType, int dtmfType);

    void sendDtmf(int event);
    bool mix(int32_t *output, int head, int tail, int sampleRate);
    void encode(int tick, AudioStream *chain);
    void decode(int tick);

private:
    enum {
        NORMAL = 0,
        SEND_ONLY = 1,
        RECEIVE_ONLY = 2,
        LAST_MODE = 2,
    };

    int mMode;
    int mSocket;
    sockaddr_storage mRemote;
    AudioCodec *mCodec;
    uint32_t mCodecMagic;
    uint32_t mDtmfMagic;

    int mTick;
    int mSampleRate;
    int mSampleCount;
    int mInterval;

    int16_t *mBuffer;
    int mBufferMask;
    int mBufferHead;
    int mBufferTail;
    int mLatencyScore;

    uint16_t mSequence;
    uint32_t mTimestamp;
    uint32_t mSsrc;

    int mDtmfEvent;
    int mDtmfStart;

    AudioStream *mNext;

    friend class AudioGroup;
};

AudioStream::AudioStream()
{
    mSocket = -1;
    mCodec = NULL;
    mBuffer = NULL;
    mNext = NULL;
}

AudioStream::~AudioStream()
{
    close(mSocket);
    delete mCodec;
    delete [] mBuffer;
    LOGD("stream[%d] is dead", mSocket);
}

bool AudioStream::set(int mode, int socket, sockaddr_storage *remote,
    const char *codecName, int sampleRate, int sampleCount,
    int codecType, int dtmfType)
{
    if (mode < 0 || mode > LAST_MODE) {
        return false;
    }
    mMode = mode;

    if (codecName) {
        mRemote = *remote;
        mCodec = newAudioCodec(codecName);
        if (!mCodec || !mCodec->set(sampleRate, sampleCount)) {
            return false;
        }
    }

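    // The "magic" words are the pre-built first 32 bits of an RTP header:
    // version 2 plus the 7-bit payload type, shifted so the 16-bit sequence
    // number can simply be OR'ed in when a packet is sent. decode() checks
    // incoming packets against the same word, masking out fields that may
    // vary (padding, extension, CSRC count and marker). mDtmfMagic stays zero
    // when no telephone-event type was negotiated, which disables sendDtmf().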
    mCodecMagic = (0x8000 | codecType) << 16;
    mDtmfMagic = (dtmfType == -1) ? 0 : (0x8000 | dtmfType) << 16;

    mTick = elapsedRealtime();
    mSampleRate = sampleRate / 1000;
    mSampleCount = sampleCount;
    mInterval = mSampleCount / mSampleRate;

    // Allocate jitter buffer.
    for (mBufferMask = 8192; mBufferMask < sampleRate; mBufferMask <<= 1);
    mBufferMask >>= 2;
    mBuffer = new int16_t[mBufferMask];
    --mBufferMask;
    mBufferHead = 0;
    mBufferTail = 0;
    mLatencyScore = 0;

    // Initialize random bits.
    read(gRandom, &mSequence, sizeof(mSequence));
    read(gRandom, &mTimestamp, sizeof(mTimestamp));
    read(gRandom, &mSsrc, sizeof(mSsrc));

    mDtmfEvent = -1;
    mDtmfStart = 0;

    // Only take over the socket on success.
    mSocket = socket;

    LOGD("stream[%d] is configured as %s %dkHz %dms", mSocket,
        (codecName ? codecName : "RAW"), mSampleRate, mInterval);
    return true;
}

void AudioStream::sendDtmf(int event)
{
    if (mDtmfMagic != 0) {
        mDtmfEvent = event << 24;
        mDtmfStart = mTimestamp + mSampleCount;
    }
}

bool AudioStream::mix(int32_t *output, int head, int tail, int sampleRate)
{
    if (mMode == SEND_ONLY) {
        return false;
    }

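    // head and tail are times in milliseconds on the same clock as mBufferHead
    // and mBufferTail; comparisons are done by subtraction so that 32-bit
    // wrap-around is handled correctly. Clamp the requested window to what
    // this stream actually has buffered.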
    if (head - mBufferHead < 0) {
        head = mBufferHead;
    }
    if (tail - mBufferTail > 0) {
        tail = mBufferTail;
    }
    if (tail - head <= 0) {
        return false;
    }

    head *= mSampleRate;
    tail *= mSampleRate;

    if (sampleRate == mSampleRate) {
        for (int i = head; i - tail < 0; ++i) {
            output[i - head] += mBuffer[i & mBufferMask];
        }
    } else {
        // TODO: implement resampling.
        return false;
    }
    return true;
}

void AudioStream::encode(int tick, AudioStream *chain)
{
    if (tick - mTick >= mInterval) {
        // We just missed the train. Pretend that packets in between are lost.
        int skipped = (tick - mTick) / mInterval;
        mTick += skipped * mInterval;
        mSequence += skipped;
        mTimestamp += skipped * mSampleCount;
        LOGD("stream[%d] skips %d packets", mSocket, skipped);
    }

    tick = mTick;
    mTick += mInterval;
    ++mSequence;
    mTimestamp += mSampleCount;

    if (mMode == RECEIVE_ONLY) {
        return;
    }

    // If there is an ongoing DTMF event, send it now.
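    // The four words below form a complete RTP packet carrying an RFC 2833
    // telephone-event: words 0-2 are the RTP header (DTMF payload type plus
    // sequence number, a timestamp fixed at the start of the event, and the
    // SSRC), and word 3 is the payload packing event number, volume (unused
    // here) and duration. Bit 23 of the payload is the "end of event" flag.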
    if (mDtmfEvent != -1) {
        int duration = mTimestamp - mDtmfStart;
        // Make sure duration is reasonable.
        if (duration >= 0 && duration < mSampleRate * 100) {
            duration += mSampleCount;
            int32_t buffer[4] = {
                htonl(mDtmfMagic | mSequence),
                htonl(mDtmfStart),
                mSsrc,
                htonl(mDtmfEvent | duration),
            };
            if (duration >= mSampleRate * 100) {
                buffer[3] |= htonl(1 << 23);
                mDtmfEvent = -1;
            }
            sendto(mSocket, buffer, sizeof(buffer), MSG_DONTWAIT,
                (sockaddr *)&mRemote, sizeof(mRemote));
            return;
        }
        mDtmfEvent = -1;
    }

    // It is time to mix streams.
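    // The scratch buffer holds mSampleCount samples plus 3 extra words, so the
    // same array can later carry the 12-byte RTP header in front of the
    // encoded payload.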
    bool mixed = false;
    int32_t buffer[mSampleCount + 3];
    memset(buffer, 0, sizeof(buffer));
    while (chain) {
        if (chain != this &&
            chain->mix(buffer, tick - mInterval, tick, mSampleRate)) {
            mixed = true;
        }
        chain = chain->mNext;
    }
    if (!mixed) {
        LOGD("stream[%d] no data", mSocket);
        return;
    }

    // Cook the packet and send it out.
    int16_t samples[mSampleCount];
    for (int i = 0; i < mSampleCount; ++i) {
        int32_t sample = buffer[i];
        if (sample < -32768) {
            sample = -32768;
        }
        if (sample > 32767) {
            sample = 32767;
        }
        samples[i] = sample;
    }
    if (!mCodec) {
        // Special case for device stream.
        send(mSocket, samples, sizeof(samples), MSG_DONTWAIT);
        return;
    }

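    // Fill in the 12-byte RTP header (sequence, timestamp, SSRC) in front of
    // the encoded payload; hence the "length + 12" passed to sendto().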
    buffer[0] = htonl(mCodecMagic | mSequence);
    buffer[1] = htonl(mTimestamp);
    buffer[2] = mSsrc;
    int length = mCodec->encode(&buffer[3], samples);
    if (length <= 0) {
        LOGD("stream[%d] encoder error", mSocket);
        return;
    }
    sendto(mSocket, buffer, length + 12, MSG_DONTWAIT, (sockaddr *)&mRemote,
        sizeof(mRemote));
}

void AudioStream::decode(int tick)
{
    char c;
    if (mMode == SEND_ONLY) {
        recv(mSocket, &c, 1, MSG_DONTWAIT);
        return;
    }

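    // mBufferHead and mBufferTail are in milliseconds on the same clock as
    // tick; the unsigned comparison below pulls them back into a sane window
    // around tick if they have drifted or wrapped too far away.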
    // Make sure mBufferHead and mBufferTail are reasonable.
    if ((unsigned int)(tick + 256 - mBufferHead) > 1024) {
        mBufferHead = tick - 64;
        mBufferTail = mBufferHead;
    }

    if (tick - mBufferHead > 64) {
        // Throw away outdated samples.
        mBufferHead = tick - 64;
        if (mBufferTail - mBufferHead < 0) {
            mBufferTail = mBufferHead;
        }
    }

    if (mBufferTail - tick <= 80) {
        mLatencyScore = tick;
    } else if (tick - mLatencyScore >= 5000) {
        // Reset the jitter buffer to 40ms if the latency has stayed above
        // 80ms for the past 5 seconds. This rarely happens, so keep it simple.
        LOGD("stream[%d] latency control", mSocket);
        mBufferTail = tick + 40;
    }

    if (mBufferTail - mBufferHead > 256 - mInterval) {
        // Buffer overflow. Drop the packet.
        LOGD("stream[%d] buffer overflow", mSocket);
        recv(mSocket, &c, 1, MSG_DONTWAIT);
        return;
    }

    // Receive the packet and decode it.
    int16_t samples[mSampleCount];
    int length = 0;
    if (!mCodec) {
        // Special case for device stream.
        length = recv(mSocket, samples, sizeof(samples),
            MSG_TRUNC | MSG_DONTWAIT) >> 1;
    } else {
        __attribute__((aligned(4))) uint8_t buffer[2048];
        length = recv(mSocket, buffer, sizeof(buffer),
            MSG_TRUNC | MSG_DONTWAIT);

        // Do we need to check SSRC, sequence, and timestamp? They are not
        // reliable but at least they can be used to identify duplicates?
        if (length < 12 || length > (int)sizeof(buffer) ||
            (ntohl(*(uint32_t *)buffer) & 0xC07F0000) != mCodecMagic) {
            LOGD("stream[%d] malformed packet", mSocket);
            return;
        }
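        // Skip the rest of the RTP header: 12 fixed bytes plus 4 bytes per
        // CSRC (low nibble of the first byte), plus the header extension when
        // the X bit (0x10) is set; when the P bit (0x20) is set, the last byte
        // tells how much padding to drop from the end.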
        int offset = 12 + ((buffer[0] & 0x0F) << 2);
        if ((buffer[0] & 0x10) != 0) {
            offset += 4 + (ntohs(*(uint16_t *)&buffer[offset + 2]) << 2);
        }
        if ((buffer[0] & 0x20) != 0) {
            length -= buffer[length - 1];
        }
        length -= offset;
        if (length >= 0) {
            length = mCodec->decode(samples, &buffer[offset], length);
        }
    }
    if (length != mSampleCount) {
        LOGD("stream[%d] decoder error", mSocket);
        return;
    }

    if (tick - mBufferTail > 0) {
        // Buffer underrun. Reset the jitter buffer to 40ms.
        LOGD("stream[%d] buffer underrun", mSocket);
        if (mBufferTail - mBufferHead <= 0) {
            mBufferHead = tick + 40;
            mBufferTail = mBufferHead;
        } else {
            int tail = (tick + 40) * mSampleRate;
            for (int i = mBufferTail * mSampleRate; i - tail < 0; ++i) {
                mBuffer[i & mBufferMask] = 0;
            }
            mBufferTail = tick + 40;
        }
    }

    // Append to the jitter buffer.
    int tail = mBufferTail * mSampleRate;
    for (int i = 0; i < mSampleCount; ++i) {
        mBuffer[tail & mBufferMask] = samples[i];
        ++tail;
    }
    mBufferTail += mInterval;
}

//------------------------------------------------------------------------------

class AudioGroup
{
public:
    AudioGroup();
    ~AudioGroup();
    bool set(int sampleRate, int sampleCount);

    bool setMode(int mode);
    bool sendDtmf(int event);
    bool add(AudioStream *stream);
    bool remove(int socket);

private:
    enum {
        ON_HOLD = 0,
        MUTED = 1,
        NORMAL = 2,
        EC_ENABLED = 3,
        LAST_MODE = 3,
    };
    int mMode;
    AudioStream *mChain;
    int mEventQueue;
    volatile int mDtmfEvent;

    int mSampleCount;
    int mDeviceSocket;
    AudioTrack mTrack;
    AudioRecord mRecord;

    bool networkLoop();
    bool deviceLoop();

    class NetworkThread : public Thread
    {
    public:
        NetworkThread(AudioGroup *group) : Thread(false), mGroup(group) {}

        bool start()
        {
            if (run("Network", ANDROID_PRIORITY_AUDIO) != NO_ERROR) {
                LOGE("cannot start network thread");
                return false;
            }
            return true;
        }

    private:
        AudioGroup *mGroup;
        bool threadLoop()
        {
            return mGroup->networkLoop();
        }
    };
    sp<NetworkThread> mNetworkThread;

    class DeviceThread : public Thread
    {
    public:
        DeviceThread(AudioGroup *group) : Thread(false), mGroup(group) {}

        bool start()
        {
            char c;
            while (recv(mGroup->mDeviceSocket, &c, 1, MSG_DONTWAIT) == 1);

            if (run("Device", ANDROID_PRIORITY_AUDIO) != NO_ERROR) {
                LOGE("cannot start device thread");
                return false;
            }
            return true;
        }

    private:
        AudioGroup *mGroup;
        bool threadLoop()
        {
            return mGroup->deviceLoop();
        }
    };
    sp<DeviceThread> mDeviceThread;
};

AudioGroup::AudioGroup()
{
    mMode = ON_HOLD;
    mChain = NULL;
    mEventQueue = -1;
    mDtmfEvent = -1;
    mDeviceSocket = -1;
    mNetworkThread = new NetworkThread(this);
    mDeviceThread = new DeviceThread(this);
}

AudioGroup::~AudioGroup()
{
    mNetworkThread->requestExitAndWait();
    mDeviceThread->requestExitAndWait();
    mTrack.stop();
    mRecord.stop();
    close(mEventQueue);
    close(mDeviceSocket);
    while (mChain) {
        AudioStream *next = mChain->mNext;
        delete mChain;
        mChain = next;
    }
    LOGD("group[%d] is dead", mDeviceSocket);
}

bool AudioGroup::set(int sampleRate, int sampleCount)
{
    mEventQueue = epoll_create(2);
    if (mEventQueue == -1) {
        LOGE("epoll_create: %s", strerror(errno));
        return false;
    }

    mSampleCount = sampleCount;

    // Find out the frame count for AudioTrack and AudioRecord.
    int output = 0;
    int input = 0;
    if (AudioTrack::getMinFrameCount(&output, AudioSystem::VOICE_CALL,
        sampleRate) != NO_ERROR || output <= 0 ||
        AudioRecord::getMinFrameCount(&input, sampleRate,
        AudioSystem::PCM_16_BIT, 1) != NO_ERROR || input <= 0) {
        LOGE("cannot compute frame count");
        return false;
    }
    LOGD("reported frame count: output %d, input %d", output, input);

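    // Make sure each direction can buffer at least two packets' worth of
    // frames (presumably to give simple double-buffering headroom).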
    if (output < sampleCount * 2) {
        output = sampleCount * 2;
    }
    if (input < sampleCount * 2) {
        input = sampleCount * 2;
    }
    LOGD("adjusted frame count: output %d, input %d", output, input);

    // Initialize AudioTrack and AudioRecord.
    if (mTrack.set(AudioSystem::VOICE_CALL, sampleRate, AudioSystem::PCM_16_BIT,
        AudioSystem::CHANNEL_OUT_MONO, output) != NO_ERROR ||
        mRecord.set(AUDIO_SOURCE_MIC, sampleRate, AudioSystem::PCM_16_BIT,
        AudioSystem::CHANNEL_IN_MONO, input) != NO_ERROR) {
        LOGE("cannot initialize audio device");
        return false;
    }
    LOGD("latency: output %d, input %d", mTrack.latency(), mRecord.latency());

    // TODO: initialize echo canceler here.

    // Create device socket.
    int pair[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pair)) {
        LOGE("socketpair: %s", strerror(errno));
        return false;
    }
    mDeviceSocket = pair[0];

    // Create device stream.
    mChain = new AudioStream;
    if (!mChain->set(AudioStream::NORMAL, pair[1], NULL, NULL,
        sampleRate, sampleCount, -1, -1)) {
        close(pair[1]);
        LOGE("cannot initialize device stream");
        return false;
    }

    // Give device socket a reasonable timeout and buffer size.
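    // The receive timeout below equals one packet interval:
    // (1000 * sampleCount / sampleRate) milliseconds, converted to microseconds.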
    timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = 1000 * sampleCount / sampleRate * 1000;
    if (setsockopt(pair[0], SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) ||
        setsockopt(pair[0], SOL_SOCKET, SO_RCVBUF, &output, sizeof(output)) ||
        setsockopt(pair[1], SOL_SOCKET, SO_SNDBUF, &output, sizeof(output))) {
        LOGE("setsockopt: %s", strerror(errno));
        return false;
    }

    // Add device stream into event queue.
    epoll_event event;
    event.events = EPOLLIN;
    event.data.ptr = mChain;
    if (epoll_ctl(mEventQueue, EPOLL_CTL_ADD, pair[1], &event)) {
        LOGE("epoll_ctl: %s", strerror(errno));
        return false;
    }

    // Anything else?
    LOGD("stream[%d] joins group[%d]", pair[1], pair[0]);
    return true;
}

bool AudioGroup::setMode(int mode)
{
    if (mode < 0 || mode > LAST_MODE) {
        return false;
    }
    if (mMode == mode) {
        return true;
    }

    LOGD("group[%d] switches from mode %d to %d", mDeviceSocket, mMode, mode);
    mMode = mode;

    mDeviceThread->requestExitAndWait();
    if (mode == ON_HOLD) {
        mTrack.stop();
        mRecord.stop();
        return true;
    }

    mTrack.start();
    if (mode == MUTED) {
        mRecord.stop();
    } else {
        mRecord.start();
    }

    if (!mDeviceThread->start()) {
        mTrack.stop();
        mRecord.stop();
        return false;
    }
    return true;
}

bool AudioGroup::sendDtmf(int event)
{
    if (event < 0 || event > 15) {
        return false;
    }

    // DTMF is rarely used, so we try to make it as lightweight as possible.
    // Using volatile might be dodgy, but using a pipe or pthread primitives
    // or stop-set-restart threads seems too heavy. Will investigate later.
    timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 100000000;
    for (int i = 0; mDtmfEvent != -1 && i < 20; ++i) {
        nanosleep(&ts, NULL);
    }
    if (mDtmfEvent != -1) {
        return false;
    }
    mDtmfEvent = event;
    nanosleep(&ts, NULL);
    return true;
}

bool AudioGroup::add(AudioStream *stream)
{
    mNetworkThread->requestExitAndWait();

    epoll_event event;
    event.events = EPOLLIN;
    event.data.ptr = stream;
    if (epoll_ctl(mEventQueue, EPOLL_CTL_ADD, stream->mSocket, &event)) {
        LOGE("epoll_ctl: %s", strerror(errno));
        return false;
    }

    stream->mNext = mChain->mNext;
    mChain->mNext = stream;
    if (!mNetworkThread->start()) {
        // Only take over the stream on success.
        mChain->mNext = stream->mNext;
        return false;
    }

    LOGD("stream[%d] joins group[%d]", stream->mSocket, mDeviceSocket);
    return true;
}

bool AudioGroup::remove(int socket)
{
    mNetworkThread->requestExitAndWait();

    for (AudioStream *stream = mChain; stream->mNext; stream = stream->mNext) {
        AudioStream *target = stream->mNext;
        if (target->mSocket == socket) {
            if (epoll_ctl(mEventQueue, EPOLL_CTL_DEL, socket, NULL)) {
                LOGE("epoll_ctl: %s", strerror(errno));
                return false;
            }
            stream->mNext = target->mNext;
            LOGD("stream[%d] leaves group[%d]", socket, mDeviceSocket);
            delete target;
            break;
        }
    }

    // Do not start network thread if there is only one stream.
    if (!mChain->mNext || !mNetworkThread->start()) {
        return false;
    }
    return true;
}

bool AudioGroup::networkLoop()
{
    int tick = elapsedRealtime();
    int deadline = tick + 10;
    int count = 0;

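    // Let every stream whose next packet is due encode and send it, and track
    // the earliest upcoming tick so that epoll_wait() below never sleeps past
    // it (the timeout is in milliseconds).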
    for (AudioStream *stream = mChain; stream; stream = stream->mNext) {
        if (!stream->mTick || tick - stream->mTick >= 0) {
            stream->encode(tick, mChain);
        }
        if (deadline - stream->mTick > 0) {
            deadline = stream->mTick;
        }
        ++count;
    }

    if (mDtmfEvent != -1) {
        int event = mDtmfEvent;
        for (AudioStream *stream = mChain; stream; stream = stream->mNext) {
            stream->sendDtmf(event);
        }
        mDtmfEvent = -1;
    }

    deadline -= tick;
    if (deadline < 1) {
        deadline = 1;
    }

    epoll_event events[count];
    count = epoll_wait(mEventQueue, events, count, deadline);
    if (count == -1) {
        LOGE("epoll_wait: %s", strerror(errno));
        return false;
    }
    for (int i = 0; i < count; ++i) {
        ((AudioStream *)events[i].data.ptr)->decode(tick);
    }

    return true;
}

bool AudioGroup::deviceLoop()
{
    int16_t output[mSampleCount];

    if (recv(mDeviceSocket, output, sizeof(output), 0) <= 0) {
        memset(output, 0, sizeof(output));
    }

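    // Each pass plays back one packet (mSampleCount frames) of mixed audio on
    // the AudioTrack and, unless the group is muted, records one packet from
    // the AudioRecord, retrying obtainBuffer() a bounded number of times.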
    int16_t input[mSampleCount];
    int toWrite = mSampleCount;
    int toRead = (mMode == MUTED) ? 0 : mSampleCount;
    int chances = 100;

    while (--chances > 0 && (toWrite > 0 || toRead > 0)) {
        if (toWrite > 0) {
            AudioTrack::Buffer buffer;
            buffer.frameCount = toWrite;

            status_t status = mTrack.obtainBuffer(&buffer, 1);
            if (status == NO_ERROR) {
                memcpy(buffer.i8, &output[mSampleCount - toWrite], buffer.size);
                toWrite -= buffer.frameCount;
                mTrack.releaseBuffer(&buffer);
            } else if (status != TIMED_OUT && status != WOULD_BLOCK) {
                LOGE("cannot write to AudioTrack");
                return false;
            }
        }

        if (toRead > 0) {
            AudioRecord::Buffer buffer;
            buffer.frameCount = mRecord.frameCount();

            status_t status = mRecord.obtainBuffer(&buffer, 1);
            if (status == NO_ERROR) {
                int count = (buffer.frameCount < toRead) ?
                    buffer.frameCount : toRead;
                memcpy(&input[mSampleCount - toRead], buffer.i8, count * 2);
                toRead -= count;
                if (buffer.frameCount < mRecord.frameCount()) {
                    buffer.frameCount = count;
                }
                mRecord.releaseBuffer(&buffer);
            } else if (status != TIMED_OUT && status != WOULD_BLOCK) {
                LOGE("cannot read from AudioRecord");
                return false;
            }
        }
    }

    if (!chances) {
        LOGE("device loop timeout");
        return false;
    }

    if (mMode != MUTED) {
        if (mMode == NORMAL) {
            send(mDeviceSocket, input, sizeof(input), MSG_DONTWAIT);
        } else {
            // TODO: Echo canceller runs here.
            send(mDeviceSocket, input, sizeof(input), MSG_DONTWAIT);
        }
    }
    return true;
}

//------------------------------------------------------------------------------

static jfieldID gNative;
static jfieldID gMode;

void add(JNIEnv *env, jobject thiz, jint mode,
    jint socket, jstring jRemoteAddress, jint remotePort,
    jstring jCodecName, jint sampleRate, jint sampleCount,
    jint codecType, jint dtmfType)
{
    const char *codecName = NULL;
    AudioStream *stream = NULL;
    AudioGroup *group = NULL;

    // Sanity check.
    sockaddr_storage remote;
    if (parse(env, jRemoteAddress, remotePort, &remote) < 0) {
        // Exception already thrown.
        goto error;
    }
    if (sampleRate < 0 || sampleCount < 0 || codecType < 0 || codecType > 127) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
        goto error;
    }
    if (!jCodecName) {
        jniThrowNullPointerException(env, "codecName");
        goto error;
    }
    codecName = env->GetStringUTFChars(jCodecName, NULL);
    if (!codecName) {
        // Exception already thrown.
        goto error;
    }

    // Create audio stream.
    stream = new AudioStream;
    if (!stream->set(mode, socket, &remote, codecName, sampleRate, sampleCount,
        codecType, dtmfType)) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot initialize audio stream");
        env->ReleaseStringUTFChars(jCodecName, codecName);
        goto error;
    }
    env->ReleaseStringUTFChars(jCodecName, codecName);
    socket = -1;

    // Create audio group.
    group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (!group) {
        int mode = env->GetIntField(thiz, gMode);
        group = new AudioGroup;
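        // The device stream runs at 8kHz with 256-sample (32ms) packets.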
        if (!group->set(8000, 256) || !group->setMode(mode)) {
            jniThrowException(env, "java/lang/IllegalStateException",
                "cannot initialize audio group");
            goto error;
        }
    }

    // Add audio stream into audio group.
    if (!group->add(stream)) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot add audio stream");
        goto error;
    }

    // Succeed.
    env->SetIntField(thiz, gNative, (int)group);
    return;

error:
    delete group;
    delete stream;
    close(socket);
    env->SetIntField(thiz, gNative, NULL);
}

void remove(JNIEnv *env, jobject thiz, jint socket)
{
    AudioGroup *group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (group) {
        if (socket == -1 || !group->remove(socket)) {
            delete group;
            env->SetIntField(thiz, gNative, NULL);
        }
    }
}

void setMode(JNIEnv *env, jobject thiz, jint mode)
{
    AudioGroup *group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (group && !group->setMode(mode)) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
        return;
    }
    env->SetIntField(thiz, gMode, mode);
}

void sendDtmf(JNIEnv *env, jobject thiz, jint event)
{
    AudioGroup *group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (group && !group->sendDtmf(event)) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
    }
}

JNINativeMethod gMethods[] = {
    {"add", "(IILjava/lang/String;ILjava/lang/String;IIII)V", (void *)add},
    {"remove", "(I)V", (void *)remove},
    {"setMode", "(I)V", (void *)setMode},
    {"sendDtmf", "(I)V", (void *)sendDtmf},
};

} // namespace

int registerAudioGroup(JNIEnv *env)
{
    gRandom = open("/dev/urandom", O_RDONLY);
    if (gRandom == -1) {
        LOGE("urandom: %s", strerror(errno));
        return -1;
    }

    jclass clazz;
    if ((clazz = env->FindClass("android/net/rtp/AudioGroup")) == NULL ||
        (gNative = env->GetFieldID(clazz, "mNative", "I")) == NULL ||
        (gMode = env->GetFieldID(clazz, "mMode", "I")) == NULL ||
        env->RegisterNatives(clazz, gMethods, NELEM(gMethods)) < 0) {
        LOGE("JNI registration failed");
        return -1;
    }
    return 0;
}