/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <arpa/inet.h>
#include <netinet/in.h>

#define LOG_TAG "AudioGroup"
#include <cutils/atomic.h>
#include <utils/Log.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <utils/threads.h>
#include <utils/SystemClock.h>
#include <media/AudioSystem.h>
#include <media/AudioRecord.h>
#include <media/AudioTrack.h>
#include <media/mediarecorder.h>

#include "jni.h"
#include "JNIHelp.h"

#include "AudioCodec.h"

extern int parse(JNIEnv *env, jstring jAddress, int port, sockaddr_storage *ss);

namespace {

using namespace android;

int gRandom = -1;

// We use a circular array to implement the jitter buffer. The simplest way is
// doing a modulo operation on the index while accessing the array. However,
// modulo can be expensive on some platforms, such as ARM. Thus we round up the
// size of the array to the nearest power of 2 and then use bitwise-and instead
// of modulo. Currently we make it 256ms long and assume the packet interval is
// 32ms or less. The first 64ms is where samples get mixed. The remaining 192ms
// is the real jitter buffer. For a stream at 8000Hz it takes 4096 bytes. These
// numbers were chosen by experiment and each of them can be adjusted as needed.
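// For example, a 2048-sample buffer is indexed as mBuffer[i & 2047] instead of
// mBuffer[i % 2048].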

// Other notes:
// + We use elapsedRealtime() to get the time. Since we use 32-bit variables
//   instead of 64-bit ones, comparisons must be done by subtraction.
// + The sampling rate must be a multiple of 1000Hz, and the packet length must
//   be in milliseconds. No floating point.
// + If we cannot get enough CPU, we drop samples and simulate packet loss.
// + Resampling is not done yet, so streams in one group must use the same rate.
//   For the first release we might only support 8kHz and 16kHz.
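// For example, deadlines below are tested as (tick - mTick >= mInterval)
// rather than (tick >= mTick + mInterval), so the comparison stays correct
// when the 32-bit clock value wraps around.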

class AudioStream
{
public:
    AudioStream();
    ~AudioStream();
    bool set(int mode, int socket, sockaddr_storage *remote,
        const char *codecName, int sampleRate, int sampleCount,
        int codecType, int dtmfType);

    void sendDtmf(int event);
    bool mix(int32_t *output, int head, int tail, int sampleRate);
    void encode(int tick, AudioStream *chain);
    void decode(int tick);

private:
    enum {
        NORMAL = 0,
        SEND_ONLY = 1,
        RECEIVE_ONLY = 2,
        LAST_MODE = 2,
    };

    int mMode;
    int mSocket;
    sockaddr_storage mRemote;
    AudioCodec *mCodec;
    uint32_t mCodecMagic;
    uint32_t mDtmfMagic;

    int mTick;
    int mSampleRate;
    int mSampleCount;
    int mInterval;

    int16_t *mBuffer;
    int mBufferMask;
    int mBufferHead;
    int mBufferTail;
    int mLatencyScore;

    uint16_t mSequence;
    uint32_t mTimestamp;
    uint32_t mSsrc;

    int mDtmfEvent;
    int mDtmfStart;

    AudioStream *mNext;

    friend class AudioGroup;
};

AudioStream::AudioStream()
{
    mSocket = -1;
    mCodec = NULL;
    mBuffer = NULL;
    mNext = NULL;
}

AudioStream::~AudioStream()
{
    close(mSocket);
    delete mCodec;
    delete [] mBuffer;
    LOGD("stream[%d] is dead", mSocket);
}

bool AudioStream::set(int mode, int socket, sockaddr_storage *remote,
    const char *codecName, int sampleRate, int sampleCount,
    int codecType, int dtmfType)
{
    if (mode < 0 || mode > LAST_MODE) {
        return false;
    }
    mMode = mode;

    if (codecName) {
        mRemote = *remote;
        mCodec = newAudioCodec(codecName);
        if (!mCodec || !mCodec->set(sampleRate, sampleCount)) {
            return false;
        }
    }

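    // Precompute the first word of the RTP header: version 2 (0x8000) and the
    // payload type go in the upper 16 bits; the sequence number is OR'ed into
    // the lower 16 bits when a packet is sent. A dtmfType of -1 disables
    // outgoing DTMF.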
    mCodecMagic = (0x8000 | codecType) << 16;
    mDtmfMagic = (dtmfType == -1) ? 0 : (0x8000 | dtmfType) << 16;

    mTick = elapsedRealtime();
    mSampleRate = sampleRate / 1000;
    mSampleCount = sampleCount;
    mInterval = mSampleCount / mSampleRate;

    // Allocate jitter buffer.
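    // Round the sample rate up to a power of two and keep a quarter of it:
    // 2048 samples (256ms) at 8000Hz, 4096 at 16000Hz. mBufferMask then
    // becomes size - 1, so indexes wrap with a single bitwise AND.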
    for (mBufferMask = 8192; mBufferMask < sampleRate; mBufferMask <<= 1);
    mBufferMask >>= 2;
    mBuffer = new int16_t[mBufferMask];
    --mBufferMask;
    mBufferHead = 0;
    mBufferTail = 0;
    mLatencyScore = 0;

    // Initialize random bits.
    read(gRandom, &mSequence, sizeof(mSequence));
    read(gRandom, &mTimestamp, sizeof(mTimestamp));
    read(gRandom, &mSsrc, sizeof(mSsrc));

    mDtmfEvent = -1;
    mDtmfStart = 0;

    // Only take over the socket on success.
    mSocket = socket;

    LOGD("stream[%d] is configured as %s %dkHz %dms", mSocket,
        (codecName ? codecName : "RAW"), mSampleRate, mInterval);
    return true;
}

void AudioStream::sendDtmf(int event)
{
    if (mDtmfMagic != 0) {
        mDtmfEvent = event << 24;
        mDtmfStart = mTimestamp + mSampleCount;
    }
}

bool AudioStream::mix(int32_t *output, int head, int tail, int sampleRate)
{
    if (mMode == SEND_ONLY) {
        return false;
    }

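    // Clamp the requested window to the samples actually buffered; the
    // differences are compared against zero so the checks stay valid across
    // 32-bit wrap-around.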
    if (head - mBufferHead < 0) {
        head = mBufferHead;
    }
    if (tail - mBufferTail > 0) {
        tail = mBufferTail;
    }
    if (tail - head <= 0) {
        return false;
    }

    head *= mSampleRate;
    tail *= mSampleRate;

    if (sampleRate == mSampleRate) {
        for (int i = head; i - tail < 0; ++i) {
            output[i - head] += mBuffer[i & mBufferMask];
        }
    } else {
        // TODO: implement resampling.
        return false;
    }
    return true;
}

void AudioStream::encode(int tick, AudioStream *chain)
{
    if (tick - mTick >= mInterval) {
        // We just missed the train. Pretend that packets in between are lost.
        int skipped = (tick - mTick) / mInterval;
        mTick += skipped * mInterval;
        mSequence += skipped;
        mTimestamp += skipped * mSampleCount;
        LOGD("stream[%d] skips %d packets", mSocket, skipped);
    }

    tick = mTick;
    mTick += mInterval;
    ++mSequence;
    mTimestamp += mSampleCount;

    if (mMode == RECEIVE_ONLY) {
        return;
    }

    // If there is an ongoing DTMF event, send it now.
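    // The payload built below follows the RFC 2833-style telephone-event
    // layout: the event number in the top byte, the end bit at (1 << 23), and
    // the duration in samples in the low 16 bits, with mDtmfStart as the RTP
    // timestamp.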
    if (mDtmfEvent != -1) {
        int duration = mTimestamp - mDtmfStart;
        // Make sure duration is reasonable.
        if (duration >= 0 && duration < mSampleRate * 100) {
            duration += mSampleCount;
            int32_t buffer[4] = {
                htonl(mDtmfMagic | mSequence),
                htonl(mDtmfStart),
                mSsrc,
                htonl(mDtmfEvent | duration),
            };
            if (duration >= mSampleRate * 100) {
                buffer[3] |= htonl(1 << 23);
                mDtmfEvent = -1;
            }
            sendto(mSocket, buffer, sizeof(buffer), MSG_DONTWAIT,
                (sockaddr *)&mRemote, sizeof(mRemote));
            return;
        }
        mDtmfEvent = -1;
    }

    // It is time to mix streams.
    bool mixed = false;
    int32_t buffer[mSampleCount + 3];
    memset(buffer, 0, sizeof(buffer));
    while (chain) {
        if (chain != this &&
            chain->mix(buffer, tick - mInterval, tick, mSampleRate)) {
            mixed = true;
        }
        chain = chain->mNext;
    }
    if (!mixed) {
        LOGD("stream[%d] no data", mSocket);
        return;
    }

    // Cook the packet and send it out.
    int16_t samples[mSampleCount];
    for (int i = 0; i < mSampleCount; ++i) {
        int32_t sample = buffer[i];
        if (sample < -32768) {
            sample = -32768;
        }
        if (sample > 32767) {
            sample = 32767;
        }
        samples[i] = sample;
    }
    if (!mCodec) {
        // Special case for device stream.
        send(mSocket, samples, sizeof(samples), MSG_DONTWAIT);
        return;
    }

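    // Prepend the 12-byte RTP header (magic word with the sequence number,
    // timestamp, and SSRC) and send the header plus encoded payload as one
    // datagram.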
    buffer[0] = htonl(mCodecMagic | mSequence);
    buffer[1] = htonl(mTimestamp);
    buffer[2] = mSsrc;
    int length = mCodec->encode(&buffer[3], samples);
    if (length <= 0) {
        LOGD("stream[%d] encoder error", mSocket);
        return;
    }
    sendto(mSocket, buffer, length + 12, MSG_DONTWAIT, (sockaddr *)&mRemote,
        sizeof(mRemote));
}

void AudioStream::decode(int tick)
{
    char c;
    if (mMode == SEND_ONLY) {
        recv(mSocket, &c, 1, MSG_DONTWAIT);
        return;
    }

    // Make sure mBufferHead and mBufferTail are reasonable.
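    // That is, restart 64ms behind the current tick whenever the head is more
    // than 768ms behind or more than 256ms ahead of it.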
    if ((unsigned int)(tick + 256 - mBufferHead) > 1024) {
        mBufferHead = tick - 64;
        mBufferTail = mBufferHead;
    }

    if (tick - mBufferHead > 64) {
        // Throw away outdated samples.
        mBufferHead = tick - 64;
        if (mBufferTail - mBufferHead < 0) {
            mBufferTail = mBufferHead;
        }
    }

    if (mBufferTail - tick <= 80) {
        mLatencyScore = tick;
    } else if (tick - mLatencyScore >= 5000) {
        // Reset the jitter buffer to 40ms if the latency has stayed above 80ms
        // for the past 5 seconds. This rarely happens, so let us just keep it
        // simple.
        LOGD("stream[%d] latency control", mSocket);
        mBufferTail = tick + 40;
    }

    if (mBufferTail - mBufferHead > 256 - mInterval) {
        // Buffer overflow. Drop the packet.
        LOGD("stream[%d] buffer overflow", mSocket);
        recv(mSocket, &c, 1, MSG_DONTWAIT);
        return;
    }

    // Receive the packet and decode it.
    int16_t samples[mSampleCount];
    int length = 0;
    if (!mCodec) {
        // Special case for device stream.
        length = recv(mSocket, samples, sizeof(samples),
            MSG_TRUNC | MSG_DONTWAIT) >> 1;
    } else {
        __attribute__((aligned(4))) uint8_t buffer[2048];
        length = recv(mSocket, buffer, sizeof(buffer),
            MSG_TRUNC | MSG_DONTWAIT);

        // Do we need to check the SSRC, sequence, and timestamp? They are not
        // reliable, but at least they could be used to identify duplicates.
        if (length < 12 || length > (int)sizeof(buffer) ||
            (ntohl(*(uint32_t *)buffer) & 0xC07F0000) != mCodecMagic) {
            LOGD("stream[%d] malformed packet", mSocket);
            return;
        }
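        // Skip the 12-byte fixed header plus any CSRC entries (the CC field),
        // then the header extension if the X bit is set, and strip padding if
        // the P bit is set; whatever remains is the codec payload.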
        int offset = 12 + ((buffer[0] & 0x0F) << 2);
        if ((buffer[0] & 0x10) != 0) {
            offset += 4 + (ntohs(*(uint16_t *)&buffer[offset + 2]) << 2);
        }
        if ((buffer[0] & 0x20) != 0) {
            length -= buffer[length - 1];
        }
        length -= offset;
        if (length >= 0) {
            length = mCodec->decode(samples, &buffer[offset], length);
        }
    }
    if (length != mSampleCount) {
        LOGD("stream[%d] decoder error", mSocket);
        return;
    }

    if (tick - mBufferTail > 0) {
        // Buffer underrun. Reset the jitter buffer to 40ms.
        LOGD("stream[%d] buffer underrun", mSocket);
        if (mBufferTail - mBufferHead <= 0) {
            mBufferHead = tick + 40;
            mBufferTail = mBufferHead;
        } else {
            int tail = (tick + 40) * mSampleRate;
            for (int i = mBufferTail * mSampleRate; i - tail < 0; ++i) {
                mBuffer[i & mBufferMask] = 0;
            }
            mBufferTail = tick + 40;
        }
    }

    // Append to the jitter buffer.
    int tail = mBufferTail * mSampleRate;
    for (int i = 0; i < mSampleCount; ++i) {
        mBuffer[tail & mBufferMask] = samples[i];
        ++tail;
    }
    mBufferTail += mInterval;
}

//------------------------------------------------------------------------------

class AudioGroup
{
public:
    AudioGroup();
    ~AudioGroup();
    bool set(int sampleRate, int sampleCount);

    bool setMode(int mode);
    bool sendDtmf(int event);
    bool add(AudioStream *stream);
    bool remove(int socket);

private:
    enum {
        ON_HOLD = 0,
        MUTED = 1,
        NORMAL = 2,
        EC_ENABLED = 3,
        LAST_MODE = 3,
    };
    int mMode;
    AudioStream *mChain;
    int mEventQueue;
    volatile int mDtmfEvent;

    int mSampleCount;
    int mDeviceSocket;
    AudioTrack mTrack;
    AudioRecord mRecord;

    bool networkLoop();
    bool deviceLoop();

    class NetworkThread : public Thread
    {
    public:
        NetworkThread(AudioGroup *group) : Thread(false), mGroup(group) {}

        bool start()
        {
            if (run("Network", ANDROID_PRIORITY_AUDIO) != NO_ERROR) {
                LOGE("cannot start network thread");
                return false;
            }
            return true;
        }

    private:
        AudioGroup *mGroup;
        bool threadLoop()
        {
            return mGroup->networkLoop();
        }
    };
    sp<NetworkThread> mNetworkThread;

    class DeviceThread : public Thread
    {
    public:
        DeviceThread(AudioGroup *group) : Thread(false), mGroup(group) {}

        bool start()
        {
            char c;
            while (recv(mGroup->mDeviceSocket, &c, 1, MSG_DONTWAIT) == 1);

            if (run("Device", ANDROID_PRIORITY_AUDIO) != NO_ERROR) {
                LOGE("cannot start device thread");
                return false;
            }
            return true;
        }

    private:
        AudioGroup *mGroup;
        bool threadLoop()
        {
            return mGroup->deviceLoop();
        }
    };
    sp<DeviceThread> mDeviceThread;
};

AudioGroup::AudioGroup()
{
    mMode = ON_HOLD;
    mChain = NULL;
    mEventQueue = -1;
    mDtmfEvent = -1;
    mDeviceSocket = -1;
    mNetworkThread = new NetworkThread(this);
    mDeviceThread = new DeviceThread(this);
}

AudioGroup::~AudioGroup()
{
    mNetworkThread->requestExitAndWait();
    mDeviceThread->requestExitAndWait();
    mTrack.stop();
    mRecord.stop();
    close(mEventQueue);
    close(mDeviceSocket);
    while (mChain) {
        AudioStream *next = mChain->mNext;
        delete mChain;
        mChain = next;
    }
    LOGD("group[%d] is dead", mDeviceSocket);
}

bool AudioGroup::set(int sampleRate, int sampleCount)
{
    mEventQueue = epoll_create(2);
    if (mEventQueue == -1) {
        LOGE("epoll_create: %s", strerror(errno));
        return false;
    }

    mSampleCount = sampleCount;

    // Find out the frame count for AudioTrack and AudioRecord.
    int output = 0;
    int input = 0;
    if (AudioTrack::getMinFrameCount(&output, AudioSystem::VOICE_CALL,
        sampleRate) != NO_ERROR || output <= 0 ||
        AudioRecord::getMinFrameCount(&input, sampleRate,
        AudioSystem::PCM_16_BIT, 1) != NO_ERROR || input <= 0) {
        LOGE("cannot compute frame count");
        return false;
    }
    LOGD("reported frame count: output %d, input %d", output, input);

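    // Round both frame counts up to a multiple of one packet and give
    // AudioRecord at least twice the buffering of AudioTrack.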
    output = (output + sampleCount - 1) / sampleCount * sampleCount;
    input = (input + sampleCount - 1) / sampleCount * sampleCount;
    if (input < output * 2) {
        input = output * 2;
    }
    LOGD("adjusted frame count: output %d, input %d", output, input);

    // Initialize AudioTrack and AudioRecord.
    if (mTrack.set(AudioSystem::VOICE_CALL, sampleRate, AudioSystem::PCM_16_BIT,
        AudioSystem::CHANNEL_OUT_MONO, output) != NO_ERROR ||
        mRecord.set(AUDIO_SOURCE_MIC, sampleRate, AudioSystem::PCM_16_BIT,
        AudioSystem::CHANNEL_IN_MONO, input) != NO_ERROR) {
        LOGE("cannot initialize audio device");
        return false;
    }
    LOGD("latency: output %d, input %d", mTrack.latency(), mRecord.latency());

    // TODO: initialize echo canceler here.

    // Create device socket.
    int pair[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pair)) {
        LOGE("socketpair: %s", strerror(errno));
        return false;
    }
    mDeviceSocket = pair[0];

    // Create device stream.
    mChain = new AudioStream;
    if (!mChain->set(AudioStream::NORMAL, pair[1], NULL, NULL,
        sampleRate, sampleCount, -1, -1)) {
        close(pair[1]);
        LOGE("cannot initialize device stream");
        return false;
    }

    // Give device socket a reasonable timeout and buffer size.
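    // The receive timeout is half of one packet interval (in microseconds),
    // and both socket buffers are sized from the adjusted output frame count.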
    timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = 1000 * sampleCount / sampleRate * 500;
    if (setsockopt(pair[0], SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) ||
        setsockopt(pair[0], SOL_SOCKET, SO_RCVBUF, &output, sizeof(output)) ||
        setsockopt(pair[1], SOL_SOCKET, SO_SNDBUF, &output, sizeof(output))) {
        LOGE("setsockopt: %s", strerror(errno));
        return false;
    }

    // Add device stream into event queue.
    epoll_event event;
    event.events = EPOLLIN;
    event.data.ptr = mChain;
    if (epoll_ctl(mEventQueue, EPOLL_CTL_ADD, pair[1], &event)) {
        LOGE("epoll_ctl: %s", strerror(errno));
        return false;
    }

    // Anything else?
    LOGD("stream[%d] joins group[%d]", pair[1], pair[0]);
    return true;
}

bool AudioGroup::setMode(int mode)
{
    if (mode < 0 || mode > LAST_MODE) {
        return false;
    }
    if (mMode == mode) {
        return true;
    }

    LOGD("group[%d] switches from mode %d to %d", mDeviceSocket, mMode, mode);
    mMode = mode;

    mDeviceThread->requestExitAndWait();
    if (mode == ON_HOLD) {
        mTrack.stop();
        mRecord.stop();
        return true;
    }

    mTrack.start();
    if (mode == MUTED) {
        mRecord.stop();
    } else {
        mRecord.start();
    }

    if (!mDeviceThread->start()) {
        mTrack.stop();
        mRecord.stop();
        return false;
    }
    return true;
}

bool AudioGroup::sendDtmf(int event)
{
    if (event < 0 || event > 15) {
        return false;
    }

    // DTMF is rarely used, so we try to make it as lightweight as possible.
    // Using volatile might be dodgy, but a pipe, pthread primitives, or
    // stop-set-restart threads all seem too heavy. Will investigate later.
    timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 100000000;
    for (int i = 0; mDtmfEvent != -1 && i < 20; ++i) {
        nanosleep(&ts, NULL);
    }
    if (mDtmfEvent != -1) {
        return false;
    }
    mDtmfEvent = event;
    nanosleep(&ts, NULL);
    return true;
}

bool AudioGroup::add(AudioStream *stream)
{
    mNetworkThread->requestExitAndWait();

    epoll_event event;
    event.events = EPOLLIN;
    event.data.ptr = stream;
    if (epoll_ctl(mEventQueue, EPOLL_CTL_ADD, stream->mSocket, &event)) {
        LOGE("epoll_ctl: %s", strerror(errno));
        return false;
    }

    stream->mNext = mChain->mNext;
    mChain->mNext = stream;
    if (!mNetworkThread->start()) {
        // Only take over the stream on success.
        mChain->mNext = stream->mNext;
        return false;
    }

    LOGD("stream[%d] joins group[%d]", stream->mSocket, mDeviceSocket);
    return true;
}

bool AudioGroup::remove(int socket)
{
    mNetworkThread->requestExitAndWait();

    for (AudioStream *stream = mChain; stream->mNext; stream = stream->mNext) {
        AudioStream *target = stream->mNext;
        if (target->mSocket == socket) {
            if (epoll_ctl(mEventQueue, EPOLL_CTL_DEL, socket, NULL)) {
                LOGE("epoll_ctl: %s", strerror(errno));
                return false;
            }
            stream->mNext = target->mNext;
            LOGD("stream[%d] leaves group[%d]", socket, mDeviceSocket);
            delete target;
            break;
        }
    }

    // Do not start network thread if there is only one stream.
    if (!mChain->mNext || !mNetworkThread->start()) {
        return false;
    }
    return true;
}

bool AudioGroup::networkLoop()
{
    int tick = elapsedRealtime();
    int deadline = tick + 10;
    int count = 0;

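    // Encode every stream whose tick is due, and track the earliest upcoming
    // tick; the gap to it (clamped to 1..10ms) becomes the epoll_wait()
    // timeout below.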
    for (AudioStream *stream = mChain; stream; stream = stream->mNext) {
        if (!stream->mTick || tick - stream->mTick >= 0) {
            stream->encode(tick, mChain);
        }
        if (deadline - stream->mTick > 0) {
            deadline = stream->mTick;
        }
        ++count;
    }

    if (mDtmfEvent != -1) {
        int event = mDtmfEvent;
        for (AudioStream *stream = mChain; stream; stream = stream->mNext) {
            stream->sendDtmf(event);
        }
        mDtmfEvent = -1;
    }

    deadline -= tick;
    if (deadline < 1) {
        deadline = 1;
    }

    epoll_event events[count];
    count = epoll_wait(mEventQueue, events, count, deadline);
    if (count == -1) {
        LOGE("epoll_wait: %s", strerror(errno));
        return false;
    }
    for (int i = 0; i < count; ++i) {
        ((AudioStream *)events[i].data.ptr)->decode(tick);
    }

    return true;
}

bool AudioGroup::deviceLoop()
{
    int16_t output[mSampleCount];

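    // Wait up to the socket timeout for mixed data from the network thread; if
    // nothing arrives, write silence so the AudioTrack does not starve.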
    if (recv(mDeviceSocket, output, sizeof(output), 0) <= 0) {
        memset(output, 0, sizeof(output));
    }
    if (mTrack.write(output, sizeof(output)) != (int)sizeof(output)) {
        LOGE("cannot write to AudioTrack");
        return false;
    }

    if (mMode != MUTED) {
        uint32_t frameCount = mRecord.frameCount();
        AudioRecord::Buffer input;
        input.frameCount = frameCount;

        if (mRecord.obtainBuffer(&input, -1) != NO_ERROR) {
            LOGE("cannot read from AudioRecord");
            return false;
        }

        if (input.frameCount < (uint32_t)mSampleCount) {
            input.frameCount = 0;
        } else {
            if (mMode == NORMAL) {
                send(mDeviceSocket, input.i8, sizeof(output), MSG_DONTWAIT);
            } else {
                // TODO: Echo canceller runs here.
                send(mDeviceSocket, input.i8, sizeof(output), MSG_DONTWAIT);
            }
            if (input.frameCount < frameCount) {
                input.frameCount = mSampleCount;
            }
        }

        mRecord.releaseBuffer(&input);
    }

    return true;
}

//------------------------------------------------------------------------------

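// JNI glue. The Java AudioGroup object caches the native AudioGroup pointer in
// its mNative field and the requested mode in mMode; add() lazily creates the
// native group when the first stream is added.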
static jfieldID gNative;
static jfieldID gMode;

void add(JNIEnv *env, jobject thiz, jint mode,
    jint socket, jstring jRemoteAddress, jint remotePort,
    jstring jCodecName, jint sampleRate, jint sampleCount,
    jint codecType, jint dtmfType)
{
    const char *codecName = NULL;
    AudioStream *stream = NULL;
    AudioGroup *group = NULL;

    // Sanity check.
    sockaddr_storage remote;
    if (parse(env, jRemoteAddress, remotePort, &remote) < 0) {
        // Exception already thrown.
        goto error;
    }
    if (sampleRate < 0 || sampleCount < 0 || codecType < 0 || codecType > 127) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
        goto error;
    }
    if (!jCodecName) {
        jniThrowNullPointerException(env, "codecName");
        goto error;
    }
    codecName = env->GetStringUTFChars(jCodecName, NULL);
    if (!codecName) {
        // Exception already thrown.
        goto error;
    }

    // Create audio stream.
    stream = new AudioStream;
    if (!stream->set(mode, socket, &remote, codecName, sampleRate, sampleCount,
        codecType, dtmfType)) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot initialize audio stream");
        env->ReleaseStringUTFChars(jCodecName, codecName);
        goto error;
    }
    env->ReleaseStringUTFChars(jCodecName, codecName);
    socket = -1;

    // Create audio group.
    group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (!group) {
        int mode = env->GetIntField(thiz, gMode);
        group = new AudioGroup;
        if (!group->set(8000, 256) || !group->setMode(mode)) {
            jniThrowException(env, "java/lang/IllegalStateException",
                "cannot initialize audio group");
            goto error;
        }
    }

    // Add audio stream into audio group.
    if (!group->add(stream)) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot add audio stream");
        goto error;
    }

    // Succeed.
    env->SetIntField(thiz, gNative, (int)group);
    return;

error:
    delete group;
    delete stream;
    close(socket);
    env->SetIntField(thiz, gNative, NULL);
}

void remove(JNIEnv *env, jobject thiz, jint socket)
{
    AudioGroup *group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (group) {
        if (socket == -1 || !group->remove(socket)) {
            delete group;
            env->SetIntField(thiz, gNative, NULL);
        }
    }
}

void setMode(JNIEnv *env, jobject thiz, jint mode)
{
    AudioGroup *group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (group && !group->setMode(mode)) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
        return;
    }
    env->SetIntField(thiz, gMode, mode);
}

void sendDtmf(JNIEnv *env, jobject thiz, jint event)
{
    AudioGroup *group = (AudioGroup *)env->GetIntField(thiz, gNative);
    if (group && !group->sendDtmf(event)) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
    }
}

JNINativeMethod gMethods[] = {
    {"add", "(IILjava/lang/String;ILjava/lang/String;IIII)V", (void *)add},
    {"remove", "(I)V", (void *)remove},
    {"setMode", "(I)V", (void *)setMode},
    {"sendDtmf", "(I)V", (void *)sendDtmf},
};

} // namespace

int registerAudioGroup(JNIEnv *env)
{
    gRandom = open("/dev/urandom", O_RDONLY);
    if (gRandom == -1) {
        LOGE("urandom: %s", strerror(errno));
        return -1;
    }

    jclass clazz;
    if ((clazz = env->FindClass("android/net/rtp/AudioGroup")) == NULL ||
        (gNative = env->GetFieldID(clazz, "mNative", "I")) == NULL ||
        (gMode = env->GetFieldID(clazz, "mMode", "I")) == NULL ||
        env->RegisterNatives(clazz, gMethods, NELEM(gMethods)) < 0) {
        LOGE("JNI registration failed");
        return -1;
    }
    return 0;
}