/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <arpa/inet.h>
#include <netinet/in.h>

// #define LOG_NDEBUG 0
#define LOG_TAG "AudioGroup"
#include <cutils/atomic.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <utils/threads.h>
#include <utils/SystemClock.h>
#include <media/AudioRecord.h>
#include <media/AudioTrack.h>
#include <media/AudioEffect.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio.h>

#include <nativehelper/ScopedUtfChars.h>

#include "jni.h"
#include <nativehelper/JNIHelp.h>

#include "AudioCodec.h"
#include "EchoSuppressor.h"

extern int parse(JNIEnv *env, jstring jAddress, int port, sockaddr_storage *ss);

namespace {

using namespace android;

int gRandom = -1;

// We use a circular array to implement the jitter buffer. The simplest way is
// to do a modulo operation on the index while accessing the array. However,
// modulo can be expensive on some platforms, such as ARM. Thus we round up the
// size of the array to the nearest power of 2 and then use bitwise-and instead
// of modulo. Currently we make it 2048ms long and assume the packet interval is
// 50ms or less. The first 100ms is the place where samples get mixed. The rest
// is the real jitter buffer. For a stream at 8000Hz it takes 32 kilobytes.
// These numbers were chosen by experiment and each of them can be adjusted as
// needed.

// Originally a stream did not send packets when it was receive-only or there
// was nothing to mix. However, this causes problems with certain firewalls and
// proxies. A firewall might remove a port mapping when there is no outgoing
// packet for a period of time, and a proxy might wait for incoming packets from
// both sides before it starts forwarding. To solve these problems, we send out
// a silence packet on the stream every second. It should be good enough to keep
// the stream alive at relatively low cost.

// Other notes:
// + We use elapsedRealtime() to get the time. Since we use 32-bit variables
//   instead of 64-bit ones, comparisons must be done by subtraction.
// + The sampling rate must be a multiple of 1000Hz, and the packet length must
//   be in milliseconds. No floating point.
// + If we cannot get enough CPU, we drop samples and simulate packet loss.
// + Resampling is not done yet, so streams in one group must use the same rate.
//   For the first release only 8000Hz is supported.
#define BUFFER_SIZE     2048
#define HISTORY_SIZE    100
#define MEASURE_BASE    100
#define MEASURE_PERIOD  5000
#define DTMF_PERIOD     200

class AudioStream
{
public:
    AudioStream();
    ~AudioStream();
    bool set(int mode, int socket, sockaddr_storage *remote,
        AudioCodec *codec, int sampleRate, int sampleCount,
        int codecType, int dtmfType);

    void sendDtmf(int event);
    bool mix(int32_t *output, int head, int tail, int sampleRate);
    void encode(int tick, AudioStream *chain);
    void decode(int tick);

private:
    enum {
        NORMAL = 0,
        SEND_ONLY = 1,
        RECEIVE_ONLY = 2,
        LAST_MODE = 2,
    };

    int mMode;
    int mSocket;
    sockaddr_storage mRemote;
    AudioCodec *mCodec;
    uint32_t mCodecMagic;
    uint32_t mDtmfMagic;
    bool mFixRemote;

    int mTick;
    int mSampleRate;
    int mSampleCount;
    int mInterval;
    int mKeepAlive;

    int16_t *mBuffer;
    int mBufferMask;
    int mBufferHead;
    int mBufferTail;
    int mLatencyTimer;
    int mLatencyScore;

    uint16_t mSequence;
    uint32_t mTimestamp;
    uint32_t mSsrc;

    int mDtmfEvent;
    int mDtmfStart;

    AudioStream *mNext;

    friend class AudioGroup;
};

AudioStream::AudioStream()
{
    mSocket = -1;
    mCodec = NULL;
    mBuffer = NULL;
    mNext = NULL;
}

AudioStream::~AudioStream()
{
    close(mSocket);
    delete mCodec;
    delete [] mBuffer;
    ALOGD("stream[%d] is dead", mSocket);
}

bool AudioStream::set(int mode, int socket, sockaddr_storage *remote,
    AudioCodec *codec, int sampleRate, int sampleCount,
    int codecType, int dtmfType)
{
    if (mode < 0 || mode > LAST_MODE) {
        return false;
    }
    mMode = mode;

    mCodecMagic = (0x8000 | codecType) << 16;
    mDtmfMagic = (dtmfType == -1) ? 0 : (0x8000 | dtmfType) << 16;

    mTick = elapsedRealtime();
    mSampleRate = sampleRate / 1000;
    mSampleCount = sampleCount;
    mInterval = mSampleCount / mSampleRate;

    // Allocate jitter buffer.
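    // Round the samples-per-millisecond rate up to a power of two, then scale
    // by BUFFER_SIZE (also a power of two), so indices can wrap around with a
    // bitwise AND of mBufferMask instead of a modulo.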
    for (mBufferMask = 8; mBufferMask < mSampleRate; mBufferMask <<= 1);
    mBufferMask *= BUFFER_SIZE;
    mBuffer = new int16_t[mBufferMask];
    --mBufferMask;
    mBufferHead = 0;
    mBufferTail = 0;
    mLatencyTimer = 0;
    mLatencyScore = 0;

    // Initialize random bits.
    read(gRandom, &mSequence, sizeof(mSequence));
    read(gRandom, &mTimestamp, sizeof(mTimestamp));
    read(gRandom, &mSsrc, sizeof(mSsrc));

    mDtmfEvent = -1;
    mDtmfStart = 0;

    // Only take ownership of these resources on success.
    mSocket = socket;
    if (codec) {
        mRemote = *remote;
        mCodec = codec;

        // We should never get a private address here, but some buggy proxy
        // servers do give us one. To work around this, we replace the address
        // the first time we successfully decode an incoming packet.
        mFixRemote = false;
        if (remote->ss_family == AF_INET) {
            unsigned char *address =
                (unsigned char *)&((sockaddr_in *)remote)->sin_addr;
            if (address[0] == 10 ||
                (address[0] == 172 && (address[1] >> 4) == 1) ||
                (address[0] == 192 && address[1] == 168)) {
                mFixRemote = true;
            }
        }
    }

    ALOGD("stream[%d] is configured as %s %dkHz %dms mode %d", mSocket,
        (codec ? codec->name : "RAW"), mSampleRate, mInterval, mMode);
    return true;
}

void AudioStream::sendDtmf(int event)
{
    if (mDtmfMagic != 0) {
        mDtmfEvent = event << 24;
        mDtmfStart = mTimestamp + mSampleCount;
    }
}

bool AudioStream::mix(int32_t *output, int head, int tail, int sampleRate)
{
    if (mMode == SEND_ONLY) {
        return false;
    }

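    // Clamp [head, tail) to this stream's valid buffer window. The tick values
    // wrap around, so all comparisons are done by subtraction (see the notes at
    // the top of this file).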
    if (head - mBufferHead < 0) {
        head = mBufferHead;
    }
    if (tail - mBufferTail > 0) {
        tail = mBufferTail;
    }
    if (tail - head <= 0) {
        return false;
    }

    head *= mSampleRate;
    tail *= mSampleRate;

    if (sampleRate == mSampleRate) {
        for (int i = head; i - tail < 0; ++i) {
            output[i - head] += mBuffer[i & mBufferMask];
        }
    } else {
        // TODO: implement resampling.
        return false;
    }
    return true;
}

void AudioStream::encode(int tick, AudioStream *chain)
{
    if (tick - mTick >= mInterval) {
        // We just missed the train. Pretend that packets in between are lost.
        int skipped = (tick - mTick) / mInterval;
        mTick += skipped * mInterval;
        mSequence += skipped;
        mTimestamp += skipped * mSampleCount;
        ALOGV("stream[%d] skips %d packets", mSocket, skipped);
    }

    tick = mTick;
    mTick += mInterval;
    ++mSequence;
    mTimestamp += mSampleCount;

    // If there is an ongoing DTMF event, send it now.
    if (mMode != RECEIVE_ONLY && mDtmfEvent != -1) {
        int duration = mTimestamp - mDtmfStart;
        // Make sure duration is reasonable.
        if (duration >= 0 && duration < mSampleRate * DTMF_PERIOD) {
            duration += mSampleCount;
            int32_t buffer[4] = {
                static_cast<int32_t>(htonl(mDtmfMagic | mSequence)),
                static_cast<int32_t>(htonl(mDtmfStart)),
                static_cast<int32_t>(mSsrc),
                static_cast<int32_t>(htonl(mDtmfEvent | duration)),
            };
            if (duration >= mSampleRate * DTMF_PERIOD) {
                buffer[3] |= htonl(1 << 23);
                mDtmfEvent = -1;
            }
            sendto(mSocket, buffer, sizeof(buffer), MSG_DONTWAIT,
                (sockaddr *)&mRemote, sizeof(mRemote));
            return;
        }
        mDtmfEvent = -1;
    }

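    // This buffer doubles as the 32-bit mixing accumulator and, later, as the
    // outgoing RTP packet (3 header words followed by the encoded payload).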
    int32_t buffer[mSampleCount + 3];
    bool data = false;
    if (mMode != RECEIVE_ONLY) {
        // Mix all other streams.
        memset(buffer, 0, sizeof(buffer));
        while (chain) {
            if (chain != this) {
                data |= chain->mix(buffer, tick - mInterval, tick, mSampleRate);
            }
            chain = chain->mNext;
        }
    }

    int16_t samples[mSampleCount];
    if (data) {
        // Saturate into 16 bits.
        for (int i = 0; i < mSampleCount; ++i) {
            int32_t sample = buffer[i];
            if (sample < -32768) {
                sample = -32768;
            }
            if (sample > 32767) {
                sample = 32767;
            }
            samples[i] = sample;
        }
    } else {
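        // Without fresh data, send at most one keep-alive (silence) packet per
        // 1024ms window to keep NAT mappings and proxies happy.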
        if ((mTick ^ mKeepAlive) >> 10 == 0) {
            return;
        }
        mKeepAlive = mTick;
        memset(samples, 0, sizeof(samples));

        if (mMode != RECEIVE_ONLY) {
            ALOGV("stream[%d] no data", mSocket);
        }
    }

    if (!mCodec) {
        // Special case for device stream.
        send(mSocket, samples, sizeof(samples), MSG_DONTWAIT);
        return;
    }

    // Cook the packet and send it out.
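    // buffer[0..2] form the 12-byte RTP header: version + payload type with the
    // sequence number, then the timestamp, then the SSRC (random bytes, so no
    // byte swap is needed).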
    buffer[0] = htonl(mCodecMagic | mSequence);
    buffer[1] = htonl(mTimestamp);
    buffer[2] = mSsrc;
    int length = mCodec->encode(&buffer[3], samples);
    if (length <= 0) {
        ALOGV("stream[%d] encoder error", mSocket);
        return;
    }
    sendto(mSocket, buffer, length + 12, MSG_DONTWAIT, (sockaddr *)&mRemote,
        sizeof(mRemote));
}

void AudioStream::decode(int tick)
{
    char c;
    if (mMode == SEND_ONLY) {
        recv(mSocket, &c, 1, MSG_DONTWAIT);
        return;
    }

    // Make sure mBufferHead and mBufferTail are reasonable.
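    // (Reset both pointers if mBufferHead has drifted more than BUFFER_SIZE
    // away from the current tick in either direction.)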
    if ((unsigned int)(tick + BUFFER_SIZE - mBufferHead) > BUFFER_SIZE * 2) {
        mBufferHead = tick - HISTORY_SIZE;
        mBufferTail = mBufferHead;
    }

    if (tick - mBufferHead > HISTORY_SIZE) {
        // Throw away outdated samples.
        mBufferHead = tick - HISTORY_SIZE;
        if (mBufferTail - mBufferHead < 0) {
            mBufferTail = mBufferHead;
        }
    }

    // Adjust the jitter buffer if the latency stays above the threshold for the
    // whole measurement period.
    int score = mBufferTail - tick - MEASURE_BASE;
    if (mLatencyScore > score || mLatencyScore <= 0) {
        mLatencyScore = score;
        mLatencyTimer = tick;
    } else if (tick - mLatencyTimer >= MEASURE_PERIOD) {
        ALOGV("stream[%d] reduces latency by %dms", mSocket, mLatencyScore);
        mBufferTail -= mLatencyScore;
        mLatencyScore = -1;
    }

    int count = (BUFFER_SIZE - (mBufferTail - mBufferHead)) * mSampleRate;
    if (count < mSampleCount) {
        // Buffer overflow. Drop the packet.
        ALOGV("stream[%d] buffer overflow", mSocket);
        recv(mSocket, &c, 1, MSG_DONTWAIT);
        return;
    }

    // Receive the packet and decode it.
    int16_t samples[count];
    if (!mCodec) {
        // Special case for device stream.
        count = recv(mSocket, samples, sizeof(samples),
            MSG_TRUNC | MSG_DONTWAIT) >> 1;
    } else {
        __attribute__((aligned(4))) uint8_t buffer[2048];
        sockaddr_storage remote;
        socklen_t addrlen = sizeof(remote);

        int bufferSize = sizeof(buffer);
        int length = recvfrom(mSocket, buffer, bufferSize,
            MSG_TRUNC | MSG_DONTWAIT, (sockaddr *)&remote, &addrlen);

        // Do we need to check SSRC, sequence, and timestamp? They are not
        // reliable, but at least they could be used to identify duplicates.
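        // The mask 0xC07F0000 selects the RTP version and payload type bits;
        // both must match what we expect for this stream.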
        if (length < 12 || length > bufferSize ||
            (ntohl(*(uint32_t *)buffer) & 0xC07F0000) != mCodecMagic) {
            ALOGV("stream[%d] malformed packet", mSocket);
            return;
        }
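        // Skip the fixed 12-byte header plus any CSRC entries (the low 4 bits
        // of the first byte give the CSRC count), then the header extension if
        // the X bit (0x10) is set. The P bit (0x20) marks padding at the end.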
        int offset = 12 + ((buffer[0] & 0x0F) << 2);
        if (offset+2 >= bufferSize) {
            ALOGV("invalid buffer offset: %d", offset+2);
            return;
        }
        if ((buffer[0] & 0x10) != 0) {
            offset += 4 + (ntohs(*(uint16_t *)&buffer[offset + 2]) << 2);
        }
        if (offset >= bufferSize) {
            ALOGV("invalid buffer offset: %d", offset);
            return;
        }
        if ((buffer[0] & 0x20) != 0) {
            length -= buffer[length - 1];
        }
        length -= offset;
        if (length >= 0) {
            length = mCodec->decode(samples, count, &buffer[offset], length);
        }
        if (length > 0 && mFixRemote) {
            mRemote = remote;
            mFixRemote = false;
        }
        count = length;
    }
    if (count <= 0) {
        ALOGV("stream[%d] decoder error", mSocket);
        return;
    }

    if (tick - mBufferTail > 0) {
        // Buffer underrun. Reset the jitter buffer.
        ALOGV("stream[%d] buffer underrun", mSocket);
        if (mBufferTail - mBufferHead <= 0) {
            mBufferHead = tick + mInterval;
            mBufferTail = mBufferHead;
        } else {
            int tail = (tick + mInterval) * mSampleRate;
            for (int i = mBufferTail * mSampleRate; i - tail < 0; ++i) {
                mBuffer[i & mBufferMask] = 0;
            }
            mBufferTail = tick + mInterval;
        }
    }

    // Append to the jitter buffer.
    int tail = mBufferTail * mSampleRate;
    for (int i = 0; i < count; ++i) {
        mBuffer[tail & mBufferMask] = samples[i];
        ++tail;
    }
    mBufferTail += mInterval;
}

//------------------------------------------------------------------------------

class AudioGroup
{
public:
    explicit AudioGroup(const String16 &opPackageName);
    ~AudioGroup();
    bool set(int sampleRate, int sampleCount);

    bool setMode(int mode);
    bool sendDtmf(int event);
    bool add(AudioStream *stream);
    bool remove(AudioStream *stream);
    bool platformHasAec() { return mPlatformHasAec; }

private:
    enum {
        ON_HOLD = 0,
        MUTED = 1,
        NORMAL = 2,
        ECHO_SUPPRESSION = 3,
        LAST_MODE = 3,
    };

    bool checkPlatformAec();

    AudioStream *mChain;
    int mEventQueue;
    volatile int mDtmfEvent;

    String16 mOpPackageName;

    int mMode;
    int mSampleRate;
    size_t mSampleCount;
    int mDeviceSocket;
    bool mPlatformHasAec;

    class NetworkThread : public Thread
    {
    public:
        explicit NetworkThread(AudioGroup *group) : Thread(false), mGroup(group) {}

        bool start()
        {
            if (run("Network", ANDROID_PRIORITY_AUDIO) != NO_ERROR) {
                ALOGE("cannot start network thread");
                return false;
            }
            return true;
        }

    private:
        AudioGroup *mGroup;
        bool threadLoop();
    };
    sp<NetworkThread> mNetworkThread;

    class DeviceThread : public Thread
    {
    public:
        explicit DeviceThread(AudioGroup *group) : Thread(false), mGroup(group) {}

        bool start()
        {
            if (run("Device", ANDROID_PRIORITY_AUDIO) != NO_ERROR) {
                ALOGE("cannot start device thread");
                return false;
            }
            return true;
        }

    private:
        AudioGroup *mGroup;
        bool threadLoop();
    };
    sp<DeviceThread> mDeviceThread;
};

AudioGroup::AudioGroup(const String16 &opPackageName)
{
    mOpPackageName = opPackageName;
    mMode = ON_HOLD;
    mChain = NULL;
    mEventQueue = -1;
    mDtmfEvent = -1;
    mDeviceSocket = -1;
    mNetworkThread = new NetworkThread(this);
    mDeviceThread = new DeviceThread(this);
    mPlatformHasAec = checkPlatformAec();
}

AudioGroup::~AudioGroup()
{
    mNetworkThread->requestExitAndWait();
    mDeviceThread->requestExitAndWait();
    close(mEventQueue);
    close(mDeviceSocket);
    while (mChain) {
        AudioStream *next = mChain->mNext;
        delete mChain;
        mChain = next;
    }
    ALOGD("group[%d] is dead", mDeviceSocket);
}

bool AudioGroup::set(int sampleRate, int sampleCount)
{
    mEventQueue = epoll_create1(EPOLL_CLOEXEC);
    if (mEventQueue == -1) {
        ALOGE("epoll_create1: %s", strerror(errno));
        return false;
    }

    mSampleRate = sampleRate;
    mSampleCount = sampleCount;

    // Create device socket.
    int pair[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pair)) {
        ALOGE("socketpair: %s", strerror(errno));
        return false;
    }
    mDeviceSocket = pair[0];

    // Create device stream.
    mChain = new AudioStream;
    if (!mChain->set(AudioStream::NORMAL, pair[1], NULL, NULL,
        sampleRate, sampleCount, -1, -1)) {
        close(pair[1]);
        ALOGE("cannot initialize device stream");
        return false;
    }

    // Give device socket a reasonable timeout.
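    // (half of the packet interval, expressed in microseconds)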
    timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = 1000 * sampleCount / sampleRate * 500;
    if (setsockopt(pair[0], SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))) {
        ALOGE("setsockopt: %s", strerror(errno));
        return false;
    }

    // Add device stream into event queue.
    epoll_event event;
    event.events = EPOLLIN;
    event.data.ptr = mChain;
    if (epoll_ctl(mEventQueue, EPOLL_CTL_ADD, pair[1], &event)) {
        ALOGE("epoll_ctl: %s", strerror(errno));
        return false;
    }

    // Anything else?
    ALOGD("stream[%d] joins group[%d]", pair[1], pair[0]);
    return true;
}

bool AudioGroup::setMode(int mode)
{
    if (mode < 0 || mode > LAST_MODE) {
        return false;
    }
    // FIXME: temporary code to overcome echo and mic gain issues on herring and tuna boards.
    // Must be modified/removed when the root cause of the issue is fixed in the hardware or
    // driver
    char value[PROPERTY_VALUE_MAX];
    property_get("ro.product.board", value, "");
    if (mode == NORMAL &&
            (!strcmp(value, "herring") || !strcmp(value, "tuna"))) {
        mode = ECHO_SUPPRESSION;
    }
    if (mMode == mode) {
        return true;
    }

    mDeviceThread->requestExitAndWait();
    ALOGD("group[%d] switches from mode %d to %d", mDeviceSocket, mMode, mode);
    mMode = mode;
    return (mode == ON_HOLD) || mDeviceThread->start();
}

bool AudioGroup::sendDtmf(int event)
{
    if (event < 0 || event > 15) {
        return false;
    }

    // DTMF is rarely used, so we try to make it as lightweight as possible.
    // Using volatile might be dodgy, but using a pipe or pthread primitives
    // or stop-set-restart threads seems too heavy. Will investigate later.
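    // Wait up to two seconds (20 x 100ms) for any previous event to be picked
    // up by the network thread.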
    timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 100000000;
    for (int i = 0; mDtmfEvent != -1 && i < 20; ++i) {
        nanosleep(&ts, NULL);
    }
    if (mDtmfEvent != -1) {
        return false;
    }
    mDtmfEvent = event;
    nanosleep(&ts, NULL);
    return true;
}

bool AudioGroup::add(AudioStream *stream)
{
    mNetworkThread->requestExitAndWait();

    epoll_event event;
    event.events = EPOLLIN;
    event.data.ptr = stream;
    if (epoll_ctl(mEventQueue, EPOLL_CTL_ADD, stream->mSocket, &event)) {
        ALOGE("epoll_ctl: %s", strerror(errno));
        return false;
    }

    stream->mNext = mChain->mNext;
    mChain->mNext = stream;
    if (!mNetworkThread->start()) {
        // Only take ownership of the stream on success.
        mChain->mNext = stream->mNext;
        return false;
    }

    ALOGD("stream[%d] joins group[%d]", stream->mSocket, mDeviceSocket);
    return true;
}

bool AudioGroup::remove(AudioStream *stream)
{
    mNetworkThread->requestExitAndWait();

    for (AudioStream *chain = mChain; chain->mNext; chain = chain->mNext) {
        if (chain->mNext == stream) {
            if (epoll_ctl(mEventQueue, EPOLL_CTL_DEL, stream->mSocket, NULL)) {
                ALOGE("epoll_ctl: %s", strerror(errno));
                return false;
            }
            chain->mNext = stream->mNext;
            ALOGD("stream[%d] leaves group[%d]", stream->mSocket, mDeviceSocket);
            delete stream;
            break;
        }
    }

    // Do not start network thread if there is only one stream.
    if (!mChain->mNext || !mNetworkThread->start()) {
        return false;
    }
    return true;
}

bool AudioGroup::NetworkThread::threadLoop()
{
    AudioStream *chain = mGroup->mChain;
    int tick = elapsedRealtime();
    int deadline = tick + 10;
    int count = 0;

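    // Encode and send on every stream whose tick is due, and track the earliest
    // upcoming tick (capped at 10ms ahead) to use as the epoll_wait() timeout.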
    for (AudioStream *stream = chain; stream; stream = stream->mNext) {
        if (tick - stream->mTick >= 0) {
            stream->encode(tick, chain);
        }
        if (deadline - stream->mTick > 0) {
            deadline = stream->mTick;
        }
        ++count;
    }

    int event = mGroup->mDtmfEvent;
    if (event != -1) {
        for (AudioStream *stream = chain; stream; stream = stream->mNext) {
            stream->sendDtmf(event);
        }
        mGroup->mDtmfEvent = -1;
    }

    deadline -= tick;
    if (deadline < 1) {
        deadline = 1;
    }

    epoll_event events[count];
    count = epoll_wait(mGroup->mEventQueue, events, count, deadline);
    if (count == -1) {
        ALOGE("epoll_wait: %s", strerror(errno));
        return false;
    }
    for (int i = 0; i < count; ++i) {
        ((AudioStream *)events[i].data.ptr)->decode(tick);
    }

    return true;
}

bool AudioGroup::checkPlatformAec()
{
    effect_descriptor_t fxDesc;
    uint32_t numFx;

    if (AudioEffect::queryNumberEffects(&numFx) != NO_ERROR) {
        return false;
    }
    for (uint32_t i = 0; i < numFx; i++) {
        if (AudioEffect::queryEffect(i, &fxDesc) != NO_ERROR) {
            continue;
        }
        if (memcmp(&fxDesc.type, FX_IID_AEC, sizeof(effect_uuid_t)) == 0) {
            return true;
        }
    }
    return false;
}

bool AudioGroup::DeviceThread::threadLoop()
{
    int mode = mGroup->mMode;
    int sampleRate = mGroup->mSampleRate;
    size_t sampleCount = mGroup->mSampleCount;
    int deviceSocket = mGroup->mDeviceSocket;

    // Find out the frame count for AudioTrack and AudioRecord.
    size_t output = 0;
    size_t input = 0;
    if (AudioTrack::getMinFrameCount(&output, AUDIO_STREAM_VOICE_CALL,
        sampleRate) != NO_ERROR || output <= 0 ||
        AudioRecord::getMinFrameCount(&input, sampleRate,
        AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO) != NO_ERROR || input <= 0) {
        ALOGE("cannot compute frame count");
        return false;
    }
    ALOGD("reported frame count: output %zu, input %zu", output, input);

    if (output < sampleCount * 2) {
        output = sampleCount * 2;
    }
    if (input < sampleCount * 2) {
        input = sampleCount * 2;
    }
    ALOGD("adjusted frame count: output %zu, input %zu", output, input);

    // Initialize AudioTrack and AudioRecord.
    sp<AudioTrack> track = new AudioTrack();
    sp<AudioRecord> record = new AudioRecord(mGroup->mOpPackageName);
    if (track->set(AUDIO_STREAM_VOICE_CALL, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
                AUDIO_CHANNEL_OUT_MONO, output, AUDIO_OUTPUT_FLAG_NONE, NULL /*callback_t*/,
                NULL /*user*/, 0 /*notificationFrames*/, 0 /*sharedBuffer*/,
                false /*threadCanCallJava*/, AUDIO_SESSION_ALLOCATE,
                AudioTrack::TRANSFER_OBTAIN) != NO_ERROR ||
            record->set(AUDIO_SOURCE_VOICE_COMMUNICATION, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
                AUDIO_CHANNEL_IN_MONO, input, NULL /*callback_t*/, NULL /*user*/,
                0 /*notificationFrames*/, false /*threadCanCallJava*/, AUDIO_SESSION_ALLOCATE,
                AudioRecord::TRANSFER_OBTAIN) != NO_ERROR) {
        ALOGE("cannot initialize audio device");
        return false;
    }
    ALOGD("latency: output %d, input %d", track->latency(), record->latency());

    // Give device socket a reasonable buffer size.
    setsockopt(deviceSocket, SOL_SOCKET, SO_RCVBUF, &output, sizeof(output));
    setsockopt(deviceSocket, SOL_SOCKET, SO_SNDBUF, &output, sizeof(output));

    // Drain device socket.
    char c;
    while (recv(deviceSocket, &c, 1, MSG_DONTWAIT) == 1);

    // Check if the platform supports echo cancellation; if it does, do not
    // activate the local echo suppressor.
    EchoSuppressor *echo = NULL;
    sp<AudioEffect> aec;
    if (mode == ECHO_SUPPRESSION) {
        if (mGroup->platformHasAec()) {
            aec = new AudioEffect(FX_IID_AEC,
                                    mGroup->mOpPackageName,
                                    NULL,
                                    0,
                                    0,
                                    0,
                                    record->getSessionId(),
                                    AUDIO_IO_HANDLE_NONE); // record sessionId is sufficient.
            status_t status = aec->initCheck();
            if (status == NO_ERROR || status == ALREADY_EXISTS) {
                aec->setEnabled(true);
            } else {
                aec.clear();
            }
        }
        // Create local echo suppressor if platform AEC cannot be used.
        if (aec == 0) {
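            // The second argument is the combined output + input latency,
            // converted from milliseconds to samples.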
            echo = new EchoSuppressor(sampleCount,
                (track->latency() + record->latency()) * sampleRate / 1000);
        }
    }
    // Start AudioRecord before AudioTrack. This prevents AudioTrack from being
    // disabled due to buffer underrun while waiting for AudioRecord.
    if (mode != MUTED) {
        record->start();
        int16_t one;
        // FIXME this may not work any more
        record->read(&one, sizeof(one));
    }
    track->start();

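    // Main loop: pull mixed downlink audio from the device socket and play it
    // out, capture microphone audio, optionally run the echo suppressor, and
    // push the captured audio back to the network thread for encoding.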
    while (!exitPending()) {
        int16_t output[sampleCount];
        if (recv(deviceSocket, output, sizeof(output), 0) <= 0) {
            memset(output, 0, sizeof(output));
        }

        int16_t input[sampleCount];
        int toWrite = sampleCount;
        int toRead = (mode == MUTED) ? 0 : sampleCount;
        int chances = 100;

        while (--chances > 0 && (toWrite > 0 || toRead > 0)) {
            if (toWrite > 0) {
                AudioTrack::Buffer buffer;
                buffer.frameCount = toWrite;

                status_t status = track->obtainBuffer(&buffer, 1);
                if (status == NO_ERROR) {
                    int offset = sampleCount - toWrite;
                    memcpy(buffer.i8, &output[offset], buffer.size);
                    toWrite -= buffer.frameCount;
                    track->releaseBuffer(&buffer);
                } else if (status != TIMED_OUT && status != WOULD_BLOCK) {
                    ALOGE("cannot write to AudioTrack");
                    goto exit;
                }
            }

            if (toRead > 0) {
                AudioRecord::Buffer buffer;
                buffer.frameCount = toRead;

                status_t status = record->obtainBuffer(&buffer, 1);
                if (status == NO_ERROR) {
                    int offset = sampleCount - toRead;
                    memcpy(&input[offset], buffer.i8, buffer.size);
                    toRead -= buffer.frameCount;
                    record->releaseBuffer(&buffer);
                } else if (status != TIMED_OUT && status != WOULD_BLOCK) {
                    ALOGE("cannot read from AudioRecord");
                    goto exit;
                }
            }
        }

        if (chances <= 0) {
            ALOGW("device loop timeout");
            while (recv(deviceSocket, &c, 1, MSG_DONTWAIT) == 1);
        }

        if (mode != MUTED) {
            if (echo != NULL) {
                ALOGV("echo->run()");
                echo->run(output, input);
            }
            send(deviceSocket, input, sizeof(input), MSG_DONTWAIT);
        }
    }

exit:
    delete echo;
    return true;
}

//------------------------------------------------------------------------------

static jfieldID gNative;
static jfieldID gMode;

jlong add(JNIEnv *env, jobject thiz, jint mode,
    jint socket, jstring jRemoteAddress, jint remotePort,
    jstring jCodecSpec, jint dtmfType, jstring opPackageNameStr)
{
    AudioCodec *codec = NULL;
    AudioStream *stream = NULL;
    AudioGroup *group = NULL;

    // Sanity check.
    sockaddr_storage remote;
    if (parse(env, jRemoteAddress, remotePort, &remote) < 0) {
        // Exception already thrown.
        return 0;
    }
    if (!jCodecSpec) {
        jniThrowNullPointerException(env, "codecSpec");
        return 0;
    }
    const char *codecSpec = env->GetStringUTFChars(jCodecSpec, NULL);
    if (!codecSpec) {
        // Exception already thrown.
        return 0;
    }
    socket = dup(socket);
    if (socket == -1) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot get stream socket");
        return 0;
    }

    ScopedUtfChars opPackageName(env, opPackageNameStr);

    // Create audio codec.
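    // The codec spec is parsed as "<payloadType> <codecName>/<sampleRate> ...".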
    int codecType = -1;
    char codecName[16];
    int sampleRate = -1;
    sscanf(codecSpec, "%d %15[^/]%*c%d", &codecType, codecName, &sampleRate);
    codec = newAudioCodec(codecName);
    int sampleCount = (codec ? codec->set(sampleRate, codecSpec) : -1);
    env->ReleaseStringUTFChars(jCodecSpec, codecSpec);
    if (sampleCount <= 0) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot initialize audio codec");
        goto error;
    }

    // Create audio stream.
    stream = new AudioStream;
    if (!stream->set(mode, socket, &remote, codec, sampleRate, sampleCount,
        codecType, dtmfType)) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot initialize audio stream");
        goto error;
    }
    socket = -1;
    codec = NULL;

    // Create audio group.
    group = (AudioGroup *)env->GetLongField(thiz, gNative);
    if (!group) {
        int mode = env->GetIntField(thiz, gMode);
        group = new AudioGroup(String16(opPackageName.c_str()));
        if (!group->set(8000, 256) || !group->setMode(mode)) {
            jniThrowException(env, "java/lang/IllegalStateException",
                "cannot initialize audio group");
            goto error;
        }
    }

    // Add audio stream into audio group.
    if (!group->add(stream)) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "cannot add audio stream");
        goto error;
    }

    // Succeed.
    env->SetLongField(thiz, gNative, (jlong)group);
    return (jlong)stream;

error:
    delete group;
    delete stream;
    delete codec;
    close(socket);
    env->SetLongField(thiz, gNative, 0);
    return 0;
}

void remove(JNIEnv *env, jobject thiz, jlong stream)
{
    AudioGroup *group = (AudioGroup *)env->GetLongField(thiz, gNative);
    if (group) {
        if (!stream || !group->remove((AudioStream *)stream)) {
            delete group;
            env->SetLongField(thiz, gNative, 0);
        }
    }
}

void setMode(JNIEnv *env, jobject thiz, jint mode)
{
    AudioGroup *group = (AudioGroup *)env->GetLongField(thiz, gNative);
    if (group && !group->setMode(mode)) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
    }
}

void sendDtmf(JNIEnv *env, jobject thiz, jint event)
{
    AudioGroup *group = (AudioGroup *)env->GetLongField(thiz, gNative);
    if (group && !group->sendDtmf(event)) {
        jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
    }
}

JNINativeMethod gMethods[] = {
    {"nativeAdd", "(IILjava/lang/String;ILjava/lang/String;ILjava/lang/String;)J", (void *)add},
    {"nativeRemove", "(J)V", (void *)remove},
    {"nativeSetMode", "(I)V", (void *)setMode},
    {"nativeSendDtmf", "(I)V", (void *)sendDtmf},
};

} // namespace

int registerAudioGroup(JNIEnv *env)
{
    gRandom = open("/dev/urandom", O_RDONLY);
    if (gRandom == -1) {
        ALOGE("urandom: %s", strerror(errno));
        return -1;
    }

    jclass clazz;
    if ((clazz = env->FindClass("android/net/rtp/AudioGroup")) == NULL ||
        (gNative = env->GetFieldID(clazz, "mNative", "J")) == NULL ||
        (gMode = env->GetFieldID(clazz, "mMode", "I")) == NULL ||
        env->RegisterNatives(clazz, gMethods, NELEM(gMethods)) < 0) {
        ALOGE("JNI registration failed");
        return -1;
    }
    return 0;
}