summaryrefslogtreecommitdiff
path: root/src/modules/audio_processing/main/source
diff options
context:
space:
mode:
Diffstat (limited to 'src/modules/audio_processing/main/source')
-rw-r--r--src/modules/audio_processing/main/source/apm.gypi136
-rw-r--r--src/modules/audio_processing/main/source/audio_buffer.cc278
-rw-r--r--src/modules/audio_processing/main/source/audio_buffer.h68
-rw-r--r--src/modules/audio_processing/main/source/audio_processing_impl.cc651
-rw-r--r--src/modules/audio_processing/main/source/audio_processing_impl.h117
-rw-r--r--src/modules/audio_processing/main/source/debug.proto37
-rw-r--r--src/modules/audio_processing/main/source/echo_cancellation_impl.cc348
-rw-r--r--src/modules/audio_processing/main/source/echo_cancellation_impl.h72
-rw-r--r--src/modules/audio_processing/main/source/echo_control_mobile_impl.cc309
-rw-r--r--src/modules/audio_processing/main/source/echo_control_mobile_impl.h62
-rw-r--r--src/modules/audio_processing/main/source/gain_control_impl.cc391
-rw-r--r--src/modules/audio_processing/main/source/gain_control_impl.h80
-rw-r--r--src/modules/audio_processing/main/source/high_pass_filter_impl.cc180
-rw-r--r--src/modules/audio_processing/main/source/high_pass_filter_impl.h51
-rw-r--r--src/modules/audio_processing/main/source/level_estimator_impl.cc182
-rw-r--r--src/modules/audio_processing/main/source/level_estimator_impl.h53
-rw-r--r--src/modules/audio_processing/main/source/noise_suppression_impl.cc179
-rw-r--r--src/modules/audio_processing/main/source/noise_suppression_impl.h54
-rw-r--r--src/modules/audio_processing/main/source/processing_component.cc112
-rw-r--r--src/modules/audio_processing/main/source/processing_component.h63
-rw-r--r--src/modules/audio_processing/main/source/splitting_filter.cc33
-rw-r--r--src/modules/audio_processing/main/source/splitting_filter.h63
-rw-r--r--src/modules/audio_processing/main/source/voice_detection_impl.cc202
-rw-r--r--src/modules/audio_processing/main/source/voice_detection_impl.h63
24 files changed, 3784 insertions, 0 deletions
diff --git a/src/modules/audio_processing/main/source/apm.gypi b/src/modules/audio_processing/main/source/apm.gypi
new file mode 100644
index 0000000..5cac671
--- /dev/null
+++ b/src/modules/audio_processing/main/source/apm.gypi
@@ -0,0 +1,136 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'variables': {
+ 'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out',
+ 'protoc_out_relpath': 'webrtc/audio_processing',
+ },
+ 'targets': [
+ {
+ 'target_name': 'audio_processing',
+ 'type': '<(library)',
+ 'conditions': [
+ ['prefer_fixed_point==1', {
+ 'dependencies': ['ns_fix'],
+ 'defines': ['WEBRTC_NS_FIXED'],
+ }, {
+ 'dependencies': ['ns'],
+ 'defines': ['WEBRTC_NS_FLOAT'],
+ }],
+ ['build_with_chromium==1', {
+ 'dependencies': [
+ '../../protobuf/protobuf.gyp:protobuf_lite',
+ ],
+ }, {
+ 'dependencies': [
+ '../../third_party/protobuf/protobuf.gyp:protobuf_lite',
+ ],
+ }],
+ ],
+ 'dependencies': [
+ 'debug_proto',
+ 'aec',
+ 'aecm',
+ 'agc',
+ '<(webrtc_root)/common_audio/common_audio.gyp:spl',
+ '<(webrtc_root)/common_audio/common_audio.gyp:vad',
+ '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ ],
+ 'include_dirs': [
+ '../interface',
+ '../../../interface',
+ '<(protoc_out_dir)',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../interface',
+ '../../../interface',
+ ],
+ },
+ 'sources': [
+ '../interface/audio_processing.h',
+ 'audio_buffer.cc',
+ 'audio_buffer.h',
+ 'audio_processing_impl.cc',
+ 'audio_processing_impl.h',
+ 'echo_cancellation_impl.cc',
+ 'echo_cancellation_impl.h',
+ 'echo_control_mobile_impl.cc',
+ 'echo_control_mobile_impl.h',
+ 'gain_control_impl.cc',
+ 'gain_control_impl.h',
+ 'high_pass_filter_impl.cc',
+ 'high_pass_filter_impl.h',
+ 'level_estimator_impl.cc',
+ 'level_estimator_impl.h',
+ 'noise_suppression_impl.cc',
+ 'noise_suppression_impl.h',
+ 'splitting_filter.cc',
+ 'splitting_filter.h',
+ 'processing_component.cc',
+ 'processing_component.h',
+ 'voice_detection_impl.cc',
+ 'voice_detection_impl.h',
+ '<(protoc_out_dir)/<(protoc_out_relpath)/debug.pb.cc',
+ '<(protoc_out_dir)/<(protoc_out_relpath)/debug.pb.h',
+ ],
+ },
+ {
+ # Protobuf compiler / generate rule for audio_processing
+ 'target_name': 'debug_proto',
+ 'type': 'none',
+ 'variables': {
+ 'proto_relpath': 'audio_processing/main/source/',
+ },
+ 'sources': [
+ '<(proto_relpath)/debug.proto',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'genproto',
+ 'extension': 'proto',
+ 'inputs': [
+ '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
+ ],
+ 'outputs': [
+ '<(protoc_out_dir)/<(protoc_out_relpath)/<(RULE_INPUT_ROOT).pb.cc',
+ '<(protoc_out_dir)/<(protoc_out_relpath)/<(RULE_INPUT_ROOT).pb.h',
+ ],
+ 'action': [
+ '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
+ '--proto_path=<(proto_relpath)',
+ '<(proto_relpath)/<(RULE_INPUT_NAME)',
+ '--cpp_out=<(protoc_out_dir)/<(protoc_out_relpath)',
+ ],
+ 'message': 'Generating C++ code from <(RULE_INPUT_PATH)',
+ },
+ ],
+ 'conditions': [
+ ['build_with_chromium==1', {
+ 'dependencies': [
+ '../../protobuf/protobuf.gyp:protoc#host',
+ ],
+ }, {
+ 'dependencies': [
+ '../../third_party/protobuf/protobuf.gyp:protoc#host',
+ ],
+ }],
+ ],
+ # This target exports a hard dependency because it generates header
+ # files.
+ 'hard_dependency': 1,
+ },
+ ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/main/source/audio_buffer.cc b/src/modules/audio_processing/main/source/audio_buffer.cc
new file mode 100644
index 0000000..6b20fce
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_buffer.cc
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_buffer.h"
+
+#include "module_common_types.h"
+
+namespace webrtc {
+namespace {
+
+enum {
+ kSamplesPer8kHzChannel = 80,
+ kSamplesPer16kHzChannel = 160,
+ kSamplesPer32kHzChannel = 320
+};
+
+void StereoToMono(const WebRtc_Word16* left, const WebRtc_Word16* right,
+ WebRtc_Word16* out, int samples_per_channel) {
+ WebRtc_Word32 data_int32 = 0;
+ for (int i = 0; i < samples_per_channel; i++) {
+ data_int32 = (left[i] + right[i]) >> 1;
+ if (data_int32 > 32767) {
+ data_int32 = 32767;
+ } else if (data_int32 < -32768) {
+ data_int32 = -32768;
+ }
+
+ out[i] = static_cast<WebRtc_Word16>(data_int32);
+ }
+}
+} // namespace
+
+struct AudioChannel {
+ AudioChannel() {
+ memset(data, 0, sizeof(data));
+ }
+
+ WebRtc_Word16 data[kSamplesPer32kHzChannel];
+};
+
+struct SplitAudioChannel {
+ SplitAudioChannel() {
+ memset(low_pass_data, 0, sizeof(low_pass_data));
+ memset(high_pass_data, 0, sizeof(high_pass_data));
+ memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
+ memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
+ memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
+ memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
+ }
+
+ WebRtc_Word16 low_pass_data[kSamplesPer16kHzChannel];
+ WebRtc_Word16 high_pass_data[kSamplesPer16kHzChannel];
+
+ WebRtc_Word32 analysis_filter_state1[6];
+ WebRtc_Word32 analysis_filter_state2[6];
+ WebRtc_Word32 synthesis_filter_state1[6];
+ WebRtc_Word32 synthesis_filter_state2[6];
+};
+
+// TODO(ajm): check range of input parameters?
+AudioBuffer::AudioBuffer(WebRtc_Word32 max_num_channels,
+ WebRtc_Word32 samples_per_channel)
+ : max_num_channels_(max_num_channels),
+ num_channels_(0),
+ num_mixed_channels_(0),
+ num_mixed_low_pass_channels_(0),
+ samples_per_channel_(samples_per_channel),
+ samples_per_split_channel_(samples_per_channel),
+ reference_copied_(false),
+ data_(NULL),
+ channels_(NULL),
+ split_channels_(NULL),
+ mixed_low_pass_channels_(NULL),
+ low_pass_reference_channels_(NULL) {
+ if (max_num_channels_ > 1) {
+ channels_ = new AudioChannel[max_num_channels_];
+ mixed_low_pass_channels_ = new AudioChannel[max_num_channels_];
+ }
+ low_pass_reference_channels_ = new AudioChannel[max_num_channels_];
+
+ if (samples_per_channel_ == kSamplesPer32kHzChannel) {
+ split_channels_ = new SplitAudioChannel[max_num_channels_];
+ samples_per_split_channel_ = kSamplesPer16kHzChannel;
+ }
+}
+
+AudioBuffer::~AudioBuffer() {
+ if (channels_ != NULL) {
+ delete [] channels_;
+ }
+
+ if (mixed_low_pass_channels_ != NULL) {
+ delete [] mixed_low_pass_channels_;
+ }
+
+ if (low_pass_reference_channels_ != NULL) {
+ delete [] low_pass_reference_channels_;
+ }
+
+ if (split_channels_ != NULL) {
+ delete [] split_channels_;
+ }
+}
+
+WebRtc_Word16* AudioBuffer::data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (data_ != NULL) {
+ return data_;
+ }
+
+ return channels_[channel].data;
+}
+
+WebRtc_Word16* AudioBuffer::low_pass_split_data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (split_channels_ == NULL) {
+ return data(channel);
+ }
+
+ return split_channels_[channel].low_pass_data;
+}
+
+WebRtc_Word16* AudioBuffer::high_pass_split_data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (split_channels_ == NULL) {
+ return NULL;
+ }
+
+ return split_channels_[channel].high_pass_data;
+}
+
+WebRtc_Word16* AudioBuffer::mixed_low_pass_data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
+
+ return mixed_low_pass_channels_[channel].data;
+}
+
+WebRtc_Word16* AudioBuffer::low_pass_reference(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (!reference_copied_) {
+ return NULL;
+ }
+
+ return low_pass_reference_channels_[channel].data;
+}
+
+WebRtc_Word32* AudioBuffer::analysis_filter_state1(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].analysis_filter_state1;
+}
+
+WebRtc_Word32* AudioBuffer::analysis_filter_state2(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].analysis_filter_state2;
+}
+
+WebRtc_Word32* AudioBuffer::synthesis_filter_state1(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].synthesis_filter_state1;
+}
+
+WebRtc_Word32* AudioBuffer::synthesis_filter_state2(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].synthesis_filter_state2;
+}
+
+WebRtc_Word32 AudioBuffer::num_channels() const {
+ return num_channels_;
+}
+
+WebRtc_Word32 AudioBuffer::samples_per_channel() const {
+ return samples_per_channel_;
+}
+
+WebRtc_Word32 AudioBuffer::samples_per_split_channel() const {
+ return samples_per_split_channel_;
+}
+
+// TODO(ajm): Do deinterleaving and mixing in one step?
+void AudioBuffer::DeinterleaveFrom(AudioFrame* audioFrame) {
+ assert(audioFrame->_audioChannel <= max_num_channels_);
+ assert(audioFrame->_payloadDataLengthInSamples == samples_per_channel_);
+
+ num_channels_ = audioFrame->_audioChannel;
+ num_mixed_channels_ = 0;
+ num_mixed_low_pass_channels_ = 0;
+ reference_copied_ = false;
+
+ if (num_channels_ == 1) {
+ // We can get away with a pointer assignment in this case.
+ data_ = audioFrame->_payloadData;
+ return;
+ }
+
+ for (int i = 0; i < num_channels_; i++) {
+ WebRtc_Word16* deinterleaved = channels_[i].data;
+ WebRtc_Word16* interleaved = audioFrame->_payloadData;
+ WebRtc_Word32 interleaved_idx = i;
+ for (int j = 0; j < samples_per_channel_; j++) {
+ deinterleaved[j] = interleaved[interleaved_idx];
+ interleaved_idx += num_channels_;
+ }
+ }
+}
+
+void AudioBuffer::InterleaveTo(AudioFrame* audioFrame) const {
+ assert(audioFrame->_audioChannel == num_channels_);
+ assert(audioFrame->_payloadDataLengthInSamples == samples_per_channel_);
+
+ if (num_channels_ == 1) {
+ if (num_mixed_channels_ == 1) {
+ memcpy(audioFrame->_payloadData,
+ channels_[0].data,
+ sizeof(WebRtc_Word16) * samples_per_channel_);
+ } else {
+ // These should point to the same buffer in this case.
+ assert(data_ == audioFrame->_payloadData);
+ }
+
+ return;
+ }
+
+ for (int i = 0; i < num_channels_; i++) {
+ WebRtc_Word16* deinterleaved = channels_[i].data;
+ WebRtc_Word16* interleaved = audioFrame->_payloadData;
+ WebRtc_Word32 interleaved_idx = i;
+ for (int j = 0; j < samples_per_channel_; j++) {
+ interleaved[interleaved_idx] = deinterleaved[j];
+ interleaved_idx += num_channels_;
+ }
+ }
+}
+
+// TODO(ajm): would be good to support the no-mix case with pointer assignment.
+// TODO(ajm): handle mixing to multiple channels?
+void AudioBuffer::Mix(WebRtc_Word32 num_mixed_channels) {
+ // We currently only support the stereo to mono case.
+ assert(num_channels_ == 2);
+ assert(num_mixed_channels == 1);
+
+ StereoToMono(channels_[0].data,
+ channels_[1].data,
+ channels_[0].data,
+ samples_per_channel_);
+
+ num_channels_ = num_mixed_channels;
+ num_mixed_channels_ = num_mixed_channels;
+}
+
+void AudioBuffer::CopyAndMixLowPass(WebRtc_Word32 num_mixed_channels) {
+ // We currently only support the stereo to mono case.
+ assert(num_channels_ == 2);
+ assert(num_mixed_channels == 1);
+
+ StereoToMono(low_pass_split_data(0),
+ low_pass_split_data(1),
+ mixed_low_pass_channels_[0].data,
+ samples_per_split_channel_);
+
+ num_mixed_low_pass_channels_ = num_mixed_channels;
+}
+
+void AudioBuffer::CopyLowPassToReference() {
+ reference_copied_ = true;
+ for (int i = 0; i < num_channels_; i++) {
+ memcpy(low_pass_reference_channels_[i].data,
+ low_pass_split_data(i),
+ sizeof(WebRtc_Word16) * samples_per_split_channel_);
+ }
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/audio_buffer.h b/src/modules/audio_processing/main/source/audio_buffer.h
new file mode 100644
index 0000000..15f850b
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_buffer.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+
+#include "typedefs.h"
+
+
+namespace webrtc {
+
+struct AudioChannel;
+struct SplitAudioChannel;
+class AudioFrame;
+
+class AudioBuffer {
+ public:
+ AudioBuffer(WebRtc_Word32 max_num_channels, WebRtc_Word32 samples_per_channel);
+ virtual ~AudioBuffer();
+
+ WebRtc_Word32 num_channels() const;
+ WebRtc_Word32 samples_per_channel() const;
+ WebRtc_Word32 samples_per_split_channel() const;
+
+ WebRtc_Word16* data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* low_pass_split_data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* high_pass_split_data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* mixed_low_pass_data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* low_pass_reference(WebRtc_Word32 channel) const;
+
+ WebRtc_Word32* analysis_filter_state1(WebRtc_Word32 channel) const;
+ WebRtc_Word32* analysis_filter_state2(WebRtc_Word32 channel) const;
+ WebRtc_Word32* synthesis_filter_state1(WebRtc_Word32 channel) const;
+ WebRtc_Word32* synthesis_filter_state2(WebRtc_Word32 channel) const;
+
+ void DeinterleaveFrom(AudioFrame* audioFrame);
+ void InterleaveTo(AudioFrame* audioFrame) const;
+ void Mix(WebRtc_Word32 num_mixed_channels);
+ void CopyAndMixLowPass(WebRtc_Word32 num_mixed_channels);
+ void CopyLowPassToReference();
+
+ private:
+ const WebRtc_Word32 max_num_channels_;
+ WebRtc_Word32 num_channels_;
+ WebRtc_Word32 num_mixed_channels_;
+ WebRtc_Word32 num_mixed_low_pass_channels_;
+ const WebRtc_Word32 samples_per_channel_;
+ WebRtc_Word32 samples_per_split_channel_;
+ bool reference_copied_;
+
+ WebRtc_Word16* data_;
+ // TODO(ajm): Prefer to make these vectors if permitted...
+ AudioChannel* channels_;
+ SplitAudioChannel* split_channels_;
+  // TODO(ajm): improve this; we don't need the full 32 kHz space here.
+ AudioChannel* mixed_low_pass_channels_;
+ AudioChannel* low_pass_reference_channels_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
diff --git a/src/modules/audio_processing/main/source/audio_processing_impl.cc b/src/modules/audio_processing/main/source/audio_processing_impl.cc
new file mode 100644
index 0000000..b1464e1
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_processing_impl.cc
@@ -0,0 +1,651 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing_impl.h"
+
+#include <assert.h>
+
+#include "audio_buffer.h"
+#include "critical_section_wrapper.h"
+#include "echo_cancellation_impl.h"
+#include "echo_control_mobile_impl.h"
+#include "file_wrapper.h"
+#include "high_pass_filter_impl.h"
+#include "gain_control_impl.h"
+#include "level_estimator_impl.h"
+#include "module_common_types.h"
+#include "noise_suppression_impl.h"
+#include "processing_component.h"
+#include "splitting_filter.h"
+#include "voice_detection_impl.h"
+#ifdef WEBRTC_ANDROID
+#include "external/webrtc/src/modules/audio_processing/main/source/debug.pb.h"
+#else
+#include "webrtc/audio_processing/debug.pb.h"
+#endif
+
+namespace webrtc {
+AudioProcessing* AudioProcessing::Create(int id) {
+ /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
+ webrtc::kTraceAudioProcessing,
+ id,
+ "AudioProcessing::Create()");*/
+
+ AudioProcessingImpl* apm = new AudioProcessingImpl(id);
+ if (apm->Initialize() != kNoError) {
+ delete apm;
+ apm = NULL;
+ }
+
+ return apm;
+}
+
+void AudioProcessing::Destroy(AudioProcessing* apm) {
+ delete static_cast<AudioProcessingImpl*>(apm);
+}
+
+AudioProcessingImpl::AudioProcessingImpl(int id)
+ : id_(id),
+ echo_cancellation_(NULL),
+ echo_control_mobile_(NULL),
+ gain_control_(NULL),
+ high_pass_filter_(NULL),
+ level_estimator_(NULL),
+ noise_suppression_(NULL),
+ voice_detection_(NULL),
+ debug_file_(FileWrapper::Create()),
+ event_msg_(new audioproc::Event()),
+ crit_(CriticalSectionWrapper::CreateCriticalSection()),
+ render_audio_(NULL),
+ capture_audio_(NULL),
+ sample_rate_hz_(kSampleRate16kHz),
+ split_sample_rate_hz_(kSampleRate16kHz),
+ samples_per_channel_(sample_rate_hz_ / 100),
+ stream_delay_ms_(0),
+ was_stream_delay_set_(false),
+ num_reverse_channels_(1),
+ num_input_channels_(1),
+ num_output_channels_(1) {
+
+ echo_cancellation_ = new EchoCancellationImpl(this);
+ component_list_.push_back(echo_cancellation_);
+
+ echo_control_mobile_ = new EchoControlMobileImpl(this);
+ component_list_.push_back(echo_control_mobile_);
+
+ gain_control_ = new GainControlImpl(this);
+ component_list_.push_back(gain_control_);
+
+ high_pass_filter_ = new HighPassFilterImpl(this);
+ component_list_.push_back(high_pass_filter_);
+
+ level_estimator_ = new LevelEstimatorImpl(this);
+ component_list_.push_back(level_estimator_);
+
+ noise_suppression_ = new NoiseSuppressionImpl(this);
+ component_list_.push_back(noise_suppression_);
+
+ voice_detection_ = new VoiceDetectionImpl(this);
+ component_list_.push_back(voice_detection_);
+}
+
+AudioProcessingImpl::~AudioProcessingImpl() {
+ while (!component_list_.empty()) {
+ ProcessingComponent* component = component_list_.front();
+ component->Destroy();
+ delete component;
+ component_list_.pop_front();
+ }
+
+ if (debug_file_->Open()) {
+ debug_file_->CloseFile();
+ }
+ delete debug_file_;
+ debug_file_ = NULL;
+
+ delete event_msg_;
+ event_msg_ = NULL;
+
+ delete crit_;
+ crit_ = NULL;
+
+ if (render_audio_) {
+ delete render_audio_;
+ render_audio_ = NULL;
+ }
+
+ if (capture_audio_) {
+ delete capture_audio_;
+ capture_audio_ = NULL;
+ }
+}
+
+CriticalSectionWrapper* AudioProcessingImpl::crit() const {
+ return crit_;
+}
+
+int AudioProcessingImpl::split_sample_rate_hz() const {
+ return split_sample_rate_hz_;
+}
+
+int AudioProcessingImpl::Initialize() {
+ CriticalSectionScoped crit_scoped(*crit_);
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::InitializeLocked() {
+ if (render_audio_ != NULL) {
+ delete render_audio_;
+ render_audio_ = NULL;
+ }
+
+ if (capture_audio_ != NULL) {
+ delete capture_audio_;
+ capture_audio_ = NULL;
+ }
+
+ render_audio_ = new AudioBuffer(num_reverse_channels_,
+ samples_per_channel_);
+ capture_audio_ = new AudioBuffer(num_input_channels_,
+ samples_per_channel_);
+
+ was_stream_delay_set_ = false;
+
+ // Initialize all components.
+ std::list<ProcessingComponent*>::iterator it;
+ for (it = component_list_.begin(); it != component_list_.end(); it++) {
+ int err = (*it)->Initialize();
+ if (err != kNoError) {
+ return err;
+ }
+ }
+
+ if (debug_file_->Open()) {
+ int err = WriteInitMessage();
+ if (err != kNoError) {
+ return err;
+ }
+ }
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::set_sample_rate_hz(int rate) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ if (rate != kSampleRate8kHz &&
+ rate != kSampleRate16kHz &&
+ rate != kSampleRate32kHz) {
+ return kBadParameterError;
+ }
+
+ sample_rate_hz_ = rate;
+ samples_per_channel_ = rate / 100;
+
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ split_sample_rate_hz_ = kSampleRate16kHz;
+ } else {
+ split_sample_rate_hz_ = sample_rate_hz_;
+ }
+
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::sample_rate_hz() const {
+ return sample_rate_hz_;
+}
+
+int AudioProcessingImpl::set_num_reverse_channels(int channels) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ // Only stereo supported currently.
+ if (channels > 2 || channels < 1) {
+ return kBadParameterError;
+ }
+
+ num_reverse_channels_ = channels;
+
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::num_reverse_channels() const {
+ return num_reverse_channels_;
+}
+
+int AudioProcessingImpl::set_num_channels(
+ int input_channels,
+ int output_channels) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ if (output_channels > input_channels) {
+ return kBadParameterError;
+ }
+
+ // Only stereo supported currently.
+ if (input_channels > 2 || input_channels < 1) {
+ return kBadParameterError;
+ }
+
+ if (output_channels > 2 || output_channels < 1) {
+ return kBadParameterError;
+ }
+
+ num_input_channels_ = input_channels;
+ num_output_channels_ = output_channels;
+
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::num_input_channels() const {
+ return num_input_channels_;
+}
+
+int AudioProcessingImpl::num_output_channels() const {
+ return num_output_channels_;
+}
+
+int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ int err = kNoError;
+
+ if (frame == NULL) {
+ return kNullPointerError;
+ }
+
+ if (frame->_frequencyInHz != sample_rate_hz_) {
+ return kBadSampleRateError;
+ }
+
+ if (frame->_audioChannel != num_input_channels_) {
+ return kBadNumberChannelsError;
+ }
+
+ if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
+ return kBadDataLengthError;
+ }
+
+ if (debug_file_->Open()) {
+ event_msg_->set_type(audioproc::Event::STREAM);
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t data_size = sizeof(WebRtc_Word16) *
+ frame->_payloadDataLengthInSamples *
+ frame->_audioChannel;
+ msg->set_input_data(frame->_payloadData, data_size);
+ msg->set_delay(stream_delay_ms_);
+ msg->set_drift(echo_cancellation_->stream_drift_samples());
+ msg->set_level(gain_control_->stream_analog_level());
+ }
+
+ capture_audio_->DeinterleaveFrom(frame);
+
+ // TODO(ajm): experiment with mixing and AEC placement.
+ if (num_output_channels_ < num_input_channels_) {
+ capture_audio_->Mix(num_output_channels_);
+
+ frame->_audioChannel = num_output_channels_;
+ }
+
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ for (int i = 0; i < num_input_channels_; i++) {
+ // Split into a low and high band.
+ SplittingFilterAnalysis(capture_audio_->data(i),
+ capture_audio_->low_pass_split_data(i),
+ capture_audio_->high_pass_split_data(i),
+ capture_audio_->analysis_filter_state1(i),
+ capture_audio_->analysis_filter_state2(i));
+ }
+ }
+
+ err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ if (echo_control_mobile_->is_enabled() &&
+ noise_suppression_->is_enabled()) {
+ capture_audio_->CopyLowPassToReference();
+ }
+
+ err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = voice_detection_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = gain_control_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ //err = level_estimator_->ProcessCaptureAudio(capture_audio_);
+ //if (err != kNoError) {
+ // return err;
+ //}
+
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ for (int i = 0; i < num_output_channels_; i++) {
+ // Recombine low and high bands.
+ SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
+ capture_audio_->high_pass_split_data(i),
+ capture_audio_->data(i),
+ capture_audio_->synthesis_filter_state1(i),
+ capture_audio_->synthesis_filter_state2(i));
+ }
+ }
+
+ capture_audio_->InterleaveTo(frame);
+
+ if (debug_file_->Open()) {
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t data_size = sizeof(WebRtc_Word16) *
+ frame->_payloadDataLengthInSamples *
+ frame->_audioChannel;
+ msg->set_output_data(frame->_payloadData, data_size);
+ err = WriteMessageToDebugFile();
+ if (err != kNoError) {
+ return err;
+ }
+ }
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ int err = kNoError;
+
+ if (frame == NULL) {
+ return kNullPointerError;
+ }
+
+ if (frame->_frequencyInHz != sample_rate_hz_) {
+ return kBadSampleRateError;
+ }
+
+ if (frame->_audioChannel != num_reverse_channels_) {
+ return kBadNumberChannelsError;
+ }
+
+ if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
+ return kBadDataLengthError;
+ }
+
+ if (debug_file_->Open()) {
+ event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
+ audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+ const size_t data_size = sizeof(WebRtc_Word16) *
+ frame->_payloadDataLengthInSamples *
+ frame->_audioChannel;
+ msg->set_data(frame->_payloadData, data_size);
+ err = WriteMessageToDebugFile();
+ if (err != kNoError) {
+ return err;
+ }
+ }
+
+ render_audio_->DeinterleaveFrom(frame);
+
+ // TODO(ajm): turn the splitting filter into a component?
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ for (int i = 0; i < num_reverse_channels_; i++) {
+ // Split into low and high band.
+ SplittingFilterAnalysis(render_audio_->data(i),
+ render_audio_->low_pass_split_data(i),
+ render_audio_->high_pass_split_data(i),
+ render_audio_->analysis_filter_state1(i),
+ render_audio_->analysis_filter_state2(i));
+ }
+ }
+
+ // TODO(ajm): warnings possible from components?
+ err = echo_cancellation_->ProcessRenderAudio(render_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = gain_control_->ProcessRenderAudio(render_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ //err = level_estimator_->AnalyzeReverseStream(render_audio_);
+ //if (err != kNoError) {
+ // return err;
+ //}
+
+ was_stream_delay_set_ = false;
+ return err; // TODO(ajm): this is for returning warnings; necessary?
+}
+
+int AudioProcessingImpl::set_stream_delay_ms(int delay) {
+ was_stream_delay_set_ = true;
+ if (delay < 0) {
+ return kBadParameterError;
+ }
+
+ // TODO(ajm): the max is rather arbitrarily chosen; investigate.
+ if (delay > 500) {
+ stream_delay_ms_ = 500;
+ return kBadStreamParameterWarning;
+ }
+
+ stream_delay_ms_ = delay;
+ return kNoError;
+}
+
+int AudioProcessingImpl::stream_delay_ms() const {
+ return stream_delay_ms_;
+}
+
+bool AudioProcessingImpl::was_stream_delay_set() const {
+ return was_stream_delay_set_;
+}
+
+int AudioProcessingImpl::StartDebugRecording(
+ const char filename[AudioProcessing::kMaxFilenameSize]) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);
+
+ if (filename == NULL) {
+ return kNullPointerError;
+ }
+
+ // Stop any ongoing recording.
+ if (debug_file_->Open()) {
+ if (debug_file_->CloseFile() == -1) {
+ return kFileError;
+ }
+ }
+
+ if (debug_file_->OpenFile(filename, false) == -1) {
+ debug_file_->CloseFile();
+ return kFileError;
+ }
+
+ int err = WriteInitMessage();
+ if (err != kNoError) {
+ return err;
+ }
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::StopDebugRecording() {
+ CriticalSectionScoped crit_scoped(*crit_);
+ // We just return if recording hasn't started.
+ if (debug_file_->Open()) {
+ if (debug_file_->CloseFile() == -1) {
+ return kFileError;
+ }
+ }
+
+ return kNoError;
+}
+
+EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
+ return echo_cancellation_;
+}
+
+EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
+ return echo_control_mobile_;
+}
+
+GainControl* AudioProcessingImpl::gain_control() const {
+ return gain_control_;
+}
+
+HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
+ return high_pass_filter_;
+}
+
+LevelEstimator* AudioProcessingImpl::level_estimator() const {
+ return level_estimator_;
+}
+
+NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
+ return noise_suppression_;
+}
+
+VoiceDetection* AudioProcessingImpl::voice_detection() const {
+ return voice_detection_;
+}
+
+WebRtc_Word32 AudioProcessingImpl::Version(WebRtc_Word8* version,
+ WebRtc_UWord32& bytes_remaining, WebRtc_UWord32& position) const {
+ if (version == NULL) {
+ /*WEBRTC_TRACE(webrtc::kTraceError,
+ webrtc::kTraceAudioProcessing,
+ -1,
+ "Null version pointer");*/
+ return kNullPointerError;
+ }
+ memset(&version[position], 0, bytes_remaining);
+
+ char my_version[] = "AudioProcessing 1.0.0";
+ // Includes null termination.
+ WebRtc_UWord32 length = static_cast<WebRtc_UWord32>(strlen(my_version));
+ if (bytes_remaining < length) {
+ /*WEBRTC_TRACE(webrtc::kTraceError,
+ webrtc::kTraceAudioProcessing,
+ -1,
+ "Buffer of insufficient length");*/
+ return kBadParameterError;
+ }
+ memcpy(&version[position], my_version, length);
+ bytes_remaining -= length;
+ position += length;
+
+ std::list<ProcessingComponent*>::const_iterator it;
+ for (it = component_list_.begin(); it != component_list_.end(); it++) {
+ char component_version[256];
+ strcpy(component_version, "\n");
+ int err = (*it)->get_version(&component_version[1],
+ sizeof(component_version) - 1);
+ if (err != kNoError) {
+ return err;
+ }
+ if (strncmp(&component_version[1], "\0", 1) == 0) {
+      // Assume the component version is empty if its first byte is NUL.
+ continue;
+ }
+
+ length = static_cast<WebRtc_UWord32>(strlen(component_version));
+ if (bytes_remaining < length) {
+ /*WEBRTC_TRACE(webrtc::kTraceError,
+ webrtc::kTraceAudioProcessing,
+ -1,
+ "Buffer of insufficient length");*/
+ return kBadParameterError;
+ }
+ memcpy(&version[position], component_version, length);
+ bytes_remaining -= length;
+ position += length;
+ }
+
+ return kNoError;
+}
+
+WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
+ webrtc::kTraceAudioProcessing,
+ id_,
+ "ChangeUniqueId(new id = %d)",
+ id);*/
+ id_ = id;
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::WriteMessageToDebugFile() {
+ int32_t size = event_msg_->ByteSize();
+ if (size <= 0) {
+ return kUnspecifiedError;
+ }
+#if defined(WEBRTC_BIG_ENDIAN)
+ // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
+ // pretty safe in assuming little-endian.
+#endif
+
+ if (!event_msg_->SerializeToString(&event_str_)) {
+ return kUnspecifiedError;
+ }
+
+ // Write message preceded by its size.
+ if (!debug_file_->Write(&size, sizeof(int32_t))) {
+ return kFileError;
+ }
+ if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
+ return kFileError;
+ }
+
+ event_msg_->Clear();
+
+ return 0;
+}
+
+int AudioProcessingImpl::WriteInitMessage() {
+ event_msg_->set_type(audioproc::Event::INIT);
+ audioproc::Init* msg = event_msg_->mutable_init();
+ msg->set_sample_rate(sample_rate_hz_);
+ msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
+ msg->set_num_input_channels(num_input_channels_);
+ msg->set_num_output_channels(num_output_channels_);
+ msg->set_num_reverse_channels(num_reverse_channels_);
+
+ int err = WriteMessageToDebugFile();
+ if (err != kNoError) {
+ return err;
+ }
+
+ return kNoError;
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/audio_processing_impl.h b/src/modules/audio_processing/main/source/audio_processing_impl.h
new file mode 100644
index 0000000..fc35937
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_processing_impl.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
+
+#include <list>
+#include <string>
+
+#include "audio_processing.h"
+
+namespace webrtc {
+namespace audioproc {
+class Event;
+} // audioproc
+class AudioBuffer;
+class CriticalSectionWrapper;
+class EchoCancellationImpl;
+class EchoControlMobileImpl;
+class FileWrapper;
+class GainControlImpl;
+class HighPassFilterImpl;
+class LevelEstimatorImpl;
+class NoiseSuppressionImpl;
+class ProcessingComponent;
+class VoiceDetectionImpl;
+
// Concrete implementation of the AudioProcessing interface. Owns one *Impl
// component per processing feature plus the shared state (audio buffers,
// rates, channel counts, debug-recording machinery) they operate on.
class AudioProcessingImpl : public AudioProcessing {
 public:
  // Sample rates accepted by set_sample_rate_hz().
  enum {
    kSampleRate8kHz = 8000,
    kSampleRate16kHz = 16000,
    kSampleRate32kHz = 32000
  };

  explicit AudioProcessingImpl(int id);
  virtual ~AudioProcessingImpl();

  // Lock shared with the components; guards configuration calls.
  CriticalSectionWrapper* crit() const;

  // Rate of the band the split-band components process.
  int split_sample_rate_hz() const;
  // True once set_stream_delay_ms() has been called for the current frame.
  bool was_stream_delay_set() const;

  // AudioProcessing methods.
  virtual int Initialize();
  virtual int InitializeLocked();
  virtual int set_sample_rate_hz(int rate);
  virtual int sample_rate_hz() const;
  virtual int set_num_channels(int input_channels, int output_channels);
  virtual int num_input_channels() const;
  virtual int num_output_channels() const;
  virtual int set_num_reverse_channels(int channels);
  virtual int num_reverse_channels() const;
  virtual int ProcessStream(AudioFrame* frame);
  virtual int AnalyzeReverseStream(AudioFrame* frame);
  virtual int set_stream_delay_ms(int delay);
  virtual int stream_delay_ms() const;
  virtual int StartDebugRecording(const char filename[kMaxFilenameSize]);
  virtual int StopDebugRecording();
  virtual EchoCancellation* echo_cancellation() const;
  virtual EchoControlMobile* echo_control_mobile() const;
  virtual GainControl* gain_control() const;
  virtual HighPassFilter* high_pass_filter() const;
  virtual LevelEstimator* level_estimator() const;
  virtual NoiseSuppression* noise_suppression() const;
  virtual VoiceDetection* voice_detection() const;

  // Module methods.
  virtual WebRtc_Word32 Version(WebRtc_Word8* version,
                              WebRtc_UWord32& remainingBufferInBytes,
                              WebRtc_UWord32& position) const;
  virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);

 private:
  // Debug-recording helpers; write protobuf records to debug_file_.
  int WriteMessageToDebugFile();
  int WriteInitMessage();

  int id_;

  // One component per feature; also tracked through component_list_.
  EchoCancellationImpl* echo_cancellation_;
  EchoControlMobileImpl* echo_control_mobile_;
  GainControlImpl* gain_control_;
  HighPassFilterImpl* high_pass_filter_;
  LevelEstimatorImpl* level_estimator_;
  NoiseSuppressionImpl* noise_suppression_;
  VoiceDetectionImpl* voice_detection_;

  std::list<ProcessingComponent*> component_list_;

  // Debug recording state (see StartDebugRecording()).
  FileWrapper* debug_file_;
  audioproc::Event* event_msg_; // Protobuf message.
  std::string event_str_; // Memory for protobuf serialization.
  CriticalSectionWrapper* crit_;

  // Working buffers for the reverse (render) and forward (capture) streams.
  AudioBuffer* render_audio_;
  AudioBuffer* capture_audio_;

  int sample_rate_hz_;
  int split_sample_rate_hz_;
  int samples_per_channel_;
  int stream_delay_ms_;
  bool was_stream_delay_set_;

  int num_reverse_channels_;
  int num_input_channels_;
  int num_output_channels_;
};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/debug.proto b/src/modules/audio_processing/main/source/debug.proto
new file mode 100644
index 0000000..4b3a163
--- /dev/null
+++ b/src/modules/audio_processing/main/source/debug.proto
@@ -0,0 +1,37 @@
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package webrtc.audioproc;

// Configuration snapshot written whenever the APM is (re)initialized; the
// fields mirror AudioProcessingImpl's rates and channel counts.
message Init {
  optional int32 sample_rate = 1;
  optional int32 device_sample_rate = 2;
  optional int32 num_input_channels = 3;
  optional int32 num_output_channels = 4;
  optional int32 num_reverse_channels = 5;
}

// One frame of far-end (render) audio.
message ReverseStream {
  // Raw audio frame payload (presumably interleaved PCM — confirm against
  // the writer).
  optional bytes data = 1;
}

// One frame of near-end (capture) audio, plus the per-frame stream
// parameters in effect when it was processed.
message Stream {
  optional bytes input_data = 1;
  optional bytes output_data = 2;
  optional int32 delay = 3;
  optional sint32 drift = 4;
  optional int32 level = 5;
}

// Envelope for a single debug-file record. |type| selects which of the
// optional payloads is populated.
message Event {
  enum Type {
    INIT = 0;
    REVERSE_STREAM = 1;
    STREAM = 2;
  }

  required Type type = 1;

  optional Init init = 2;
  optional ReverseStream reverse_stream = 3;
  optional Stream stream = 4;
}
diff --git a/src/modules/audio_processing/main/source/echo_cancellation_impl.cc b/src/modules/audio_processing/main/source/echo_cancellation_impl.cc
new file mode 100644
index 0000000..886d5f1
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_cancellation_impl.cc
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "echo_cancellation_impl.h"
+
+#include <cassert>
+#include <string.h>
+
+#include "critical_section_wrapper.h"
+#include "echo_cancellation.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
+WebRtc_Word16 MapSetting(EchoCancellation::SuppressionLevel level) {
+ switch (level) {
+ case EchoCancellation::kLowSuppression:
+ return kAecNlpConservative;
+ case EchoCancellation::kModerateSuppression:
+ return kAecNlpModerate;
+ case EchoCancellation::kHighSuppression:
+ return kAecNlpAggressive;
+ default:
+ return -1;
+ }
+}
+
+int MapError(int err) {
+ switch (err) {
+ case AEC_UNSUPPORTED_FUNCTION_ERROR:
+ return AudioProcessing::kUnsupportedFunctionError;
+ break;
+ case AEC_BAD_PARAMETER_ERROR:
+ return AudioProcessing::kBadParameterError;
+ break;
+ case AEC_BAD_PARAMETER_WARNING:
+ return AudioProcessing::kBadStreamParameterWarning;
+ break;
+ default:
+ // AEC_UNSPECIFIED_ERROR
+ // AEC_UNINITIALIZED_ERROR
+ // AEC_NULL_POINTER_ERROR
+ return AudioProcessing::kUnspecifiedError;
+ }
+}
+} // namespace
+
// Defaults: moderate suppression, drift compensation and metrics disabled,
// and a 48 kHz device rate until set_device_sample_rate_hz() is called.
EchoCancellationImpl::EchoCancellationImpl(const AudioProcessingImpl* apm)
  : ProcessingComponent(apm),
    apm_(apm),
    drift_compensation_enabled_(false),
    metrics_enabled_(false),
    suppression_level_(kModerateSuppression),
    device_sample_rate_hz_(48000),
    stream_drift_samples_(0),
    was_stream_drift_set_(false),
    stream_has_echo_(false) {}

// Handle teardown is done by ProcessingComponent.
EchoCancellationImpl::~EchoCancellationImpl() {}
+
// Feeds one frame of far-end (render) audio into every AEC instance.
// No-op while the component is disabled.
int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  // The AEC works on the low-pass split band, at most 160 samples per frame.
  assert(audio->samples_per_split_channel() <= 160);
  assert(audio->num_channels() == apm_->num_reverse_channels());

  int err = apm_->kNoError;

  // The ordering convention must be followed to pass to the correct AEC:
  // one handle per (output channel, reverse channel) pair, iterated in the
  // same order as in ProcessCaptureAudio()/num_handles_required().
  size_t handle_index = 0;
  for (int i = 0; i < apm_->num_output_channels(); i++) {
    for (int j = 0; j < audio->num_channels(); j++) {
      Handle* my_handle = static_cast<Handle*>(handle(handle_index));
      err = WebRtcAec_BufferFarend(
          my_handle,
          audio->low_pass_split_data(j),
          static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));

      if (err != apm_->kNoError) {
        return GetHandleError(my_handle);  // TODO(ajm): warning possible?
      }

      handle_index++;
    }
  }

  return apm_->kNoError;
}
+
// Runs the AEC on one frame of near-end (capture) audio, in place on the
// low- and high-pass split bands. The stream delay — and the drift, when
// drift compensation is enabled — must have been set for this frame.
int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  if (!apm_->was_stream_delay_set()) {
    return apm_->kStreamParameterNotSetError;
  }

  if (drift_compensation_enabled_ && !was_stream_drift_set_) {
    return apm_->kStreamParameterNotSetError;
  }

  assert(audio->samples_per_split_channel() <= 160);
  assert(audio->num_channels() == apm_->num_output_channels());

  int err = apm_->kNoError;

  // The ordering convention must be followed to pass to the correct AEC.
  size_t handle_index = 0;
  stream_has_echo_ = false;
  for (int i = 0; i < audio->num_channels(); i++) {
    for (int j = 0; j < apm_->num_reverse_channels(); j++) {
      Handle* my_handle = handle(handle_index);
      // Input and output buffers are the same: processing is in place.
      err = WebRtcAec_Process(
          my_handle,
          audio->low_pass_split_data(i),
          audio->high_pass_split_data(i),
          audio->low_pass_split_data(i),
          audio->high_pass_split_data(i),
          static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
          apm_->stream_delay_ms(),
          stream_drift_samples_);

      if (err != apm_->kNoError) {
        err = GetHandleError(my_handle);
        // TODO(ajm): Figure out how to return warnings properly.
        if (err != apm_->kBadStreamParameterWarning) {
          return err;
        }
      }

      // stream_has_echo_ is set if ANY instance reports echo.
      WebRtc_Word16 status = 0;
      err = WebRtcAec_get_echo_status(my_handle, &status);
      if (err != apm_->kNoError) {
        return GetHandleError(my_handle);
      }

      if (status == 1) {
        stream_has_echo_ = true;
      }

      handle_index++;
    }
  }

  // Drift is a per-frame parameter; require it to be set again next frame.
  was_stream_drift_set_ = false;
  return apm_->kNoError;
}
+
// Enables/disables the AEC. AEC and AECM are mutually exclusive: enabling
// while AECM is active fails with kBadParameterError.
int EchoCancellationImpl::Enable(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  // Ensure AEC and AECM are not both enabled.
  if (enable && apm_->echo_control_mobile()->is_enabled()) {
    return apm_->kBadParameterError;
  }

  return EnableComponent(enable);
}

bool EchoCancellationImpl::is_enabled() const {
  return is_component_enabled();
}
+
// Sets the NLP suppression level; values MapSetting() cannot translate are
// rejected. The new setting is pushed to the handles via Configure().
int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (MapSetting(level) == -1) {
    return apm_->kBadParameterError;
  }

  suppression_level_ = level;
  return Configure();
}

EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
    const {
  return suppression_level_;
}

// Toggles clock-drift (skew) compensation; applied via Configure().
int EchoCancellationImpl::enable_drift_compensation(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  drift_compensation_enabled_ = enable;
  return Configure();
}

bool EchoCancellationImpl::is_drift_compensation_enabled() const {
  return drift_compensation_enabled_;
}
+
// Sets the audio device (soundcard) sample rate. Valid range is
// [8000, 96000] Hz; a change triggers full reinitialization.
int EchoCancellationImpl::set_device_sample_rate_hz(int rate) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (rate < 8000 || rate > 96000) {
    return apm_->kBadParameterError;
  }

  device_sample_rate_hz_ = rate;
  return Initialize();
}

int EchoCancellationImpl::device_sample_rate_hz() const {
  return device_sample_rate_hz_;
}

// Per-frame parameter, consumed (and the flag cleared) by
// ProcessCaptureAudio(). Note: unlike the configuration setters above,
// this takes no lock — presumably because it runs on the capture path;
// confirm with callers.
int EchoCancellationImpl::set_stream_drift_samples(int drift) {
  was_stream_drift_set_ = true;
  stream_drift_samples_ = drift;
  return apm_->kNoError;
}

int EchoCancellationImpl::stream_drift_samples() const {
  return stream_drift_samples_;
}

// Toggles metrics collection inside the AEC; applied via Configure().
int EchoCancellationImpl::enable_metrics(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  metrics_enabled_ = enable;
  return Configure();
}

bool EchoCancellationImpl::are_metrics_enabled() const {
  return metrics_enabled_;
}
+
// TODO(ajm): we currently just use the metrics from the first AEC. Think more
// about the best way to extend this to multi-channel.
//
// Copies the first handle's AEC metrics (RERL, ERL, ERLE, A-NLP) into the
// caller's struct. Requires the component and metrics to be enabled.
int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (metrics == NULL) {
    return apm_->kNullPointerError;
  }

  if (!is_component_enabled() || !metrics_enabled_) {
    return apm_->kNotEnabledError;
  }

  AecMetrics my_metrics;
  memset(&my_metrics, 0, sizeof(my_metrics));
  memset(metrics, 0, sizeof(Metrics));

  Handle* my_handle = static_cast<Handle*>(handle(0));
  int err = WebRtcAec_GetMetrics(my_handle, &my_metrics);
  if (err != apm_->kNoError) {
    return GetHandleError(my_handle);
  }

  metrics->residual_echo_return_loss.instant = my_metrics.rerl.instant;
  metrics->residual_echo_return_loss.average = my_metrics.rerl.average;
  metrics->residual_echo_return_loss.maximum = my_metrics.rerl.max;
  metrics->residual_echo_return_loss.minimum = my_metrics.rerl.min;

  metrics->echo_return_loss.instant = my_metrics.erl.instant;
  metrics->echo_return_loss.average = my_metrics.erl.average;
  metrics->echo_return_loss.maximum = my_metrics.erl.max;
  metrics->echo_return_loss.minimum = my_metrics.erl.min;

  metrics->echo_return_loss_enhancement.instant = my_metrics.erle.instant;
  metrics->echo_return_loss_enhancement.average = my_metrics.erle.average;
  metrics->echo_return_loss_enhancement.maximum = my_metrics.erle.max;
  metrics->echo_return_loss_enhancement.minimum = my_metrics.erle.min;

  metrics->a_nlp.instant = my_metrics.aNlp.instant;
  metrics->a_nlp.average = my_metrics.aNlp.average;
  metrics->a_nlp.maximum = my_metrics.aNlp.max;
  metrics->a_nlp.minimum = my_metrics.aNlp.min;

  return apm_->kNoError;
}

// Result of the most recent ProcessCaptureAudio() call.
bool EchoCancellationImpl::stream_has_echo() const {
  return stream_has_echo_;
}
+
// Reinitializes the underlying handles (via ProcessingComponent) and clears
// the per-stream drift flag. Does nothing further when disabled.
int EchoCancellationImpl::Initialize() {
  int err = ProcessingComponent::Initialize();
  if (err != apm_->kNoError || !is_component_enabled()) {
    return err;
  }

  was_stream_drift_set_ = false;

  return apm_->kNoError;
}

// Writes the AEC library's version string into |version|.
int EchoCancellationImpl::get_version(char* version,
                                      int version_len_bytes) const {
  if (WebRtcAec_get_version(version, version_len_bytes) != 0) {
    return apm_->kBadParameterError;
  }

  return apm_->kNoError;
}

// ProcessingComponent hook: allocates one opaque WebRtcAec instance, or
// returns NULL on failure.
void* EchoCancellationImpl::CreateHandle() const {
  Handle* handle = NULL;
  if (WebRtcAec_Create(&handle) != apm_->kNoError) {
    handle = NULL;
  } else {
    assert(handle != NULL);
  }

  return handle;
}
+
int EchoCancellationImpl::DestroyHandle(void* handle) const {
  assert(handle != NULL);
  return WebRtcAec_Free(static_cast<Handle*>(handle));
}

// (Re)initializes a handle with the APM rate and the device rate used for
// drift compensation.
int EchoCancellationImpl::InitializeHandle(void* handle) const {
  assert(handle != NULL);
  return WebRtcAec_Init(static_cast<Handle*>(handle),
                        apm_->sample_rate_hz(),
                        device_sample_rate_hz_);
}

// Pushes the cached user settings (metrics, NLP level, skew mode) into one
// AEC handle.
int EchoCancellationImpl::ConfigureHandle(void* handle) const {
  assert(handle != NULL);
  AecConfig config;
  config.metricsMode = metrics_enabled_;
  config.nlpMode = MapSetting(suppression_level_);
  config.skewMode = drift_compensation_enabled_;

  return WebRtcAec_set_config(static_cast<Handle*>(handle), config);
}

// One AEC instance per (output channel, reverse channel) pair; see the
// iteration order in ProcessRenderAudio()/ProcessCaptureAudio().
int EchoCancellationImpl::num_handles_required() const {
  return apm_->num_output_channels() *
         apm_->num_reverse_channels();
}

// Maps the handle's last library error code to an AudioProcessing error.
int EchoCancellationImpl::GetHandleError(void* handle) const {
  assert(handle != NULL);
  return MapError(WebRtcAec_get_error_code(static_cast<Handle*>(handle)));
}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/echo_cancellation_impl.h b/src/modules/audio_processing/main/source/echo_cancellation_impl.h
new file mode 100644
index 0000000..071c18f
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_cancellation_impl.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CANCELLATION_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CANCELLATION_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
// Full (desktop) acoustic echo canceller component. Public configuration
// calls come in through the EchoCancellation interface; ProcessingComponent
// supplies the handle lifecycle (one WebRtcAec instance per
// output/reverse-channel pair).
class EchoCancellationImpl : public EchoCancellation,
                             public ProcessingComponent {
 public:
  explicit EchoCancellationImpl(const AudioProcessingImpl* apm);
  virtual ~EchoCancellationImpl();

  // Called by AudioProcessingImpl for each render/capture frame.
  int ProcessRenderAudio(const AudioBuffer* audio);
  int ProcessCaptureAudio(AudioBuffer* audio);

  // EchoCancellation implementation.
  virtual bool is_enabled() const;
  virtual int device_sample_rate_hz() const;
  virtual int stream_drift_samples() const;

  // ProcessingComponent implementation.
  virtual int Initialize();
  virtual int get_version(char* version, int version_len_bytes) const;

 private:
  // EchoCancellation implementation.
  virtual int Enable(bool enable);
  virtual int enable_drift_compensation(bool enable);
  virtual bool is_drift_compensation_enabled() const;
  virtual int set_device_sample_rate_hz(int rate);
  virtual int set_stream_drift_samples(int drift);
  virtual int set_suppression_level(SuppressionLevel level);
  virtual SuppressionLevel suppression_level() const;
  virtual int enable_metrics(bool enable);
  virtual bool are_metrics_enabled() const;
  virtual bool stream_has_echo() const;
  virtual int GetMetrics(Metrics* metrics);

  // ProcessingComponent implementation.
  virtual void* CreateHandle() const;
  virtual int InitializeHandle(void* handle) const;
  virtual int ConfigureHandle(void* handle) const;
  virtual int DestroyHandle(void* handle) const;
  virtual int num_handles_required() const;
  virtual int GetHandleError(void* handle) const;

  const AudioProcessingImpl* apm_;
  // Cached settings, applied to the handles in ConfigureHandle().
  bool drift_compensation_enabled_;
  bool metrics_enabled_;
  SuppressionLevel suppression_level_;
  int device_sample_rate_hz_;
  // Per-frame stream state.
  int stream_drift_samples_;
  bool was_stream_drift_set_;
  bool stream_has_echo_;
};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CANCELLATION_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc b/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc
new file mode 100644
index 0000000..ff15255
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "echo_control_mobile_impl.h"
+
+#include <cassert>
+#include <cstring>
+
+#include "critical_section_wrapper.h"
+#include "echo_control_mobile.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
// Maps the public RoutingMode enum onto the AECM echoMode values (0-4).
// Returns -1 for an unrecognized mode.
WebRtc_Word16 MapSetting(EchoControlMobile::RoutingMode mode) {
  switch (mode) {
    case EchoControlMobile::kQuietEarpieceOrHeadset:
      return 0;
    case EchoControlMobile::kEarpiece:
      return 1;
    case EchoControlMobile::kLoudEarpiece:
      return 2;
    case EchoControlMobile::kSpeakerphone:
      return 3;
    case EchoControlMobile::kLoudSpeakerphone:
      return 4;
    default:
      return -1;
  }
}

// Translates WebRtcAecm error codes to AudioProcessing error codes.
int MapError(int err) {
  switch (err) {
    case AECM_UNSUPPORTED_FUNCTION_ERROR:
      return AudioProcessing::kUnsupportedFunctionError;
    case AECM_NULL_POINTER_ERROR:
      return AudioProcessing::kNullPointerError;
    case AECM_BAD_PARAMETER_ERROR:
      return AudioProcessing::kBadParameterError;
    case AECM_BAD_PARAMETER_WARNING:
      return AudioProcessing::kBadStreamParameterWarning;
    default:
      // AECM_UNSPECIFIED_ERROR
      // AECM_UNINITIALIZED_ERROR
      return AudioProcessing::kUnspecifiedError;
  }
}
+} // namespace
+
// Size of the AECM echo path blob exchanged via Set/GetEchoPath();
// delegated to the AECM library.
size_t EchoControlMobile::echo_path_size_bytes() {
  return WebRtcAecm_echo_path_size_bytes();
}

// Defaults: speakerphone routing, comfort noise enabled, and no externally
// supplied echo path.
EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessingImpl* apm)
  : ProcessingComponent(apm),
    apm_(apm),
    routing_mode_(kSpeakerphone),
    comfort_noise_enabled_(true),
    external_echo_path_(NULL) {}
+
+EchoControlMobileImpl::~EchoControlMobileImpl() {
+ if (external_echo_path_ != NULL) {
+ delete [] external_echo_path_;
+ external_echo_path_ = NULL;
+ }
+}
+
// Feeds one frame of far-end (render) audio into every AECM instance.
// No-op while the component is disabled.
int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  // AECM works on the low-pass split band, at most 160 samples per frame.
  assert(audio->samples_per_split_channel() <= 160);
  assert(audio->num_channels() == apm_->num_reverse_channels());

  int err = apm_->kNoError;

  // The ordering convention must be followed to pass to the correct AECM:
  // one handle per (output channel, reverse channel) pair.
  size_t handle_index = 0;
  for (int i = 0; i < apm_->num_output_channels(); i++) {
    for (int j = 0; j < audio->num_channels(); j++) {
      Handle* my_handle = static_cast<Handle*>(handle(handle_index));
      err = WebRtcAecm_BufferFarend(
          my_handle,
          audio->low_pass_split_data(j),
          static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));

      if (err != apm_->kNoError) {
        return GetHandleError(my_handle);  // TODO(ajm): warning possible?
      }

      handle_index++;
    }
  }

  return apm_->kNoError;
}
+
// Runs AECM on one frame of near-end (capture) audio, writing the result
// back into the low-pass split band. Requires the stream delay to have been
// set for this frame.
int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  if (!apm_->was_stream_delay_set()) {
    return apm_->kStreamParameterNotSetError;
  }

  assert(audio->samples_per_split_channel() <= 160);
  assert(audio->num_channels() == apm_->num_output_channels());

  int err = apm_->kNoError;

  // The ordering convention must be followed to pass to the correct AECM.
  size_t handle_index = 0;
  for (int i = 0; i < audio->num_channels(); i++) {
    // TODO(ajm): improve how this works, possibly inside AECM.
    // This is kind of hacked up.
    // When a pre-NS ("noisy") reference exists, pass it together with the
    // NS-processed ("clean") signal; otherwise pass only the split data.
    WebRtc_Word16* noisy = audio->low_pass_reference(i);
    WebRtc_Word16* clean = audio->low_pass_split_data(i);
    if (noisy == NULL) {
      noisy = clean;
      clean = NULL;
    }
    for (int j = 0; j < apm_->num_reverse_channels(); j++) {
      Handle* my_handle = static_cast<Handle*>(handle(handle_index));
      err = WebRtcAecm_Process(
          my_handle,
          noisy,
          clean,
          audio->low_pass_split_data(i),
          static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
          apm_->stream_delay_ms());

      if (err != apm_->kNoError) {
        return GetHandleError(my_handle);  // TODO(ajm): warning possible?
      }

      handle_index++;
    }
  }

  return apm_->kNoError;
}
+
// Enables/disables AECM. AEC and AECM are mutually exclusive: enabling
// while the full AEC is active fails with kBadParameterError.
int EchoControlMobileImpl::Enable(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  // Ensure AEC and AECM are not both enabled.
  if (enable && apm_->echo_cancellation()->is_enabled()) {
    return apm_->kBadParameterError;
  }

  return EnableComponent(enable);
}

bool EchoControlMobileImpl::is_enabled() const {
  return is_component_enabled();
}

// Sets the device routing mode; values MapSetting() cannot translate are
// rejected. Applied to the handles via Configure().
int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (MapSetting(mode) == -1) {
    return apm_->kBadParameterError;
  }

  routing_mode_ = mode;
  return Configure();
}

EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
    const {
  return routing_mode_;
}

// Toggles comfort-noise generation; applied via Configure().
int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  comfort_noise_enabled_ = enable;
  return Configure();
}

bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
  return comfort_noise_enabled_;
}
+
// Installs an externally stored echo path. The blob must be exactly
// echo_path_size_bytes(); it is copied locally and applied to the handles
// on (re)initialization.
int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
                                       size_t size_bytes) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (echo_path == NULL) {
    return apm_->kNullPointerError;
  }
  if (size_bytes != echo_path_size_bytes()) {
    // Size mismatch
    return apm_->kBadParameterError;
  }

  // Lazily allocate the local copy on first use.
  if (external_echo_path_ == NULL) {
    external_echo_path_ = new unsigned char[size_bytes];
  }
  memcpy(external_echo_path_, echo_path, size_bytes);

  return Initialize();
}

// Reads the current echo path from the first AECM handle into |echo_path|.
// The buffer must be exactly echo_path_size_bytes(), and the component must
// be enabled.
int EchoControlMobileImpl::GetEchoPath(void* echo_path,
                                       size_t size_bytes) const {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (echo_path == NULL) {
    return apm_->kNullPointerError;
  }
  if (size_bytes != echo_path_size_bytes()) {
    // Size mismatch
    return apm_->kBadParameterError;
  }
  if (!is_component_enabled()) {
    return apm_->kNotEnabledError;
  }

  // Get the echo path from the first channel
  Handle* my_handle = static_cast<Handle*>(handle(0));
  if (WebRtcAecm_GetEchoPath(my_handle, echo_path, size_bytes) != 0) {
    return GetHandleError(my_handle);
  }

  return apm_->kNoError;
}
+
// Reinitializes the underlying handles when enabled. Fails with
// kBadSampleRateError at 32 kHz, which AECM does not support.
int EchoControlMobileImpl::Initialize() {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  if (apm_->sample_rate_hz() == apm_->kSampleRate32kHz) {
    // AECM doesn't support super-wideband.
    return apm_->kBadSampleRateError;
  }

  return ProcessingComponent::Initialize();
}

// Writes the AECM library's version string into |version|.
int EchoControlMobileImpl::get_version(char* version,
                                       int version_len_bytes) const {
  if (WebRtcAecm_get_version(version, version_len_bytes) != 0) {
    return apm_->kBadParameterError;
  }

  return apm_->kNoError;
}

// ProcessingComponent hook: allocates one opaque WebRtcAecm instance, or
// returns NULL on failure.
void* EchoControlMobileImpl::CreateHandle() const {
  Handle* handle = NULL;
  if (WebRtcAecm_Create(&handle) != apm_->kNoError) {
    handle = NULL;
  } else {
    assert(handle != NULL);
  }

  return handle;
}
+
+int EchoControlMobileImpl::DestroyHandle(void* handle) const {
+ return WebRtcAecm_Free(static_cast<Handle*>(handle));
+}
+
// (Re)initializes a handle at the APM rate, then re-applies the external
// echo path if one was supplied via SetEchoPath().
int EchoControlMobileImpl::InitializeHandle(void* handle) const {
  assert(handle != NULL);
  Handle* my_handle = static_cast<Handle*>(handle);
  if (WebRtcAecm_Init(my_handle, apm_->sample_rate_hz()) != 0) {
    return GetHandleError(my_handle);
  }
  if (external_echo_path_ != NULL) {
    if (WebRtcAecm_InitEchoPath(my_handle,
                                external_echo_path_,
                                echo_path_size_bytes()) != 0) {
      return GetHandleError(my_handle);
    }
  }

  return apm_->kNoError;
}

// Pushes the cached user settings (CNG, routing mode) into one handle.
// NOTE(review): unlike the other handle methods here, no NULL assert.
int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
  AecmConfig config;
  config.cngMode = comfort_noise_enabled_;
  config.echoMode = MapSetting(routing_mode_);

  return WebRtcAecm_set_config(static_cast<Handle*>(handle), config);
}

// One AECM instance per (output channel, reverse channel) pair.
int EchoControlMobileImpl::num_handles_required() const {
  return apm_->num_output_channels() *
         apm_->num_reverse_channels();
}

// Maps the handle's last library error code to an AudioProcessing error.
int EchoControlMobileImpl::GetHandleError(void* handle) const {
  assert(handle != NULL);
  return MapError(WebRtcAecm_get_error_code(static_cast<Handle*>(handle)));
}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/echo_control_mobile_impl.h b/src/modules/audio_processing/main/source/echo_control_mobile_impl.h
new file mode 100644
index 0000000..6314e66
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_control_mobile_impl.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CONTROL_MOBILE_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CONTROL_MOBILE_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
// Mobile echo controller (AECM) component. Public configuration calls come
// in through the EchoControlMobile interface; ProcessingComponent supplies
// the handle lifecycle (one WebRtcAecm instance per output/reverse-channel
// pair).
class EchoControlMobileImpl : public EchoControlMobile,
                              public ProcessingComponent {
 public:
  explicit EchoControlMobileImpl(const AudioProcessingImpl* apm);
  virtual ~EchoControlMobileImpl();

  // Called by AudioProcessingImpl for each render/capture frame.
  int ProcessRenderAudio(const AudioBuffer* audio);
  int ProcessCaptureAudio(AudioBuffer* audio);

  // EchoControlMobile implementation.
  virtual bool is_enabled() const;

  // ProcessingComponent implementation.
  virtual int Initialize();
  virtual int get_version(char* version, int version_len_bytes) const;

 private:
  // EchoControlMobile implementation.
  virtual int Enable(bool enable);
  virtual int set_routing_mode(RoutingMode mode);
  virtual RoutingMode routing_mode() const;
  virtual int enable_comfort_noise(bool enable);
  virtual bool is_comfort_noise_enabled() const;
  virtual int SetEchoPath(const void* echo_path, size_t size_bytes);
  virtual int GetEchoPath(void* echo_path, size_t size_bytes) const;

  // ProcessingComponent implementation.
  virtual void* CreateHandle() const;
  virtual int InitializeHandle(void* handle) const;
  virtual int ConfigureHandle(void* handle) const;
  virtual int DestroyHandle(void* handle) const;
  virtual int num_handles_required() const;
  virtual int GetHandleError(void* handle) const;

  const AudioProcessingImpl* apm_;
  // Cached settings, applied to the handles in ConfigureHandle().
  RoutingMode routing_mode_;
  bool comfort_noise_enabled_;
  // Heap copy of the echo path supplied via SetEchoPath(); NULL if unset.
  unsigned char* external_echo_path_;
};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CONTROL_MOBILE_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/gain_control_impl.cc b/src/modules/audio_processing/main/source/gain_control_impl.cc
new file mode 100644
index 0000000..dc3e565
--- /dev/null
+++ b/src/modules/audio_processing/main/source/gain_control_impl.cc
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "gain_control_impl.h"
+
+#include <cassert>
+
+#include "critical_section_wrapper.h"
+#include "gain_control.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+/*template <class T>
+class GainControlHandle : public ComponentHandle<T> {
+ public:
+ GainControlHandle();
+ virtual ~GainControlHandle();
+
+ virtual int Create();
+ virtual T* ptr() const;
+
+ private:
+ T* handle;
+};*/
+
+namespace {
+WebRtc_Word16 MapSetting(GainControl::Mode mode) {
+ switch (mode) {
+ case GainControl::kAdaptiveAnalog:
+ return kAgcModeAdaptiveAnalog;
+ break;
+ case GainControl::kAdaptiveDigital:
+ return kAgcModeAdaptiveDigital;
+ break;
+ case GainControl::kFixedDigital:
+ return kAgcModeFixedDigital;
+ break;
+ default:
+ return -1;
+ }
+}
+} // namespace
+
// Construction only records configuration defaults; the underlying AGC
// handles are created lazily by the ProcessingComponent base class when the
// component is enabled and initialized.
GainControlImpl::GainControlImpl(const AudioProcessingImpl* apm)
  : ProcessingComponent(apm),
    apm_(apm),
    mode_(kAdaptiveAnalog),
    minimum_capture_level_(0),
    maximum_capture_level_(255),
    limiter_enabled_(true),
    target_level_dbfs_(3),
    compression_gain_db_(9),
    analog_capture_level_(0),
    was_analog_level_set_(false),
    stream_is_saturated_(false) {}

GainControlImpl::~GainControlImpl() {}
+
// Feeds the render (far-end) audio to every AGC handle. Multi-channel audio
// is mixed down to mono first, since the AGC analyzes a single far-end
// stream.
int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  // Split-band frames are at most 160 samples (10 ms at 16 kHz).
  assert(audio->samples_per_split_channel() <= 160);

  WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
  if (audio->num_channels() > 1) {
    audio->CopyAndMixLowPass(1);
    mixed_data = audio->mixed_low_pass_data(0);
  }

  for (int i = 0; i < num_handles(); i++) {
    Handle* my_handle = static_cast<Handle*>(handle(i));
    int err = WebRtcAgc_AddFarend(
        my_handle,
        mixed_data,
        static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));

    if (err != apm_->kNoError) {
      return GetHandleError(my_handle);
    }
  }

  return apm_->kNoError;
}
+
// Pre-analysis of the capture stream. In kAdaptiveAnalog mode the audio is
// fed to the AGC for level analysis only; in kAdaptiveDigital mode a
// "virtual microphone" applies a digital gain emulating an analog volume
// control. No-op in kFixedDigital mode.
int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  assert(audio->samples_per_split_channel() <= 160);
  assert(audio->num_channels() == num_handles());

  int err = apm_->kNoError;

  if (mode_ == kAdaptiveAnalog) {
    for (int i = 0; i < num_handles(); i++) {
      Handle* my_handle = static_cast<Handle*>(handle(i));
      err = WebRtcAgc_AddMic(
          my_handle,
          audio->low_pass_split_data(i),
          audio->high_pass_split_data(i),
          static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));

      if (err != apm_->kNoError) {
        return GetHandleError(my_handle);
      }
    }
  } else if (mode_ == kAdaptiveDigital) {

    for (int i = 0; i < num_handles(); i++) {
      Handle* my_handle = static_cast<Handle*>(handle(i));
      WebRtc_Word32 capture_level_out = 0;

      // All channels start from the shared simulated analog level; the
      // per-channel result is stored for use in ProcessCaptureAudio().
      err = WebRtcAgc_VirtualMic(
          my_handle,
          audio->low_pass_split_data(i),
          audio->high_pass_split_data(i),
          static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
          //capture_levels_[i],
          analog_capture_level_,
          &capture_level_out);

      capture_levels_[i] = capture_level_out;

      if (err != apm_->kNoError) {
        return GetHandleError(my_handle);
      }

    }
  }

  return apm_->kNoError;
}
+
// Applies the gain to the capture stream in place. In kAdaptiveAnalog mode
// the application must first report the microphone level via
// set_stream_analog_level(); the recommended new level is then retrievable
// via stream_analog_level().
int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
    return apm_->kStreamParameterNotSetError;
  }

  assert(audio->samples_per_split_channel() <= 160);
  assert(audio->num_channels() == num_handles());

  stream_is_saturated_ = false;
  for (int i = 0; i < num_handles(); i++) {
    Handle* my_handle = static_cast<Handle*>(handle(i));
    WebRtc_Word32 capture_level_out = 0;
    WebRtc_UWord8 saturation_warning = 0;

    // In-place processing: the same split buffers are passed as input and
    // output.
    int err = WebRtcAgc_Process(
        my_handle,
        audio->low_pass_split_data(i),
        audio->high_pass_split_data(i),
        static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
        audio->low_pass_split_data(i),
        audio->high_pass_split_data(i),
        capture_levels_[i],
        &capture_level_out,
        apm_->echo_cancellation()->stream_has_echo(),
        &saturation_warning);

    if (err != apm_->kNoError) {
      return GetHandleError(my_handle);
    }

    capture_levels_[i] = capture_level_out;
    if (saturation_warning == 1) {
      stream_is_saturated_ = true;
    }
  }

  if (mode_ == kAdaptiveAnalog) {
    // Take the analog level to be the average across the handles.
    analog_capture_level_ = 0;
    for (int i = 0; i < num_handles(); i++) {
      analog_capture_level_ += capture_levels_[i];
    }

    analog_capture_level_ /= num_handles();
  }

  // Require a fresh analog level before the next capture frame.
  was_analog_level_set_ = false;
  return apm_->kNoError;
}
+
+// TODO(ajm): ensure this is called under kAdaptiveAnalog.
+int GainControlImpl::set_stream_analog_level(int level) {
+ was_analog_level_set_ = true;
+ if (level < minimum_capture_level_ || level > maximum_capture_level_) {
+ return apm_->kBadParameterError;
+ }
+
+ if (mode_ == kAdaptiveAnalog) {
+ if (level != analog_capture_level_) {
+ // The analog level has been changed; update our internal levels.
+ capture_levels_.assign(num_handles(), level);
+ }
+ }
+ analog_capture_level_ = level;
+
+ return apm_->kNoError;
+}
+
+int GainControlImpl::stream_analog_level() {
+ // TODO(ajm): enable this assertion?
+ //assert(mode_ == kAdaptiveAnalog);
+
+ return analog_capture_level_;
+}
+
int GainControlImpl::Enable(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  return EnableComponent(enable);
}

bool GainControlImpl::is_enabled() const {
  return is_component_enabled();
}

// Switches the operating mode. Triggers a full re-initialization because the
// mode is fixed at WebRtcAgc_Init() time (see InitializeHandle()).
int GainControlImpl::set_mode(Mode mode) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (MapSetting(mode) == -1) {
    return apm_->kBadParameterError;
  }

  mode_ = mode;
  return Initialize();
}

GainControl::Mode GainControlImpl::mode() const {
  return mode_;
}
+
+int GainControlImpl::set_analog_level_limits(int minimum,
+ int maximum) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (minimum < 0) {
+ return apm_->kBadParameterError;
+ }
+
+ if (maximum > 65535) {
+ return apm_->kBadParameterError;
+ }
+
+ if (maximum < minimum) {
+ return apm_->kBadParameterError;
+ }
+
+ minimum_capture_level_ = minimum;
+ maximum_capture_level_ = maximum;
+
+ return Initialize();
+}
+
int GainControlImpl::analog_level_minimum() const {
  return minimum_capture_level_;
}

int GainControlImpl::analog_level_maximum() const {
  return maximum_capture_level_;
}

// True if any channel's AGC flagged the last processed capture frame as
// saturated.
bool GainControlImpl::stream_is_saturated() const {
  return stream_is_saturated_;
}
+
// Sets the target speech level, expressed as a positive value in the range
// [0, 31] meaning 0 to -31 dBFS (see the sign note in ConfigureHandle()).
int GainControlImpl::set_target_level_dbfs(int level) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (level > 31 || level < 0) {
    return apm_->kBadParameterError;
  }

  target_level_dbfs_ = level;
  return Configure();
}

int GainControlImpl::target_level_dbfs() const {
  return target_level_dbfs_;
}

// Sets the fixed digital compression gain in dB, range [0, 90].
int GainControlImpl::set_compression_gain_db(int gain) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (gain < 0 || gain > 90) {
    return apm_->kBadParameterError;
  }

  compression_gain_db_ = gain;
  return Configure();
}

int GainControlImpl::compression_gain_db() const {
  return compression_gain_db_;
}

// Enables/disables the limiter applied after the compression gain.
int GainControlImpl::enable_limiter(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  limiter_enabled_ = enable;
  return Configure();
}

bool GainControlImpl::is_limiter_enabled() const {
  return limiter_enabled_;
}
+
+int GainControlImpl::Initialize() {
+ int err = ProcessingComponent::Initialize();
+ if (err != apm_->kNoError || !is_component_enabled()) {
+ return err;
+ }
+
+ analog_capture_level_ =
+ (maximum_capture_level_ - minimum_capture_level_) >> 1;
+ capture_levels_.assign(num_handles(), analog_capture_level_);
+ was_analog_level_set_ = false;
+
+ return apm_->kNoError;
+}
+
// Copies the AGC library version string into |version|.
int GainControlImpl::get_version(char* version, int version_len_bytes) const {
  if (WebRtcAgc_Version(version, version_len_bytes) != 0) {
    return apm_->kBadParameterError;
  }

  return apm_->kNoError;
}
+
// Allocates one AGC instance; returns NULL on failure so the base class can
// report a creation error.
void* GainControlImpl::CreateHandle() const {
  Handle* handle = NULL;
  if (WebRtcAgc_Create(&handle) != apm_->kNoError) {
    handle = NULL;
  } else {
    assert(handle != NULL);
  }

  return handle;
}

int GainControlImpl::DestroyHandle(void* handle) const {
  return WebRtcAgc_Free(static_cast<Handle*>(handle));
}
+
// Initializes an AGC handle with the current level limits, mode and sample
// rate; these cannot be changed without re-initializing.
int GainControlImpl::InitializeHandle(void* handle) const {
  return WebRtcAgc_Init(static_cast<Handle*>(handle),
                        minimum_capture_level_,
                        maximum_capture_level_,
                        MapSetting(mode_),
                        apm_->sample_rate_hz());
}

// Pushes the user-configurable settings to an AGC handle.
int GainControlImpl::ConfigureHandle(void* handle) const {
  WebRtcAgc_config_t config;
  // TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
  // change the interface.
  //assert(target_level_dbfs_ <= 0);
  //config.targetLevelDbfs = static_cast<WebRtc_Word16>(-target_level_dbfs_);
  config.targetLevelDbfs = static_cast<WebRtc_Word16>(target_level_dbfs_);
  config.compressionGaindB =
      static_cast<WebRtc_Word16>(compression_gain_db_);
  config.limiterEnable = limiter_enabled_;

  return WebRtcAgc_set_config(static_cast<Handle*>(handle), config);
}
+
// One AGC instance per output (capture) channel.
int GainControlImpl::num_handles_required() const {
  return apm_->num_output_channels();
}

int GainControlImpl::GetHandleError(void* handle) const {
  // The AGC has no get_error() function.
  // (Despite listing errors in its interface...)
  assert(handle != NULL);
  return apm_->kUnspecifiedError;
}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/gain_control_impl.h b/src/modules/audio_processing/main/source/gain_control_impl.h
new file mode 100644
index 0000000..7b6987e
--- /dev/null
+++ b/src/modules/audio_processing/main/source/gain_control_impl.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_GAIN_CONTROL_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_GAIN_CONTROL_IMPL_H_
+
+#include <vector>
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
// AGC component: wraps the WebRtcAgc library behind the public GainControl
// interface, reusing the shared ProcessingComponent handle management.
class GainControlImpl : public GainControl,
                        public ProcessingComponent {
 public:
  explicit GainControlImpl(const AudioProcessingImpl* apm);
  virtual ~GainControlImpl();

  // Feeds far-end (render) audio to the AGC.
  int ProcessRenderAudio(AudioBuffer* audio);
  // Capture pre-analysis (analog level / virtual-mic stages).
  int AnalyzeCaptureAudio(AudioBuffer* audio);
  // Applies the gain to the capture frame in place.
  int ProcessCaptureAudio(AudioBuffer* audio);

  // ProcessingComponent implementation.
  virtual int Initialize();
  virtual int get_version(char* version, int version_len_bytes) const;

  // GainControl implementation.
  virtual bool is_enabled() const;
  virtual int stream_analog_level();

 private:
  // GainControl implementation.
  virtual int Enable(bool enable);
  virtual int set_stream_analog_level(int level);
  virtual int set_mode(Mode mode);
  virtual Mode mode() const;
  virtual int set_target_level_dbfs(int level);
  virtual int target_level_dbfs() const;
  virtual int set_compression_gain_db(int gain);
  virtual int compression_gain_db() const;
  virtual int enable_limiter(bool enable);
  virtual bool is_limiter_enabled() const;
  virtual int set_analog_level_limits(int minimum, int maximum);
  virtual int analog_level_minimum() const;
  virtual int analog_level_maximum() const;
  virtual bool stream_is_saturated() const;

  // ProcessingComponent implementation.
  virtual void* CreateHandle() const;
  virtual int InitializeHandle(void* handle) const;
  virtual int ConfigureHandle(void* handle) const;
  virtual int DestroyHandle(void* handle) const;
  virtual int num_handles_required() const;
  virtual int GetHandleError(void* handle) const;

  const AudioProcessingImpl* apm_;  // Not owned.
  Mode mode_;
  int minimum_capture_level_;
  int maximum_capture_level_;
  bool limiter_enabled_;
  int target_level_dbfs_;
  int compression_gain_db_;
  std::vector<int> capture_levels_;  // Per-handle (per-channel) levels.
  int analog_capture_level_;
  bool was_analog_level_set_;  // Reset after each processed capture frame.
  bool stream_is_saturated_;
};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_GAIN_CONTROL_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/high_pass_filter_impl.cc b/src/modules/audio_processing/main/source/high_pass_filter_impl.cc
new file mode 100644
index 0000000..fa6d5d5
--- /dev/null
+++ b/src/modules/audio_processing/main/source/high_pass_filter_impl.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
#include "high_pass_filter_impl.h"

#include <cassert>
#include <cstring>

#include "critical_section_wrapper.h"
#include "typedefs.h"
#include "signal_processing_library.h"

#include "audio_processing_impl.h"
#include "audio_buffer.h"
+
+namespace webrtc {
namespace {
// Second-order high-pass filter coefficients in Q12, laid out as
// {b0, b1, b2, -a1, -a2}. One set for 8 kHz input, one for all other rates
// (the filter runs on the low split band, at most 16 kHz).
const WebRtc_Word16 kFilterCoefficients8kHz[5] =
    {3798, -7596, 3798, 7807, -3733};

const WebRtc_Word16 kFilterCoefficients[5] =
    {4012, -8024, 4012, 8002, -3913};

// Per-channel filter state.
struct FilterState {
  WebRtc_Word16 y[4];       // Output history as high/low word pairs:
                            // {y[i-1] hi, y[i-1] lo, y[i-2] hi, y[i-2] lo}.
  WebRtc_Word16 x[2];       // Input history: {x[i-1], x[i-2]}.
  const WebRtc_Word16* ba;  // Active coefficient set (not owned).
};

// Selects the coefficient set for |sample_rate_hz| and zeroes the history.
int InitializeFilter(FilterState* hpf, int sample_rate_hz) {
  assert(hpf != NULL);

  if (sample_rate_hz == AudioProcessingImpl::kSampleRate8kHz) {
    hpf->ba = kFilterCoefficients8kHz;
  } else {
    hpf->ba = kFilterCoefficients;
  }

  WebRtcSpl_MemSetW16(hpf->x, 0, 2);
  WebRtcSpl_MemSetW16(hpf->y, 0, 4);

  return AudioProcessing::kNoError;
}

// Runs the biquad in place over |length| samples using 32-bit fixed-point
// arithmetic. The output history is kept in double precision (two 16-bit
// words per sample) to limit quantization noise in the feedback path.
int Filter(FilterState* hpf, WebRtc_Word16* data, int length) {
  assert(hpf != NULL);

  WebRtc_Word32 tmp_int32 = 0;
  WebRtc_Word16* y = hpf->y;
  WebRtc_Word16* x = hpf->x;
  const WebRtc_Word16* ba = hpf->ba;

  for (int i = 0; i < length; i++) {
    // y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2]
    //      + -a[1] * y[i-1] + -a[2] * y[i-2];

    tmp_int32 =
        WEBRTC_SPL_MUL_16_16(y[1], ba[3]);  // -a[1] * y[i-1] (low part)
    tmp_int32 +=
        WEBRTC_SPL_MUL_16_16(y[3], ba[4]);  // -a[2] * y[i-2] (low part)
    tmp_int32 = (tmp_int32 >> 15);
    tmp_int32 +=
        WEBRTC_SPL_MUL_16_16(y[0], ba[3]);  // -a[1] * y[i-1] (high part)
    tmp_int32 +=
        WEBRTC_SPL_MUL_16_16(y[2], ba[4]);  // -a[2] * y[i-2] (high part)
    tmp_int32 = (tmp_int32 << 1);

    tmp_int32 += WEBRTC_SPL_MUL_16_16(data[i], ba[0]);  // b[0]*x[0]
    tmp_int32 += WEBRTC_SPL_MUL_16_16(x[0], ba[1]);     // b[1]*x[i-1]
    tmp_int32 += WEBRTC_SPL_MUL_16_16(x[1], ba[2]);     // b[2]*x[i-2]

    // Update state (input part)
    x[1] = x[0];
    x[0] = data[i];

    // Update state (filtered part)
    y[2] = y[0];
    y[3] = y[1];
    y[0] = static_cast<WebRtc_Word16>(tmp_int32 >> 13);
    // Residual below bit 13, stored (shifted to Q2) as the low word.
    y[1] = static_cast<WebRtc_Word16>((tmp_int32 -
        WEBRTC_SPL_LSHIFT_W32(static_cast<WebRtc_Word32>(y[0]), 13)) << 2);

    // Rounding in Q12, i.e. add 2^11
    tmp_int32 += 2048;

    // Saturate (to 2^27) so that the HP filtered signal does not overflow
    tmp_int32 = WEBRTC_SPL_SAT(static_cast<WebRtc_Word32>(134217727),
                               tmp_int32,
                               static_cast<WebRtc_Word32>(-134217728));

    // Convert back to Q0 and use rounding
    data[i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp_int32, 12);

  }

  return AudioProcessing::kNoError;
}
}  // namespace
+
typedef FilterState Handle;

// No state at construction; per-channel FilterState handles are created by
// the ProcessingComponent base class when the component is enabled.
HighPassFilterImpl::HighPassFilterImpl(const AudioProcessingImpl* apm)
  : ProcessingComponent(apm),
    apm_(apm) {}

HighPassFilterImpl::~HighPassFilterImpl() {}
+
// High-pass filters the low split band of each capture channel in place.
int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
  int err = apm_->kNoError;

  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  // Split-band frames are at most 160 samples (10 ms at 16 kHz).
  assert(audio->samples_per_split_channel() <= 160);

  for (int i = 0; i < num_handles(); i++) {
    Handle* my_handle = static_cast<Handle*>(handle(i));
    err = Filter(my_handle,
                 audio->low_pass_split_data(i),
                 audio->samples_per_split_channel());

    if (err != apm_->kNoError) {
      return GetHandleError(my_handle);
    }
  }

  return apm_->kNoError;
}
+
int HighPassFilterImpl::Enable(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  return EnableComponent(enable);
}

bool HighPassFilterImpl::is_enabled() const {
  return is_component_enabled();
}
+
+int HighPassFilterImpl::get_version(char* version,
+ int version_len_bytes) const {
+ // An empty string is used to indicate no version information.
+ memset(version, 0, version_len_bytes);
+ return apm_->kNoError;
+}
+
// Handles are plain FilterState structs; creation cannot fail with an error
// code.
void* HighPassFilterImpl::CreateHandle() const {
  return new FilterState;
}

int HighPassFilterImpl::DestroyHandle(void* handle) const {
  delete static_cast<Handle*>(handle);
  return apm_->kNoError;
}
+
int HighPassFilterImpl::InitializeHandle(void* handle) const {
  return InitializeFilter(static_cast<Handle*>(handle),
                          apm_->sample_rate_hz());
}

int HighPassFilterImpl::ConfigureHandle(void* /*handle*/) const {
  return apm_->kNoError; // Not configurable.
}

// One filter state per output (capture) channel.
int HighPassFilterImpl::num_handles_required() const {
  return apm_->num_output_channels();
}

int HighPassFilterImpl::GetHandleError(void* handle) const {
  // The component has no detailed errors.
  assert(handle != NULL);
  return apm_->kUnspecifiedError;
}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/high_pass_filter_impl.h b/src/modules/audio_processing/main/source/high_pass_filter_impl.h
new file mode 100644
index 0000000..4c23754
--- /dev/null
+++ b/src/modules/audio_processing/main/source/high_pass_filter_impl.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_HIGH_PASS_FILTER_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_HIGH_PASS_FILTER_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
// Fixed-point high-pass filter component; the filter itself is implemented
// locally in the .cc file (no external library handle).
class HighPassFilterImpl : public HighPassFilter,
                           public ProcessingComponent {
 public:
  explicit HighPassFilterImpl(const AudioProcessingImpl* apm);
  virtual ~HighPassFilterImpl();

  // Filters the capture frame in place.
  int ProcessCaptureAudio(AudioBuffer* audio);

  // HighPassFilter implementation.
  virtual bool is_enabled() const;

  // ProcessingComponent implementation.
  virtual int get_version(char* version, int version_len_bytes) const;

 private:
  // HighPassFilter implementation.
  virtual int Enable(bool enable);

  // ProcessingComponent implementation.
  virtual void* CreateHandle() const;
  virtual int InitializeHandle(void* handle) const;
  virtual int ConfigureHandle(void* handle) const;
  virtual int DestroyHandle(void* handle) const;
  virtual int num_handles_required() const;
  virtual int GetHandleError(void* handle) const;

  const AudioProcessingImpl* apm_;  // Not owned.
};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_HIGH_PASS_FILTER_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/level_estimator_impl.cc b/src/modules/audio_processing/main/source/level_estimator_impl.cc
new file mode 100644
index 0000000..799a962
--- /dev/null
+++ b/src/modules/audio_processing/main/source/level_estimator_impl.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "level_estimator_impl.h"
+
+#include <cassert>
+#include <cstring>
+
+#include "critical_section_wrapper.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+// TODO(ajm): implement the underlying level estimator component.
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
+/*int EstimateLevel(AudioBuffer* audio, Handle* my_handle) {
+ assert(audio->samples_per_split_channel() <= 160);
+
+ WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
+ if (audio->num_channels() > 1) {
+ audio->CopyAndMixLowPass(1);
+ mixed_data = audio->mixed_low_pass_data(0);
+ }
+
+ int err = UpdateLvlEst(my_handle,
+ mixed_data,
+ audio->samples_per_split_channel());
+ if (err != AudioProcessing::kNoError) {
+ return GetHandleError(my_handle);
+ }
+
+ return AudioProcessing::kNoError;
+}
+
+int GetMetricsLocal(Handle* my_handle, LevelEstimator::Metrics* metrics) {
+ level_t levels;
+ memset(&levels, 0, sizeof(levels));
+
+ int err = ExportLevels(my_handle, &levels, 2);
+ if (err != AudioProcessing::kNoError) {
+ return err;
+ }
+ metrics->signal.instant = levels.instant;
+ metrics->signal.average = levels.average;
+ metrics->signal.maximum = levels.max;
+ metrics->signal.minimum = levels.min;
+
+ err = ExportLevels(my_handle, &levels, 1);
+ if (err != AudioProcessing::kNoError) {
+ return err;
+ }
+ metrics->speech.instant = levels.instant;
+ metrics->speech.average = levels.average;
+ metrics->speech.maximum = levels.max;
+ metrics->speech.minimum = levels.min;
+
+ err = ExportLevels(my_handle, &levels, 0);
+ if (err != AudioProcessing::kNoError) {
+ return err;
+ }
+ metrics->noise.instant = levels.instant;
+ metrics->noise.average = levels.average;
+ metrics->noise.maximum = levels.max;
+ metrics->noise.minimum = levels.min;
+
+ return AudioProcessing::kNoError;
+}*/
+} // namespace
+
// All methods below are stubs returning kUnsupportedComponentError until the
// underlying level estimator component is implemented (see the TODO at the
// top of this file).
LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessingImpl* apm)
  : ProcessingComponent(apm),
    apm_(apm) {}

LevelEstimatorImpl::~LevelEstimatorImpl() {}
+
// Stubbed: unsupported until the estimator lands. The commented-out bodies
// sketch the intended implementation.
int LevelEstimatorImpl::AnalyzeReverseStream(AudioBuffer* /*audio*/) {
  return apm_->kUnsupportedComponentError;
  /*if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  return EstimateLevel(audio, static_cast<Handle*>(handle(1)));*/
}

int LevelEstimatorImpl::ProcessCaptureAudio(AudioBuffer* /*audio*/) {
  return apm_->kUnsupportedComponentError;
  /*if (!is_component_enabled()) {
    return apm_->kNoError;
  }

  return EstimateLevel(audio, static_cast<Handle*>(handle(0)));*/
}
+
// Always fails: the component cannot be enabled while unimplemented.
int LevelEstimatorImpl::Enable(bool /*enable*/) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  return apm_->kUnsupportedComponentError;
  //return EnableComponent(enable);
}

bool LevelEstimatorImpl::is_enabled() const {
  return is_component_enabled();
}
+
// Stubbed: would fill capture and reverse-stream level metrics.
int LevelEstimatorImpl::GetMetrics(LevelEstimator::Metrics* /*metrics*/,
                                   LevelEstimator::Metrics* /*reverse_metrics*/) {
  return apm_->kUnsupportedComponentError;
  /*if (!is_component_enabled()) {
    return apm_->kNotEnabledError;
  }

  int err = GetMetricsLocal(static_cast<Handle*>(handle(0)), metrics);
  if (err != apm_->kNoError) {
    return err;
  }

  err = GetMetricsLocal(static_cast<Handle*>(handle(1)), reverse_metrics);
  if (err != apm_->kNoError) {
    return err;
  }

  return apm_->kNoError;*/
}

int LevelEstimatorImpl::get_version(char* version,
                                    int version_len_bytes) const {
  // An empty string is used to indicate no version information.
  memset(version, 0, version_len_bytes);
  return apm_->kNoError;
}
+
// Stubbed handle management: no handles exist yet, so creation returns NULL
// and the remaining hooks report the component as unsupported.
void* LevelEstimatorImpl::CreateHandle() const {
  Handle* handle = NULL;
  /*if (CreateLvlEst(&handle) != apm_->kNoError) {
    handle = NULL;
  } else {
    assert(handle != NULL);
  }*/

  return handle;
}

int LevelEstimatorImpl::DestroyHandle(void* /*handle*/) const {
  return apm_->kUnsupportedComponentError;
  //return FreeLvlEst(static_cast<Handle*>(handle));
}

int LevelEstimatorImpl::InitializeHandle(void* /*handle*/) const {
  return apm_->kUnsupportedComponentError;
  /*const double kIntervalSeconds = 1.5;
  return InitLvlEst(static_cast<Handle*>(handle),
                    apm_->sample_rate_hz(),
                    kIntervalSeconds);*/
}

int LevelEstimatorImpl::ConfigureHandle(void* /*handle*/) const {
  return apm_->kUnsupportedComponentError;
  //return apm_->kNoError;
}

int LevelEstimatorImpl::num_handles_required() const {
  return apm_->kUnsupportedComponentError;
  //return 2;
}

int LevelEstimatorImpl::GetHandleError(void* handle) const {
  // The component has no detailed errors.
  assert(handle != NULL);
  return apm_->kUnspecifiedError;
}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/level_estimator_impl.h b/src/modules/audio_processing/main/source/level_estimator_impl.h
new file mode 100644
index 0000000..1515722
--- /dev/null
+++ b/src/modules/audio_processing/main/source/level_estimator_impl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_LEVEL_ESTIMATOR_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_LEVEL_ESTIMATOR_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
// Level estimator component. Currently a stub: every operation in the .cc
// file returns kUnsupportedComponentError (see the TODO there).
class LevelEstimatorImpl : public LevelEstimator,
                           public ProcessingComponent {
 public:
  explicit LevelEstimatorImpl(const AudioProcessingImpl* apm);
  virtual ~LevelEstimatorImpl();

  int AnalyzeReverseStream(AudioBuffer* audio);
  int ProcessCaptureAudio(AudioBuffer* audio);

  // LevelEstimator implementation.
  virtual bool is_enabled() const;

  // ProcessingComponent implementation.
  virtual int get_version(char* version, int version_len_bytes) const;

 private:
  // LevelEstimator implementation.
  virtual int Enable(bool enable);
  virtual int GetMetrics(Metrics* metrics, Metrics* reverse_metrics);

  // ProcessingComponent implementation.
  virtual void* CreateHandle() const;
  virtual int InitializeHandle(void* handle) const;
  virtual int ConfigureHandle(void* handle) const;
  virtual int DestroyHandle(void* handle) const;
  virtual int num_handles_required() const;
  virtual int GetHandleError(void* handle) const;

  const AudioProcessingImpl* apm_;  // Not owned.
};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_LEVEL_ESTIMATOR_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/noise_suppression_impl.cc b/src/modules/audio_processing/main/source/noise_suppression_impl.cc
new file mode 100644
index 0000000..f899f35
--- /dev/null
+++ b/src/modules/audio_processing/main/source/noise_suppression_impl.cc
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "noise_suppression_impl.h"
+
+#include <cassert>
+
+#include "critical_section_wrapper.h"
+#if defined(WEBRTC_NS_FLOAT)
+#include "noise_suppression.h"
+#elif defined(WEBRTC_NS_FIXED)
+#include "noise_suppression_x.h"
+#endif
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_NS_FLOAT)
+typedef NsHandle Handle;
+#elif defined(WEBRTC_NS_FIXED)
+typedef NsxHandle Handle;
+#endif
+
+namespace {
+int MapSetting(NoiseSuppression::Level level) {
+ switch (level) {
+ case NoiseSuppression::kLow:
+ return 0;
+ case NoiseSuppression::kModerate:
+ return 1;
+ case NoiseSuppression::kHigh:
+ return 2;
+ case NoiseSuppression::kVeryHigh:
+ return 3;
+ default:
+ return -1;
+ }
+}
+} // namespace
+
// Defaults to moderate suppression; NS handles are created lazily by the
// ProcessingComponent base class when the component is enabled.
NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessingImpl* apm)
  : ProcessingComponent(apm),
    apm_(apm),
    level_(kModerate) {}

NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
+
+int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ int err = apm_->kNoError;
+
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == num_handles());
+
+ for (int i = 0; i < num_handles(); i++) {
+ Handle* my_handle = static_cast<Handle*>(handle(i));
+#if defined(WEBRTC_NS_FLOAT)
+ err = WebRtcNs_Process(static_cast<Handle*>(handle(i)),
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i));
+#elif defined(WEBRTC_NS_FIXED)
+ err = WebRtcNsx_Process(static_cast<Handle*>(handle(i)),
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i));
+#endif
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+ }
+
+ return apm_->kNoError;
+}
+
int NoiseSuppressionImpl::Enable(bool enable) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  return EnableComponent(enable);
}

bool NoiseSuppressionImpl::is_enabled() const {
  return is_component_enabled();
}

// Sets the suppression aggressiveness; pushed to the handles via
// Configure().
int NoiseSuppressionImpl::set_level(Level level) {
  CriticalSectionScoped crit_scoped(*apm_->crit());
  if (MapSetting(level) == -1) {
    return apm_->kBadParameterError;
  }

  level_ = level;
  return Configure();
}

NoiseSuppression::Level NoiseSuppressionImpl::level() const {
  return level_;
}
+
// Copies the version string of whichever NS library (float or fixed point)
// this build uses.
int NoiseSuppressionImpl::get_version(char* version,
                                      int version_len_bytes) const {
#if defined(WEBRTC_NS_FLOAT)
  if (WebRtcNs_get_version(version, version_len_bytes) != 0)
#elif defined(WEBRTC_NS_FIXED)
  if (WebRtcNsx_get_version(version, version_len_bytes) != 0)
#endif
  {
    return apm_->kBadParameterError;
  }

  return apm_->kNoError;
}
+
// Allocates one NS instance; returns NULL on failure so the base class can
// report a creation error.
void* NoiseSuppressionImpl::CreateHandle() const {
  Handle* handle = NULL;
#if defined(WEBRTC_NS_FLOAT)
  if (WebRtcNs_Create(&handle) != apm_->kNoError)
#elif defined(WEBRTC_NS_FIXED)
  if (WebRtcNsx_Create(&handle) != apm_->kNoError)
#endif
  {
    handle = NULL;
  } else {
    assert(handle != NULL);
  }

  return handle;
}

// NOTE(review): if neither WEBRTC_NS_FLOAT nor WEBRTC_NS_FIXED is defined,
// this function has no return statement; presumably the build system
// guarantees exactly one of them is set — confirm.
int NoiseSuppressionImpl::DestroyHandle(void* handle) const {
#if defined(WEBRTC_NS_FLOAT)
  return WebRtcNs_Free(static_cast<Handle*>(handle));
#elif defined(WEBRTC_NS_FIXED)
  return WebRtcNsx_Free(static_cast<Handle*>(handle));
#endif
}
+
int NoiseSuppressionImpl::InitializeHandle(void* handle) const {
#if defined(WEBRTC_NS_FLOAT)
  return WebRtcNs_Init(static_cast<Handle*>(handle), apm_->sample_rate_hz());
#elif defined(WEBRTC_NS_FIXED)
  return WebRtcNsx_Init(static_cast<Handle*>(handle), apm_->sample_rate_hz());
#endif
}

// Applies the current suppression level to an NS handle.
int NoiseSuppressionImpl::ConfigureHandle(void* handle) const {
#if defined(WEBRTC_NS_FLOAT)
  return WebRtcNs_set_policy(static_cast<Handle*>(handle),
                             MapSetting(level_));
#elif defined(WEBRTC_NS_FIXED)
  return WebRtcNsx_set_policy(static_cast<Handle*>(handle),
                              MapSetting(level_));
#endif
}

// One NS instance per output (capture) channel.
int NoiseSuppressionImpl::num_handles_required() const {
  return apm_->num_output_channels();
}

int NoiseSuppressionImpl::GetHandleError(void* handle) const {
  // The NS has no get_error() function.
  assert(handle != NULL);
  return apm_->kUnspecifiedError;
}
+} // namespace webrtc
+
diff --git a/src/modules/audio_processing/main/source/noise_suppression_impl.h b/src/modules/audio_processing/main/source/noise_suppression_impl.h
new file mode 100644
index 0000000..c9ff9b3
--- /dev/null
+++ b/src/modules/audio_processing/main/source/noise_suppression_impl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_NOISE_SUPPRESSION_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_NOISE_SUPPRESSION_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+// Concrete NoiseSuppression component. Wraps the float (WebRtcNs) or
+// fixed-point (WebRtcNsx) NS library, chosen at compile time, and manages
+// one library handle per output channel via ProcessingComponent.
+class NoiseSuppressionImpl : public NoiseSuppression,
+                             public ProcessingComponent {
+ public:
+  explicit NoiseSuppressionImpl(const AudioProcessingImpl* apm);
+  virtual ~NoiseSuppressionImpl();
+
+  // Runs noise suppression on a frame of capture-side audio.
+  int ProcessCaptureAudio(AudioBuffer* audio);
+
+  // NoiseSuppression implementation.
+  virtual bool is_enabled() const;
+
+  // ProcessingComponent implementation.
+  virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+  // NoiseSuppression implementation.
+  virtual int Enable(bool enable);
+  virtual int set_level(Level level);
+  virtual Level level() const;
+
+  // ProcessingComponent implementation.
+  virtual void* CreateHandle() const;
+  virtual int InitializeHandle(void* handle) const;
+  virtual int ConfigureHandle(void* handle) const;
+  virtual int DestroyHandle(void* handle) const;
+  virtual int num_handles_required() const;
+  virtual int GetHandleError(void* handle) const;
+
+  const AudioProcessingImpl* apm_;  // Owning APM instance; not owned here.
+  Level level_;                     // Currently selected suppression level.
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_NOISE_SUPPRESSION_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/processing_component.cc b/src/modules/audio_processing/main/source/processing_component.cc
new file mode 100644
index 0000000..9ac1257
--- /dev/null
+++ b/src/modules/audio_processing/main/source/processing_component.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "processing_component.h"
+
+#include <cassert>
+
+#include "audio_processing_impl.h"
+
+namespace webrtc {
+
+// |apm| is the owning AudioProcessingImpl; it is stored but not owned.
+// Components start disabled, uninitialized, and with no handles.
+ProcessingComponent::ProcessingComponent(const AudioProcessingImpl* apm)
+  : apm_(apm),
+    initialized_(false),
+    enabled_(false),
+    num_handles_(0) {}
+
+ProcessingComponent::~ProcessingComponent() {
+  // Destroy() must be called before destruction, so that the derived
+  // class's DestroyHandle() is still available to free the handles.
+  assert(initialized_ == false);
+}
+
+// Releases every handle (in reverse creation order) and marks the
+// component uninitialized. Must be called before destruction.
+int ProcessingComponent::Destroy() {
+  for (std::vector<void*>::reverse_iterator it = handles_.rbegin();
+       it != handles_.rend(); ++it) {
+    DestroyHandle(*it);
+  }
+  handles_.clear();
+  initialized_ = false;
+
+  return apm_->kNoError;
+}
+
+// Enables or disables the component. Turning an inactive component on
+// triggers Initialize(); failure rolls the enabled flag back.
+int ProcessingComponent::EnableComponent(bool enable) {
+  if (!enable || enabled_) {
+    // Disabling, or already enabled: just record the requested state.
+    enabled_ = enable;
+    return apm_->kNoError;
+  }
+
+  // Transition from disabled to enabled. The flag must be set before
+  // Initialize() runs, because Initialize() early-outs when disabled.
+  enabled_ = true;
+  const int err = Initialize();
+  if (err != apm_->kNoError) {
+    enabled_ = false;
+    return err;
+  }
+
+  return apm_->kNoError;
+}
+
+// Returns the current enabled state set via EnableComponent().
+bool ProcessingComponent::is_component_enabled() const {
+  return enabled_;
+}
+
+// Returns the opaque handle at |index|. Bounds are enforced only in debug
+// builds; the previous assert missed negative indices, which would index
+// the vector out of range just as badly as an index >= num_handles_.
+void* ProcessingComponent::handle(int index) const {
+  assert(index >= 0 && index < num_handles_);
+  return handles_[index];
+}
+
+// Number of handles currently in use (set during Initialize()).
+int ProcessingComponent::num_handles() const {
+  return num_handles_;
+}
+
+// Lazily creates and initializes the handles the derived class needs,
+// then applies its configuration. A no-op while the component is disabled.
+int ProcessingComponent::Initialize() {
+  if (!enabled_) {
+    return apm_->kNoError;
+  }
+
+  num_handles_ = num_handles_required();
+  // Grow the vector if more handles are needed; existing handles (and any
+  // surplus from a previous configuration) are kept for reuse.
+  if (num_handles_ > static_cast<int>(handles_.size())) {
+    handles_.resize(num_handles_, NULL);
+  }
+
+  assert(static_cast<int>(handles_.size()) >= num_handles_);
+  for (int i = 0; i < num_handles_; i++) {
+    // Create only the slots that are still empty.
+    if (handles_[i] == NULL) {
+      handles_[i] = CreateHandle();
+      if (handles_[i] == NULL) {
+        return apm_->kCreationFailedError;
+      }
+    }
+
+    int err = InitializeHandle(handles_[i]);
+    if (err != apm_->kNoError) {
+      // Translate the raw library error into the component's error code.
+      return GetHandleError(handles_[i]);
+    }
+  }
+
+  initialized_ = true;
+  return Configure();
+}
+
+// Re-applies the derived class's settings to every handle. A no-op until
+// Initialize() has completed successfully.
+int ProcessingComponent::Configure() {
+  if (!initialized_) {
+    return apm_->kNoError;
+  }
+
+  assert(static_cast<int>(handles_.size()) >= num_handles_);
+  for (int index = 0; index < num_handles_; ++index) {
+    const int status = ConfigureHandle(handles_[index]);
+    if (status != apm_->kNoError) {
+      // Report the handle-specific error rather than the raw status.
+      return GetHandleError(handles_[index]);
+    }
+  }
+
+  return apm_->kNoError;
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/processing_component.h b/src/modules/audio_processing/main/source/processing_component.h
new file mode 100644
index 0000000..3d8a02b
--- /dev/null
+++ b/src/modules/audio_processing/main/source/processing_component.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_PROCESSING_COMPONENT_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_PROCESSING_COMPONENT_H_
+
+#include <vector>
+
+#include "audio_processing.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+
+/*template <class T>
+class ComponentHandle {
+ public:
+ ComponentHandle();
+ virtual ~ComponentHandle();
+
+ virtual int Create() = 0;
+ virtual T* ptr() const = 0;
+};*/
+
+// Base class for APM processing components. Owns a vector of opaque,
+// per-channel library handles whose creation, initialization,
+// configuration and destruction are delegated to the derived class.
+class ProcessingComponent {
+ public:
+  explicit ProcessingComponent(const AudioProcessingImpl* apm);
+  virtual ~ProcessingComponent();
+
+  // Creates/initializes handles and applies settings; no-op when disabled.
+  virtual int Initialize();
+  // Frees all handles; must be called before destruction.
+  virtual int Destroy();
+  virtual int get_version(char* version, int version_len_bytes) const = 0;
+
+ protected:
+  // Re-applies the derived class's settings to all handles.
+  virtual int Configure();
+  int EnableComponent(bool enable);
+  bool is_component_enabled() const;
+  void* handle(int index) const;
+  int num_handles() const;
+
+ private:
+  // Hooks implemented by the derived component.
+  virtual void* CreateHandle() const = 0;
+  virtual int InitializeHandle(void* handle) const = 0;
+  virtual int ConfigureHandle(void* handle) const = 0;
+  virtual int DestroyHandle(void* handle) const = 0;
+  virtual int num_handles_required() const = 0;
+  virtual int GetHandleError(void* handle) const = 0;
+
+  const AudioProcessingImpl* apm_;  // Owning APM instance; not owned here.
+  std::vector<void*> handles_;      // Opaque library handles.
+  bool initialized_;                // True after a successful Initialize().
+  bool enabled_;
+  int num_handles_;                 // Handles currently in use.
+};
+} // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_PROCESSING_COMPONENT_H_
diff --git a/src/modules/audio_processing/main/source/splitting_filter.cc b/src/modules/audio_processing/main/source/splitting_filter.cc
new file mode 100644
index 0000000..1526141
--- /dev/null
+++ b/src/modules/audio_processing/main/source/splitting_filter.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "splitting_filter.h"
+#include "signal_processing_library.h"
+
+namespace webrtc {
+
+// Thin wrapper that forwards to the SPL QMF analysis filter bank,
+// splitting |in_data| into a low and a high band.
+void SplittingFilterAnalysis(const WebRtc_Word16* in_data,
+                             WebRtc_Word16* low_band,
+                             WebRtc_Word16* high_band,
+                             WebRtc_Word32* filter_state1,
+                             WebRtc_Word32* filter_state2) {
+  WebRtcSpl_AnalysisQMF(in_data, low_band, high_band,
+                        filter_state1, filter_state2);
+}
+
+// Thin wrapper that forwards to the SPL QMF synthesis filter bank,
+// recombining the two bands into |out_data|.
+void SplittingFilterSynthesis(const WebRtc_Word16* low_band,
+                              const WebRtc_Word16* high_band,
+                              WebRtc_Word16* out_data,
+                              WebRtc_Word32* filt_state1,
+                              WebRtc_Word32* filt_state2) {
+  WebRtcSpl_SynthesisQMF(low_band, high_band, out_data,
+                         filt_state1, filt_state2);
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/splitting_filter.h b/src/modules/audio_processing/main/source/splitting_filter.h
new file mode 100644
index 0000000..661bfb2
--- /dev/null
+++ b/src/modules/audio_processing/main/source/splitting_filter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
+
+#include "typedefs.h"
+#include "signal_processing_library.h"
+
+namespace webrtc {
+/*
+ * SplittingFilterAnalysis(...)
+ *
+ * Splits a super-wb signal into two subbands: 0-8 kHz and 8-16 kHz.
+ *
+ * Input:
+ * - in_data : super-wb audio signal
+ *
+ * Input & Output:
+ * - filt_state1: Filter state for first all-pass filter
+ * - filt_state2: Filter state for second all-pass filter
+ *
+ * Output:
+ *    - low_band   : The signal from the 0-8 kHz band
+ *    - high_band  : The signal from the 8-16 kHz band
+ */
+// Implemented in splitting_filter.cc as a wrapper over the SPL QMF bank.
+void SplittingFilterAnalysis(const WebRtc_Word16* in_data,
+                             WebRtc_Word16* low_band,
+                             WebRtc_Word16* high_band,
+                             WebRtc_Word32* filt_state1,
+                             WebRtc_Word32* filt_state2);
+
+/*
+ * SplittingFilterSynthesis(...)
+ *
+ * Combines the two subbands (0-8 and 8-16 kHz) into a super-wb signal.
+ *
+ * Input:
+ * - low_band : The signal with the 0-8 kHz band
+ * - high_band : The signal with the 8-16 kHz band
+ *
+ * Input & Output:
+ * - filt_state1: Filter state for first all-pass filter
+ * - filt_state2: Filter state for second all-pass filter
+ *
+ * Output:
+ * - out_data : super-wb speech signal
+ */
+// Implemented in splitting_filter.cc as a wrapper over the SPL QMF bank.
+void SplittingFilterSynthesis(const WebRtc_Word16* low_band,
+                              const WebRtc_Word16* high_band,
+                              WebRtc_Word16* out_data,
+                              WebRtc_Word32* filt_state1,
+                              WebRtc_Word32* filt_state2);
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
diff --git a/src/modules/audio_processing/main/source/voice_detection_impl.cc b/src/modules/audio_processing/main/source/voice_detection_impl.cc
new file mode 100644
index 0000000..3eb446e
--- /dev/null
+++ b/src/modules/audio_processing/main/source/voice_detection_impl.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_detection_impl.h"
+
+#include <cassert>
+
+#include "critical_section_wrapper.h"
+#include "webrtc_vad.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef VadInst Handle;
+
+namespace {
+// Maps the public Likelihood setting to the VAD's aggressiveness mode:
+// a lower likelihood requirement corresponds to a higher (more
+// aggressive) mode number. Returns -1 for an unrecognized setting, which
+// callers treat as a bad parameter.
+// The `break` statements after each `return` were unreachable dead code
+// and have been removed.
+WebRtc_Word16 MapSetting(VoiceDetection::Likelihood likelihood) {
+  switch (likelihood) {
+    case VoiceDetection::kVeryLowLikelihood:
+      return 3;
+    case VoiceDetection::kLowLikelihood:
+      return 2;
+    case VoiceDetection::kModerateLikelihood:
+      return 1;
+    case VoiceDetection::kHighLikelihood:
+      return 0;
+    default:
+      return -1;
+  }
+}
+} // namespace
+
+
+// Defaults: disabled, internal VAD, kLowLikelihood, 10 ms frames.
+// frame_size_samples_ is computed later in Initialize().
+VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessingImpl* apm)
+  : ProcessingComponent(apm),
+    apm_(apm),
+    stream_has_voice_(false),
+    using_external_vad_(false),
+    likelihood_(kLowLikelihood),
+    frame_size_ms_(10),
+    frame_size_samples_(0) {}
+
+VoiceDetectionImpl::~VoiceDetectionImpl() {}
+
+// Runs the VAD on one frame of capture-side audio and caches the voice
+// decision for stream_has_voice(). No-op while the component is disabled.
+int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  if (!is_component_enabled()) {
+    return apm_->kNoError;
+  }
+
+  // An externally injected decision (set_stream_has_voice) overrides
+  // processing for this frame only; clear the flag afterwards.
+  if (using_external_vad_) {
+    using_external_vad_ = false;
+    return apm_->kNoError;
+  }
+  assert(audio->samples_per_split_channel() <= 160);
+
+  // Analyze the low-pass band, mixing down to mono when needed.
+  WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
+  if (audio->num_channels() > 1) {
+    audio->CopyAndMixLowPass(1);
+    mixed_data = audio->mixed_low_pass_data(0);
+  }
+
+  // TODO(ajm): concatenate data in frame buffer here.
+
+  // Fixed the declare-then-assign of the result by initializing it
+  // directly; the value is never reassigned.
+  const int vad_ret_val = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
+                                            apm_->split_sample_rate_hz(),
+                                            mixed_data,
+                                            frame_size_samples_);
+  // 0 = no voice, 1 = voice; any other value is treated as an error.
+  if (vad_ret_val == 0) {
+    stream_has_voice_ = false;
+  } else if (vad_ret_val == 1) {
+    stream_has_voice_ = true;
+  } else {
+    return apm_->kUnspecifiedError;
+  }
+
+  return apm_->kNoError;
+}
+
+// Toggles the component; serialized against the rest of the APM.
+int VoiceDetectionImpl::Enable(bool enable) {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  return EnableComponent(enable);
+}
+
+// Reflects the ProcessingComponent enabled flag.
+bool VoiceDetectionImpl::is_enabled() const {
+  return is_component_enabled();
+}
+
+// Injects an externally computed voice decision for the current frame.
+// The flag makes the next ProcessCaptureAudio() call skip the VAD once.
+int VoiceDetectionImpl::set_stream_has_voice(bool has_voice) {
+  using_external_vad_ = true;
+  stream_has_voice_ = has_voice;
+  return apm_->kNoError;
+}
+
+// Returns the latest voice decision (VAD-computed or externally injected).
+bool VoiceDetectionImpl::stream_has_voice() const {
+  // TODO(ajm): enable this assertion?
+  //assert(using_external_vad_ || is_component_enabled());
+  return stream_has_voice_;
+}
+
+// Sets the detection likelihood and pushes the corresponding VAD mode to
+// the handle. Rejects values MapSetting() does not recognize.
+int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  const WebRtc_Word16 mode = MapSetting(likelihood);
+  if (mode == -1) {
+    return apm_->kBadParameterError;
+  }
+
+  likelihood_ = likelihood;
+  return Configure();
+}
+
+// Returns the likelihood setting last accepted by set_likelihood().
+VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
+  return likelihood_;
+}
+
+// Sets the VAD frame size in milliseconds and reinitializes. Only 10, 20
+// and 30 ms are valid, and currently only 10 ms is actually supported.
+int VoiceDetectionImpl::set_frame_size_ms(int size) {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  assert(size == 10);  // TODO(ajm): remove when supported.
+  const bool valid_size = (size == 10 || size == 20 || size == 30);
+  if (!valid_size) {
+    return apm_->kBadParameterError;
+  }
+
+  frame_size_ms_ = size;
+
+  return Initialize();
+}
+
+// Returns the configured frame size in milliseconds.
+int VoiceDetectionImpl::frame_size_ms() const {
+  return frame_size_ms_;
+}
+
+// Creates/initializes the VAD handle via the base class, then derives the
+// per-frame sample count. No-op (beyond the base call) while disabled.
+int VoiceDetectionImpl::Initialize() {
+  int err = ProcessingComponent::Initialize();
+  if (err != apm_->kNoError || !is_component_enabled()) {
+    return err;
+  }
+
+  using_external_vad_ = false;
+  // Samples per frame at the split (lower-band) sample rate.
+  frame_size_samples_ = frame_size_ms_ * (apm_->split_sample_rate_hz() / 1000);
+  // TODO(ajm): initialize frame buffer here.
+
+  return apm_->kNoError;
+}
+
+// Copies the VAD library version string into |version|. A non-zero
+// library status (e.g. buffer too small) maps to kBadParameterError.
+int VoiceDetectionImpl::get_version(char* version,
+                                    int version_len_bytes) const {
+  const int status = WebRtcVad_get_version(version, version_len_bytes);
+  return (status == 0) ? apm_->kNoError : apm_->kBadParameterError;
+}
+
+// Creates one VAD instance; returns NULL on failure.
+void* VoiceDetectionImpl::CreateHandle() const {
+  Handle* handle = NULL;
+  if (WebRtcVad_Create(&handle) != apm_->kNoError) {
+    return NULL;
+  }
+  assert(handle != NULL);
+  return handle;
+}
+
+// Frees a handle previously returned by CreateHandle().
+int VoiceDetectionImpl::DestroyHandle(void* handle) const {
+  return WebRtcVad_Free(static_cast<Handle*>(handle));
+}
+
+// Resets the VAD instance to its default state.
+int VoiceDetectionImpl::InitializeHandle(void* handle) const {
+  return WebRtcVad_Init(static_cast<Handle*>(handle));
+}
+
+// Pushes the VAD mode corresponding to the current likelihood setting.
+int VoiceDetectionImpl::ConfigureHandle(void* handle) const {
+  return WebRtcVad_set_mode(static_cast<Handle*>(handle),
+                            MapSetting(likelihood_));
+}
+
+// One handle suffices: ProcessCaptureAudio() mixes multi-channel capture
+// audio down to a single stream before running the VAD.
+int VoiceDetectionImpl::num_handles_required() const {
+  return 1;
+}
+
+int VoiceDetectionImpl::GetHandleError(void* handle) const {
+  // The VAD has no get_error() function, so no specific code can be
+  // retrieved; fall back to the generic unspecified error.
+  assert(handle != NULL);
+  return apm_->kUnspecifiedError;
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/voice_detection_impl.h b/src/modules/audio_processing/main/source/voice_detection_impl.h
new file mode 100644
index 0000000..ef212d1
--- /dev/null
+++ b/src/modules/audio_processing/main/source/voice_detection_impl.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_VOICE_DETECTION_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_VOICE_DETECTION_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+// Concrete VoiceDetection component. Wraps the WebRTC VAD library, running
+// it on the (mono-mixed) low-pass band of the capture stream, and also
+// accepts externally injected per-frame voice decisions.
+class VoiceDetectionImpl : public VoiceDetection,
+                           public ProcessingComponent {
+ public:
+  explicit VoiceDetectionImpl(const AudioProcessingImpl* apm);
+  virtual ~VoiceDetectionImpl();
+
+  // Runs the VAD on a frame of capture-side audio.
+  int ProcessCaptureAudio(AudioBuffer* audio);
+
+  // VoiceDetection implementation.
+  virtual bool is_enabled() const;
+
+  // ProcessingComponent implementation.
+  virtual int Initialize();
+  virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+  // VoiceDetection implementation.
+  virtual int Enable(bool enable);
+  virtual int set_stream_has_voice(bool has_voice);
+  virtual bool stream_has_voice() const;
+  virtual int set_likelihood(Likelihood likelihood);
+  virtual Likelihood likelihood() const;
+  virtual int set_frame_size_ms(int size);
+  virtual int frame_size_ms() const;
+
+  // ProcessingComponent implementation.
+  virtual void* CreateHandle() const;
+  virtual int InitializeHandle(void* handle) const;
+  virtual int ConfigureHandle(void* handle) const;
+  virtual int DestroyHandle(void* handle) const;
+  virtual int num_handles_required() const;
+  virtual int GetHandleError(void* handle) const;
+
+  const AudioProcessingImpl* apm_;  // Owning APM instance; not owned here.
+  bool stream_has_voice_;           // Latest per-frame voice decision.
+  bool using_external_vad_;         // Next frame uses an injected decision.
+  Likelihood likelihood_;           // Current detection likelihood setting.
+  int frame_size_ms_;               // VAD frame size (10/20/30 ms).
+  int frame_size_samples_;          // Derived in Initialize().
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_VOICE_DETECTION_IMPL_H_