| author | Arun Raghavan <git@arunraghavan.net> | 2015-10-13 22:52:52 +0530 |
|---|---|---|
| committer | Arun Raghavan <git@arunraghavan.net> | 2015-10-15 16:18:47 +0530 |
| commit | 7fcd4d2df5e2ceebf830d6fdfc6e8222a2486e99 (patch) | |
| tree | 2f3e52c6699616b3cb2d6fda4516e123992ac7e8 | |
| parent | 65a286080655e350c8f4e6b743c3b8233cf72d07 (diff) | |
build: More build fixes and cleanups
19 files changed, 2130 insertions, 138 deletions
diff --git a/webrtc/Makefile.am b/webrtc/Makefile.am index a5d17bc..015082d 100644 --- a/webrtc/Makefile.am +++ b/webrtc/Makefile.am @@ -1,5 +1,12 @@ -SUBDIRS = base common_audio system_wrappers modules +SUBDIRS = . base common_audio system_wrappers modules -noinst_HEADERS = common.h +noinst_HEADERS = common.h \ + common_types.h \ + typedefs.h + +noinst_LTLIBRARIES = libwebrtc.la + +libwebrtc_la_SOURCES = common_types.cc +libwebrtc_la_CXXFLAGS = $(AM_CXXFLAGS) $(COMMON_CXXFLAGS) EXTRA_DIST = BUILD.gn PATENTS LICENSE_THIRD_PARTY diff --git a/webrtc/base/Makefile.am b/webrtc/base/Makefile.am index 3a88183..11638b2 100644 --- a/webrtc/base/Makefile.am +++ b/webrtc/base/Makefile.am @@ -3,7 +3,6 @@ noinst_LTLIBRARIES = libbase.la noinst_HEADERS = arraysize.h \ atomicops.h \ basictypes.h \ - checks.h \ constructormagic.h \ safe_conversions.h \ safe_conversions_impl.h \ @@ -13,6 +12,8 @@ noinst_HEADERS = arraysize.h \ libbase_la_SOURCES = criticalsection.cc \ criticalsection.h \ + checks.cc \ + checks.h \ event.cc \ event.h \ platform_thread.cc \ diff --git a/webrtc/base/checks.cc b/webrtc/base/checks.cc new file mode 100644 index 0000000..49a31f2 --- /dev/null +++ b/webrtc/base/checks.cc @@ -0,0 +1,127 @@ +/* + * Copyright 2006 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Most of this was borrowed (with minor modifications) from V8's and Chromium's +// src/base/logging.cc. + +// Use the C++ version to provide __GLIBCXX__. +#include <cstdarg> +#include <cstdio> +#include <cstdlib> + +#if defined(__GLIBCXX__) && !defined(__UCLIBC__) +#include <cxxabi.h> +#include <execinfo.h> +#endif + +#if defined(WEBRTC_ANDROID) +#define LOG_TAG "rtc" +#include <android/log.h> // NOLINT +#endif + +#include "webrtc/base/checks.h" + +#if defined(_MSC_VER) +// Warning C4722: destructor never returns, potential memory leak. +// FatalMessage's dtor very intentionally aborts. +#pragma warning(disable:4722) +#endif + +namespace rtc { + +void VPrintError(const char* format, va_list args) { +#if defined(WEBRTC_ANDROID) + __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args); +#else + vfprintf(stderr, format, args); +#endif +} + +void PrintError(const char* format, ...) { + va_list args; + va_start(args, format); + VPrintError(format, args); + va_end(args); +} + +// TODO(ajm): This works on Mac (although the parsing fails) but I don't seem +// to get usable symbols on Linux. This is copied from V8. Chromium has a more +// advanced stace trace system; also more difficult to copy. 
+void DumpBacktrace() { +#if defined(__GLIBCXX__) && !defined(__UCLIBC__) + void* trace[100]; + int size = backtrace(trace, sizeof(trace) / sizeof(*trace)); + char** symbols = backtrace_symbols(trace, size); + PrintError("\n==== C stack trace ===============================\n\n"); + if (size == 0) { + PrintError("(empty)\n"); + } else if (symbols == NULL) { + PrintError("(no symbols)\n"); + } else { + for (int i = 1; i < size; ++i) { + char mangled[201]; + if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT + PrintError("%2d: ", i); + int status; + size_t length; + char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status); + PrintError("%s\n", demangled != NULL ? demangled : mangled); + free(demangled); + } else { + // If parsing failed, at least print the unparsed symbol. + PrintError("%s\n", symbols[i]); + } + } + } + free(symbols); +#endif +} + +FatalMessage::FatalMessage(const char* file, int line) { + Init(file, line); +} + +FatalMessage::FatalMessage(const char* file, int line, std::string* result) { + Init(file, line); + stream_ << "Check failed: " << *result << std::endl << "# "; + delete result; +} + +NO_RETURN FatalMessage::~FatalMessage() { + fflush(stdout); + fflush(stderr); + stream_ << std::endl << "#" << std::endl; + PrintError(stream_.str().c_str()); + DumpBacktrace(); + fflush(stderr); + abort(); +} + +void FatalMessage::Init(const char* file, int line) { + stream_ << std::endl << std::endl << "#" << std::endl << "# Fatal error in " + << file << ", line " << line << std::endl << "# "; +} + +// MSVC doesn't like complex extern templates and DLLs. +#if !defined(COMPILER_MSVC) +// Explicit instantiations for commonly used comparisons. +template std::string* MakeCheckOpString<int, int>( + const int&, const int&, const char* names); +template std::string* MakeCheckOpString<unsigned long, unsigned long>( + const unsigned long&, const unsigned long&, const char* names); +template std::string* MakeCheckOpString<unsigned long, unsigned int>( + const unsigned long&, const unsigned int&, const char* names); +template std::string* MakeCheckOpString<unsigned int, unsigned long>( + const unsigned int&, const unsigned long&, const char* names); +template std::string* MakeCheckOpString<std::string, std::string>( + const std::string&, const std::string&, const char* name); +#endif + +} // namespace rtc diff --git a/webrtc/common_audio/Makefile.am b/webrtc/common_audio/Makefile.am index 8dff0f7..709bf71 100644 --- a/webrtc/common_audio/Makefile.am +++ b/webrtc/common_audio/Makefile.am @@ -69,6 +69,7 @@ libcommon_audio_la_SOURCES = resampler/include/push_resampler.h \ audio_converter.h \ audio_ring_buffer.cc \ audio_ring_buffer.h \ + audio_util.cc \ blocker.cc \ blocker.h \ channel_buffer.cc \ diff --git a/webrtc/common_audio/audio_util.cc b/webrtc/common_audio/audio_util.cc new file mode 100644 index 0000000..2ce2eba --- /dev/null +++ b/webrtc/common_audio/audio_util.cc @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
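Aside on the new `checks.cc` above: `DumpBacktrace()` uses glibc's `backtrace()`/`backtrace_symbols()` and, where available, the C++ ABI demangler. A minimal, self-contained sketch of the same mechanism in plain C (demangling omitted) can help verify that symbol names are actually produced on a given platform; the function name, buffer depth, and output stream here are illustrative choices, not taken from the patch:

```c
/* Minimal backtrace dump using glibc's execinfo facilities.
 * Build with -g (and often -rdynamic) so symbol names resolve. */
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

void print_backtrace(void) {
  void *trace[64];                                 /* arbitrary depth */
  int size = backtrace(trace, 64);                 /* collect return addresses */
  char **symbols = backtrace_symbols(trace, size); /* malloc'd string array */
  if (symbols == NULL) {
    fprintf(stderr, "(no symbols)\n");
    return;
  }
  for (int i = 0; i < size; ++i)
    fprintf(stderr, "%2d: %s\n", i, symbols[i]);
  free(symbols);  /* a single free() releases the whole array */
}

int main(void) {
  print_backtrace();
  return 0;
}
```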
+ */ + +#include "webrtc/common_audio/include/audio_util.h" + +#include "webrtc/typedefs.h" + +namespace webrtc { + +void FloatToS16(const float* src, size_t size, int16_t* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatToS16(src[i]); +} + +void S16ToFloat(const int16_t* src, size_t size, float* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = S16ToFloat(src[i]); +} + +void FloatS16ToS16(const float* src, size_t size, int16_t* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatS16ToS16(src[i]); +} + +void FloatToFloatS16(const float* src, size_t size, float* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatToFloatS16(src[i]); +} + +void FloatS16ToFloat(const float* src, size_t size, float* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatS16ToFloat(src[i]); +} + +template <> +void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved, + size_t num_frames, + int num_channels, + int16_t* deinterleaved) { + DownmixInterleavedToMonoImpl<int16_t, int32_t>(interleaved, num_frames, + num_channels, deinterleaved); +} + +} // namespace webrtc diff --git a/webrtc/common_types.cc b/webrtc/common_types.cc new file mode 100644 index 0000000..7b99f3c --- /dev/null +++ b/webrtc/common_types.cc @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_types.h" + +#include <string.h> + +namespace webrtc { + +int InStream::Rewind() { return -1; } + +int OutStream::Rewind() { return -1; } + +StreamDataCounters::StreamDataCounters() : first_packet_time_ms(-1) {} + +RTPHeaderExtension::RTPHeaderExtension() + : hasTransmissionTimeOffset(false), + transmissionTimeOffset(0), + hasAbsoluteSendTime(false), + absoluteSendTime(0), + hasTransportSequenceNumber(false), + transportSequenceNumber(0), + hasAudioLevel(false), + voiceActivity(false), + audioLevel(0), + hasVideoRotation(false), + videoRotation(0) { +} + +RTPHeader::RTPHeader() + : markerBit(false), + payloadType(0), + sequenceNumber(0), + timestamp(0), + ssrc(0), + numCSRCs(0), + paddingLength(0), + headerLength(0), + payload_type_frequency(0), + extension() { + memset(&arrOfCSRCs, 0, sizeof(arrOfCSRCs)); +} + +} // namespace webrtc diff --git a/webrtc/modules/audio_coding/Makefile.am b/webrtc/modules/audio_coding/Makefile.am index 29269ae..5304dac 100644 --- a/webrtc/modules/audio_coding/Makefile.am +++ b/webrtc/modules/audio_coding/Makefile.am @@ -3,11 +3,18 @@ noinst_LTLIBRARIES = libaudio_coding.la libaudio_coding_la_SOURCES = codecs/isac/main/interface/isac.h \ codecs/isac/main/source/arith_routines.c \ codecs/isac/main/source/arith_routines.h \ + codecs/isac/main/source/arith_routines_hist.c \ + codecs/isac/main/source/arith_routines_logist.c \ codecs/isac/main/source/codec.h \ codecs/isac/main/source/encode_lpc_swb.c \ codecs/isac/main/source/encode_lpc_swb.h \ codecs/isac/main/source/entropy_coding.c \ codecs/isac/main/source/entropy_coding.h \ + codecs/isac/main/source/filter_functions.c \ + codecs/isac/main/source/filterbanks.c \ + codecs/isac/main/source/filterbank_tables.c \ + codecs/isac/main/source/filterbank_tables.h \ + codecs/isac/main/source/intialize.c \ 
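The `audio_util.cc` loops above delegate to per-sample helpers declared in `include/audio_util.h`, which is not part of this diff. A plausible sketch of such helpers is shown below, assuming the common convention that floats in roughly [-1, 1] map to the full int16 range with clipping; the helper names and the exact scaling/rounding policy are assumptions and may differ from the real header:

```c
#include <stdint.h>

/* Hypothetical per-sample converters mirroring what the loops above call. */
int16_t float_to_s16(float v) {
  /* Assume v is nominally in [-1, 1]; scale and clip to the int16 range. */
  v *= 32768.0f;
  if (v > 32767.0f) return 32767;
  if (v < -32768.0f) return -32768;
  return (int16_t)v;
}

float s16_to_float(int16_t v) {
  /* Map int16 back into roughly [-1, 1]. */
  return (float)v / 32768.0f;
}

int16_t float_s16_to_s16(float v) {
  /* "FloatS16" samples are already on the int16 scale; round and clip. */
  if (v > 32767.0f) return 32767;
  if (v < -32768.0f) return -32768;
  return (int16_t)(v >= 0.0f ? v + 0.5f : v - 0.5f);
}
```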
codecs/isac/main/source/lpc_analysis.c \ codecs/isac/main/source/lpc_analysis.h \ codecs/isac/main/source/lpc_gain_swb_tables.c \ @@ -21,6 +28,7 @@ libaudio_coding_la_SOURCES = codecs/isac/main/interface/isac.h \ codecs/isac/main/source/os_specific_inline.h \ codecs/isac/main/source/pitch_estimator.c \ codecs/isac/main/source/pitch_estimator.h \ + codecs/isac/main/source/pitch_filter.c \ codecs/isac/main/source/pitch_gain_tables.c \ codecs/isac/main/source/pitch_gain_tables.h \ codecs/isac/main/source/pitch_lag_tables.c \ diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c b/webrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c new file mode 100644 index 0000000..63e4928 --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "settings.h" +#include "arith_routines.h" + + +/* + * code symbols into arithmetic bytestream + */ +void WebRtcIsac_EncHistMulti(Bitstr *streamdata, /* in-/output struct containing bitstream */ + const int *data, /* input: data vector */ + const uint16_t **cdf, /* input: array of cdf arrays */ + const int N) /* input: data vector length */ +{ + uint32_t W_lower, W_upper; + uint32_t W_upper_LSB, W_upper_MSB; + uint8_t *stream_ptr; + uint8_t *stream_ptr_carry; + uint32_t cdf_lo, cdf_hi; + int k; + + + /* point to beginning of stream buffer */ + stream_ptr = streamdata->stream + streamdata->stream_index; + W_upper = streamdata->W_upper; + + for (k=N; k>0; k--) + { + /* fetch cdf_lower and cdf_upper from cdf tables */ + cdf_lo = (uint32_t) *(*cdf + *data); + cdf_hi = (uint32_t) *(*cdf++ + *data++ + 1); + + /* update interval */ + W_upper_LSB = W_upper & 0x0000FFFF; + W_upper_MSB = W_upper >> 16; + W_lower = W_upper_MSB * cdf_lo; + W_lower += (W_upper_LSB * cdf_lo) >> 16; + W_upper = W_upper_MSB * cdf_hi; + W_upper += (W_upper_LSB * cdf_hi) >> 16; + + /* shift interval such that it begins at zero */ + W_upper -= ++W_lower; + + /* add integer to bitstream */ + streamdata->streamval += W_lower; + + /* handle carry */ + if (streamdata->streamval < W_lower) + { + /* propagate carry */ + stream_ptr_carry = stream_ptr; + while (!(++(*--stream_ptr_carry))); + } + + /* renormalize interval, store most significant byte of streamval and update streamval */ + while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */ + { + W_upper <<= 8; + *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24); + streamdata->streamval <<= 8; + } + } + + /* calculate new stream_index */ + streamdata->stream_index = (int)(stream_ptr - streamdata->stream); + streamdata->W_upper = W_upper; + + return; +} + + + +/* + * function to decode more symbols from the arithmetic bytestream, using method of bisection + * cdf tables should be of size 2^k-1 (which corresponds to an alphabet size of 2^k-2) + */ +int WebRtcIsac_DecHistBisectMulti(int *data, /* output: data vector */ + Bitstr *streamdata, /* in-/output struct containing bitstream */ + const uint16_t **cdf, /* input: array of cdf arrays */ + const uint16_t *cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */ + 
const int N) /* input: data vector length */ +{ + uint32_t W_lower, W_upper; + uint32_t W_tmp; + uint32_t W_upper_LSB, W_upper_MSB; + uint32_t streamval; + const uint8_t *stream_ptr; + const uint16_t *cdf_ptr; + int size_tmp; + int k; + + W_lower = 0; //to remove warning -DH + stream_ptr = streamdata->stream + streamdata->stream_index; + W_upper = streamdata->W_upper; + if (W_upper == 0) + /* Should not be possible in normal operation */ + return -2; + + if (streamdata->stream_index == 0) /* first time decoder is called for this stream */ + { + /* read first word from bytestream */ + streamval = *stream_ptr << 24; + streamval |= *++stream_ptr << 16; + streamval |= *++stream_ptr << 8; + streamval |= *++stream_ptr; + } else { + streamval = streamdata->streamval; + } + + for (k=N; k>0; k--) + { + /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */ + W_upper_LSB = W_upper & 0x0000FFFF; + W_upper_MSB = W_upper >> 16; + + /* start halfway the cdf range */ + size_tmp = *cdf_size++ >> 1; + cdf_ptr = *cdf + (size_tmp - 1); + + /* method of bisection */ + for ( ;; ) + { + W_tmp = W_upper_MSB * *cdf_ptr; + W_tmp += (W_upper_LSB * *cdf_ptr) >> 16; + size_tmp >>= 1; + if (size_tmp == 0) break; + if (streamval > W_tmp) + { + W_lower = W_tmp; + cdf_ptr += size_tmp; + } else { + W_upper = W_tmp; + cdf_ptr -= size_tmp; + } + } + if (streamval > W_tmp) + { + W_lower = W_tmp; + *data++ = (int)(cdf_ptr - *cdf++); + } else { + W_upper = W_tmp; + *data++ = (int)(cdf_ptr - *cdf++ - 1); + } + + /* shift interval to start at zero */ + W_upper -= ++W_lower; + + /* add integer to bitstream */ + streamval -= W_lower; + + /* renormalize interval and update streamval */ + while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */ + { + /* read next byte from stream */ + streamval = (streamval << 8) | *++stream_ptr; + W_upper <<= 8; + } + + if (W_upper == 0) + /* Should not be possible in normal operation */ + return -2; + + + } + + streamdata->stream_index = (int)(stream_ptr - streamdata->stream); + streamdata->W_upper = W_upper; + streamdata->streamval = streamval; + + + /* find number of bytes in original stream (determined by current interval width) */ + if ( W_upper > 0x01FFFFFF ) + return streamdata->stream_index - 2; + else + return streamdata->stream_index - 1; +} + + + +/* + * function to decode more symbols from the arithmetic bytestream, taking single step up or + * down at a time + * cdf tables can be of arbitrary size, but large tables may take a lot of iterations + */ +int WebRtcIsac_DecHistOneStepMulti(int *data, /* output: data vector */ + Bitstr *streamdata, /* in-/output struct containing bitstream */ + const uint16_t **cdf, /* input: array of cdf arrays */ + const uint16_t *init_index, /* input: vector of initial cdf table search entries */ + const int N) /* input: data vector length */ +{ + uint32_t W_lower, W_upper; + uint32_t W_tmp; + uint32_t W_upper_LSB, W_upper_MSB; + uint32_t streamval; + const uint8_t *stream_ptr; + const uint16_t *cdf_ptr; + int k; + + + stream_ptr = streamdata->stream + streamdata->stream_index; + W_upper = streamdata->W_upper; + if (W_upper == 0) + /* Should not be possible in normal operation */ + return -2; + + if (streamdata->stream_index == 0) /* first time decoder is called for this stream */ + { + /* read first word from bytestream */ + streamval = *stream_ptr << 24; + streamval |= *++stream_ptr << 16; + streamval |= *++stream_ptr << 8; + streamval |= *++stream_ptr; + } else { + streamval = streamdata->streamval; + } + + + for (k=N; k>0; k--) + { 
+ /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */ + W_upper_LSB = W_upper & 0x0000FFFF; + W_upper_MSB = W_upper >> 16; + + /* start at the specified table entry */ + cdf_ptr = *cdf + (*init_index++); + W_tmp = W_upper_MSB * *cdf_ptr; + W_tmp += (W_upper_LSB * *cdf_ptr) >> 16; + if (streamval > W_tmp) + { + for ( ;; ) + { + W_lower = W_tmp; + if (cdf_ptr[0]==65535) + /* range check */ + return -3; + W_tmp = W_upper_MSB * *++cdf_ptr; + W_tmp += (W_upper_LSB * *cdf_ptr) >> 16; + if (streamval <= W_tmp) break; + } + W_upper = W_tmp; + *data++ = (int)(cdf_ptr - *cdf++ - 1); + } else { + for ( ;; ) + { + W_upper = W_tmp; + --cdf_ptr; + if (cdf_ptr<*cdf) { + /* range check */ + return -3; + } + W_tmp = W_upper_MSB * *cdf_ptr; + W_tmp += (W_upper_LSB * *cdf_ptr) >> 16; + if (streamval > W_tmp) break; + } + W_lower = W_tmp; + *data++ = (int)(cdf_ptr - *cdf++); + } + + /* shift interval to start at zero */ + W_upper -= ++W_lower; + /* add integer to bitstream */ + streamval -= W_lower; + + /* renormalize interval and update streamval */ + while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */ + { + /* read next byte from stream */ + streamval = (streamval << 8) | *++stream_ptr; + W_upper <<= 8; + } + } + + streamdata->stream_index = (int)(stream_ptr - streamdata->stream); + streamdata->W_upper = W_upper; + streamdata->streamval = streamval; + + + /* find number of bytes in original stream (determined by current interval width) */ + if ( W_upper > 0x01FFFFFF ) + return streamdata->stream_index - 2; + else + return streamdata->stream_index - 1; +} diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c b/webrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c new file mode 100644 index 0000000..eeed7ae --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * arith_routines.h + * + * This file contains functions for arithmatically encoding and + * decoding DFT coefficients. 
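The arithmetic-coding routines above repeatedly need `(W * cdf) >> 16` with a 32-bit interval width `W` and a 16-bit cumulative probability, which would overflow 32-bit arithmetic if done directly. They avoid 64-bit math by splitting `W` into 16-bit halves. A standalone check of that identity (function name and test values are arbitrary):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Compute (W * cdf) >> 16 using only 32-bit intermediates,
 * exactly as the interval-update code above does. */
uint32_t scale_interval(uint32_t W, uint16_t cdf) {
  uint32_t W_LSB = W & 0x0000FFFF;
  uint32_t W_MSB = W >> 16;
  /* (W_MSB*2^16 + W_LSB)*cdf >> 16  ==  W_MSB*cdf + (W_LSB*cdf >> 16) */
  return W_MSB * cdf + ((W_LSB * cdf) >> 16);
}

int main(void) {
  uint32_t W = 0xFF123456u;  /* a renormalized interval width, >= 2^24 */
  uint16_t cdf = 40000u;     /* a Q16 cumulative probability */
  uint64_t ref = ((uint64_t)W * cdf) >> 16;
  printf("split: %u  64-bit: %llu\n", scale_interval(W, cdf),
         (unsigned long long)ref);
  assert(scale_interval(W, cdf) == (uint32_t)ref);  /* bit-exact */
  return 0;
}
```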
+ * + */ + + +#include "arith_routines.h" + + + +static const int32_t kHistEdgesQ15[51] = { + -327680, -314573, -301466, -288359, -275252, -262144, -249037, -235930, -222823, -209716, + -196608, -183501, -170394, -157287, -144180, -131072, -117965, -104858, -91751, -78644, + -65536, -52429, -39322, -26215, -13108, 0, 13107, 26214, 39321, 52428, + 65536, 78643, 91750, 104857, 117964, 131072, 144179, 157286, 170393, 183500, + 196608, 209715, 222822, 235929, 249036, 262144, 275251, 288358, 301465, 314572, + 327680}; + + +static const int kCdfSlopeQ0[51] = { /* Q0 */ + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 13, 23, 47, 87, 154, 315, 700, 1088, + 2471, 6064, 14221, 21463, 36634, 36924, 19750, 13270, 5806, 2312, + 1095, 660, 316, 145, 86, 41, 32, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 0}; + + +static const int kCdfQ16[51] = { /* Q16 */ + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, + 20, 22, 24, 29, 38, 57, 92, 153, 279, 559, + 994, 1983, 4408, 10097, 18682, 33336, 48105, 56005, 61313, 63636, + 64560, 64998, 65262, 65389, 65447, 65481, 65497, 65510, 65512, 65514, + 65516, 65518, 65520, 65522, 65524, 65526, 65528, 65530, 65532, 65534, + 65535}; + + + +/* function to be converted to fixed point */ +static __inline uint32_t piecewise(int32_t xinQ15) { + + int32_t ind, qtmp1, qtmp2, qtmp3; + uint32_t tmpUW32; + + + qtmp2 = xinQ15; + + if (qtmp2 < kHistEdgesQ15[0]) { + qtmp2 = kHistEdgesQ15[0]; + } + if (qtmp2 > kHistEdgesQ15[50]) { + qtmp2 = kHistEdgesQ15[50]; + } + + qtmp1 = qtmp2 - kHistEdgesQ15[0]; /* Q15 - Q15 = Q15 */ + ind = (qtmp1 * 5) >> 16; /* 2^16 / 5 = 0.4 in Q15 */ + /* Q15 -> Q0 */ + qtmp1 = qtmp2 - kHistEdgesQ15[ind]; /* Q15 - Q15 = Q15 */ + qtmp2 = kCdfSlopeQ0[ind] * qtmp1; /* Q0 * Q15 = Q15 */ + qtmp3 = qtmp2>>15; /* Q15 -> Q0 */ + + tmpUW32 = kCdfQ16[ind] + qtmp3; /* Q0 + Q0 = Q0 */ + return tmpUW32; +} + + + +int WebRtcIsac_EncLogisticMulti2( + Bitstr *streamdata, /* in-/output struct containing bitstream */ + int16_t *dataQ7, /* input: data vector */ + const uint16_t *envQ8, /* input: side info vector defining the width of the pdf */ + const int N, /* input: data vector length / 2 */ + const int16_t isSWB12kHz) +{ + uint32_t W_lower, W_upper; + uint32_t W_upper_LSB, W_upper_MSB; + uint8_t *stream_ptr; + uint8_t *maxStreamPtr; + uint8_t *stream_ptr_carry; + uint32_t cdf_lo, cdf_hi; + int k; + + /* point to beginning of stream buffer */ + stream_ptr = streamdata->stream + streamdata->stream_index; + W_upper = streamdata->W_upper; + + maxStreamPtr = streamdata->stream + STREAM_SIZE_MAX_60 - 1; + for (k = 0; k < N; k++) + { + /* compute cdf_lower and cdf_upper by evaluating the piecewise linear cdf */ + cdf_lo = piecewise((*dataQ7 - 64) * *envQ8); + cdf_hi = piecewise((*dataQ7 + 64) * *envQ8); + + /* test and clip if probability gets too small */ + while (cdf_lo+1 >= cdf_hi) { + /* clip */ + if (*dataQ7 > 0) { + *dataQ7 -= 128; + cdf_hi = cdf_lo; + cdf_lo = piecewise((*dataQ7 - 64) * *envQ8); + } else { + *dataQ7 += 128; + cdf_lo = cdf_hi; + cdf_hi = piecewise((*dataQ7 + 64) * *envQ8); + } + } + + dataQ7++; + // increment only once per 4 iterations for SWB-16kHz or WB + // increment only once per 2 iterations for SWB-12kHz + envQ8 += (isSWB12kHz)? 
(k & 1):((k & 1) & (k >> 1)); + + + /* update interval */ + W_upper_LSB = W_upper & 0x0000FFFF; + W_upper_MSB = W_upper >> 16; + W_lower = W_upper_MSB * cdf_lo; + W_lower += (W_upper_LSB * cdf_lo) >> 16; + W_upper = W_upper_MSB * cdf_hi; + W_upper += (W_upper_LSB * cdf_hi) >> 16; + + /* shift interval such that it begins at zero */ + W_upper -= ++W_lower; + + /* add integer to bitstream */ + streamdata->streamval += W_lower; + + /* handle carry */ + if (streamdata->streamval < W_lower) + { + /* propagate carry */ + stream_ptr_carry = stream_ptr; + while (!(++(*--stream_ptr_carry))); + } + + /* renormalize interval, store most significant byte of streamval and update streamval */ + while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */ + { + W_upper <<= 8; + *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24); + + if(stream_ptr > maxStreamPtr) + { + return -ISAC_DISALLOWED_BITSTREAM_LENGTH; + } + streamdata->streamval <<= 8; + } + } + + /* calculate new stream_index */ + streamdata->stream_index = (int)(stream_ptr - streamdata->stream); + streamdata->W_upper = W_upper; + + return 0; +} + + + +int WebRtcIsac_DecLogisticMulti2( + int16_t *dataQ7, /* output: data vector */ + Bitstr *streamdata, /* in-/output struct containing bitstream */ + const uint16_t *envQ8, /* input: side info vector defining the width of the pdf */ + const int16_t *ditherQ7,/* input: dither vector */ + const int N, /* input: data vector length */ + const int16_t isSWB12kHz) +{ + uint32_t W_lower, W_upper; + uint32_t W_tmp; + uint32_t W_upper_LSB, W_upper_MSB; + uint32_t streamval; + const uint8_t *stream_ptr; + uint32_t cdf_tmp; + int16_t candQ7; + int k; + + stream_ptr = streamdata->stream + streamdata->stream_index; + W_upper = streamdata->W_upper; + if (streamdata->stream_index == 0) /* first time decoder is called for this stream */ + { + /* read first word from bytestream */ + streamval = *stream_ptr << 24; + streamval |= *++stream_ptr << 16; + streamval |= *++stream_ptr << 8; + streamval |= *++stream_ptr; + } else { + streamval = streamdata->streamval; + } + + + for (k = 0; k < N; k++) + { + /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */ + W_upper_LSB = W_upper & 0x0000FFFF; + W_upper_MSB = W_upper >> 16; + + /* find first candidate by inverting the logistic cdf */ + candQ7 = - *ditherQ7 + 64; + cdf_tmp = piecewise(candQ7 * *envQ8); + + W_tmp = W_upper_MSB * cdf_tmp; + W_tmp += (W_upper_LSB * cdf_tmp) >> 16; + if (streamval > W_tmp) + { + W_lower = W_tmp; + candQ7 += 128; + cdf_tmp = piecewise(candQ7 * *envQ8); + + W_tmp = W_upper_MSB * cdf_tmp; + W_tmp += (W_upper_LSB * cdf_tmp) >> 16; + while (streamval > W_tmp) + { + W_lower = W_tmp; + candQ7 += 128; + cdf_tmp = piecewise(candQ7 * *envQ8); + + W_tmp = W_upper_MSB * cdf_tmp; + W_tmp += (W_upper_LSB * cdf_tmp) >> 16; + + /* error check */ + if (W_lower == W_tmp) return -1; + } + W_upper = W_tmp; + + /* another sample decoded */ + *dataQ7 = candQ7 - 64; + } + else + { + W_upper = W_tmp; + candQ7 -= 128; + cdf_tmp = piecewise(candQ7 * *envQ8); + + W_tmp = W_upper_MSB * cdf_tmp; + W_tmp += (W_upper_LSB * cdf_tmp) >> 16; + while ( !(streamval > W_tmp) ) + { + W_upper = W_tmp; + candQ7 -= 128; + cdf_tmp = piecewise(candQ7 * *envQ8); + + W_tmp = W_upper_MSB * cdf_tmp; + W_tmp += (W_upper_LSB * cdf_tmp) >> 16; + + /* error check */ + if (W_upper == W_tmp) return -1; + } + W_lower = W_tmp; + + /* another sample decoded */ + *dataQ7 = candQ7 + 64; + } + ditherQ7++; + dataQ7++; + // increment only once per 4 iterations for SWB-16kHz or WB 
+ // increment only once per 2 iterations for SWB-12kHz + envQ8 += (isSWB12kHz)? (k & 1):((k & 1) & (k >> 1)); + + /* shift interval to start at zero */ + W_upper -= ++W_lower; + + /* add integer to bitstream */ + streamval -= W_lower; + + /* renormalize interval and update streamval */ + while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */ + { + /* read next byte from stream */ + streamval = (streamval << 8) | *++stream_ptr; + W_upper <<= 8; + } + } + + streamdata->stream_index = (int)(stream_ptr - streamdata->stream); + streamdata->W_upper = W_upper; + streamdata->streamval = streamval; + + /* find number of bytes in original stream (determined by current interval width) */ + if ( W_upper > 0x01FFFFFF ) + return streamdata->stream_index - 2; + else + return streamdata->stream_index - 1; +} diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c b/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c new file mode 100644 index 0000000..d47eb1f --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <memory.h> +#include <string.h> +#ifdef WEBRTC_ANDROID +#include <stdlib.h> +#endif +#include "pitch_estimator.h" +#include "lpc_analysis.h" +#include "codec.h" + + + +void WebRtcIsac_AllPoleFilter(double* InOut, + double* Coef, + size_t lengthInOut, + int orderCoef) { + /* the state of filter is assumed to be in InOut[-1] to InOut[-orderCoef] */ + double scal; + double sum; + size_t n; + int k; + + //if (fabs(Coef[0]-1.0)<0.001) { + if ( (Coef[0] > 0.9999) && (Coef[0] < 1.0001) ) + { + for(n = 0; n < lengthInOut; n++) + { + sum = Coef[1] * InOut[-1]; + for(k = 2; k <= orderCoef; k++){ + sum += Coef[k] * InOut[-k]; + } + *InOut++ -= sum; + } + } + else + { + scal = 1.0 / Coef[0]; + for(n=0;n<lengthInOut;n++) + { + *InOut *= scal; + for(k=1;k<=orderCoef;k++){ + *InOut -= scal*Coef[k]*InOut[-k]; + } + InOut++; + } + } +} + + +void WebRtcIsac_AllZeroFilter(double* In, + double* Coef, + size_t lengthInOut, + int orderCoef, + double* Out) { + /* the state of filter is assumed to be in In[-1] to In[-orderCoef] */ + + size_t n; + int k; + double tmp; + + for(n = 0; n < lengthInOut; n++) + { + tmp = In[0] * Coef[0]; + + for(k = 1; k <= orderCoef; k++){ + tmp += Coef[k] * In[-k]; + } + + *Out++ = tmp; + In++; + } +} + + +void WebRtcIsac_ZeroPoleFilter(double* In, + double* ZeroCoef, + double* PoleCoef, + size_t lengthInOut, + int orderCoef, + double* Out) { + /* the state of the zero section is assumed to be in In[-1] to In[-orderCoef] */ + /* the state of the pole section is assumed to be in Out[-1] to Out[-orderCoef] */ + + WebRtcIsac_AllZeroFilter(In,ZeroCoef,lengthInOut,orderCoef,Out); + WebRtcIsac_AllPoleFilter(Out,PoleCoef,lengthInOut,orderCoef); +} + + +void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order) { + size_t lag, n; + double sum, prod; + const double *x_lag; + + for (lag = 0; lag <= order; lag++) + { + sum = 0.0f; + x_lag = &x[lag]; + prod = x[0] * x_lag[0]; + for (n = 1; n < N - lag; n++) { + sum += prod; + prod = x[n] * x_lag[n]; + } + sum += 
prod; + r[lag] = sum; + } + +} + + +void WebRtcIsac_BwExpand(double* out, double* in, double coef, size_t length) { + size_t i; + double chirp; + + chirp = coef; + + out[0] = in[0]; + for (i = 1; i < length; i++) { + out[i] = chirp * in[i]; + chirp *= coef; + } +} + +void WebRtcIsac_WeightingFilter(const double* in, + double* weiout, + double* whiout, + WeightFiltstr* wfdata) { + double tmpbuffer[PITCH_FRAME_LEN + PITCH_WLPCBUFLEN]; + double corr[PITCH_WLPCORDER+1], rc[PITCH_WLPCORDER+1]; + double apol[PITCH_WLPCORDER+1], apolr[PITCH_WLPCORDER+1]; + double rho=0.9, *inp, *dp, *dp2; + double whoutbuf[PITCH_WLPCBUFLEN + PITCH_WLPCORDER]; + double weoutbuf[PITCH_WLPCBUFLEN + PITCH_WLPCORDER]; + double *weo, *who, opol[PITCH_WLPCORDER+1], ext[PITCH_WLPCWINLEN]; + int k, n, endpos, start; + + /* Set up buffer and states */ + memcpy(tmpbuffer, wfdata->buffer, sizeof(double) * PITCH_WLPCBUFLEN); + memcpy(tmpbuffer+PITCH_WLPCBUFLEN, in, sizeof(double) * PITCH_FRAME_LEN); + memcpy(wfdata->buffer, tmpbuffer+PITCH_FRAME_LEN, sizeof(double) * PITCH_WLPCBUFLEN); + + dp=weoutbuf; + dp2=whoutbuf; + for (k=0;k<PITCH_WLPCORDER;k++) { + *dp++ = wfdata->weostate[k]; + *dp2++ = wfdata->whostate[k]; + opol[k]=0.0; + } + opol[0]=1.0; + opol[PITCH_WLPCORDER]=0.0; + weo=dp; + who=dp2; + + endpos=PITCH_WLPCBUFLEN + PITCH_SUBFRAME_LEN; + inp=tmpbuffer + PITCH_WLPCBUFLEN; + + for (n=0; n<PITCH_SUBFRAMES; n++) { + /* Windowing */ + start=endpos-PITCH_WLPCWINLEN; + for (k=0; k<PITCH_WLPCWINLEN; k++) { + ext[k]=wfdata->window[k]*tmpbuffer[start+k]; + } + + /* Get LPC polynomial */ + WebRtcIsac_AutoCorr(corr, ext, PITCH_WLPCWINLEN, PITCH_WLPCORDER); + corr[0]=1.01*corr[0]+1.0; /* White noise correction */ + WebRtcIsac_LevDurb(apol, rc, corr, PITCH_WLPCORDER); + WebRtcIsac_BwExpand(apolr, apol, rho, PITCH_WLPCORDER+1); + + /* Filtering */ + WebRtcIsac_ZeroPoleFilter(inp, apol, apolr, PITCH_SUBFRAME_LEN, PITCH_WLPCORDER, weo); + WebRtcIsac_ZeroPoleFilter(inp, apolr, opol, PITCH_SUBFRAME_LEN, PITCH_WLPCORDER, who); + + inp+=PITCH_SUBFRAME_LEN; + endpos+=PITCH_SUBFRAME_LEN; + weo+=PITCH_SUBFRAME_LEN; + who+=PITCH_SUBFRAME_LEN; + } + + /* Export filter states */ + for (k=0;k<PITCH_WLPCORDER;k++) { + wfdata->weostate[k]=weoutbuf[PITCH_FRAME_LEN+k]; + wfdata->whostate[k]=whoutbuf[PITCH_FRAME_LEN+k]; + } + + /* Export output data */ + memcpy(weiout, weoutbuf+PITCH_WLPCORDER, sizeof(double) * PITCH_FRAME_LEN); + memcpy(whiout, whoutbuf+PITCH_WLPCORDER, sizeof(double) * PITCH_FRAME_LEN); +} + + +static const double APupper[ALLPASSSECTIONS] = {0.0347, 0.3826}; +static const double APlower[ALLPASSSECTIONS] = {0.1544, 0.744}; + + +void WebRtcIsac_AllpassFilterForDec(double* InOut, + const double* APSectionFactors, + size_t lengthInOut, + double* FilterState) { + //This performs all-pass filtering--a series of first order all-pass sections are used + //to filter the input in a cascade manner. 
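`WebRtcIsac_WeightingFilter` above computes an autocorrelation with `WebRtcIsac_AutoCorr` and then calls `WebRtcIsac_LevDurb`, which is not included in this diff. For reference, a generic Levinson-Durbin recursion looks roughly like the sketch below; the function name, the scratch-buffer limit, and the sign conventions are illustrative assumptions, not the codec's actual implementation:

```c
/* Generic Levinson-Durbin: from autocorrelation r[0..order], compute the
 * prediction-error filter a[0..order] (a[0] = 1) and reflection
 * coefficients k[1..order] (k[0] unused).  Returns -1 on failure. */
int lev_durbin(const double *r, double *a, double *k, int order) {
  double tmp[32];  /* scratch; this sketch assumes order <= 31 */
  if (order >= 32) return -1;
  double err = r[0];
  a[0] = 1.0;
  for (int i = 1; i <= order; ++i) {
    if (err <= 0.0) return -1;  /* autocorrelation not positive definite */
    double acc = r[i];
    for (int j = 1; j < i; ++j) acc += a[j] * r[i - j];
    k[i] = -acc / err;
    /* Update predictor coefficients through a scratch copy. */
    for (int j = 1; j < i; ++j) tmp[j] = a[j] + k[i] * a[i - j];
    for (int j = 1; j < i; ++j) a[j] = tmp[j];
    a[i] = k[i];
    err *= 1.0 - k[i] * k[i];  /* remaining prediction-error energy */
  }
  return 0;
}
```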
+ size_t n,j; + double temp; + for (j=0; j<ALLPASSSECTIONS; j++){ + for (n=0;n<lengthInOut;n+=2){ + temp = InOut[n]; //store input + InOut[n] = FilterState[j] + APSectionFactors[j]*temp; + FilterState[j] = -APSectionFactors[j]*InOut[n] + temp; + } + } +} + +void WebRtcIsac_DecimateAllpass(const double* in, + double* state_in, + size_t N, + double* out) { + size_t n; + double data_vec[PITCH_FRAME_LEN]; + + /* copy input */ + memcpy(data_vec+1, in, sizeof(double) * (N-1)); + + data_vec[0] = state_in[2*ALLPASSSECTIONS]; //the z^(-1) state + state_in[2*ALLPASSSECTIONS] = in[N-1]; + + WebRtcIsac_AllpassFilterForDec(data_vec+1, APupper, N, state_in); + WebRtcIsac_AllpassFilterForDec(data_vec, APlower, N, state_in+ALLPASSSECTIONS); + + for (n=0;n<N/2;n++) + out[n] = data_vec[2*n] + data_vec[2*n+1]; + +} + + +/* create high-pass filter ocefficients + * z = 0.998 * exp(j*2*pi*35/8000); + * p = 0.94 * exp(j*2*pi*140/8000); + * HP_b = [1, -2*real(z), abs(z)^2]; + * HP_a = [1, -2*real(p), abs(p)^2]; */ +static const double a_coef[2] = { 1.86864659625574, -0.88360000000000}; +static const double b_coef[2] = {-1.99524591718270, 0.99600400000000}; + +/* second order high-pass filter */ +void WebRtcIsac_Highpass(const double* in, + double* out, + double* state, + size_t N) { + size_t k; + + for (k=0; k<N; k++) { + *out = *in + state[1]; + state[1] = state[0] + b_coef[0] * *in + a_coef[0] * *out; + state[0] = b_coef[1] * *in++ + a_coef[1] * *out++; + } +} diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/filterbank_tables.c b/webrtc/modules/audio_coding/codecs/isac/main/source/filterbank_tables.c new file mode 100644 index 0000000..0f844af --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/filterbank_tables.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
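The decimator above (`WebRtcIsac_AllpassFilterForDec` / `WebRtcIsac_DecimateAllpass`) is built from cascaded first-order all-pass sections, one cascade per polyphase branch, whose outputs are summed to halve the sample rate. A condensed restatement of one such section, with its transfer function made explicit (the wrapper name is illustrative):

```c
#include <stddef.h>

/* One first-order all-pass section of the form used above:
 *   H(z) = (a + z^-1) / (1 + a * z^-1)
 * It has unit gain at every frequency and only alters phase, which is why
 * summing two such branches (one per polyphase phase) forms a half-band
 * low-pass suitable for decimation by 2. */
void allpass1(double *x, size_t n, double a, double *state) {
  for (size_t i = 0; i < n; ++i) {
    double in = x[i];
    x[i] = *state + a * in;   /* y[n]   = s[n] + a * x[n] */
    *state = in - a * x[i];   /* s[n+1] = x[n] - a * y[n] */
  }
}
```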
+ */ + +/* filterbank_tables.c*/ +/* This file contains variables that are used in filterbanks.c*/ + +#include "filterbank_tables.h" +#include "settings.h" + +/* The composite all-pass filter factors */ +const float WebRtcIsac_kCompositeApFactorsFloat[4] = { + 0.03470000000000f, 0.15440000000000f, 0.38260000000000f, 0.74400000000000f}; + +/* The upper channel all-pass filter factors */ +const float WebRtcIsac_kUpperApFactorsFloat[2] = { + 0.03470000000000f, 0.38260000000000f}; + +/* The lower channel all-pass filter factors */ +const float WebRtcIsac_kLowerApFactorsFloat[2] = { + 0.15440000000000f, 0.74400000000000f}; + +/* The matrix for transforming the backward composite state to upper channel state */ +const float WebRtcIsac_kTransform1Float[8] = { + -0.00158678506084f, 0.00127157815343f, -0.00104805672709f, 0.00084837248079f, + 0.00134467983258f, -0.00107756549387f, 0.00088814793277f, -0.00071893072525f}; + +/* The matrix for transforming the backward composite state to lower channel state */ +const float WebRtcIsac_kTransform2Float[8] = { + -0.00170686041697f, 0.00136780109829f, -0.00112736532350f, 0.00091257055385f, + 0.00103094281812f, -0.00082615076557f, 0.00068092756088f, -0.00055119165484f}; diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/filterbank_tables.h b/webrtc/modules/audio_coding/codecs/isac/main/source/filterbank_tables.h new file mode 100644 index 0000000..e8fda5e --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/filterbank_tables.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * filterbank_tables.h + * + * Header file for variables that are defined in + * filterbank_tables.c. 
+ * + */ + +#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTERBANK_TABLES_H_ +#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTERBANK_TABLES_H_ + +#include "structs.h" + +/********************* Coefficient Tables ************************/ +/* The number of composite all-pass filter factors */ +#define NUMBEROFCOMPOSITEAPSECTIONS 4 + +/* The number of all-pass filter factors in an upper or lower channel*/ +#define NUMBEROFCHANNELAPSECTIONS 2 + +/* The composite all-pass filter factors */ +extern const float WebRtcIsac_kCompositeApFactorsFloat[4]; + +/* The upper channel all-pass filter factors */ +extern const float WebRtcIsac_kUpperApFactorsFloat[2]; + +/* The lower channel all-pass filter factors */ +extern const float WebRtcIsac_kLowerApFactorsFloat[2]; + +/* The matrix for transforming the backward composite state to upper channel state */ +extern const float WebRtcIsac_kTransform1Float[8]; + +/* The matrix for transforming the backward composite state to lower channel state */ +extern const float WebRtcIsac_kTransform2Float[8]; + +#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTERBANK_TABLES_H_ */ diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c b/webrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c new file mode 100644 index 0000000..671fd32 --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * filterbanks.c + * + * This file contains function WebRtcIsac_AllPassFilter2Float, + * WebRtcIsac_SplitAndFilter, and WebRtcIsac_FilterAndCombine + * which implement filterbanks that produce decimated lowpass and + * highpass versions of a signal, and performs reconstruction. + * + */ + +#include "settings.h" +#include "filterbank_tables.h" +#include "codec.h" + +/* This function performs all-pass filtering--a series of first order all-pass + * sections are used to filter the input in a cascade manner. + * The input is overwritten!! + */ +static void WebRtcIsac_AllPassFilter2Float(float *InOut, const float *APSectionFactors, + int lengthInOut, int NumberOfSections, + float *FilterState) +{ + int n, j; + float temp; + for (j=0; j<NumberOfSections; j++){ + for (n=0;n<lengthInOut;n++){ + temp = FilterState[j] + APSectionFactors[j] * InOut[n]; + FilterState[j] = -APSectionFactors[j] * temp + InOut[n]; + InOut[n] = temp; + } + } +} + +/* HPstcoeff_in = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */ +static const float kHpStCoefInFloat[4] = +{-1.94895953203325f, 0.94984516000000f, -0.05101826139794f, 0.05015484000000f}; + +/* Function WebRtcIsac_SplitAndFilter + * This function creates low-pass and high-pass decimated versions of part of + the input signal, and part of the signal in the input 'lookahead buffer'. + + INPUTS: + in: a length FRAMESAMPLES array of input samples + prefiltdata: input data structure containing the filterbank states + and lookahead samples from the previous encoding + iteration. + OUTPUTS: + LP: a FRAMESAMPLES_HALF array of low-pass filtered samples that + have been phase equalized. 
The first QLOOKAHEAD samples are + based on the samples in the two prefiltdata->INLABUFx arrays + each of length QLOOKAHEAD. + The remaining FRAMESAMPLES_HALF-QLOOKAHEAD samples are based + on the first FRAMESAMPLES_HALF-QLOOKAHEAD samples of the input + array in[]. + HP: a FRAMESAMPLES_HALF array of high-pass filtered samples that + have been phase equalized. The first QLOOKAHEAD samples are + based on the samples in the two prefiltdata->INLABUFx arrays + each of length QLOOKAHEAD. + The remaining FRAMESAMPLES_HALF-QLOOKAHEAD samples are based + on the first FRAMESAMPLES_HALF-QLOOKAHEAD samples of the input + array in[]. + + LP_la: a FRAMESAMPLES_HALF array of low-pass filtered samples. + These samples are not phase equalized. They are computed + from the samples in the in[] array. + HP_la: a FRAMESAMPLES_HALF array of high-pass filtered samples + that are not phase equalized. They are computed from + the in[] vector. + prefiltdata: this input data structure's filterbank state and + lookahead sample buffers are updated for the next + encoding iteration. +*/ +void WebRtcIsac_SplitAndFilterFloat(float *pin, float *LP, float *HP, + double *LP_la, double *HP_la, + PreFiltBankstr *prefiltdata) +{ + int k,n; + float CompositeAPFilterState[NUMBEROFCOMPOSITEAPSECTIONS]; + float ForTransform_CompositeAPFilterState[NUMBEROFCOMPOSITEAPSECTIONS]; + float ForTransform_CompositeAPFilterState2[NUMBEROFCOMPOSITEAPSECTIONS]; + float tempinoutvec[FRAMESAMPLES+MAX_AR_MODEL_ORDER]; + float tempin_ch1[FRAMESAMPLES+MAX_AR_MODEL_ORDER]; + float tempin_ch2[FRAMESAMPLES+MAX_AR_MODEL_ORDER]; + float in[FRAMESAMPLES]; + float ftmp; + + + /* High pass filter */ + + for (k=0;k<FRAMESAMPLES;k++) { + in[k] = pin[k] + kHpStCoefInFloat[2] * prefiltdata->HPstates_float[0] + + kHpStCoefInFloat[3] * prefiltdata->HPstates_float[1]; + ftmp = pin[k] - kHpStCoefInFloat[0] * prefiltdata->HPstates_float[0] - + kHpStCoefInFloat[1] * prefiltdata->HPstates_float[1]; + prefiltdata->HPstates_float[1] = prefiltdata->HPstates_float[0]; + prefiltdata->HPstates_float[0] = ftmp; + } + + /* + % backwards all-pass filtering to obtain zero-phase + [tmp1(N2+LA:-1:LA+1, 1), state1] = filter(Q.coef, Q.coef(end:-1:1), in(N:-2:2)); + tmp1(LA:-1:1) = filter(Q.coef, Q.coef(end:-1:1), Q.LookAheadBuf1, state1); + Q.LookAheadBuf1 = in(N:-2:N-2*LA+2); + */ + /*Backwards all-pass filter the odd samples of the input (upper channel) + to eventually obtain zero phase. The composite all-pass filter (comprised of both + the upper and lower channel all-pass filsters in series) is used for the + filtering. */ + + /* First Channel */ + + /*initial state of composite filter is zero */ + for (k=0;k<NUMBEROFCOMPOSITEAPSECTIONS;k++){ + CompositeAPFilterState[k] = 0.0; + } + /* put every other sample of input into a temporary vector in reverse (backward) order*/ + for (k=0;k<FRAMESAMPLES_HALF;k++) { + tempinoutvec[k] = in[FRAMESAMPLES-1-2*k]; + } + + /* now all-pass filter the backwards vector. Output values overwrite the input vector. 
*/ + WebRtcIsac_AllPassFilter2Float(tempinoutvec, WebRtcIsac_kCompositeApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState); + + /* save the backwards filtered output for later forward filtering, + but write it in forward order*/ + for (k=0;k<FRAMESAMPLES_HALF;k++) { + tempin_ch1[FRAMESAMPLES_HALF+QLOOKAHEAD-1-k] = tempinoutvec[k]; + } + + /* save the backwards filter state becaue it will be transformed + later into a forward state */ + for (k=0; k<NUMBEROFCOMPOSITEAPSECTIONS; k++) { + ForTransform_CompositeAPFilterState[k] = CompositeAPFilterState[k]; + } + + /* now backwards filter the samples in the lookahead buffer. The samples were + placed there in the encoding of the previous frame. The output samples + overwrite the input samples */ + WebRtcIsac_AllPassFilter2Float(prefiltdata->INLABUF1_float, + WebRtcIsac_kCompositeApFactorsFloat, QLOOKAHEAD, + NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState); + + /* save the output, but write it in forward order */ + /* write the lookahead samples for the next encoding iteration. Every other + sample at the end of the input frame is written in reverse order for the + lookahead length. Exported in the prefiltdata structure. */ + for (k=0;k<QLOOKAHEAD;k++) { + tempin_ch1[QLOOKAHEAD-1-k]=prefiltdata->INLABUF1_float[k]; + prefiltdata->INLABUF1_float[k]=in[FRAMESAMPLES-1-2*k]; + } + + /* Second Channel. This is exactly like the first channel, except that the + even samples are now filtered instead (lower channel). */ + for (k=0;k<NUMBEROFCOMPOSITEAPSECTIONS;k++){ + CompositeAPFilterState[k] = 0.0; + } + + for (k=0;k<FRAMESAMPLES_HALF;k++) { + tempinoutvec[k] = in[FRAMESAMPLES-2-2*k]; + } + + WebRtcIsac_AllPassFilter2Float(tempinoutvec, WebRtcIsac_kCompositeApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState); + + for (k=0;k<FRAMESAMPLES_HALF;k++) { + tempin_ch2[FRAMESAMPLES_HALF+QLOOKAHEAD-1-k] = tempinoutvec[k]; + } + + for (k=0; k<NUMBEROFCOMPOSITEAPSECTIONS; k++) { + ForTransform_CompositeAPFilterState2[k] = CompositeAPFilterState[k]; + } + + + WebRtcIsac_AllPassFilter2Float(prefiltdata->INLABUF2_float, + WebRtcIsac_kCompositeApFactorsFloat, QLOOKAHEAD,NUMBEROFCOMPOSITEAPSECTIONS, + CompositeAPFilterState); + + for (k=0;k<QLOOKAHEAD;k++) { + tempin_ch2[QLOOKAHEAD-1-k]=prefiltdata->INLABUF2_float[k]; + prefiltdata->INLABUF2_float[k]=in[FRAMESAMPLES-2-2*k]; + } + + /* Transform filter states from backward to forward */ + /*At this point, each of the states of the backwards composite filters for the + two channels are transformed into forward filtering states for the corresponding + forward channel filters. 
Each channel's forward filtering state from the previous + encoding iteration is added to the transformed state to get a proper forward state */ + + /* So the existing NUMBEROFCOMPOSITEAPSECTIONS x 1 (4x1) state vector is multiplied by a + NUMBEROFCHANNELAPSECTIONSxNUMBEROFCOMPOSITEAPSECTIONS (2x4) transform matrix to get the + new state that is added to the previous 2x1 input state */ + + for (k=0;k<NUMBEROFCHANNELAPSECTIONS;k++){ /* k is row variable */ + for (n=0; n<NUMBEROFCOMPOSITEAPSECTIONS;n++){/* n is column variable */ + prefiltdata->INSTAT1_float[k] += ForTransform_CompositeAPFilterState[n]* + WebRtcIsac_kTransform1Float[k*NUMBEROFCHANNELAPSECTIONS+n]; + prefiltdata->INSTAT2_float[k] += ForTransform_CompositeAPFilterState2[n]* + WebRtcIsac_kTransform2Float[k*NUMBEROFCHANNELAPSECTIONS+n]; + } + } + + /*obtain polyphase components by forward all-pass filtering through each channel */ + /* the backward filtered samples are now forward filtered with the corresponding channel filters */ + /* The all pass filtering automatically updates the filter states which are exported in the + prefiltdata structure */ + WebRtcIsac_AllPassFilter2Float(tempin_ch1,WebRtcIsac_kUpperApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTAT1_float); + WebRtcIsac_AllPassFilter2Float(tempin_ch2,WebRtcIsac_kLowerApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTAT2_float); + + /* Now Construct low-pass and high-pass signals as combinations of polyphase components */ + for (k=0; k<FRAMESAMPLES_HALF; k++) { + LP[k] = 0.5f*(tempin_ch1[k] + tempin_ch2[k]);/* low pass signal*/ + HP[k] = 0.5f*(tempin_ch1[k] - tempin_ch2[k]);/* high pass signal*/ + } + + /* Lookahead LP and HP signals */ + /* now create low pass and high pass signals of the input vector. However, no + backwards filtering is performed, and hence no phase equalization is involved. + Also, the input contains some samples that are lookahead samples. 
The high pass + and low pass signals that are created are used outside this function for analysis + (not encoding) purposes */ + + /* set up input */ + for (k=0; k<FRAMESAMPLES_HALF; k++) { + tempin_ch1[k]=in[2*k+1]; + tempin_ch2[k]=in[2*k]; + } + + /* the input filter states are passed in and updated by the all-pass filtering routine and + exported in the prefiltdata structure*/ + WebRtcIsac_AllPassFilter2Float(tempin_ch1,WebRtcIsac_kUpperApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTATLA1_float); + WebRtcIsac_AllPassFilter2Float(tempin_ch2,WebRtcIsac_kLowerApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTATLA2_float); + + for (k=0; k<FRAMESAMPLES_HALF; k++) { + LP_la[k] = (float)(0.5f*(tempin_ch1[k] + tempin_ch2[k])); /*low pass */ + HP_la[k] = (double)(0.5f*(tempin_ch1[k] - tempin_ch2[k])); /* high pass */ + } + + +}/*end of WebRtcIsac_SplitAndFilter */ + + +/* Combining */ + +/* HPstcoeff_out_1 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */ +static const float kHpStCoefOut1Float[4] = +{-1.99701049409000f, 0.99714204490000f, 0.01701049409000f, -0.01704204490000f}; + +/* HPstcoeff_out_2 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */ +static const float kHpStCoefOut2Float[4] = +{-1.98645294509837f, 0.98672435560000f, 0.00645294509837f, -0.00662435560000f}; + + +/* Function WebRtcIsac_FilterAndCombine */ +/* This is a decoder function that takes the decimated + length FRAMESAMPLES_HALF input low-pass and + high-pass signals and creates a reconstructed fullband + output signal of length FRAMESAMPLES. WebRtcIsac_FilterAndCombine + is the sibling function of WebRtcIsac_SplitAndFilter */ +/* INPUTS: + inLP: a length FRAMESAMPLES_HALF array of input low-pass + samples. + inHP: a length FRAMESAMPLES_HALF array of input high-pass + samples. + postfiltdata: input data structure containing the filterbank + states from the previous decoding iteration. + OUTPUTS: + Out: a length FRAMESAMPLES array of output reconstructed + samples (fullband) based on the input low-pass and + high-pass signals. + postfiltdata: the input data structure containing the filterbank + states is updated for the next decoding iteration */ +void WebRtcIsac_FilterAndCombineFloat(float *InLP, + float *InHP, + float *Out, + PostFiltBankstr *postfiltdata) +{ + int k; + float tempin_ch1[FRAMESAMPLES+MAX_AR_MODEL_ORDER]; + float tempin_ch2[FRAMESAMPLES+MAX_AR_MODEL_ORDER]; + float ftmp, ftmp2; + + /* Form the polyphase signals*/ + for (k=0;k<FRAMESAMPLES_HALF;k++) { + tempin_ch1[k]=InLP[k]+InHP[k]; /* Construct a new upper channel signal*/ + tempin_ch2[k]=InLP[k]-InHP[k]; /* Construct a new lower channel signal*/ + } + + + /* all-pass filter the new upper channel signal. HOWEVER, use the all-pass filter factors + that were used as a lower channel at the encoding side. So at the decoder, the + corresponding all-pass filter factors for each channel are swapped.*/ + WebRtcIsac_AllPassFilter2Float(tempin_ch1, WebRtcIsac_kLowerApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,postfiltdata->STATE_0_UPPER_float); + + /* Now, all-pass filter the new lower channel signal. 
But since all-pass filter factors + at the decoder are swapped from the ones at the encoder, the 'upper' channel + all-pass filter factors (WebRtcIsac_kUpperApFactorsFloat) are used to filter this new + lower channel signal */ + WebRtcIsac_AllPassFilter2Float(tempin_ch2, WebRtcIsac_kUpperApFactorsFloat, + FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,postfiltdata->STATE_0_LOWER_float); + + + /* Merge outputs to form the full length output signal.*/ + for (k=0;k<FRAMESAMPLES_HALF;k++) { + Out[2*k]=tempin_ch2[k]; + Out[2*k+1]=tempin_ch1[k]; + } + + + /* High pass filter */ + + for (k=0;k<FRAMESAMPLES;k++) { + ftmp2 = Out[k] + kHpStCoefOut1Float[2] * postfiltdata->HPstates1_float[0] + + kHpStCoefOut1Float[3] * postfiltdata->HPstates1_float[1]; + ftmp = Out[k] - kHpStCoefOut1Float[0] * postfiltdata->HPstates1_float[0] - + kHpStCoefOut1Float[1] * postfiltdata->HPstates1_float[1]; + postfiltdata->HPstates1_float[1] = postfiltdata->HPstates1_float[0]; + postfiltdata->HPstates1_float[0] = ftmp; + Out[k] = ftmp2; + } + + for (k=0;k<FRAMESAMPLES;k++) { + ftmp2 = Out[k] + kHpStCoefOut2Float[2] * postfiltdata->HPstates2_float[0] + + kHpStCoefOut2Float[3] * postfiltdata->HPstates2_float[1]; + ftmp = Out[k] - kHpStCoefOut2Float[0] * postfiltdata->HPstates2_float[0] - + kHpStCoefOut2Float[1] * postfiltdata->HPstates2_float[1]; + postfiltdata->HPstates2_float[1] = postfiltdata->HPstates2_float[0]; + postfiltdata->HPstates2_float[0] = ftmp; + Out[k] = ftmp2; + } +} diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/intialize.c b/webrtc/modules/audio_coding/codecs/isac/main/source/intialize.c new file mode 100644 index 0000000..01e683c --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/intialize.c @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
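The split/combine pair in `filterbanks.c` above is symmetric around a simple sum/difference step: analysis forms LP = (ch1 + ch2)/2 and HP = (ch1 - ch2)/2, and synthesis recovers ch1 = LP + HP, ch2 = LP - HP before re-interleaving. A tiny check of that inverse relationship in isolation (the real codec additionally runs the all-pass phase equalizers around this step; the test values are arbitrary dyadic numbers so the float arithmetic is exact):

```c
#include <assert.h>

int main(void) {
  float ch1 = 0.25f, ch2 = -0.75f;  /* arbitrary polyphase samples */
  float lp = 0.5f * (ch1 + ch2);    /* analysis: low-pass component  */
  float hp = 0.5f * (ch1 - ch2);    /* analysis: high-pass component */
  /* Synthesis undoes the split exactly for these dyadic values. */
  assert(lp + hp == ch1);
  assert(lp - hp == ch2);
  return 0;
}
```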
+ */ + +/* encode.c - Encoding function for the iSAC coder */ + +#include "structs.h" +#include "codec.h" +#include "pitch_estimator.h" + +#include <math.h> + +void WebRtcIsac_InitMasking(MaskFiltstr *maskdata) { + + int k; + + for (k = 0; k < WINLEN; k++) { + maskdata->DataBufferLo[k] = 0.0; + maskdata->DataBufferHi[k] = 0.0; + } + for (k = 0; k < ORDERLO+1; k++) { + maskdata->CorrBufLo[k] = 0.0; + maskdata->PreStateLoF[k] = 0.0; + maskdata->PreStateLoG[k] = 0.0; + maskdata->PostStateLoF[k] = 0.0; + maskdata->PostStateLoG[k] = 0.0; + } + for (k = 0; k < ORDERHI+1; k++) { + maskdata->CorrBufHi[k] = 0.0; + maskdata->PreStateHiF[k] = 0.0; + maskdata->PreStateHiG[k] = 0.0; + maskdata->PostStateHiF[k] = 0.0; + maskdata->PostStateHiG[k] = 0.0; + } + + maskdata->OldEnergy = 10.0; + return; +} + +void WebRtcIsac_InitPreFilterbank(PreFiltBankstr *prefiltdata) +{ + int k; + + for (k = 0; k < QLOOKAHEAD; k++) { + prefiltdata->INLABUF1[k] = 0; + prefiltdata->INLABUF2[k] = 0; + + prefiltdata->INLABUF1_float[k] = 0; + prefiltdata->INLABUF2_float[k] = 0; + } + for (k = 0; k < 2*(QORDER-1); k++) { + prefiltdata->INSTAT1[k] = 0; + prefiltdata->INSTAT2[k] = 0; + prefiltdata->INSTATLA1[k] = 0; + prefiltdata->INSTATLA2[k] = 0; + + prefiltdata->INSTAT1_float[k] = 0; + prefiltdata->INSTAT2_float[k] = 0; + prefiltdata->INSTATLA1_float[k] = 0; + prefiltdata->INSTATLA2_float[k] = 0; + } + + /* High pass filter states */ + prefiltdata->HPstates[0] = 0.0; + prefiltdata->HPstates[1] = 0.0; + + prefiltdata->HPstates_float[0] = 0.0f; + prefiltdata->HPstates_float[1] = 0.0f; + + return; +} + +void WebRtcIsac_InitPostFilterbank(PostFiltBankstr *postfiltdata) +{ + int k; + + for (k = 0; k < 2*POSTQORDER; k++) { + postfiltdata->STATE_0_LOWER[k] = 0; + postfiltdata->STATE_0_UPPER[k] = 0; + + postfiltdata->STATE_0_LOWER_float[k] = 0; + postfiltdata->STATE_0_UPPER_float[k] = 0; + } + + /* High pass filter states */ + postfiltdata->HPstates1[0] = 0.0; + postfiltdata->HPstates1[1] = 0.0; + + postfiltdata->HPstates2[0] = 0.0; + postfiltdata->HPstates2[1] = 0.0; + + postfiltdata->HPstates1_float[0] = 0.0f; + postfiltdata->HPstates1_float[1] = 0.0f; + + postfiltdata->HPstates2_float[0] = 0.0f; + postfiltdata->HPstates2_float[1] = 0.0f; + + return; +} + + +void WebRtcIsac_InitPitchFilter(PitchFiltstr *pitchfiltdata) +{ + int k; + + for (k = 0; k < PITCH_BUFFSIZE; k++) { + pitchfiltdata->ubuf[k] = 0.0; + } + pitchfiltdata->ystate[0] = 0.0; + for (k = 1; k < (PITCH_DAMPORDER); k++) { + pitchfiltdata->ystate[k] = 0.0; + } + pitchfiltdata->oldlagp[0] = 50.0; + pitchfiltdata->oldgainp[0] = 0.0; +} + +void WebRtcIsac_InitWeightingFilter(WeightFiltstr *wfdata) +{ + int k; + double t, dtmp, dtmp2, denum, denum2; + + for (k=0;k<PITCH_WLPCBUFLEN;k++) + wfdata->buffer[k]=0.0; + + for (k=0;k<PITCH_WLPCORDER;k++) { + wfdata->istate[k]=0.0; + wfdata->weostate[k]=0.0; + wfdata->whostate[k]=0.0; + } + + /* next part should be in Matlab, writing to a global table */ + t = 0.5; + denum = 1.0 / ((double) PITCH_WLPCWINLEN); + denum2 = denum * denum; + for (k=0;k<PITCH_WLPCWINLEN;k++) { + dtmp = PITCH_WLPCASYM * t * denum + (1-PITCH_WLPCASYM) * t * t * denum2; + dtmp *= 3.14159265; + dtmp2 = sin(dtmp); + wfdata->window[k] = dtmp2 * dtmp2; + t++; + } +} + +/* clear all buffers */ +void WebRtcIsac_InitPitchAnalysis(PitchAnalysisStruct *State) +{ + int k; + + for (k = 0; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k++) + State->dec_buffer[k] = 0.0; + for (k = 0; k < 2*ALLPASSSECTIONS+1; k++) + State->decimator_state[k] = 
0.0; + for (k = 0; k < 2; k++) + State->hp_state[k] = 0.0; + for (k = 0; k < QLOOKAHEAD; k++) + State->whitened_buf[k] = 0.0; + for (k = 0; k < QLOOKAHEAD; k++) + State->inbuf[k] = 0.0; + + WebRtcIsac_InitPitchFilter(&(State->PFstr_wght)); + + WebRtcIsac_InitPitchFilter(&(State->PFstr)); + + WebRtcIsac_InitWeightingFilter(&(State->Wghtstr)); +} diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c b/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c new file mode 100644 index 0000000..f03d230 --- /dev/null +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c @@ -0,0 +1,383 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "pitch_estimator.h" + +#include <math.h> +#include <memory.h> +#include <stdlib.h> + +#include "os_specific_inline.h" + +/* + * We are implementing the following filters; + * + * Pre-filtering: + * y(z) = x(z) + damper(z) * gain * (x(z) + y(z)) * z ^ (-lag); + * + * Post-filtering: + * y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag); + * + * Note that |lag| is a floating number so we perform an interpolation to + * obtain the correct |lag|. + * + */ + +static const double kDampFilter[PITCH_DAMPORDER] = {-0.07, 0.25, 0.64, 0.25, + -0.07}; + +/* interpolation coefficients; generated by design_pitch_filter.m */ +static const double kIntrpCoef[PITCH_FRACS][PITCH_FRACORDER] = { + {-0.02239172458614, 0.06653315052934, -0.16515880017569, 0.60701333734125, + 0.64671399919202, -0.20249000396417, 0.09926548334755, -0.04765933793109, + 0.01754159521746}, + {-0.01985640750434, 0.05816126837866, -0.13991265473714, 0.44560418147643, + 0.79117042386876, -0.20266133815188, 0.09585268418555, -0.04533310458084, + 0.01654127246314}, + {-0.01463300534216, 0.04229888475060, -0.09897034715253, 0.28284326017787, + 0.90385267956632, -0.16976950138649, 0.07704272393639, -0.03584218578311, + 0.01295781500709}, + {-0.00764851320885, 0.02184035544377, -0.04985561057281, 0.13083306574393, + 0.97545011664662, -0.10177807997561, 0.04400901776474, -0.02010737175166, + 0.00719783432422}, + {-0.00000000000000, 0.00000000000000, -0.00000000000001, 0.00000000000001, + 0.99999999999999, 0.00000000000001, -0.00000000000001, 0.00000000000000, + -0.00000000000000}, + {0.00719783432422, -0.02010737175166, 0.04400901776474, -0.10177807997562, + 0.97545011664663, 0.13083306574393, -0.04985561057280, 0.02184035544377, + -0.00764851320885}, + {0.01295781500710, -0.03584218578312, 0.07704272393640, -0.16976950138650, + 0.90385267956634, 0.28284326017785, -0.09897034715252, 0.04229888475059, + -0.01463300534216}, + {0.01654127246315, -0.04533310458085, 0.09585268418557, -0.20266133815190, + 0.79117042386878, 0.44560418147640, -0.13991265473712, 0.05816126837865, + -0.01985640750433} +}; + +/* + * Enumerating the operation of the filter. + * iSAC has 4 different pitch-filter which are very similar in their structure. + * + * kPitchFilterPre : In this mode the filter is operating as pitch + * pre-filter. This is used at the encoder. + * kPitchFilterPost : In this mode the filter is operating as pitch + * post-filter. 
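
Resolving the lag to an integer (and folding the sign convention into the gain, as FilterFrame() below does for the post-filter) reduces the relations above to a short per-sample recursion. A stripped-down sketch, omitting the fractional interpolation (kIntrpCoef) and the differential-gain path; every name here is illustrative, not the WebRTC API:

#include <string.h>

#define DAMP_ORDER 5    /* length of the damping filter, like kDampFilter */
#define HIST_LEN   512  /* must exceed the largest lag used               */

typedef struct {
  double xy_hist[HIST_LEN];      /* past values of x[n] + y[n]            */
  double damp_state[DAMP_ORDER]; /* delay line feeding the damping filter */
  int pos;                       /* next write position in xy_hist        */
} PitchFilterSketch;             /* zero-initialize before first use      */

static void PitchFilterRun(PitchFilterSketch* f, const double* x, double* y,
                           int len, int lag, double gain,
                           const double damp[DAMP_ORDER]) {
  int n, m;
  for (n = 0; n < len; ++n) {
    double sum = 0.0;
    /* Shift the damping-filter delay line and feed it the lagged (x + y),
     * scaled by the pitch gain. */
    memmove(&f->damp_state[1], &f->damp_state[0],
            (DAMP_ORDER - 1) * sizeof(double));
    f->damp_state[0] =
        gain * f->xy_hist[(f->pos - lag + HIST_LEN) % HIST_LEN];
    for (m = 0; m < DAMP_ORDER; ++m) {
      sum += damp[m] * f->damp_state[m];
    }
    y[n] = x[n] - sum;                /* filtered output            */
    f->xy_hist[f->pos] = x[n] + y[n]; /* extend the (x + y) history */
    f->pos = (f->pos + 1) % HIST_LEN;
  }
}

The real FilterSegment() differs mainly in that the lagged read goes through a nine-tap fractional interpolation and, in kPitchFilterPreGain mode, is duplicated once per trial gain.
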
This is the inverse of pre-filter and used + * in the decoder. + * kPitchFilterPreLa : This is, in structure, similar to pre-filtering but + * utilizing 3 millisecond lookahead. It is used to + * obtain the signal for LPC analysis. + * kPitchFilterPreGain : This is, in structure, similar to pre-filtering but + * differential changes in gain is considered. This is + * used to find the optimal gain. + */ +typedef enum { + kPitchFilterPre, kPitchFilterPost, kPitchFilterPreLa, kPitchFilterPreGain +} PitchFilterOperation; + +/* + * Structure with parameters used for pitch-filtering. + * buffer : a buffer where the sum of previous inputs and outputs + * are stored. + * damper_state : the state of the damping filter. The filter is defined by + * |kDampFilter|. + * interpol_coeff : pointer to a set of coefficient which are used to utilize + * fractional pitch by interpolation. + * gain : pitch-gain to be applied to the current segment of input. + * lag : pitch-lag for the current segment of input. + * lag_offset : the offset of lag w.r.t. current sample. + * sub_frame : sub-frame index, there are 4 pitch sub-frames in an iSAC + * frame. + * This specifies the usage of the filter. See + * 'PitchFilterOperation' for operational modes. + * num_samples : number of samples to be processed in each segment. + * index : index of the input and output sample. + * damper_state_dg : state of damping filter for different trial gains. + * gain_mult : differential changes to gain. + */ +typedef struct { + double buffer[PITCH_INTBUFFSIZE + QLOOKAHEAD]; + double damper_state[PITCH_DAMPORDER]; + const double *interpol_coeff; + double gain; + double lag; + int lag_offset; + + int sub_frame; + PitchFilterOperation mode; + int num_samples; + int index; + + double damper_state_dg[4][PITCH_DAMPORDER]; + double gain_mult[4]; +} PitchFilterParam; + +/********************************************************************** + * FilterSegment() + * Filter one segment, a quarter of a frame. + * + * Inputs + * in_data : pointer to the input signal of 30 ms at 8 kHz sample-rate. + * filter_param : pitch filter parameters. + * + * Outputs + * out_data : pointer to a buffer where the filtered signal is written to. + * out_dg : [only used in kPitchFilterPreGain] pointer to a buffer + * where the output of different gain values (differential + * change to gain) is written. + */ +static void FilterSegment(const double* in_data, PitchFilterParam* parameters, + double* out_data, + double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD]) { + int n; + int m; + int j; + double sum; + double sum2; + /* Index of |parameters->buffer| where the output is written to. */ + int pos = parameters->index + PITCH_BUFFSIZE; + /* Index of |parameters->buffer| where samples are read for fractional-lag + * computation. */ + int pos_lag = pos - parameters->lag_offset; + + for (n = 0; n < parameters->num_samples; ++n) { + /* Shift low pass filter states. */ + for (m = PITCH_DAMPORDER - 1; m > 0; --m) { + parameters->damper_state[m] = parameters->damper_state[m - 1]; + } + /* Filter to get fractional pitch. */ + sum = 0.0; + for (m = 0; m < PITCH_FRACORDER; ++m) { + sum += parameters->buffer[pos_lag + m] * parameters->interpol_coeff[m]; + } + /* Multiply with gain. */ + parameters->damper_state[0] = parameters->gain * sum; + + if (parameters->mode == kPitchFilterPreGain) { + int lag_index = parameters->index - parameters->lag_offset; + int m_tmp = (lag_index < 0) ? -lag_index : 0; + /* Update the damper state for the new sample. 
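
The "Filter to get fractional pitch" loop above is a nine-tap fractional-delay read: the integer part of the lag selects where to read the (x + y) history, and one row of kIntrpCoef, chosen by the fractional part, resamples between neighbouring samples (the middle row of the table is numerically a unit impulse, i.e. no fractional shift). A minimal sketch with illustrative names:

#define FRAC_ORDER 9  /* taps per interpolation row, like PITCH_FRACORDER */

/* |lagged_history| points at the history sample selected by the integer
 * part of the lag (buffer[pos - lag_offset] in FilterSegment()); |coeff|
 * is the interpolation row chosen from the fractional part. */
static double FractionalDelayRead(const double* lagged_history,
                                  const double coeff[FRAC_ORDER]) {
  double sum = 0.0;
  int m;
  for (m = 0; m < FRAC_ORDER; ++m) {
    sum += lagged_history[m] * coeff[m];
  }
  return sum;
}
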
*/ + for (m = PITCH_DAMPORDER - 1; m > 0; --m) { + for (j = 0; j < 4; ++j) { + parameters->damper_state_dg[j][m] = + parameters->damper_state_dg[j][m - 1]; + } + } + + for (j = 0; j < parameters->sub_frame + 1; ++j) { + /* Filter for fractional pitch. */ + sum2 = 0.0; + for (m = PITCH_FRACORDER-1; m >= m_tmp; --m) { + /* |lag_index + m| is always larger than or equal to zero, see how + * m_tmp is computed. This is equivalent to assume samples outside + * |out_dg[j]| are zero. */ + sum2 += out_dg[j][lag_index + m] * parameters->interpol_coeff[m]; + } + /* Add the contribution of differential gain change. */ + parameters->damper_state_dg[j][0] = parameters->gain_mult[j] * sum + + parameters->gain * sum2; + } + + /* Filter with damping filter, and store the results. */ + for (j = 0; j < parameters->sub_frame + 1; ++j) { + sum = 0.0; + for (m = 0; m < PITCH_DAMPORDER; ++m) { + sum -= parameters->damper_state_dg[j][m] * kDampFilter[m]; + } + out_dg[j][parameters->index] = sum; + } + } + /* Filter with damping filter. */ + sum = 0.0; + for (m = 0; m < PITCH_DAMPORDER; ++m) { + sum += parameters->damper_state[m] * kDampFilter[m]; + } + + /* Subtract from input and update buffer. */ + out_data[parameters->index] = in_data[parameters->index] - sum; + parameters->buffer[pos] = in_data[parameters->index] + + out_data[parameters->index]; + + ++parameters->index; + ++pos; + ++pos_lag; + } + return; +} + +/* Update filter parameters based on the pitch-gains and pitch-lags. */ +static void Update(PitchFilterParam* parameters) { + double fraction; + int fraction_index; + /* Compute integer lag-offset. */ + parameters->lag_offset = WebRtcIsac_lrint(parameters->lag + PITCH_FILTDELAY + + 0.5); + /* Find correct set of coefficients for computing fractional pitch. */ + fraction = parameters->lag_offset - (parameters->lag + PITCH_FILTDELAY); + fraction_index = WebRtcIsac_lrint(PITCH_FRACS * fraction - 0.5); + parameters->interpol_coeff = kIntrpCoef[fraction_index]; + + if (parameters->mode == kPitchFilterPreGain) { + /* If in this mode make a differential change to pitch gain. */ + parameters->gain_mult[parameters->sub_frame] += 0.2; + if (parameters->gain_mult[parameters->sub_frame] > 1.0) { + parameters->gain_mult[parameters->sub_frame] = 1.0; + } + if (parameters->sub_frame > 0) { + parameters->gain_mult[parameters->sub_frame - 1] -= 0.2; + } + } +} + +/****************************************************************************** + * FilterFrame() + * Filter a frame of 30 millisecond, given pitch-lags and pitch-gains. + * + * Inputs + * in_data : pointer to the input signal of 30 ms at 8 kHz sample-rate. + * lags : pointer to pitch-lags, 4 lags per frame. + * gains : pointer to pitch-gians, 4 gains per frame. + * mode : defining the functionality of the filter. It takes the + * following values. + * kPitchFilterPre: Pitch pre-filter, used at encoder. + * kPitchFilterPost: Pitch post-filter, used at decoder. + * kPitchFilterPreLa: Pitch pre-filter with lookahead. + * kPitchFilterPreGain: Pitch pre-filter used to otain optimal + * pitch-gains. + * + * Outputs + * out_data : pointer to a buffer where the filtered signal is written to. + * out_dg : [only used in kPitchFilterPreGain] pointer to a buffer + * where the output of different gain values (differential + * change to gain) is written. 
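
Update() above converts each real-valued lag into an integer read offset plus an index into the eight fractional-phase rows. A simplified sketch of that decomposition (the real code goes through WebRtcIsac_lrint() and a PITCH_FILTDELAY offset, so its rounding differs slightly; all names here are illustrative):

#include <math.h>

#define NUM_FRACS 8  /* fractional phases, like PITCH_FRACS */

static void SplitLag(double lag, int* int_lag, int* frac_index) {
  double frac;
  *int_lag = (int)ceil(lag);             /* integer read position          */
  frac = *int_lag - lag;                 /* in [0, 1): overshoot of |lag|  */
  *frac_index = (int)(frac * NUM_FRACS); /* selects one interpolation row  */
  if (*frac_index >= NUM_FRACS) {
    *frac_index = NUM_FRACS - 1;         /* defensive; frac stays below 1  */
  }
}
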
+ */ +static void FilterFrame(const double* in_data, PitchFiltstr* filter_state, + double* lags, double* gains, PitchFilterOperation mode, + double* out_data, + double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD]) { + PitchFilterParam filter_parameters; + double gain_delta, lag_delta; + double old_lag, old_gain; + int n; + int m; + const double kEnhancer = 1.3; + + /* Set up buffer and states. */ + filter_parameters.index = 0; + filter_parameters.lag_offset = 0; + filter_parameters.mode = mode; + /* Copy states to local variables. */ + memcpy(filter_parameters.buffer, filter_state->ubuf, + sizeof(filter_state->ubuf)); + memcpy(filter_parameters.damper_state, filter_state->ystate, + sizeof(filter_state->ystate)); + + if (mode == kPitchFilterPreGain) { + /* Clear buffers. */ + memset(filter_parameters.gain_mult, 0, sizeof(filter_parameters.gain_mult)); + memset(filter_parameters.damper_state_dg, 0, + sizeof(filter_parameters.damper_state_dg)); + for (n = 0; n < PITCH_SUBFRAMES; ++n) { + //memset(out_dg[n], 0, sizeof(double) * (PITCH_FRAME_LEN + QLOOKAHEAD)); + memset(out_dg[n], 0, sizeof(out_dg[n])); + } + } else if (mode == kPitchFilterPost) { + /* Make output more periodic. Negative sign is to change the structure + * of the filter. */ + for (n = 0; n < PITCH_SUBFRAMES; ++n) { + gains[n] *= -kEnhancer; + } + } + + old_lag = *filter_state->oldlagp; + old_gain = *filter_state->oldgainp; + + /* No interpolation if pitch lag step is big. */ + if ((lags[0] > (PITCH_UPSTEP * old_lag)) || + (lags[0] < (PITCH_DOWNSTEP * old_lag))) { + old_lag = lags[0]; + old_gain = gains[0]; + + if (mode == kPitchFilterPreGain) { + filter_parameters.gain_mult[0] = 1.0; + } + } + + filter_parameters.num_samples = PITCH_UPDATE; + for (m = 0; m < PITCH_SUBFRAMES; ++m) { + /* Set the sub-frame value. */ + filter_parameters.sub_frame = m; + /* Calculate interpolation steps for pitch-lag and pitch-gain. */ + lag_delta = (lags[m] - old_lag) / PITCH_GRAN_PER_SUBFRAME; + filter_parameters.lag = old_lag; + gain_delta = (gains[m] - old_gain) / PITCH_GRAN_PER_SUBFRAME; + filter_parameters.gain = old_gain; + /* Store for the next sub-frame. */ + old_lag = lags[m]; + old_gain = gains[m]; + + for (n = 0; n < PITCH_GRAN_PER_SUBFRAME; ++n) { + /* Step-wise interpolation of pitch gains and lags. As pitch-lag changes, + * some parameters of filter need to be update. */ + filter_parameters.gain += gain_delta; + filter_parameters.lag += lag_delta; + /* Update parameters according to new lag value. */ + Update(&filter_parameters); + /* Filter a segment of input. */ + FilterSegment(in_data, &filter_parameters, out_data, out_dg); + } + } + + if (mode != kPitchFilterPreGain) { + /* Export buffer and states. */ + memcpy(filter_state->ubuf, &filter_parameters.buffer[PITCH_FRAME_LEN], + sizeof(filter_state->ubuf)); + memcpy(filter_state->ystate, filter_parameters.damper_state, + sizeof(filter_state->ystate)); + + /* Store for the next frame. */ + *filter_state->oldlagp = old_lag; + *filter_state->oldgainp = old_gain; + } + + if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) { + /* Filter the lookahead segment, this is treated as the last sub-frame. So + * set |pf_param| to last sub-frame. 
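
The sub-frame loop above steps lag and gain linearly from the previous sub-frame's values to the current ones, one granule at a time, re-running Update() and FilterSegment() at each step. A small sketch that only builds that per-granule schedule (SUBFRAMES matches the four pitch sub-frames; GRANULES is a placeholder, not the real PITCH_GRAN_PER_SUBFRAME):

#define SUBFRAMES 4  /* pitch sub-frames per iSAC frame                 */
#define GRANULES  2  /* interpolation steps per sub-frame (placeholder) */

static void BuildRampSchedule(const double lags[SUBFRAMES],
                              const double gains[SUBFRAMES],
                              double old_lag, double old_gain,
                              double lag_out[SUBFRAMES * GRANULES],
                              double gain_out[SUBFRAMES * GRANULES]) {
  int m, n, i = 0;
  for (m = 0; m < SUBFRAMES; ++m) {
    const double lag_step = (lags[m] - old_lag) / GRANULES;
    const double gain_step = (gains[m] - old_gain) / GRANULES;
    double lag = old_lag;
    double gain = old_gain;
    for (n = 0; n < GRANULES; ++n) {
      lag += lag_step;      /* each granule moves one step toward lags[m] */
      gain += gain_step;
      lag_out[i] = lag;     /* FilterSegment() would run with these       */
      gain_out[i] = gain;
      ++i;
    }
    old_lag = lags[m];      /* the next sub-frame ramps from the targets  */
    old_gain = gains[m];
  }
}
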
*/ + filter_parameters.sub_frame = PITCH_SUBFRAMES - 1; + filter_parameters.num_samples = QLOOKAHEAD; + FilterSegment(in_data, &filter_parameters, out_data, out_dg); + } +} + +void WebRtcIsac_PitchfilterPre(double* in_data, double* out_data, + PitchFiltstr* pf_state, double* lags, + double* gains) { + FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPre, out_data, NULL); +} + +void WebRtcIsac_PitchfilterPre_la(double* in_data, double* out_data, + PitchFiltstr* pf_state, double* lags, + double* gains) { + FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPreLa, out_data, + NULL); +} + +void WebRtcIsac_PitchfilterPre_gains( + double* in_data, double* out_data, + double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD], PitchFiltstr *pf_state, + double* lags, double* gains) { + FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPreGain, out_data, + out_dg); +} + +void WebRtcIsac_PitchfilterPost(double* in_data, double* out_data, + PitchFiltstr* pf_state, double* lags, + double* gains) { + FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPost, out_data, NULL); +} diff --git a/webrtc/modules/audio_processing/Makefile.am b/webrtc/modules/audio_processing/Makefile.am index bf7f3d0..9c3d65d 100644 --- a/webrtc/modules/audio_processing/Makefile.am +++ b/webrtc/modules/audio_processing/Makefile.am @@ -50,6 +50,10 @@ libwebrtc_audio_processing_la_SOURCES = include/audio_processing.h \ beamformer/nonlinear_beamformer.h \ beamformer/covariance_matrix_generator.cc \ beamformer/nonlinear_beamformer.cc \ + intelligibility/intelligibility_enhancer.h \ + intelligibility/intelligibility_utils.h \ + intelligibility/intelligibility_enhancer.cc \ + intelligibility/intelligibility_utils.cc \ logging/aec_logging.h \ logging/aec_logging_file_handling.h \ logging/aec_logging_file_handling.cc \ @@ -125,7 +129,8 @@ libwebrtc_audio_processing_la_SOURCES = include/audio_processing.h \ voice_detection_impl.h if NS_FIXED -COMMON_CXXFLAGS += -DWEBRTC_NS_FIXED=0 +COMMON_CFLAGS += -DWEBRTC_NS_FIXED=1 +COMMON_CXXFLAGS += -DWEBRTC_NS_FIXED=1 libwebrtc_audio_processing_la_SOURCES += \ ns/include/noise_suppression_x.h \ ns/noise_suppression_x.c \ @@ -134,7 +139,8 @@ libwebrtc_audio_processing_la_SOURCES += \ ns/nsx_core.h \ ns/nsx_core_c.c else -COMMON_CXXFLAGS += -DWEBRTC_NS_FIXED=1 +COMMON_CFLAGS += -DWEBRTC_NS_FLOAT=1 +COMMON_CXXFLAGS += -DWEBRTC_NS_FLOAT=1 libwebrtc_audio_processing_la_SOURCES += \ ns/include/noise_suppression.h \ ns/noise_suppression.c \ @@ -147,9 +153,10 @@ endif libwebrtc_audio_processing_la_CFLAGS = $(AM_CFLAGS) $(COMMON_CFLAGS) libwebrtc_audio_processing_la_CXXFLAGS = $(AM_CXXFLAGS) $(COMMON_CXXFLAGS) -libwebrtc_audio_processing_la_LIBADD = $(top_builddir)/webrtc/base/libbase.la \ +libwebrtc_audio_processing_la_LIBADD = $(top_builddir)/webrtc/libwebrtc.la \ + $(top_builddir)/webrtc/base/libbase.la \ $(top_builddir)/webrtc/system_wrappers/libsystem_wrappers.la \ - $(top_builddir)/webrtc/common_audio/libcommon_audio.la + $(top_builddir)/webrtc/common_audio/libcommon_audio.la \ $(top_builddir)/webrtc/modules/audio_coding/libaudio_coding.la libwebrtc_audio_processing_la_LDFLAGS = $(AM_LDFLAGS) -version-info $(LIBWEBRTC_AUDIO_PROCESSING_VERSION_INFO) diff --git a/webrtc/modules/audio_processing/audio_processing.gypi b/webrtc/modules/audio_processing/audio_processing.gypi deleted file mode 100644 index a107458..0000000 --- a/webrtc/modules/audio_processing/audio_processing.gypi +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 
-# -# Use of this source code is governed by a BSD-style license -# that can be found in the LICENSE file in the root of the source -# tree. An additional intellectual property rights grant can be found -# in the file PATENTS. All contributing project authors may -# be found in the AUTHORS file in the root of the source tree. - -{ - 'variables': { - 'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out', - 'protoc_out_relpath': 'webrtc/audio_processing', - }, - 'targets': [ - { - 'target_name': 'audio_processing', - 'type': '<(library)', - 'conditions': [ - ['prefer_fixed_point==1', { - 'dependencies': ['ns_fix'], - 'defines': ['WEBRTC_NS_FIXED'], - }, { - 'dependencies': ['ns'], - 'defines': ['WEBRTC_NS_FLOAT'], - }], - ['build_with_chromium==1', { - 'dependencies': [ - '<(webrtc_root)/../protobuf/protobuf.gyp:protobuf_lite', - ], - }, { - 'dependencies': [ - '<(webrtc_root)/../third_party/protobuf/protobuf.gyp:protobuf_lite', - ], - }], - ], - 'dependencies': [ - 'debug_proto', - 'aec', - 'aecm', - 'agc', - '<(webrtc_root)/common_audio/common_audio.gyp:spl', - '<(webrtc_root)/common_audio/common_audio.gyp:vad', - '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers', - ], - 'include_dirs': [ - 'interface', - '../interface', - '<(protoc_out_dir)', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - 'interface', - '../interface', - ], - }, - 'sources': [ - 'interface/audio_processing.h', - 'audio_buffer.cc', - 'audio_buffer.h', - 'audio_processing_impl.cc', - 'audio_processing_impl.h', - 'echo_cancellation_impl.cc', - 'echo_cancellation_impl.h', - 'echo_control_mobile_impl.cc', - 'echo_control_mobile_impl.h', - 'gain_control_impl.cc', - 'gain_control_impl.h', - 'high_pass_filter_impl.cc', - 'high_pass_filter_impl.h', - 'level_estimator_impl.cc', - 'level_estimator_impl.h', - 'noise_suppression_impl.cc', - 'noise_suppression_impl.h', - 'splitting_filter.cc', - 'splitting_filter.h', - 'processing_component.cc', - 'processing_component.h', - 'voice_detection_impl.cc', - 'voice_detection_impl.h', - '<(protoc_out_dir)/<(protoc_out_relpath)/debug.pb.cc', - '<(protoc_out_dir)/<(protoc_out_relpath)/debug.pb.h', - ], - }, - { - # Protobuf compiler / generate rule for audio_processing - 'target_name': 'debug_proto', - 'type': 'none', - 'variables': { - 'proto_relpath': '<(webrtc_root)/modules/audio_processing', - }, - 'sources': [ - '<(proto_relpath)/debug.proto', - ], - 'rules': [ - { - 'rule_name': 'genproto', - 'extension': 'proto', - 'inputs': [ - '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)', - ], - 'outputs': [ - '<(protoc_out_dir)/<(protoc_out_relpath)/<(RULE_INPUT_ROOT).pb.cc', - '<(protoc_out_dir)/<(protoc_out_relpath)/<(RULE_INPUT_ROOT).pb.h', - ], - 'action': [ - '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)', - '--proto_path=<(proto_relpath)', - '<(proto_relpath)/<(RULE_INPUT_NAME)', - '--cpp_out=<(protoc_out_dir)/<(protoc_out_relpath)', - ], - 'message': 'Generating C++ code from <(RULE_INPUT_PATH)', - }, - ], - 'conditions': [ - ['build_with_chromium==1', { - 'dependencies': [ - '<(webrtc_root)/../protobuf/protobuf.gyp:protoc#host', - ], - }, { - 'dependencies': [ - '<(webrtc_root)/../third_party/protobuf/protobuf.gyp:protoc#host', - ], - }], - ], - # This target exports a hard dependency because it generates header - # files. 
- 'hard_dependency': 1, - }, - ], -} diff --git a/webrtc/system_wrappers/Makefile.am b/webrtc/system_wrappers/Makefile.am index 4344da7..0dce7e5 100644 --- a/webrtc/system_wrappers/Makefile.am +++ b/webrtc/system_wrappers/Makefile.am @@ -25,13 +25,14 @@ libsystem_wrappers_la_SOURCES = interface/aligned_malloc.h \ source/critical_section_win.h \ source/logging.cc \ source/metrics_default.cc \ + source/rw_lock.cc \ source/rw_lock_generic.h \ source/rw_lock_posix.h \ source/rw_lock_win.h \ source/sleep.cc \ source/thread.cc \ source/thread_posix.h \ - source/thread_win.h + source/thread_win.h \ source/trace_impl.cc \ source/trace_impl.h \ source/trace_posix.h \ diff --git a/webrtc/system_wrappers/source/rw_lock.cc b/webrtc/system_wrappers/source/rw_lock.cc new file mode 100644 index 0000000..8b76eb8 --- /dev/null +++ b/webrtc/system_wrappers/source/rw_lock.cc @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h" + +#include <assert.h> + +#if defined(_WIN32) +#include "webrtc/system_wrappers/source/rw_lock_generic.h" +#include "webrtc/system_wrappers/source/rw_lock_win.h" +#else +#include "webrtc/system_wrappers/source/rw_lock_posix.h" +#endif + +namespace webrtc { + +RWLockWrapper* RWLockWrapper::CreateRWLock() { +#ifdef _WIN32 + // Native implementation is faster, so use that if available. + RWLockWrapper* lock = RWLockWin::Create(); + if (lock) { + return lock; + } + return new RWLockGeneric(); +#else + return RWLockPosix::Create(); +#endif +} + +} // namespace webrtc |