Calculate frame size in AudioEffect
parent 60b927a412
commit ff3d7a969f
7 changed files with 32 additions and 25 deletions
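For orientation, here is the shape the commit introduces, condensed into a standalone C++ sketch. This is an illustration, not the project code: Qt and DSP details are omitted, a bare sample rate stands in for QAudioFormat, the 48 kHz rate in main() is an assumption chosen only for the example, and the method names mirror the diff below.

#include <iostream>

// Illustrative only: the base class now derives the frame size from a
// per-backend frame duration instead of taking it as a constructor argument.
class AudioEffectSketch
{
public:
    explicit AudioEffectSketch(unsigned int sampleRate) : sampleRate_(sampleRate) {}
    virtual ~AudioEffectSketch() = default;

    // Mirrors AudioEffect::getFrameSize(): samples per frame = rate * ms / 1000.
    unsigned int getFrameSize() const { return sampleRate_ * requiredFrameSizeMs() / 1000; }

protected:
    // Each backend states how long a frame it needs, in milliseconds.
    virtual unsigned int requiredFrameSizeMs() const = 0;

private:
    const unsigned int sampleRate_;
};

class SpeexSketch final : public AudioEffectSketch
{
public:
    using AudioEffectSketch::AudioEffectSketch;
private:
    unsigned int requiredFrameSizeMs() const override { return 25; } // Speex path uses 25 ms
};

class WebRTCSketch final : public AudioEffectSketch
{
public:
    using AudioEffectSketch::AudioEffectSketch;
private:
    unsigned int requiredFrameSizeMs() const override { return 10; } // WebRTC path uses 10 ms
};

int main()
{
    SpeexSketch speex(48000);    // 48 kHz chosen only for the example
    WebRTCSketch webrtc(48000);
    std::cout << speex.getFrameSize() << " " << webrtc.getFrameSize() << "\n"; // prints: 1200 480
}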
AudioEffect implementation:

@@ -2,16 +2,14 @@

 namespace SpeexWebRTCTest {

-AudioEffect::AudioEffect(unsigned int frameSize,
-                         const QAudioFormat& mainFormat,
-                         const QAudioFormat& auxFormat)
-    : frameSize_(frameSize), mainFormat_(mainFormat), auxFormat_(auxFormat)
+AudioEffect::AudioEffect(const QAudioFormat& mainFormat, const QAudioFormat& auxFormat)
+    : mainFormat_(mainFormat), auxFormat_(auxFormat)
 {
 }

 unsigned int AudioEffect::getFrameSize() const
 {
-    return frameSize_;
+    return mainFormat_.sampleRate() * requiredFrameSizeMs() / 1000;
 }

 const QAudioFormat& AudioEffect::getMainFormat() const
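Worked example for the new getFrameSize(): assuming a 48 kHz main format (the rate is an assumption for illustration, not fixed by the commit), the Speex backend's 25 ms requirement yields 48000 * 25 / 1000 = 1200 samples per frame, and the WebRTC backend's 10 ms yields 48000 * 10 / 1000 = 480 samples. These are the same values the removed format_.sampleRate() * 0.025 and * 0.01 expressions in AudioProcessor::switchBackend() (below) used to produce, now computed in one place.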
AudioEffect header:

@@ -12,7 +12,7 @@ class AudioEffect : public QObject
 {
     Q_OBJECT
 public:
-    AudioEffect(unsigned int frameSize, const QAudioFormat& mainFormat, const QAudioFormat& auxFormat);
+    AudioEffect(const QAudioFormat& mainFormat, const QAudioFormat& auxFormat);

     virtual void processFrame(QAudioBuffer& mainBuffer, const QAudioBuffer& auxBuffer) = 0;

@@ -30,11 +30,12 @@ protected:
         voiceActive_ = active;
     }

+    virtual unsigned int requiredFrameSizeMs() const = 0;
+
 signals:
     void voiceActivityChanged(bool voice);

 private:
-    const unsigned int frameSize_;
     const QAudioFormat mainFormat_;
     const QAudioFormat auxFormat_;

AudioProcessor implementation:

@@ -196,15 +196,11 @@ Backend AudioProcessor::getCurrentBackend() const
 void AudioProcessor::switchBackend(Backend backend)
 {
     if (backend == Backend::Speex)
-    {
-        bufferSize_ = format_.sampleRate() * 0.025; // 25ms
-        dsp_.reset(new SpeexDSP(bufferSize_, format_, monitorFormat_));
-    }
+        dsp_.reset(new SpeexDSP(format_, monitorFormat_));
     else
-    {
-        bufferSize_ = format_.sampleRate() * 0.01; // 10ms
-        dsp_.reset(new WebRTCDSP(bufferSize_, format_, monitorFormat_));
-    }
+        dsp_.reset(new WebRTCDSP(format_, monitorFormat_));
+
+    bufferSize_ = dsp_->getFrameSize();

     connect(dsp_.get(), &AudioEffect::voiceActivityChanged, this,
             &AudioProcessor::voiceActivityChanged);
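With this change, switchBackend() no longer hard-codes a frame duration per backend: each AudioEffect subclass declares its requirement through requiredFrameSizeMs(), and bufferSize_ is read back from dsp_->getFrameSize(), so the processing buffer and the frame length the DSP expects cannot drift apart.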
SpeexDSP implementation:

@@ -13,8 +13,8 @@ int on = 1;
 int off = 0;
 } // namespace

-SpeexDSP::SpeexDSP(unsigned int frameSize, const QAudioFormat& mainFormat, const QAudioFormat& auxFormat)
-    : AudioEffect(frameSize, mainFormat, auxFormat)
+SpeexDSP::SpeexDSP(const QAudioFormat& mainFormat, const QAudioFormat& auxFormat)
+    : AudioEffect(mainFormat, auxFormat)
 {
     preprocess_ = speex_preprocess_state_init(getFrameSize(), getMainFormat().sampleRate());
     echo_ = speex_echo_state_init_mc(getFrameSize(), getFrameSize() * 10,
@@ -78,4 +78,9 @@ void SpeexDSP::setParameter(const QString& param, QVariant value)
     throw std::invalid_argument("Invalid param");
 }

+unsigned int SpeexDSP::requiredFrameSizeMs() const
+{
+    return 25;
+}
+
 } // namespace SpeexWebRTCTest
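A note on the constructor above: the SpeexDSP constructor body calls getFrameSize(), which dispatches to the requiredFrameSizeMs() override added in this commit. That is safe because the call happens in the derived constructor's body, after the AudioEffect subobject has been constructed; calling getFrameSize() from the AudioEffect constructor itself would invoke a pure virtual function and be undefined behavior.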
SpeexDSP header:

@@ -10,17 +10,19 @@ typedef struct SpeexEchoState_ SpeexEchoState;

 namespace SpeexWebRTCTest {

-class SpeexDSP : public AudioEffect
+class SpeexDSP final : public AudioEffect
 {
     Q_OBJECT
 public:
-    SpeexDSP(unsigned int frameSize, const QAudioFormat& mainFormat, const QAudioFormat& auxFormat);
+    SpeexDSP(const QAudioFormat& mainFormat, const QAudioFormat& auxFormat);
     ~SpeexDSP() override;

     void processFrame(QAudioBuffer& mainBuffer, const QAudioBuffer& auxBuffer) override;
     void setParameter(const QString& param, QVariant value) override;

 private:
+    unsigned int requiredFrameSizeMs() const override;
+
     SpeexPreprocessState* preprocess_ = nullptr;
     SpeexEchoState* echo_ = nullptr;

WebRTCDSP implementation:

@@ -37,10 +37,8 @@ void convert(const webrtc::AudioFrame& from, QAudioBuffer& to)

 } // namespace

-WebRTCDSP::WebRTCDSP(unsigned int frameSize,
-                     const QAudioFormat& mainFormat,
-                     const QAudioFormat& auxFormat)
-    : AudioEffect(frameSize, mainFormat, auxFormat)
+WebRTCDSP::WebRTCDSP(const QAudioFormat& mainFormat, const QAudioFormat& auxFormat)
+    : AudioEffect(mainFormat, auxFormat)
 {
     apm_ = webrtc::AudioProcessing::Create();
     if (!apm_)
@@ -181,4 +179,9 @@ void WebRTCDSP::setParameter(const QString& param, QVariant value)
     throw std::invalid_argument("Invalid param");
 }

+unsigned int WebRTCDSP::requiredFrameSizeMs() const
+{
+    return 10;
+}
+
 } // namespace SpeexWebRTCTest
WebRTCDSP header:

@@ -9,17 +9,19 @@ class AudioProcessing;

 namespace SpeexWebRTCTest {

-class WebRTCDSP : public AudioEffect
+class WebRTCDSP final : public AudioEffect
 {
     Q_OBJECT
 public:
-    WebRTCDSP(unsigned int frameSize, const QAudioFormat& mainFormat, const QAudioFormat& auxFormat);
+    WebRTCDSP(const QAudioFormat& mainFormat, const QAudioFormat& auxFormat);
     ~WebRTCDSP() override;

     void processFrame(QAudioBuffer& mainBuffer, const QAudioBuffer& auxBuffer) override;
     void setParameter(const QString& param, QVariant value) override;

 private:
+    unsigned int requiredFrameSizeMs() const override;
+
     webrtc::AudioProcessing* apm_;
 };
