author     wu@webrtc.org <wu@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2014-06-05 20:34:08 +0000
committer  wu@webrtc.org <wu@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2014-06-05 20:34:08 +0000
commit     81f8df9af96c6b4bf43234f2a0162146a5da6112 (patch)
tree       9c40832ad59dac6f440d07f1a3fb9524dbd24b60 /voice_engine/voe_base_impl.cc
parent     553b68f8800030af6af2a5dd3a941258cd05a275 (diff)
download   webrtc-81f8df9af96c6b4bf43234f2a0162146a5da6112.tar.gz
Fix the chain that propagates the audio frame's RTP and NTP timestamps, including:
* In AudioCodingModuleImpl::PlayoutData10Ms, don't reset the timestamp obtained from GetAudio.
* When there is more than one participant, set the AudioFrame's RTP timestamp to 0.
* Copy ntp_time_ms_ in the AudioFrame::CopyFrom method.
* In RemixAndResample, pass src frame's timestamp_ and ntp_time_ms_ to the dst frame.
* Fix how |elapsed_time_ms| is computed in channel.cc by adding GetPlayoutFrequency.
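A simplified sketch of the propagation chain above (the struct layout and helper signatures here are assumptions for illustration, not the actual WebRTC definitions):

    #include <cstdint>

    // Sketch only: assumed, trimmed-down AudioFrame with just the timing fields.
    struct AudioFrame {
      uint32_t timestamp_;       // RTP timestamp of the first sample.
      int64_t ntp_time_ms_;      // NTP time of the first sample, -1 if unknown.
      int64_t elapsed_time_ms_;  // Elapsed playout time in ms, -1 if unknown.
    };

    // CopyFrom must carry ntp_time_ms_ along with the other timing fields.
    void CopyFrom(const AudioFrame& src, AudioFrame* dst) {
      dst->timestamp_ = src.timestamp_;
      dst->ntp_time_ms_ = src.ntp_time_ms_;
      dst->elapsed_time_ms_ = src.elapsed_time_ms_;
      // ... copy samples and the remaining fields ...
    }

    // RemixAndResample likewise passes the src frame's timing to the dst frame.
    void RemixAndResample(const AudioFrame& src, AudioFrame* dst) {
      // ... remix channels and resample the audio ...
      dst->timestamp_ = src.timestamp_;
      dst->ntp_time_ms_ = src.ntp_time_ms_;
    }

    // channel.cc: elapsed time is derived from the playout frequency rather
    // than a hard-coded clock rate (the role GetPlayoutFrequency plays).
    int64_t ElapsedMs(uint32_t rtp_timestamp, uint32_t first_rtp_timestamp,
                      int playout_frequency_hz) {
      return static_cast<int64_t>(rtp_timestamp - first_rtp_timestamp) * 1000 /
             playout_frequency_hz;
    }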
Tweaks on ntp_time_ms_:
* Init ntp_time_ms_ to -1 in AudioFrame ctor.
* When there is more than one participant, set the AudioFrame's ntp_time_ms_ to an invalid value, i.e. we don't support ntp_time_ms_ in the multi-participant case until the mixing is moved to Chrome.
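A self-contained sketch of those ntp_time_ms_ rules (member and helper names assumed for illustration; the real AudioFrame and mixer code differ in detail):

    #include <cstdint>

    struct AudioFrame {
      AudioFrame() : timestamp_(0), ntp_time_ms_(-1) {}  // NTP time starts unknown.
      uint32_t timestamp_;
      int64_t ntp_time_ms_;
    };

    // In the conference mixer: a frame mixed from several participants has no
    // single source, so its timestamps are marked invalid.
    void FinalizeMixedFrame(int num_mixed_participants, AudioFrame* mixed) {
      if (num_mixed_participants > 1) {
        mixed->timestamp_ = 0;
        mixed->ntp_time_ms_ = -1;  // Unsupported until mixing moves to Chrome.
      }
    }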
Add elapsed_time_ms to AudioFrame and pass it to Chrome, where we don't have the RTP timestamp's sample rate and therefore can't convert the RTP timestamp to milliseconds.
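For illustration, a hypothetical consumer-side callback (not Chrome's actual API) showing why the elapsed time has to be delivered already converted to milliseconds:

    #include <cstdint>

    // Hypothetical render callback on the consumer side: it receives
    // elapsed_time_ms and ntp_time_ms directly and can use them for
    // A/V-sync bookkeeping as-is.
    void OnRenderedData(const int16_t* audio_data, int frames_per_channel,
                        int channels, int64_t elapsed_time_ms,
                        int64_t ntp_time_ms) {
      // If only an RTP timestamp were delivered, converting it to ms would
      // need the RTP clock rate, which this side does not have:
      //   elapsed_ms = rtp_ticks * 1000 / rtp_clock_rate;
    }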
BUG=3111
R=henrik.lundin@webrtc.org, turaj@webrtc.org, xians@webrtc.org
TBR=andrew
andrew to take another look at audio_conference_mixer_impl.cc
Review URL: https://webrtc-codereview.appspot.com/14559004
git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@6346 4adac7df-926f-26a2-2b94-8c16560cd09d
Diffstat (limited to 'voice_engine/voe_base_impl.cc')
-rw-r--r--  voice_engine/voe_base_impl.cc | 12
1 file changed, 6 insertions, 6 deletions
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
index cfedd405..ad6314a7 100644
--- a/voice_engine/voe_base_impl.cc
+++ b/voice_engine/voe_base_impl.cc
@@ -149,7 +149,7 @@ int32_t VoEBaseImpl::NeedMorePlayData(
                                       uint32_t samplesPerSec,
                                       void* audioSamples,
                                       uint32_t& nSamplesOut,
-                                      uint32_t* rtp_timestamp,
+                                      int64_t* elapsed_time_ms,
                                       int64_t* ntp_time_ms) {
   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
@@ -160,7 +160,7 @@ int32_t VoEBaseImpl::NeedMorePlayData(
   GetPlayoutData(static_cast<int>(samplesPerSec),
                  static_cast<int>(nChannels),
                  static_cast<int>(nSamples), true, audioSamples,
-                 rtp_timestamp, ntp_time_ms);
+                 elapsed_time_ms, ntp_time_ms);

   nSamplesOut = _audioFrame.samples_per_channel_;
@@ -237,13 +237,13 @@ void VoEBaseImpl::PushCaptureData(int voe_channel, const void* audio_data,
 void VoEBaseImpl::PullRenderData(int bits_per_sample, int sample_rate,
                                  int number_of_channels, int number_of_frames,
                                  void* audio_data,
-                                 uint32_t* rtp_timestamp,
+                                 int64_t* elapsed_time_ms,
                                  int64_t* ntp_time_ms) {
   assert(bits_per_sample == 16);
   assert(number_of_frames == static_cast<int>(sample_rate / 100));

   GetPlayoutData(sample_rate, number_of_channels, number_of_frames, false,
-                 audio_data, rtp_timestamp, ntp_time_ms);
+                 audio_data, elapsed_time_ms, ntp_time_ms);
 }

 int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
@@ -1087,7 +1087,7 @@ int VoEBaseImpl::ProcessRecordedDataWithAPM(
 void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
                                  int number_of_frames, bool feed_data_to_apm,
                                  void* audio_data,
-                                 uint32_t* rtp_timestamp,
+                                 int64_t* elapsed_time_ms,
                                  int64_t* ntp_time_ms) {
   assert(_shared->output_mixer() != NULL);
@@ -1110,7 +1110,7 @@ void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
   memcpy(audio_data, _audioFrame.data_,
          sizeof(int16_t) * number_of_frames * number_of_channels);

-  *rtp_timestamp = _audioFrame.timestamp_;
+  *elapsed_time_ms = _audioFrame.elapsed_time_ms_;
   *ntp_time_ms = _audioFrame.ntp_time_ms_;
 }