author     Cheng-Yi Chiang <cychiang@chromium.org>    2021-02-17 23:21:24 +0800
committer  Commit Bot <commit-bot@chromium.org>       2021-03-03 18:15:40 +0000
commit     86e1a560610cb3a8dfbf6cedb7984c85830d6855 (patch)
tree       ce84cd20c034528dff607fd9e16f3b5a6f82cbf4
parent     75f9a6f3f931e8ab5dd0964ce1a0724953b7a4dd (diff)
download   adhd-86e1a560610cb3a8dfbf6cedb7984c85830d6855.tar.gz
CRAS: Align new output stream callback time to existing input stream
We want to align the input stream POST and output stream FETCH schedules in
order to reduce the number of wake-ups. Optimize for the case where an input
stream is created first and an output stream is created later.

If the input and output streams use the same block size and rate, align the
output stream's initial callback time with the input stream's next_cb_ts, and
use the input stream's sleep interval as the initial sleep interval of the
output stream. These actions align the output stream to the existing input
stream at the beginning.

To keep that alignment for as long as we can, we assume the input and output
devices on the same card share the same estimated rate. The estimated rate
changes the sleep interval, so to prevent fluctuation in the estimated rate
from breaking the alignment, we use the output device's estimated rate for the
input device when both are on the internal sound card and running at the same
rate. The tricky part is that we need to update the estimated rate in every
wake-up cycle so that the latest estimated rate, and hence sleep interval,
propagates from the output device to the input device.

Note that recovering alignment after an underrun or overrun is left for future
work.

BUG=b:163511204
TEST=Check that 480-block-size input and output streams are aligned in the WebRTC case.
TEST=Check that the audio.CrasPerf.playback_capture test CPU cycle count is reduced by about 33%.

Change-Id: I31c000609255193b0971adcb438abc0e343ae4ad
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/adhd/+/2703368
Reviewed-by: Yu-Hsuan Hsu <yuhsuan@chromium.org>
Commit-Queue: Cheng-Yi Chiang <cychiang@chromium.org>
Tested-by: Cheng-Yi Chiang <cychiang@chromium.org>
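For a sense of the timing involved: with the 480-frame block size at 48 kHz
mentioned in the TEST lines above, both directions wake every 10 ms, so copying
the input stream's next_cb_ts and sleep interval onto a new output stream lets
playback and capture share the same wake-ups instead of doubling them. The
fragment below is a standalone, hedged sketch of that rule; all names
(sketch_stream, matches, init_output_schedule) are invented for illustration,
and the real logic is dev_io_append_stream() plus find_matched_input_stream()
in the diff below.

/*
 * Standalone sketch of the alignment rule (invented names, not CRAS code).
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct sketch_stream {
	unsigned int frame_rate;   /* Hz */
	unsigned int cb_threshold; /* frames per callback */
	struct timespec next_cb_ts;
	struct timespec sleep_interval_ts;
};

/* Mirrors find_matched_input_stream(): same rate and same block size. */
static bool matches(const struct sketch_stream *in,
		    const struct sketch_stream *out)
{
	return in->frame_rate == out->frame_rate &&
	       in->cb_threshold == out->cb_threshold;
}

static void init_output_schedule(struct sketch_stream *out,
				 const struct sketch_stream *in)
{
	if (in && matches(in, out)) {
		/* Align to the existing input stream's schedule. */
		out->next_cb_ts = in->next_cb_ts;
		out->sleep_interval_ts = in->sleep_interval_ts;
		return;
	}
	/* Fallback: derive the interval from block size and rate. */
	long long nsec =
		(long long)out->cb_threshold * 1000000000LL / out->frame_rate;
	out->sleep_interval_ts.tv_sec = nsec / 1000000000LL;
	out->sleep_interval_ts.tv_nsec = nsec % 1000000000LL;
	clock_gettime(CLOCK_MONOTONIC, &out->next_cb_ts);
}

int main(void)
{
	/* 480 frames at 48 kHz: a 10 ms callback interval on both sides. */
	struct sketch_stream in = { 48000, 480, { 100, 5000000 }, { 0, 10000000 } };
	struct sketch_stream out = { 48000, 480, { 0, 0 }, { 0, 0 } };

	init_output_schedule(&out, &in);
	printf("output wakes at %ld.%09ld every %ld.%09ld s, same as input\n",
	       (long)out.next_cb_ts.tv_sec, out.next_cb_ts.tv_nsec,
	       (long)out.sleep_interval_ts.tv_sec,
	       out.sleep_interval_ts.tv_nsec);
	return 0;
}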
-rw-r--r--  cras/src/server/audio_thread.c             3
-rw-r--r--  cras/src/server/cras_iodev.c              11
-rw-r--r--  cras/src/server/cras_iodev.h               3
-rw-r--r--  cras/src/server/dev_io.c                 211
-rw-r--r--  cras/src/server/dev_io.h                   5
-rw-r--r--  cras/src/server/dev_stream.c              14
-rw-r--r--  cras/src/server/dev_stream.h              19
-rw-r--r--  cras/src/tests/audio_thread_unittest.cc    9
-rw-r--r--  cras/src/tests/dev_io_stubs.cc             5
-rw-r--r--  cras/src/tests/dev_io_unittest.cc         89
-rw-r--r--  cras/src/tests/dev_stream_unittest.cc     68
-rw-r--r--  cras/src/tests/iodev_stub.cc              36
-rw-r--r--  cras/src/tests/iodev_stub.h                6
-rw-r--r--  cras/src/tests/iodev_unittest.cc          14
-rw-r--r--  cras/src/tests/timing_unittest.cc         78
15 files changed, 475 insertions, 96 deletions
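The other half of the change, propagating the output device's estimated rate to
the input device, can be summarized the same way. The fragment below is a
minimal, hedged sketch with an invented helper (pick_input_rate_ratio is not a
CRAS function); the actual logic is the tracked_dev handling added to
update_estimated_rate() in dev_io.c below, and the 0.8/1.2 ratios echo the new
dev_io_unittest cases.

/*
 * Sketch of the rate-propagation rule (invented names, not the CRAS API):
 * an input device on the internal sound card tracks the estimated rate of
 * an internal-card output device running at the same nominal rate, so both
 * sides of the duplex path drift together and the alignment holds.
 */
#include <stdbool.h>
#include <stdio.h>

static double pick_input_rate_ratio(bool in_on_internal_card,
				    bool out_on_internal_card,
				    unsigned int in_rate, unsigned int out_rate,
				    double in_est_ratio, double out_est_ratio)
{
	if (in_on_internal_card && out_on_internal_card && in_rate == out_rate)
		return out_est_ratio; /* track the output device's estimate */
	return in_est_ratio;          /* otherwise keep the self estimate */
}

int main(void)
{
	/* Internal mic + internal speaker, both at 48 kHz: follow output. */
	printf("%f\n", pick_input_rate_ratio(true, true, 48000, 48000, 0.8, 1.2));
	/* Non-internal (e.g. USB) mic: estimates stay independent. */
	printf("%f\n", pick_input_rate_ratio(false, true, 48000, 48000, 0.8, 1.2));
	return 0;
}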
diff --git a/cras/src/server/audio_thread.c b/cras/src/server/audio_thread.c
index cd155e82..48bb0dc2 100644
--- a/cras/src/server/audio_thread.c
+++ b/cras/src/server/audio_thread.c
@@ -443,7 +443,8 @@ static int thread_add_stream(struct audio_thread *thread,
{
int rc;
- rc = dev_io_append_stream(&thread->open_devs[stream->direction], stream,
+ rc = dev_io_append_stream(&thread->open_devs[CRAS_STREAM_OUTPUT],
+ &thread->open_devs[CRAS_STREAM_INPUT], stream,
iodevs, num_iodevs);
if (rc < 0)
return rc;
diff --git a/cras/src/server/cras_iodev.c b/cras/src/server/cras_iodev.c
index fe78f63e..66446853 100644
--- a/cras/src/server/cras_iodev.c
+++ b/cras/src/server/cras_iodev.c
@@ -732,6 +732,17 @@ bool cras_iodev_is_aec_use_case(const struct cras_ionode *node)
return false;
}
+bool cras_iodev_is_on_internal_card(const struct cras_ionode *node)
+{
+ if (node->type == CRAS_NODE_TYPE_INTERNAL_SPEAKER)
+ return true;
+ if (node->type == CRAS_NODE_TYPE_HEADPHONE)
+ return true;
+ if (node->type == CRAS_NODE_TYPE_MIC)
+ return true;
+ return false;
+}
+
float cras_iodev_get_software_volume_scaler(struct cras_iodev *iodev)
{
unsigned int volume;
diff --git a/cras/src/server/cras_iodev.h b/cras/src/server/cras_iodev.h
index efe1d3e0..18a0962c 100644
--- a/cras/src/server/cras_iodev.h
+++ b/cras/src/server/cras_iodev.h
@@ -462,6 +462,9 @@ void cras_iodev_set_active_node(struct cras_iodev *iodev,
/* Checks if the node is the typical playback or capture option for AEC usage. */
bool cras_iodev_is_aec_use_case(const struct cras_ionode *node);
+/* Checks if the node is a playback or capture node on the internal sound card. */
+bool cras_iodev_is_on_internal_card(const struct cras_ionode *node);
+
/* Adjust the system volume based on the volume of the given node. */
static inline unsigned int
cras_iodev_adjust_node_volume(const struct cras_ionode *node,
diff --git a/cras/src/server/dev_io.c b/cras/src/server/dev_io.c
index 6b4a0999..ddaaeb29 100644
--- a/cras/src/server/dev_io.c
+++ b/cras/src/server/dev_io.c
@@ -63,11 +63,42 @@ static inline struct cras_iodev *get_master_dev(const struct dev_stream *stream)
/* Updates the estimated sample rate of open device to all attached
* streams.
*/
-static void update_estimated_rate(struct open_dev *adev)
+static void update_estimated_rate(struct open_dev *adev,
+ struct open_dev *odev_list,
+ bool self_rate_need_update)
{
struct cras_iodev *master_dev;
struct cras_iodev *dev = adev->dev;
+ struct cras_iodev *tracked_dev = NULL;
struct dev_stream *dev_stream;
+ double dev_rate_ratio;
+ double master_dev_rate_ratio;
+
+ /*
+ * If there is an output device on the same sound card running with the same
+ * sampling rate, use the rate of that output device for this device.
+ */
+ if (dev->direction == CRAS_STREAM_INPUT &&
+ cras_iodev_is_on_internal_card(dev->active_node)) {
+ struct open_dev *odev;
+ DL_FOREACH (odev_list, odev) {
+ if (!cras_iodev_is_on_internal_card(
+ odev->dev->active_node))
+ continue;
+ if (odev->dev->format->frame_rate !=
+ dev->format->frame_rate)
+ continue;
+ tracked_dev = odev->dev;
+ break;
+ }
+ }
+
+ /*
+ * If the self-owned rate estimator does not need to update the rate and
+ * there is no tracked output device, there is no need to update anything.
+ */
+ if (!self_rate_need_update && !tracked_dev)
+ return;
DL_FOREACH (dev->streams, dev_stream) {
master_dev = get_master_dev(dev_stream);
@@ -76,11 +107,19 @@ static void update_estimated_rate(struct open_dev *adev)
continue;
}
- dev_stream_set_dev_rate(
- dev_stream, dev->format->frame_rate,
- cras_iodev_get_est_rate_ratio(dev),
- cras_iodev_get_est_rate_ratio(master_dev),
- adev->coarse_rate_adjust);
+ if (tracked_dev) {
+ dev_rate_ratio =
+ cras_iodev_get_est_rate_ratio(tracked_dev);
+ master_dev_rate_ratio = dev_rate_ratio;
+ } else {
+ dev_rate_ratio = cras_iodev_get_est_rate_ratio(dev);
+ master_dev_rate_ratio =
+ cras_iodev_get_est_rate_ratio(master_dev);
+ }
+
+ dev_stream_set_dev_rate(dev_stream, dev->format->frame_rate,
+ dev_rate_ratio, master_dev_rate_ratio,
+ adev->coarse_rate_adjust);
}
}
@@ -471,7 +510,7 @@ static int set_input_dev_wake_ts(struct open_dev *adev, bool *need_to_drop)
* adev - The device to capture samples from.
* Returns 0 on success.
*/
-static int capture_to_streams(struct open_dev *adev)
+static int capture_to_streams(struct open_dev *adev, struct open_dev *odev_list)
{
struct cras_iodev *idev = adev->dev;
snd_pcm_uframes_t remainder, hw_level, cap_limit;
@@ -493,14 +532,29 @@ static int capture_to_streams(struct open_dev *adev)
ATLOG(atlog, AUDIO_THREAD_READ_AUDIO_TSTAMP, idev->info.idx,
hw_tstamp.tv_sec, hw_tstamp.tv_nsec);
if (timespec_is_nonzero(&hw_tstamp)) {
+ bool self_rate_need_update;
+
if (hw_level < idev->min_cb_level / 2)
adev->coarse_rate_adjust = 1;
else if (hw_level > idev->max_cb_level * 2)
adev->coarse_rate_adjust = -1;
else
adev->coarse_rate_adjust = 0;
- if (cras_iodev_update_rate(idev, hw_level, &hw_tstamp))
- update_estimated_rate(adev);
+
+ /*
+ * This value indicates whether the rate estimator in the device
+ * wants to update the estimated rate.
+ */
+ self_rate_need_update =
+ !!cras_iodev_update_rate(idev, hw_level, &hw_tstamp);
+
+ /*
+ * Always call update_estimated_rate so that a new output rate
+ * has a chance to propagate to the input. update_estimated_rate
+ * decides whether the new rate comes from the self rate estimator
+ * or from the tracked output device.
+ */
+ update_estimated_rate(adev, odev_list, self_rate_need_update);
}
cap_limit = get_stream_limit(adev, hw_level, &cap_limit_stream);
@@ -754,7 +808,7 @@ int write_output_samples(struct open_dev **odevs, struct open_dev *adev,
adev->coarse_rate_adjust = 0;
if (cras_iodev_update_rate(odev, hw_level, &hw_tstamp))
- update_estimated_rate(adev);
+ update_estimated_rate(adev, NULL, true);
}
ATLOG(atlog, AUDIO_THREAD_FILL_AUDIO, adev->dev->info.idx, hw_level,
odev->min_cb_level);
@@ -968,16 +1022,17 @@ static void handle_dev_err(int err_rc, struct open_dev **odevs,
dev_io_rm_open_dev(odevs, adev);
}
-int dev_io_capture(struct open_dev **list)
+int dev_io_capture(struct open_dev **list, struct open_dev **olist)
{
struct open_dev *idev_list = *list;
+ struct open_dev *odev_list = *olist;
struct open_dev *adev;
int rc;
DL_FOREACH (idev_list, adev) {
if (!cras_iodev_is_open(adev->dev))
continue;
- rc = capture_to_streams(adev);
+ rc = capture_to_streams(adev, odev_list);
if (rc < 0)
handle_dev_err(rc, list, adev);
}
@@ -1128,7 +1183,7 @@ void dev_io_run(struct open_dev **odevs, struct open_dev **idevs,
update_longest_wake(*idevs, &now);
dev_io_playback_fetch(*odevs);
- dev_io_capture(idevs);
+ dev_io_capture(idevs, odevs);
dev_io_send_captured_samples(*idevs);
dev_io_playback_write(odevs, output_converter);
}
@@ -1282,14 +1337,61 @@ static void delete_stream_from_dev(struct cras_iodev *dev,
dev_stream_destroy(out);
}
-int dev_io_append_stream(struct open_dev **dev_list,
+/*
+ * Finds a matched input stream from the open device list.
+ * Two streams are matched when they have the same sampling rate
+ * and the same cb_threshold, which means their sleep intervals
+ * should be very close if we neglect the device estimated rate.
+ */
+static struct dev_stream *
+find_matched_input_stream(const struct cras_rstream *out_stream,
+ struct open_dev *odev_list)
+{
+ struct open_dev *odev;
+ struct dev_stream *dev_stream;
+ size_t out_rate = out_stream->format.frame_rate;
+ size_t out_cb_threshold = cras_rstream_get_cb_threshold(out_stream);
+
+ DL_FOREACH (odev_list, odev) {
+ DL_FOREACH (odev->dev->streams, dev_stream) {
+ if (dev_stream->stream->format.frame_rate != out_rate)
+ continue;
+ if (cras_rstream_get_cb_threshold(dev_stream->stream) !=
+ out_cb_threshold)
+ continue;
+ return dev_stream;
+ }
+ }
+ return NULL;
+}
+
+static bool
+find_matched_input_stream_next_cb_ts(const struct cras_rstream *stream,
+ struct open_dev *odev_list,
+ const struct timespec **next_cb_ts,
+ const struct timespec **sleep_interval_ts)
+{
+ struct dev_stream *dev_stream =
+ find_matched_input_stream(stream, odev_list);
+ if (dev_stream) {
+ *next_cb_ts = dev_stream_next_cb_ts(dev_stream);
+ *sleep_interval_ts = dev_stream_sleep_interval_ts(dev_stream);
+ return *next_cb_ts != NULL;
+ }
+ return false;
+}
+
+int dev_io_append_stream(struct open_dev **odevs, struct open_dev **idevs,
struct cras_rstream *stream,
struct cras_iodev **iodevs, unsigned int num_iodevs)
{
+ struct open_dev **dev_list;
struct open_dev *open_dev;
struct cras_iodev *dev;
struct dev_stream *out;
struct timespec init_cb_ts;
+ const struct timespec *init_sleep_interval_ts = NULL;
struct timespec extra_sleep;
const struct timespec *stream_ts;
unsigned int i;
@@ -1297,6 +1399,11 @@ int dev_io_append_stream(struct open_dev **dev_list,
int level;
int rc = 0;
+ if (stream->direction == CRAS_STREAM_OUTPUT)
+ dev_list = odevs;
+ else
+ dev_list = idevs;
+
for (i = 0; i < num_iodevs; i++) {
DL_SEARCH_SCALAR(*dev_list, open_dev, dev, iodevs[i]);
if (!open_dev)
@@ -1341,35 +1448,55 @@ int dev_io_append_stream(struct open_dev **dev_list,
* may cause device buffer level stack up.
*/
if (stream->direction == CRAS_STREAM_OUTPUT) {
- DL_FOREACH (dev->streams, out) {
- stream_ts = dev_stream_next_cb_ts(out);
- if (stream_ts &&
- (!cb_ts_set ||
- timespec_after(&init_cb_ts, stream_ts))) {
- init_cb_ts = *stream_ts;
- cb_ts_set = true;
+ /*
+ * If there is a matched input stream, find its next cb time.
+ * Use that as the initial cb time for this output stream.
+ */
+ const struct timespec *in_stream_ts;
+ const struct timespec *in_stream_sleep_interval_ts;
+ bool found_matched_input;
+ found_matched_input =
+ find_matched_input_stream_next_cb_ts(
+ stream, *idevs, &in_stream_ts,
+ &in_stream_sleep_interval_ts);
+ if (found_matched_input) {
+ init_cb_ts = *in_stream_ts;
+ init_sleep_interval_ts =
+ in_stream_sleep_interval_ts;
+ } else {
+ DL_FOREACH (dev->streams, out) {
+ stream_ts = dev_stream_next_cb_ts(out);
+ if (stream_ts &&
+ (!cb_ts_set ||
+ timespec_after(&init_cb_ts,
+ stream_ts))) {
+ init_cb_ts = *stream_ts;
+ cb_ts_set = true;
+ }
}
- }
- if (!cb_ts_set) {
- level = cras_iodev_get_valid_frames(
- dev, &init_cb_ts);
- if (level < 0) {
- syslog(LOG_ERR,
- "Failed to set output init_cb_ts, rc = %d",
- level);
- rc = -EINVAL;
- break;
+ if (!cb_ts_set) {
+ level = cras_iodev_get_valid_frames(
+ dev, &init_cb_ts);
+ if (level < 0) {
+ syslog(LOG_ERR,
+ "Failed to set output init_cb_ts, rc = %d",
+ level);
+ rc = -EINVAL;
+ break;
+ }
+ level -= cras_frames_at_rate(
+ stream->format.frame_rate,
+ cras_rstream_get_cb_threshold(
+ stream),
+ dev->format->frame_rate);
+ if (level < 0)
+ level = 0;
+ cras_frames_to_time(
+ level, dev->format->frame_rate,
+ &extra_sleep);
+ add_timespecs(&init_cb_ts,
+ &extra_sleep);
}
- level -= cras_frames_at_rate(
- stream->format.frame_rate,
- cras_rstream_get_cb_threshold(stream),
- dev->format->frame_rate);
- if (level < 0)
- level = 0;
- cras_frames_to_time(level,
- dev->format->frame_rate,
- &extra_sleep);
- add_timespecs(&init_cb_ts, &extra_sleep);
}
} else {
/*
@@ -1388,7 +1515,7 @@ int dev_io_append_stream(struct open_dev **dev_list,
}
out = dev_stream_create(stream, dev->info.idx, dev->format, dev,
- &init_cb_ts);
+ &init_cb_ts, init_sleep_interval_ts);
if (!out) {
rc = -EINVAL;
break;
diff --git a/cras/src/server/dev_io.h b/cras/src/server/dev_io.h
index 259bbabd..ca71a809 100644
--- a/cras/src/server/dev_io.h
+++ b/cras/src/server/dev_io.h
@@ -58,8 +58,9 @@ int write_output_samples(struct open_dev **odevs, struct open_dev *adev,
* Captures samples from each device in the list.
* list - Pointer to the list of input devices. Devices that fail to read
* will be removed from the list.
+ * olist - Pointer to the list of output devices.
*/
-int dev_io_capture(struct open_dev **list);
+int dev_io_capture(struct open_dev **list, struct open_dev **olist);
/*
* Send samples that have been captured to their streams.
@@ -101,7 +102,7 @@ struct open_dev *dev_io_find_open_dev(struct open_dev *odev_list,
unsigned int dev_idx);
/* Append a new stream to a specified set of iodevs. */
-int dev_io_append_stream(struct open_dev **dev_list,
+int dev_io_append_stream(struct open_dev **odevs, struct open_dev **idevs,
struct cras_rstream *stream,
struct cras_iodev **iodevs, unsigned int num_iodevs);
diff --git a/cras/src/server/dev_stream.c b/cras/src/server/dev_stream.c
index 025aeddd..2473b1a5 100644
--- a/cras/src/server/dev_stream.c
+++ b/cras/src/server/dev_stream.c
@@ -63,7 +63,8 @@ unsigned int max_frames_for_conversion(unsigned int stream_frames,
struct dev_stream *dev_stream_create(struct cras_rstream *stream,
unsigned int dev_id,
const struct cras_audio_format *dev_fmt,
- void *dev_ptr, struct timespec *cb_ts)
+ void *dev_ptr, struct timespec *cb_ts,
+ const struct timespec *sleep_interval_ts)
{
struct dev_stream *out;
struct cras_audio_format *stream_fmt = &stream->format;
@@ -122,8 +123,15 @@ struct dev_stream *dev_stream_create(struct cras_rstream *stream,
out->conv_buffer = byte_buffer_create(buf_bytes);
out->conv_area = cras_audio_area_create(ofmt->num_channels);
- cras_frames_to_time(cras_rstream_get_cb_threshold(stream),
- stream_fmt->frame_rate, &stream->sleep_interval_ts);
+ /* Use the sleep interval hint from the argument if one is provided. */
+ if (sleep_interval_ts) {
+ stream->sleep_interval_ts = *sleep_interval_ts;
+ } else {
+ cras_frames_to_time(cras_rstream_get_cb_threshold(stream),
+ stream_fmt->frame_rate,
+ &stream->sleep_interval_ts);
+ }
+
stream->next_cb_ts = *cb_ts;
/* Sets up the stream & dev pair. */
diff --git a/cras/src/server/dev_stream.h b/cras/src/server/dev_stream.h
index c39a8017..a4247518 100644
--- a/cras/src/server/dev_stream.h
+++ b/cras/src/server/dev_stream.h
@@ -46,10 +46,27 @@ struct dev_stream {
int is_running;
};
+/*
+ * Creates a dev_stream.
+ *
+ * Args:
+ * stream - The associated rstream.
+ * dev_id - Index of the device.
+ * dev_fmt - The format of the device.
+ * dev_ptr - A pointer to the device.
+ * cb_ts - A pointer to the initial callback time.
+ * sleep_interval_ts - A pointer to the initial sleep interval.
+ * Set to NULL to calculate the value from the device rate and block size.
+ * Note that this argument lets the output device sleep interval start from
+ * the input device sleep interval, so the two stay perfectly aligned in the
+ * WebRTC use case.
+ * Returns the pointer to the created dev_stream.
+ */
struct dev_stream *dev_stream_create(struct cras_rstream *stream,
unsigned int dev_id,
const struct cras_audio_format *dev_fmt,
- void *dev_ptr, struct timespec *cb_ts);
+ void *dev_ptr, struct timespec *cb_ts,
+ const struct timespec *sleep_interval_ts);
void dev_stream_destroy(struct dev_stream *dev_stream);
/*
diff --git a/cras/src/tests/audio_thread_unittest.cc b/cras/src/tests/audio_thread_unittest.cc
index 14e98243..34d8347c 100644
--- a/cras/src/tests/audio_thread_unittest.cc
+++ b/cras/src/tests/audio_thread_unittest.cc
@@ -56,6 +56,7 @@ static struct cras_iodev* cras_iodev_start_ramp_odev;
static enum CRAS_IODEV_RAMP_REQUEST cras_iodev_start_ramp_request;
static struct timespec clock_gettime_retspec;
static struct timespec init_cb_ts_;
+static struct timespec sleep_interval_ts_;
static std::map<const struct dev_stream*, struct timespec>
dev_stream_wake_time_val;
static int cras_device_monitor_set_device_mute_state_called;
@@ -1225,10 +1226,12 @@ struct dev_stream* dev_stream_create(struct cras_rstream* stream,
unsigned int dev_id,
const struct cras_audio_format* dev_fmt,
void* dev_ptr,
- struct timespec* cb_ts) {
+ struct timespec* cb_ts,
+ const struct timespec* sleep_interval_ts) {
struct dev_stream* out = static_cast<dev_stream*>(calloc(1, sizeof(*out)));
out->stream = stream;
init_cb_ts_ = *cb_ts;
+ sleep_interval_ts_ = *sleep_interval_ts;
return out;
}
@@ -1418,6 +1421,10 @@ int cras_iodev_drop_frames_by_time(struct cras_iodev* iodev,
return 0;
}
+bool cras_iodev_is_on_internal_card(const struct cras_ionode* node) {
+ return 0;
+}
+
// From librt.
int clock_gettime(clockid_t clk_id, struct timespec* tp) {
*tp = clock_gettime_retspec;
diff --git a/cras/src/tests/dev_io_stubs.cc b/cras/src/tests/dev_io_stubs.cc
index b74162b8..805fae3b 100644
--- a/cras/src/tests/dev_io_stubs.cc
+++ b/cras/src/tests/dev_io_stubs.cc
@@ -151,6 +151,11 @@ void add_stream_to_dev(IodevPtr& dev, const StreamPtr& stream) {
static_cast<size_t>(dev->max_cb_level));
dev->largest_cb_level = std::max(stream->rstream->cb_threshold,
static_cast<size_t>(dev->max_cb_level));
+
+ if (stream->rstream->master_dev.dev_id == NO_DEVICE) {
+ stream->rstream->master_dev.dev_id = dev->info.idx;
+ stream->rstream->master_dev.dev_ptr = dev.get();
+ }
}
void fill_audio_format(cras_audio_format* format, unsigned int rate) {
diff --git a/cras/src/tests/dev_io_unittest.cc b/cras/src/tests/dev_io_unittest.cc
index ab104aac..69a80960 100644
--- a/cras/src/tests/dev_io_unittest.cc
+++ b/cras/src/tests/dev_io_unittest.cc
@@ -8,6 +8,7 @@
#include <time.h>
#include <memory>
+#include <unordered_map>
extern "C" {
#include "cras_iodev.h" // stubbed
@@ -29,6 +30,13 @@ struct audio_thread_event_log* atlog;
static float dev_stream_capture_software_gain_scaler_val;
static float input_data_get_software_gain_scaler_val;
static unsigned int dev_stream_capture_avail_ret = 480;
+struct set_dev_rate_data {
+ unsigned int dev_rate;
+ double dev_rate_ratio;
+ double master_rate_ratio;
+ int coarse_rate_adjust;
+};
+std::unordered_map<struct dev_stream*, set_dev_rate_data> set_dev_rate_map;
namespace {
@@ -39,6 +47,7 @@ class DevIoSuite : public testing::Test {
iodev_stub_reset();
rstream_stub_reset();
fill_audio_format(&format, 48000);
+ set_dev_rate_map.clear();
stream = create_stream(1, 1, CRAS_STREAM_INPUT, cb_threshold, &format);
}
@@ -70,6 +79,7 @@ TEST_F(DevIoSuite, SendCapturedFails) {
TEST_F(DevIoSuite, CaptureGain) {
struct open_dev* dev_list = NULL;
+ struct open_dev* odev_list = NULL;
struct timespec ts;
DevicePtr dev = create_device(CRAS_STREAM_INPUT, cb_threshold, &format,
CRAS_NODE_TYPE_MIC);
@@ -82,20 +92,80 @@ TEST_F(DevIoSuite, CaptureGain) {
/* The applied scaler gain should match what is reported by input_data. */
dev->dev->active_node->ui_gain_scaler = 1.0f;
input_data_get_software_gain_scaler_val = 1.0f;
- dev_io_capture(&dev_list);
+ dev_io_capture(&dev_list, &odev_list);
EXPECT_EQ(1.0f, dev_stream_capture_software_gain_scaler_val);
input_data_get_software_gain_scaler_val = 0.99f;
- dev_io_capture(&dev_list);
+ dev_io_capture(&dev_list, &odev_list);
EXPECT_EQ(0.99f, dev_stream_capture_software_gain_scaler_val);
dev->dev->active_node->ui_gain_scaler = 0.6f;
input_data_get_software_gain_scaler_val = 0.7f;
- dev_io_capture(&dev_list);
+ dev_io_capture(&dev_list, &odev_list);
EXPECT_FLOAT_EQ(0.42f, dev_stream_capture_software_gain_scaler_val);
}
/*
+ * When input and output devices are on the internal sound card,
+ * and their device rates are the same, use the estimated rate
+ * on the output device as the estimated rate of the input device.
+ */
+TEST_F(DevIoSuite, CopyOutputEstimatedRate) {
+ struct open_dev* idev_list = NULL;
+ struct open_dev* odev_list = NULL;
+ struct timespec ts;
+ DevicePtr out_dev = create_device(CRAS_STREAM_OUTPUT, cb_threshold, &format,
+ CRAS_NODE_TYPE_INTERNAL_SPEAKER);
+ DevicePtr in_dev = create_device(CRAS_STREAM_INPUT, cb_threshold, &format,
+ CRAS_NODE_TYPE_MIC);
+
+ in_dev->dev->state = CRAS_IODEV_STATE_NORMAL_RUN;
+ iodev_stub_frames_queued(in_dev->dev.get(), 20, ts);
+ DL_APPEND(idev_list, in_dev->odev.get());
+ add_stream_to_dev(in_dev->dev, stream);
+ DL_APPEND(odev_list, out_dev->odev.get());
+ iodev_stub_on_internal_card(out_dev->dev->active_node, 1);
+ iodev_stub_on_internal_card(in_dev->dev->active_node, 1);
+
+ iodev_stub_est_rate_ratio(in_dev->dev.get(), 0.8f);
+ iodev_stub_est_rate_ratio(out_dev->dev.get(), 1.2f);
+
+ dev_io_capture(&idev_list, &odev_list);
+
+ EXPECT_FLOAT_EQ(1.2f, set_dev_rate_map[stream->dstream.get()].dev_rate_ratio);
+}
+
+/*
+ * When input and output devices are not both on the internal sound card,
+ * estimated rates are independent.
+ */
+TEST_F(DevIoSuite, InputOutputIndependentEstimatedRate) {
+ struct open_dev* idev_list = NULL;
+ struct open_dev* odev_list = NULL;
+ struct timespec ts;
+ DevicePtr out_dev = create_device(CRAS_STREAM_OUTPUT, cb_threshold, &format,
+ CRAS_NODE_TYPE_INTERNAL_SPEAKER);
+ DevicePtr in_dev = create_device(CRAS_STREAM_INPUT, cb_threshold, &format,
+ CRAS_NODE_TYPE_USB);
+
+ in_dev->dev->state = CRAS_IODEV_STATE_NORMAL_RUN;
+ iodev_stub_frames_queued(in_dev->dev.get(), 20, ts);
+ DL_APPEND(idev_list, in_dev->odev.get());
+ add_stream_to_dev(in_dev->dev, stream);
+ DL_APPEND(odev_list, out_dev->odev.get());
+ iodev_stub_on_internal_card(out_dev->dev->active_node, 1);
+ iodev_stub_on_internal_card(in_dev->dev->active_node, 0);
+
+ iodev_stub_est_rate_ratio(in_dev->dev.get(), 0.8f);
+ iodev_stub_est_rate_ratio(out_dev->dev.get(), 1.2f);
+ iodev_stub_update_rate(in_dev->dev.get(), 1);
+
+ dev_io_capture(&idev_list, &odev_list);
+
+ EXPECT_FLOAT_EQ(0.8f, set_dev_rate_map[stream->dstream.get()].dev_rate_ratio);
+}
+
+/*
* If any hw_level is larger than 1.5 * largest_cb_level and
* DROP_FRAMES_THRESHOLD_MS, reset all input devices.
*/
@@ -333,7 +403,15 @@ void dev_stream_set_dev_rate(struct dev_stream* dev_stream,
unsigned int dev_rate,
double dev_rate_ratio,
double master_rate_ratio,
- int coarse_rate_adjust) {}
+ int coarse_rate_adjust) {
+ set_dev_rate_data new_data;
+ new_data.dev_rate = dev_rate;
+ new_data.dev_rate_ratio = dev_rate_ratio;
+ new_data.master_rate_ratio = master_rate_ratio;
+ new_data.coarse_rate_adjust = coarse_rate_adjust;
+
+ set_dev_rate_map[dev_stream] = new_data;
+}
int dev_stream_capture_update_rstream(struct dev_stream* dev_stream) {
return 0;
}
@@ -373,7 +451,8 @@ struct dev_stream* dev_stream_create(struct cras_rstream* stream,
unsigned int dev_id,
const struct cras_audio_format* dev_fmt,
void* dev_ptr,
- struct timespec* cb_ts) {
+ struct timespec* cb_ts,
+ const struct timespec* sleep_interval_ts) {
return 0;
}
int cras_device_monitor_error_close(unsigned int dev_idx) {
diff --git a/cras/src/tests/dev_stream_unittest.cc b/cras/src/tests/dev_stream_unittest.cc
index 640ca932..b8c27170 100644
--- a/cras/src/tests/dev_stream_unittest.cc
+++ b/cras/src/tests/dev_stream_unittest.cc
@@ -334,7 +334,7 @@ TEST_F(CreateSuite, CreateSRC44to48) {
out_fmt.frame_rate = 48000; // Output from converter is device rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts);
+ dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for device output.
@@ -346,6 +346,24 @@ TEST_F(CreateSuite, CreateSRC44to48) {
dev_stream_destroy(dev_stream);
}
+TEST_F(CreateSuite, CreateOutputWithSchedule) {
+ struct dev_stream* dev_stream;
+ unsigned int dev_id = 9;
+ // init_cb_ts and non-null init_sleep_ts will be used.
+ struct timespec init_cb_ts = {1, 2};
+ struct timespec init_sleep_ts = {3, 4};
+
+ rstream_.direction = CRAS_STREAM_OUTPUT;
+ dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_48, (void*)0x55,
+ &init_cb_ts, &init_sleep_ts);
+
+ EXPECT_EQ(init_cb_ts.tv_sec, rstream_.next_cb_ts.tv_sec);
+ EXPECT_EQ(init_cb_ts.tv_nsec, rstream_.next_cb_ts.tv_nsec);
+ EXPECT_EQ(init_sleep_ts.tv_sec, rstream_.sleep_interval_ts.tv_sec);
+ EXPECT_EQ(init_sleep_ts.tv_nsec, rstream_.sleep_interval_ts.tv_nsec);
+ dev_stream_destroy(dev_stream);
+}
+
TEST_F(CreateSuite, CreateSRC44from48Input) {
struct dev_stream* dev_stream;
struct cras_audio_format processed_fmt = fmt_s16le_48;
@@ -358,7 +376,7 @@ TEST_F(CreateSuite, CreateSRC44from48Input) {
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
cras_rstream_post_processing_format_val = &processed_fmt;
dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts);
+ dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for device input.
@@ -378,8 +396,8 @@ TEST_F(CreateSuite, CreateSRC48to44) {
in_fmt.frame_rate = 48000; // Stream rate.
out_fmt.frame_rate = 44100; // Device rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
- dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55, &cb_ts);
+ dev_stream = dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55,
+ &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for stream input.
@@ -396,8 +414,8 @@ TEST_F(CreateSuite, CreateSRC48from44Input) {
in_fmt.frame_rate = 44100; // Device rate.
out_fmt.frame_rate = 48000; // Stream rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
- dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55, &cb_ts);
+ dev_stream = dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55,
+ &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for stream output.
@@ -414,7 +432,7 @@ TEST_F(CreateSuite, CreateSRC8to48) {
out_fmt.frame_rate = 48000; // Device rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts);
+ dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for device output.
@@ -435,7 +453,7 @@ TEST_F(CreateSuite, CreateSRC8from48Input) {
out_fmt.frame_rate = 8000; // Stream rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts);
+ dev_stream_create(&rstream_, 0, &fmt_s16le_48, (void*)0x55, &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for device input.
@@ -455,7 +473,7 @@ TEST_F(CreateSuite, CreateSRC48to8) {
out_fmt.frame_rate = 8000; // Device rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_8, (void*)0x55, &cb_ts);
+ dev_stream_create(&rstream_, 0, &fmt_s16le_8, (void*)0x55, &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for stream input.
@@ -473,7 +491,7 @@ TEST_F(CreateSuite, CreateSRC48from8Input) {
out_fmt.frame_rate = 48000; // Stream rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_8, (void*)0x55, &cb_ts);
+ dev_stream_create(&rstream_, 0, &fmt_s16le_8, (void*)0x55, &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for stream output.
@@ -490,8 +508,8 @@ TEST_F(CreateSuite, CreateSRC48MonoFrom44StereoInput) {
in_fmt.frame_rate = 44100; // Device rate.
out_fmt.frame_rate = 48000; // Stream rate.
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
- dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55, &cb_ts);
+ dev_stream = dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55,
+ &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
// Converter tmp and output buffers are large enough for stream output.
@@ -510,8 +528,8 @@ TEST_F(CreateSuite, CaptureAvailConvBufHasSamples) {
rstream_.format = fmt_s16le_48;
rstream_.direction = CRAS_STREAM_INPUT;
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
- dev_stream =
- dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55, &cb_ts);
+ dev_stream = dev_stream_create(&rstream_, 0, &fmt_s16le_44_1, (void*)0x55,
+ &cb_ts, NULL);
EXPECT_EQ(1, config_format_converter_called);
EXPECT_NE(static_cast<byte_buffer*>(NULL), dev_stream->conv_buffer);
EXPECT_LE(
@@ -538,7 +556,7 @@ TEST_F(CreateSuite, SetDevRateNotMasterDev) {
rstream_.master_dev.dev_id = 4;
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
dev_stream_set_dev_rate(dev_stream, 44100, 1.01, 1.0, 0);
EXPECT_EQ(1, cras_fmt_conv_set_linear_resample_rates_called);
@@ -567,7 +585,7 @@ TEST_F(CreateSuite, SetDevRateMasterDev) {
rstream_.master_dev.dev_id = dev_id;
config_format_converter_conv = reinterpret_cast<struct cras_fmt_conv*>(0x33);
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
dev_stream_set_dev_rate(dev_stream, 44100, 1.01, 1.0, 0);
EXPECT_EQ(1, cras_fmt_conv_set_linear_resample_rates_called);
@@ -661,7 +679,7 @@ TEST_F(CreateSuite, DevStreamFlushAudioMessages) {
unsigned int dev_id = 9;
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
dev_stream_flush_old_audio_messages(dev_stream);
EXPECT_EQ(1, cras_rstream_flush_old_audio_messages_called);
@@ -673,7 +691,7 @@ TEST_F(CreateSuite, DevStreamIsPending) {
unsigned int dev_id = 9;
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
// dev_stream_is_pending_reply is only a wrapper.
cras_rstream_is_pending_reply_ret = 0;
@@ -694,7 +712,7 @@ TEST_F(CreateSuite, StreamCanSend) {
rstream_.direction = CRAS_STREAM_INPUT;
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
// Assume there is a next_cb_ts on rstream.
rstream_.next_cb_ts.tv_sec = 1;
@@ -791,7 +809,7 @@ TEST_F(CreateSuite, StreamCanSendBulkAudio) {
rstream_.direction = CRAS_STREAM_INPUT;
rstream_.flags |= BULK_AUDIO_OK;
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
// Assume there is a next_cb_ts on rstream.
rstream_.next_cb_ts.tv_sec = 1;
@@ -864,7 +882,7 @@ TEST_F(CreateSuite, TriggerOnlyStreamSendOnlyOnce) {
rstream_.direction = CRAS_STREAM_INPUT;
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
dev_stream->stream->flags = TRIGGER_ONLY;
dev_stream->stream->triggered = 0;
@@ -896,7 +914,7 @@ TEST_F(CreateSuite, InputDevStreamWakeTimeByNextCbTs) {
rstream_.direction = CRAS_STREAM_INPUT;
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
// Assume there is a next_cb_ts on rstream.
rstream_.next_cb_ts.tv_sec = 1;
@@ -929,8 +947,8 @@ TEST_F(CreateSuite, InputDevStreamWakeTimeByDevice) {
int needed_frames_from_device = 0;
rstream_.direction = CRAS_STREAM_INPUT;
- dev_stream =
- dev_stream_create(&rstream_, dev_id, &fmt_s16le_48, (void*)0x55, &cb_ts);
+ dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_48, (void*)0x55,
+ &cb_ts, NULL);
// Assume there is a next_cb_ts on rstream, that is, 1.005 seconds.
rstream_.next_cb_ts.tv_sec = 1;
@@ -994,7 +1012,7 @@ TEST_F(CreateSuite, UpdateNextWakeTime) {
rstream_.direction = CRAS_STREAM_OUTPUT;
dev_stream = dev_stream_create(&rstream_, dev_id, &fmt_s16le_44_1,
- (void*)0x55, &cb_ts);
+ (void*)0x55, &cb_ts, NULL);
// Case 1: The new next_cb_ts is greater than now. Do not need to reschedule.
rstream_.next_cb_ts.tv_sec = 2;
diff --git a/cras/src/tests/iodev_stub.cc b/cras/src/tests/iodev_stub.cc
index 3dbb61d1..2e84faac 100644
--- a/cras/src/tests/iodev_stub.cc
+++ b/cras/src/tests/iodev_stub.cc
@@ -21,12 +21,30 @@ struct cb_data {
std::unordered_map<cras_iodev*, cb_data> frames_queued_map;
std::unordered_map<cras_iodev*, cb_data> valid_frames_map;
std::unordered_map<cras_iodev*, timespec> drop_time_map;
+std::unordered_map<const cras_iodev*, double> est_rate_ratio_map;
+std::unordered_map<const cras_iodev*, int> update_rate_map;
+std::unordered_map<const cras_ionode*, int> on_internal_card_map;
} // namespace
void iodev_stub_reset() {
frames_queued_map.clear();
valid_frames_map.clear();
drop_time_map.clear();
+ est_rate_ratio_map.clear();
+ update_rate_map.clear();
+ on_internal_card_map.clear();
+}
+
+void iodev_stub_est_rate_ratio(cras_iodev* iodev, double ratio) {
+ est_rate_ratio_map.insert({iodev, ratio});
+}
+
+void iodev_stub_update_rate(cras_iodev* iodev, int data) {
+ update_rate_map.insert({iodev, data});
+}
+
+void iodev_stub_on_internal_card(cras_ionode* node, int data) {
+ on_internal_card_map.insert({node, data});
}
void iodev_stub_frames_queued(cras_iodev* iodev, int ret, timespec ts) {
@@ -67,7 +85,11 @@ int cras_iodev_get_valid_frames(struct cras_iodev* iodev,
}
double cras_iodev_get_est_rate_ratio(const struct cras_iodev* iodev) {
- return 1.0;
+ auto elem = est_rate_ratio_map.find(iodev);
+ if (elem != est_rate_ratio_map.end()) {
+ return elem->second;
+ }
+ return 1.0f;
}
int cras_iodev_get_dsp_delay(const struct cras_iodev* iodev) {
@@ -93,6 +115,10 @@ struct dev_stream* cras_iodev_rm_stream(struct cras_iodev* iodev,
int cras_iodev_update_rate(struct cras_iodev* iodev,
unsigned int level,
struct timespec* level_tstamp) {
+ auto elem = update_rate_map.find(iodev);
+ if (elem != update_rate_map.end()) {
+ return elem->second;
+ }
return 0;
}
@@ -188,4 +214,12 @@ int cras_iodev_drop_frames_by_time(struct cras_iodev* iodev,
drop_time_map.insert({iodev, ts});
return 0;
}
+
+bool cras_iodev_is_on_internal_card(const struct cras_ionode* node) {
+ auto elem = on_internal_card_map.find(node);
+ if (elem != on_internal_card_map.end()) {
+ return elem->second;
+ }
+ return 1;
+}
} // extern "C"
diff --git a/cras/src/tests/iodev_stub.h b/cras/src/tests/iodev_stub.h
index dde1b9f4..e8016dd3 100644
--- a/cras/src/tests/iodev_stub.h
+++ b/cras/src/tests/iodev_stub.h
@@ -10,6 +10,12 @@
void iodev_stub_reset();
+void iodev_stub_est_rate_ratio(cras_iodev* iodev, double ratio);
+
+void iodev_stub_update_rate(cras_iodev* iodev, int data);
+
+void iodev_stub_on_internal_card(cras_ionode* node, int data);
+
void iodev_stub_frames_queued(cras_iodev* iodev, int ret, timespec ts);
void iodev_stub_valid_frames(cras_iodev* iodev, int ret, timespec ts);
diff --git a/cras/src/tests/iodev_unittest.cc b/cras/src/tests/iodev_unittest.cc
index 21dc4d57..24b2b38d 100644
--- a/cras/src/tests/iodev_unittest.cc
+++ b/cras/src/tests/iodev_unittest.cc
@@ -2404,6 +2404,20 @@ TEST(IoDev, DeviceOverrun) {
EXPECT_EQ(1, cras_audio_thread_event_dev_overrun_called);
}
+TEST(IoDev, OnInternalCard) {
+ static struct cras_ionode node;
+ node.type = CRAS_NODE_TYPE_INTERNAL_SPEAKER;
+ EXPECT_EQ(1, cras_iodev_is_on_internal_card(&node));
+ node.type = CRAS_NODE_TYPE_HEADPHONE;
+ EXPECT_EQ(1, cras_iodev_is_on_internal_card(&node));
+ node.type = CRAS_NODE_TYPE_MIC;
+ EXPECT_EQ(1, cras_iodev_is_on_internal_card(&node));
+ node.type = CRAS_NODE_TYPE_USB;
+ EXPECT_EQ(0, cras_iodev_is_on_internal_card(&node));
+ node.type = CRAS_NODE_TYPE_BLUETOOTH;
+ EXPECT_EQ(0, cras_iodev_is_on_internal_card(&node));
+}
+
extern "C" {
struct main_thread_event_log* main_log;
diff --git a/cras/src/tests/timing_unittest.cc b/cras/src/tests/timing_unittest.cc
index 8a2de65f..964f30c3 100644
--- a/cras/src/tests/timing_unittest.cc
+++ b/cras/src/tests/timing_unittest.cc
@@ -111,20 +111,21 @@ int clock_gettime(clockid_t clk_id, struct timespec* tp) {
// Add a new input stream, make sure the initial next_cb_ts is 0.
TEST_F(TimingSuite, NewInputStreamInit) {
- struct open_dev* dev_list_ = NULL;
+ struct open_dev* odev_list_ = NULL;
+ struct open_dev* idev_list_ = NULL;
cras_audio_format format;
fill_audio_format(&format, 48000);
DevicePtr dev =
create_device(CRAS_STREAM_INPUT, 1024, &format, CRAS_NODE_TYPE_MIC);
- DL_APPEND(dev_list_, dev->odev.get());
+ DL_APPEND(idev_list_, dev->odev.get());
struct cras_iodev* iodev = dev->odev->dev;
ShmPtr shm = create_shm(480);
RstreamPtr rstream =
create_rstream(1, CRAS_STREAM_INPUT, 480, &format, shm.get());
- dev_io_append_stream(&dev_list_, rstream.get(), &iodev, 1);
+ dev_io_append_stream(&odev_list_, &idev_list_, rstream.get(), &iodev, 1);
EXPECT_EQ(0, rstream->next_cb_ts.tv_sec);
EXPECT_EQ(0, rstream->next_cb_ts.tv_nsec);
@@ -806,23 +807,68 @@ TEST_F(TimingSuite, HotwordStreamBulkDataIsNotPending) {
// When a new output stream is added, there are two rules to determine the
// initial next_cb_ts.
-// 1. If the device already has streams, the next_cb_ts will be the earliest
+// 1. If there is a matched input stream, use the next_cb_ts and
+// sleep_interval_ts from that input stream as the initial values.
+// 2. If the device already has streams, the next_cb_ts will be the earliest
// next callback time from these streams.
-// 2. If there are no other streams, the next_cb_ts will be set to the time
+// 3. If there are no other streams, the next_cb_ts will be set to the time
// when the valid frames in device is lower than cb_threshold. (If it is
// already lower than cb_threshold, set next_cb_ts to now.)
// Test rule 1.
+// There is a matched input stream. The next_cb_ts of the newly added output
+// stream will use the next_cb_ts from the input stream.
+TEST_F(TimingSuite, NewOutputStreamInitExistMatchedStream) {
+ struct open_dev* odev_list_ = NULL;
+ struct open_dev* idev_list_ = NULL;
+
+ cras_audio_format format;
+ fill_audio_format(&format, 48000);
+ DevicePtr out_dev = create_device(CRAS_STREAM_OUTPUT, 1024, &format,
+ CRAS_NODE_TYPE_HEADPHONE);
+ DL_APPEND(odev_list_, out_dev->odev.get());
+ struct cras_iodev* out_iodev = out_dev->odev->dev;
+
+ DevicePtr in_dev =
+ create_device(CRAS_STREAM_INPUT, 1024, &format, CRAS_NODE_TYPE_MIC);
+ DL_APPEND(idev_list_, in_dev->odev.get());
+
+ StreamPtr in_stream = create_stream(1, 1, CRAS_STREAM_INPUT, 480, &format);
+ add_stream_to_dev(in_dev->dev, in_stream);
+ in_stream->rstream->next_cb_ts.tv_sec = 54321;
+ in_stream->rstream->next_cb_ts.tv_nsec = 12345;
+ in_stream->rstream->sleep_interval_ts.tv_sec = 321;
+ in_stream->rstream->sleep_interval_ts.tv_nsec = 123;
+
+ ShmPtr shm = create_shm(480);
+ RstreamPtr rstream =
+ create_rstream(1, CRAS_STREAM_OUTPUT, 480, &format, shm.get());
+
+ dev_io_append_stream(&odev_list_, &idev_list_, rstream.get(), &out_iodev, 1);
+
+ EXPECT_EQ(in_stream->rstream->next_cb_ts.tv_sec, rstream->next_cb_ts.tv_sec);
+ EXPECT_EQ(in_stream->rstream->next_cb_ts.tv_nsec,
+ rstream->next_cb_ts.tv_nsec);
+ EXPECT_EQ(in_stream->rstream->sleep_interval_ts.tv_sec,
+ rstream->sleep_interval_ts.tv_sec);
+ EXPECT_EQ(in_stream->rstream->sleep_interval_ts.tv_nsec,
+ rstream->sleep_interval_ts.tv_nsec);
+
+ dev_stream_destroy(out_iodev->streams);
+}
+
+// Test rule 2.
// The device already has streams, the next_cb_ts will be the earliest
// next_cb_ts from these streams.
TEST_F(TimingSuite, NewOutputStreamInitStreamInDevice) {
- struct open_dev* dev_list_ = NULL;
+ struct open_dev* odev_list_ = NULL;
+ struct open_dev* idev_list_ = NULL;
cras_audio_format format;
fill_audio_format(&format, 48000);
DevicePtr dev = create_device(CRAS_STREAM_OUTPUT, 1024, &format,
CRAS_NODE_TYPE_HEADPHONE);
- DL_APPEND(dev_list_, dev->odev.get());
+ DL_APPEND(odev_list_, dev->odev.get());
struct cras_iodev* iodev = dev->odev->dev;
StreamPtr stream = create_stream(1, 1, CRAS_STREAM_OUTPUT, 480, &format);
@@ -834,7 +880,7 @@ TEST_F(TimingSuite, NewOutputStreamInitStreamInDevice) {
RstreamPtr rstream =
create_rstream(1, CRAS_STREAM_OUTPUT, 480, &format, shm.get());
- dev_io_append_stream(&dev_list_, rstream.get(), &iodev, 1);
+ dev_io_append_stream(&odev_list_, &idev_list_, rstream.get(), &iodev, 1);
EXPECT_EQ(stream->rstream->next_cb_ts.tv_sec, rstream->next_cb_ts.tv_sec);
EXPECT_EQ(stream->rstream->next_cb_ts.tv_nsec, rstream->next_cb_ts.tv_nsec);
@@ -842,17 +888,18 @@ TEST_F(TimingSuite, NewOutputStreamInitStreamInDevice) {
dev_stream_destroy(iodev->streams->next);
}
-// Test rule 2.
+// Test rule 3.
// There are no streams and no frames in the device buffer. The next_cb_ts
// will be set to now.
TEST_F(TimingSuite, NewOutputStreamInitNoStreamNoFramesInDevice) {
- struct open_dev* dev_list_ = NULL;
+ struct open_dev* odev_list_ = NULL;
+ struct open_dev* idev_list_ = NULL;
cras_audio_format format;
fill_audio_format(&format, 48000);
DevicePtr dev = create_device(CRAS_STREAM_OUTPUT, 1024, &format,
CRAS_NODE_TYPE_HEADPHONE);
- DL_APPEND(dev_list_, dev->odev.get());
+ DL_APPEND(odev_list_, dev->odev.get());
struct cras_iodev* iodev = dev->odev->dev;
struct timespec start;
@@ -862,7 +909,7 @@ TEST_F(TimingSuite, NewOutputStreamInitNoStreamNoFramesInDevice) {
RstreamPtr rstream =
create_rstream(1, CRAS_STREAM_OUTPUT, 480, &format, shm.get());
- dev_io_append_stream(&dev_list_, rstream.get(), &iodev, 1);
+ dev_io_append_stream(&odev_list_, &idev_list_, rstream.get(), &iodev, 1);
EXPECT_EQ(start.tv_sec, rstream->next_cb_ts.tv_sec);
EXPECT_EQ(start.tv_nsec, rstream->next_cb_ts.tv_nsec);
@@ -875,13 +922,14 @@ TEST_F(TimingSuite, NewOutputStreamInitNoStreamNoFramesInDevice) {
// next_cb_ts will be set to the time that valid frames in device is lower
// than cb_threshold.
TEST_F(TimingSuite, NewOutputStreamInitNoStreamSomeFramesInDevice) {
- struct open_dev* dev_list_ = NULL;
+ struct open_dev* odev_list_ = NULL;
+ struct open_dev* idev_list_ = NULL;
cras_audio_format format;
fill_audio_format(&format, 48000);
DevicePtr dev = create_device(CRAS_STREAM_OUTPUT, 1024, &format,
CRAS_NODE_TYPE_HEADPHONE);
- DL_APPEND(dev_list_, dev->odev.get());
+ DL_APPEND(odev_list_, dev->odev.get());
struct cras_iodev* iodev = dev->odev->dev;
struct timespec start;
@@ -893,7 +941,7 @@ TEST_F(TimingSuite, NewOutputStreamInitNoStreamSomeFramesInDevice) {
RstreamPtr rstream =
create_rstream(1, CRAS_STREAM_OUTPUT, 480, &format, shm.get());
- dev_io_append_stream(&dev_list_, rstream.get(), &iodev, 1);
+ dev_io_append_stream(&odev_list_, &idev_list_, rstream.get(), &iodev, 1);
// The next_cb_ts should be 10ms from now. At that time there are
// only 480 valid frames in the device.