summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorArun Raghavan <arun@arunraghavan.net>2017-02-28 10:45:25 +0530
committerArun Raghavan <arun@arunraghavan.net>2017-03-09 22:17:48 +0530
commitc82e4913e8666a47f11f91a313075ecdd7163d5a (patch)
tree3d75c716da8b56150c9f5ee0f59a26e72f855794
parent2898a5188c8aea99f4b0e9492981d48099a499f4 (diff)
alsa: Avoid creating tiny memchunks on write iterations
If the ALSA device supports granular pointer reporting, we end up in a situation where we write out a bunch of data, iterate, and then find a small amount of data available in the buffer (consumed while we were writing data into the available buffer space). We do this 10 times before quitting the write loop. This is inefficient in itself, but can also have wider consequences. For example, with module-combine-sink, this will end up pushing the same small chunks to all other devices too. Given both of these, it just makes sense to not try to write out data unless a minimum threshold is available. This could potentially be a fragment, but it's likely most robust to just work with a fraction of the total available buffer size.
-rw-r--r-- src/modules/alsa/alsa-sink.c | 20
1 file changed, 18 insertions, 2 deletions
diff --git a/src/modules/alsa/alsa-sink.c b/src/modules/alsa/alsa-sink.c
index 886c735f..073413f0 100644
--- a/src/modules/alsa/alsa-sink.c
+++ b/src/modules/alsa/alsa-sink.c
@@ -88,6 +88,8 @@
#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
+#define DEFAULT_WRITE_ITERATION_THRESHOLD 0.03 /* don't iterate write if < 3% of the buffer is available */
+
struct userdata {
pa_core *core;
pa_module *module;
@@ -580,12 +582,19 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bo
break;
}
- if (++j > 10) {
+ j++;
+
+ if (j > 10) {
#ifdef DEBUG_TIMING
pa_log_debug("Not filling up, because already too many iterations.");
#endif
break;
+ } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
+#ifdef DEBUG_TIMING
+ pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
+#endif
+ break;
}
n_bytes -= u->hwbuf_unused;
@@ -754,12 +763,19 @@ static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bo
break;
}
- if (++j > 10) {
+ j++;
+
+ if (j > 10) {
#ifdef DEBUG_TIMING
pa_log_debug("Not filling up, because already too many iterations.");
#endif
break;
+ } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
+#ifdef DEBUG_TIMING
+ pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
+#endif
+ break;
}
n_bytes -= u->hwbuf_unused;