Diffstat (limited to 'sound/firewire/amdtp-stream.c')
-rw-r--r-- | sound/firewire/amdtp-stream.c | 65
1 file changed, 32 insertions(+), 33 deletions(-)
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9be2260e4ca2..881e30c049fc 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -349,26 +349,25 @@ void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
 
 static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
-				      const unsigned int seq_size, unsigned int seq_tail,
-				      unsigned int count)
+				      unsigned int size, unsigned int pos, unsigned int count)
 {
 	const unsigned int syt_interval = s->syt_interval;
 	int i;
 
 	for (i = 0; i < count; ++i) {
-		struct seq_desc *desc = descs + seq_tail;
+		struct seq_desc *desc = descs + pos;
 
 		if (desc->syt_offset != CIP_SYT_NO_INFO)
 			desc->data_blocks = syt_interval;
 		else
 			desc->data_blocks = 0;
 
-		seq_tail = (seq_tail + 1) % seq_size;
+		pos = (pos + 1) % size;
 	}
 }
 
 static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
-						const unsigned int seq_size, unsigned int seq_tail,
+						unsigned int size, unsigned int pos,
 						unsigned int count)
 {
 	const enum cip_sfc sfc = s->sfc;
@@ -376,7 +375,7 @@ static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct se
 	int i;
 
 	for (i = 0; i < count; ++i) {
-		struct seq_desc *desc = descs + seq_tail;
+		struct seq_desc *desc = descs + pos;
 
 		if (!cip_sfc_is_base_44100(sfc)) {
 			// Sample_rate / 8000 is an integer, and precomputed.
@@ -403,7 +402,7 @@ static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct se
 			state = phase;
 		}
 
-		seq_tail = (seq_tail + 1) % seq_size;
+		pos = (pos + 1) % size;
 	}
 
 	s->ctx_data.rx.data_block_state = state;
@@ -449,8 +448,7 @@ static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
 }
 
 static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
-				   const unsigned int seq_size, unsigned int seq_tail,
-				   unsigned int count)
+				   unsigned int size, unsigned int pos, unsigned int count)
 {
 	const enum cip_sfc sfc = s->sfc;
 	unsigned int last = s->ctx_data.rx.last_syt_offset;
@@ -458,11 +456,11 @@ static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *desc
 	int i;
 
 	for (i = 0; i < count; ++i) {
-		struct seq_desc *desc = descs + seq_tail;
+		struct seq_desc *desc = descs + pos;
 
 		desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
 
-		seq_tail = (seq_tail + 1) % seq_size;
+		pos = (pos + 1) % size;
 	}
 
 	s->ctx_data.rx.last_syt_offset = last;
@@ -531,52 +529,49 @@ static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsi
 	s->ctx_data.tx.cache.tail = cache_tail;
 }
 
-static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
+static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+				 unsigned int pos, unsigned int count)
 {
-	struct seq_desc *descs = s->ctx_data.rx.seq.descs;
-	unsigned int seq_tail = s->ctx_data.rx.seq.tail;
-	const unsigned int seq_size = s->ctx_data.rx.seq.size;
-
-	pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);
+	pool_ideal_syt_offsets(s, descs, size, pos, count);
 
 	if (s->flags & CIP_BLOCKING)
-		pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
+		pool_blocking_data_blocks(s, descs, size, pos, count);
 	else
-		pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);
-
-	s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
+		pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
 }
 
-static void pool_replayed_seq(struct amdtp_stream *s, unsigned int count)
+static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+			      unsigned int pos, unsigned int count)
 {
 	struct amdtp_stream *target = s->ctx_data.rx.replay_target;
 	const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
 	const unsigned int cache_size = target->ctx_data.tx.cache.size;
 	unsigned int cache_head = s->ctx_data.rx.cache_head;
-	struct seq_desc *descs = s->ctx_data.rx.seq.descs;
-	const unsigned int seq_size = s->ctx_data.rx.seq.size;
-	unsigned int seq_tail = s->ctx_data.rx.seq.tail;
 	int i;
 
 	for (i = 0; i < count; ++i) {
-		descs[seq_tail] = cache[cache_head];
-		seq_tail = (seq_tail + 1) % seq_size;
+		descs[pos] = cache[cache_head];
 		cache_head = (cache_head + 1) % cache_size;
+		pos = (pos + 1) % size;
	}
 
-	s->ctx_data.rx.seq.tail = seq_tail;
 	s->ctx_data.rx.cache_head = cache_head;
 }
 
 static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
 {
 	struct amdtp_domain *d = s->domain;
+	struct seq_desc *descs = s->ctx_data.rx.seq.descs;
+	const unsigned int size = s->ctx_data.rx.seq.size;
+	unsigned int pos = s->ctx_data.rx.seq.pos;
+	void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+			       unsigned int pos, unsigned int count);
 
 	if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
-		pool_ideal_seq_descs(s, count);
+		pool_seq_descs = pool_ideal_seq_descs;
 	} else {
 		if (!d->replay.on_the_fly) {
-			pool_replayed_seq(s, count);
+			pool_seq_descs = pool_replayed_seq;
 		} else {
 			struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
 			const unsigned int cache_size = tx->ctx_data.tx.cache.size;
@@ -584,11 +579,15 @@ static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
 			unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_head);
 
 			if (cached_cycles > count && cached_cycles > cache_size / 2)
-				pool_replayed_seq(s, count);
+				pool_seq_descs = pool_replayed_seq;
 			else
-				pool_ideal_seq_descs(s, count);
+				pool_seq_descs = pool_ideal_seq_descs;
 		}
 	}
+
+	pool_seq_descs(s, descs, size, pos, count);
+
+	s->ctx_data.rx.seq.pos = (pos + count) % size;
 }
 
 static void update_pcm_pointers(struct amdtp_stream *s,
@@ -1644,7 +1643,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 		goto err_context;
 	}
 	s->ctx_data.rx.seq.size = queue_size;
-	s->ctx_data.rx.seq.tail = 0;
+	s->ctx_data.rx.seq.pos = 0;
 	s->ctx_data.rx.seq.head = 0;
 
 	entry = &initial_state[s->sfc];
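The diff above follows one pattern: each pooling helper now receives the sequence descriptor buffer, its size, and a starting position as arguments, while pool_seq_descs() selects the helper through a local function pointer, calls it once, and advances the stored ring position (seq.pos) in a single place. The standalone sketch below illustrates that dispatch-and-advance pattern in isolation; it is not driver code, and all names in it (demo_desc, demo_ring, fill_ideal, fill_replayed, pool_demo) are invented for the example.

#include <stdio.h>

/* Hypothetical stand-ins for a descriptor ring; not the kernel structures. */
struct demo_desc {
	unsigned int value;
};

struct demo_ring {
	struct demo_desc descs[8];
	unsigned int size;	/* number of slots in descs[] */
	unsigned int pos;	/* next slot to fill, wraps modulo size */
};

/* One pooling strategy: fill 'count' slots starting at 'pos'. Like the
 * refactored helpers, it takes buffer, size, and position as arguments and
 * never touches the ring's stored position. */
static void fill_ideal(struct demo_desc *descs, unsigned int size,
		       unsigned int pos, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; ++i) {
		descs[pos].value = 100 + i;
		pos = (pos + 1) % size;
	}
}

/* An alternative strategy, selectable at run time. */
static void fill_replayed(struct demo_desc *descs, unsigned int size,
			  unsigned int pos, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; ++i) {
		descs[pos].value = 200 + i;
		pos = (pos + 1) % size;
	}
}

/* Mirror of the dispatch in pool_seq_descs(): pick a routine via a local
 * function pointer, call it once, then advance the stored position in
 * exactly one place. */
static void pool_demo(struct demo_ring *ring, unsigned int count, int replay)
{
	void (*fill)(struct demo_desc *descs, unsigned int size,
		     unsigned int pos, unsigned int count);

	if (replay)
		fill = fill_replayed;
	else
		fill = fill_ideal;

	fill(ring->descs, ring->size, ring->pos, count);

	ring->pos = (ring->pos + count) % ring->size;
}

int main(void)
{
	struct demo_ring ring = { .size = 8, .pos = 6 };
	unsigned int i;

	pool_demo(&ring, 4, 0);	/* fills slots 6, 7, 0, 1; pos wraps to 2 */

	for (i = 0; i < ring.size; ++i)
		printf("slot %u: %u\n", i, ring.descs[i].value);
	printf("next pos: %u\n", ring.pos);
	return 0;
}

Pulling the position update out of the helpers keeps the wrap-around arithmetic in one spot, so adding another pooling strategy only requires another function with the same signature.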