/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff
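
/*
 * Layout implied by the CIP_* masks above (a sketch, for reference only):
 *   quadlet 0: SID (bits 29..24), DBS (23..16), ..., DBC (7..0)
 *   quadlet 1: EOH (31), FMT (29..24), FDF (23..16), SYT (15..0)
 */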

/* TODO: make these configurable */
#define INTERRUPT_INTERVAL	16
#define QUEUE_LENGTH		48

#define IN_PACKET_HEADER_SIZE	4
#define OUT_PACKET_HEADER_SIZE	0

static void pcm_period_tasklet(unsigned long data);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_data_blocks: callback handler to process data blocks
 * @protocol_size: the size of the protocol-specific private data to allocate
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_data_blocks_t process_data_blocks,
		      unsigned int protocol_size)
{
	if (process_data_blocks == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;
	s->sync_slave = NULL;

	s->fmt = fmt;
	s->process_data_blocks = process_data_blocks;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s:		the AMDTP stream, which must be initialized.
 * @runtime:	the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	int err;

	/*
	 * Currently firewire-lib processes 16 packets in one software
	 * interrupt callback. This equals 2 msec, but the actual interval
	 * between interrupts has jitter.
	 * Additionally, even with a constraint to fit the period size to
	 * 2 msec, the calculated frames per period don't correspond to
	 * exactly 2 msec, depending on the sampling rate.
	 * In short, the interval at which snd_pcm_period_elapsed() is called
	 * cannot be exactly 2 msec. Use 5 msec as a safe minimum period time.
	 */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5000, UINT_MAX);
	if (err < 0)
		goto end;

	/* A non-blocking stream has no more constraints. */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals the SYT_INTERVAL; that is 8, 16 or 32, depending on
	 * the sampling rate. For an accurate period interrupt, it's
	 * preferable to align period/buffer sizes to the current
	 * SYT_INTERVAL.
	 *
	 * TODO: These constraints can be improved with proper rules.
	 * Currently apply the LCM of the SYT_INTERVALs.
	 */
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	/* default buffering in the device */
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
	if (s->flags & CIP_BLOCKING)
		/* additional buffering needed to adjust for no-data packets */
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
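	/*
	 * For example (illustrative only): at 48.0 kHz in blocking mode,
	 * syt_interval is 8, so transfer_delay = 0x2e00 - 3072 +
	 * 24576000 * 8 / 48000 = 8704 + 4096 = 12800 ticks (about 521 usec).
	 */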

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
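	/*
	 * A sketch of the formula below: 8 bytes of CIP header plus up to
	 * syt_interval data blocks of data_block_quadlets quadlets (4 bytes
	 * each); e.g. at 48.0 kHz with 10 quadlets per data block this is
	 * 8 + 8 * 10 * 4 = 328 bytes.  With CIP_JUMBO_PAYLOAD the maximum is
	 * five times that.
	 */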
	unsigned int multiplier = 1;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;

	return 8 + s->syt_interval * s->data_block_quadlets * 4 * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
	s->pointer_flush = true;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates an empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			/* Sample_rate / 8000 is an integer, and precomputed. */
			data_blocks = s->data_block_state;
		} else {
			phase = s->data_block_state;

		/*
		 * This calculates the number of data blocks per packet so that
		 * 1) the overall rate is correct and exactly synchronized to
		 *    the bus clock, and
		 * 2) packets with a rounded-up number of blocks occur as early
		 *    as possible in the sequence (to prevent underruns of the
		 *    device's buffer).
		 */
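		/*
		 * Sanity check (illustrative): at 44.1 kHz the average must
		 * be 44100 / 8000 = 5.5125 data blocks per cycle; over the
		 * 80-cycle phase period below, 41 packets carry 6 blocks and
		 * 39 carry 5, i.e. 441 blocks = 80 * 5.5125.
		 */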
			if (s->sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->last_syt_offset + s->syt_offset_state;
		else {
		/*
		 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
		 *   n * SYT_INTERVAL * 24576000 / sample_rate
		 * Modulo TICKS_PER_CYCLE, the difference between successive
		 * elements is about 1386.23.  Rounding the results of this
		 * formula to the SYT precision results in a sequence of
		 * differences that begins with:
		 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
		 * This code generates _exactly_ the same sequence.
		 */
			phase = s->syt_offset_state;
			index = phase % 13;
			syt_offset = s->last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->syt_offset_state = phase;
		}
	} else
		syt_offset = s->last_syt_offset - TICKS_PER_CYCLE;
	s->last_syt_offset = syt_offset;
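
	/*
	 * As computed below, the SYT value packs the low 4 bits of the
	 * target cycle into bits 15..12 and the offset within that cycle
	 * (0..3071 ticks) into bits 11..0; CIP_SYT_NO_INFO (0xffff) means
	 * that no timestamp is given.
	 */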

	if (syt_offset < TICKS_PER_CYCLE) {
		syt_offset += s->transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		return CIP_SYT_NO_INFO;
	}
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		s->pointer_flush = false;
		tasklet_hi_schedule(&s->period_tasklet);
	}
}

static void pcm_period_tasklet(unsigned long data)
{
	struct amdtp_stream *s = (void *)data;
	struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s,
			unsigned int header_length,
			unsigned int payload_length, bool skip)
{
	struct fw_iso_packet p = {0};
	int err = 0;

	if (IS_ERR(s->context))
		goto end;

	p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
	p.tag = TAG_CIP;
	p.header_length = header_length;
	p.payload_length = (!skip) ? payload_length : 0;
	p.skip = skip;
	err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= QUEUE_LENGTH)
		s->packet_index = 0;
end:
	return err;
}
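
/*
 * Note: queue_packet() requests an isochronous IRQ on every
 * INTERRUPT_INTERVAL-th (16th) packet, i.e. about every 2 msec of bus
 * cycles, which is the cadence the period-time constraint in
 * amdtp_stream_add_pcm_hw_constraints() accounts for.
 */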

static inline int queue_out_packet(struct amdtp_stream *s,
				   unsigned int payload_length, bool skip)
{
	return queue_packet(s, OUT_PACKET_HEADER_SIZE,
			    payload_length, skip);
}

static inline int queue_in_packet(struct amdtp_stream *s)
{
	return queue_packet(s, IN_PACKET_HEADER_SIZE,
			    amdtp_stream_get_max_payload(s), false);
}

static int handle_out_packet(struct amdtp_stream *s, unsigned int data_blocks,
			     unsigned int syt)
{
	__be32 *buffer;
	unsigned int payload_length;
	unsigned int pcm_frames;
	struct snd_pcm_substream *pcm;

	buffer = s->buffer.packets[s->packet_index].buffer;
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);
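	/*
	 * process_data_blocks() filled the payload at buffer + 2; the two
	 * CIP header quadlets in front of it are built below
	 * (quadlet 0: SID/DBS/DBC, quadlet 1: EOH/FMT/FDF/SYT).
	 */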

	buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				s->data_block_counter);
	buffer[1] = cpu_to_be32(CIP_EOH |
				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				(syt & CIP_SYT_MASK));

	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;

	payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;
	if (queue_out_packet(s, payload_length, false) < 0)
		return -EIO;

	pcm = ACCESS_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}

static int handle_in_packet(struct amdtp_stream *s,
			    unsigned int payload_quadlets, __be32 *buffer,
			    unsigned int *data_blocks, unsigned int syt)
{
	u32 cip_header[2];
	unsigned int fmt, fdf;
	unsigned int data_block_quadlets, data_block_counter, dbc_interval;
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;
	bool lost;

	cip_header[0] = be32_to_cpu(buffer[0]);
	cip_header[1] = be32_to_cpu(buffer[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824.
	 */
	if (((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	    ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		*data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Check whether the protocol is valid or not. */
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		*data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_quadlets < 3 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		data_block_quadlets =
			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

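		/* Two quadlets of CIP header precede the data blocks. */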
		*data_blocks = (payload_quadlets - 2) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	data_block_counter = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    s->data_block_counter != UINT_MAX)
		data_block_counter = s->data_block_counter;

	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
	     data_block_counter == s->tx_first_dbc) ||
	    s->data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = data_block_counter != s->data_block_counter;
	} else {
		if ((*data_blocks > 0) && (s->tx_dbc_interval > 0))
			dbc_interval = s->tx_dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = data_block_counter !=
		       ((s->data_block_counter + dbc_interval) & 0xff);
	}
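
	/*
	 * Example (illustrative): when the DBC counts the first data block
	 * of each packet, a packet following one that had DBC 0x10 and
	 * carried 8 data blocks must arrive with DBC 0x18; anything else is
	 * flagged as a discontinuity below.
	 */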

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			s->data_block_counter, data_block_counter);
		return -EIO;
	}

	pcm_frames = s->process_data_blocks(s, buffer + 2, *data_blocks, &syt);

	if (s->flags & CIP_DBC_IS_END_EVENT)
		s->data_block_counter = data_block_counter;
	else
		s->data_block_counter =
				(data_block_counter + *data_blocks) & 0xff;
end:
	if (queue_in_packet(s) < 0)
		return -EIO;

	pcm = ACCESS_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	return 0;
}

static void out_stream_callback(struct fw_iso_context *context, u32 cycle,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, syt, packets = header_length / 4;
	unsigned int data_blocks;

	if (s->packet_index < 0)
		return;

	/*
	 * Compute the cycle of the last queued packet.
	 * (We need only the four lowest bits for the SYT, so we can ignore
	 * that bits 0-11 must wrap around at 3072.)
	 */
	cycle += QUEUE_LENGTH - packets;

	for (i = 0; i < packets; ++i) {
		syt = calculate_syt(s, ++cycle);
		data_blocks = calculate_data_blocks(s, syt);

		if (handle_out_packet(s, data_blocks, syt) < 0) {
			s->packet_index = -1;
			amdtp_stream_pcm_abort(s);
			return;
		}
	}

	fw_iso_context_queue_flush(s->context);
}

static void in_stream_callback(struct fw_iso_context *context, u32 cycle,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int p, syt, packets;
	unsigned int payload_quadlets, max_payload_quadlets;
	unsigned int data_blocks;
	__be32 *buffer, *headers = header;

	if (s->packet_index < 0)
		return;

	/* The number of packets in buffer */
	packets = header_length / IN_PACKET_HEADER_SIZE;

	/* For buffer-over-run prevention. */
	max_payload_quadlets = amdtp_stream_get_max_payload(s) / 4;

	for (p = 0; p < packets; p++) {
		buffer = s->buffer.packets[s->packet_index].buffer;

		/* The number of quadlets in this packet */
		payload_quadlets =
			(be32_to_cpu(headers[p]) >> ISO_DATA_LENGTH_SHIFT) / 4;
		if (payload_quadlets > max_payload_quadlets) {
			dev_err(&s->unit->device,
				"Detect jumbo payload: %02x %02x\n",
				payload_quadlets, max_payload_quadlets);
			s->packet_index = -1;
			break;
		}

		syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK;
		if (handle_in_packet(s, payload_quadlets, buffer,
						&data_blocks, syt) < 0) {
			s->packet_index = -1;
			break;
		}

		/* Process sync slave stream */
		if (s->sync_slave && s->sync_slave->callbacked) {
			if (handle_out_packet(s->sync_slave,
					      data_blocks, syt) < 0) {
				s->packet_index = -1;
				break;
			}
		}
	}

	/* A queueing error occurred or a discontinuity was detected. */
	if (s->packet_index < 0) {
		amdtp_stream_pcm_abort(s);

		/* Abort sync slave. */
		if (s->sync_slave) {
			s->sync_slave->packet_index = -1;
			amdtp_stream_pcm_abort(s->sync_slave);
		}
		return;
	}

	/* When synchronized to the device, flush the packets for the slave stream. */
	if (s->sync_slave && s->sync_slave->callbacked)
		fw_iso_context_queue_flush(s->sync_slave->context);

	fw_iso_context_queue_flush(s->context);
}

/* processing is done by master callback */
static void slave_stream_callback(struct fw_iso_context *context, u32 cycle,
				  size_t header_length, void *header,
				  void *private_data)
{
	return;
}

/* this is executed one time */
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 cycle, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, we are prepared to transmit the first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM)
		context->callback.sc = in_stream_callback;
	else if (s->flags & CIP_SYNC_TO_DEVICE)
		context->callback.sc = slave_stream_callback;
	else
		context->callback.sc = out_stream_callback;

	context->callback.sc(context, cycle, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
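	/*
	 * These values seed s->data_block_state and s->syt_offset_state
	 * below.  For the rates divisible by 8000, data_block is simply
	 * rate / 8000 and syt_offset is the constant per-cycle increment
	 * used by calculate_syt(); the 44.1 kHz family instead uses both
	 * fields as phase counters for the incremental sequences in
	 * calculate_data_blocks() and calculate_syt().
	 */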
	unsigned int header_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM &&
	    s->flags & CIP_SKIP_INIT_DBC_CHECK)
		s->data_block_counter = UINT_MAX;
	else
		s->data_block_counter = 0;
	s->data_block_state = initial_state[s->sfc].data_block;
	s->syt_offset_state = initial_state[s->sfc].syt_offset;
	s->last_syt_offset = TICKS_PER_CYCLE;

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		header_size = IN_PACKET_HEADER_SIZE;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		header_size = OUT_PACKET_HEADER_SIZE;
	}
	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
				      amdtp_stream_get_max_payload(s), dir);
	if (err < 0)
		goto err_unlock;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	s->packet_index = 0;
	do {
		if (s->direction == AMDTP_IN_STREAM)
			err = queue_in_packet(s);
		else
			err = queue_out_packet(s, 0, true);
		if (err < 0)
			goto err_context;
	} while (s->packet_index > 0);
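	/*
	 * At this point each of the QUEUE_LENGTH packet slots has been
	 * queued once; queue_packet() wraps packet_index back to 0 after
	 * the last slot, which is what terminates the loop above.
	 */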

	/* NOTE: TAG1 matches CIP. This just affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if (s->flags & CIP_EMPTY_WITH_TAG0)
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_context;

	mutex_unlock(&s->mutex);

	return 0;

err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
EXPORT_SYMBOL(amdtp_stream_start);

/**
 * amdtp_stream_pcm_pointer - get the PCM buffer position
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
{
	/* this optimization is allowed to be racy */
	if (s->pointer_flush && amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);
	else
		s->pointer_flush = true;

	return ACCESS_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	ACCESS_ONCE(s->source_node_id_field) =
		(fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
								CIP_SID_MASK;
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	tasklet_kill(&s->period_tasklet);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_stop);

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = ACCESS_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);