// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
* Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
*/
#include "esdacc.h"
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ktime.h>
/* esdACC ID register layout */
#define ACC_ID_ID_MASK GENMASK(28, 0)
#define ACC_ID_EFF_FLAG BIT(29)
/* esdACC DLC register layout */
#define ACC_DLC_DLC_MASK GENMASK(3, 0)
#define ACC_DLC_RTR_FLAG BIT(4)
#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */
/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */
#define ACC_DLC_TXD_FLAG BIT(5)
/* ecc value of esdACC equals SJA1000's ECC register */
#define ACC_ECC_SEG 0x1f
#define ACC_ECC_DIR 0x20
#define ACC_ECC_BIT 0x00
#define ACC_ECC_FORM 0x40
#define ACC_ECC_STUFF 0x80
#define ACC_ECC_MASK 0xc0
/* esdACC Status Register bits. Unused bits not documented. */
#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)
/* esdACC Overview Module BM_IRQ_Mask register related defines */
/* Two bit wide command masks to mask or unmask a single core IRQ */
#define ACC_BM_IRQ_UNMASK BIT(0)
#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)
/* Command to unmask all IRQ sources, created by replicating the
 * two-bit wide ACC_BM_IRQ_UNMASK command 16 times.
 */
#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
static void acc_resetmode_enter(struct acc_core *core)
{
acc_set_bits(core, ACC_CORE_OF_CTRL,
ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
}
static void acc_resetmode_leave(struct acc_core *core)
{
acc_clear_bits(core, ACC_CORE_OF_CTRL,
ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
}
static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc,
const void *data)
{
acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
*((const u32 *)(data + 4)));
acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
*((const u32 *)data));
acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
	/* The CAN ID must be written last; this write starts the TX. */
acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
}
static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
{
++tx_fifo_idx;
if (tx_fifo_idx >= core->tx_fifo_size)
tx_fifo_idx = 0U;
return tx_fifo_idx;
}
/* Convert timestamp from esdACC time stamp ticks to ns
*
* The conversion factor ts2ns from time stamp counts to ns is basically
* ts2ns = NSEC_PER_SEC / timestamp_frequency
*
* We handle here only a fixed timestamp frequency of 80MHz. The
* resulting ts2ns factor would be 12.5.
*
 * So we multiply the timestamp by 12 and then add half of it to get
 * the multiplication by 12.5. This way any overflow is avoided until
 * ktime_t itself overflows.
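 *
 * Worked example: ts = 5 ticks -> 5 * 12 + (5 >> 1) = 62 ns, whereas
 * the exact value is 62.5 ns; the truncation error stays below 1 ns
 * per conversion.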
*/
#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
#define ACC_TS_80MHZ_SHIFT 1
static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
{
u64 ns;
ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);
return ns_to_ktime(ns);
}
#undef ACC_TS_FACTOR
#undef ACC_TS_80MHZ_SHIFT
void acc_init_ov(struct acc_ov *ov, struct device *dev)
{
u32 temp;
temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
ov->version = temp;
ov->features = (temp >> 16);
temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
ov->total_cores = temp;
ov->active_cores = (temp >> 8);
ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);
	/* Depending on the esdACC NEW_PSC feature, either enable the new
	 * prescaler or adjust core_frequency for the implicit division by 2.
	 */
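	/* E.g. a CAN core frequency readout of 80 MHz is used as-is with
	 * NEW_PSC, but means an effective 40 MHz core clock on hardware
	 * without it.
	 */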
if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
acc_ov_set_bits(ov, ACC_OV_OF_MODE,
ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
} else {
ov->core_frequency /= 2;
}
dev_dbg(dev,
"esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
ov->version, ov->core_frequency, ov->timestamp_frequency,
ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
ov->active_cores, ov->total_cores);
}
void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
{
unsigned int u;
/* DMA buffer layout as follows where N is the number of CAN cores
* implemented in the FPGA, i.e. N = ov->total_cores
*
* Section Layout Section size
* ----------------------------------------------
* FIFO Card/Overview ACC_CORE_DMABUF_SIZE
* FIFO Core0 ACC_CORE_DMABUF_SIZE
* ... ...
* FIFO CoreN ACC_CORE_DMABUF_SIZE
* irq_cnt Card/Overview sizeof(u32)
* irq_cnt Core0 sizeof(u32)
* ... ...
* irq_cnt CoreN sizeof(u32)
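	 *
	 * E.g. with N = 2 total cores, Core1's irq_cnt counter lives at
	 * mem + 3 * ACC_CORE_DMABUF_SIZE + 2 * sizeof(u32).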
*/
ov->bmfifo.messages = mem;
ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;
for (u = 0U; u < ov->active_cores; u++) {
struct acc_core *core = &cores[u];
core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
}
}
int acc_open(struct net_device *netdev)
{
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
u32 tx_fifo_status;
u32 ctrl;
int err;
/* Retry to enter RESET mode if out of sync. */
if (priv->can.state != CAN_STATE_STOPPED) {
netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
__func__, can_get_state_str(priv->can.state));
acc_resetmode_enter(core);
priv->can.state = CAN_STATE_STOPPED;
}
err = open_candev(netdev);
if (err)
return err;
ctrl = ACC_REG_CTRL_MASK_IE_RXTX |
ACC_REG_CTRL_MASK_IE_TXERROR |
ACC_REG_CTRL_MASK_IE_ERRWARN |
ACC_REG_CTRL_MASK_IE_OVERRUN |
ACC_REG_CTRL_MASK_IE_ERRPASS;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
ctrl |= ACC_REG_CTRL_MASK_LOM;
acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl);
acc_resetmode_leave(core);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Resync TX FIFO indices to HW state after (re-)start. */
tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
core->tx_fifo_head = tx_fifo_status & 0xff;
core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;
netif_start_queue(netdev);
return 0;
}
int acc_close(struct net_device *netdev)
{
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
acc_clear_bits(core, ACC_CORE_OF_CTRL,
ACC_REG_CTRL_MASK_IE_RXTX |
ACC_REG_CTRL_MASK_IE_TXERROR |
ACC_REG_CTRL_MASK_IE_ERRWARN |
ACC_REG_CTRL_MASK_IE_OVERRUN |
ACC_REG_CTRL_MASK_IE_ERRPASS |
ACC_REG_CTRL_MASK_IE_BUSERR);
netif_stop_queue(netdev);
acc_resetmode_enter(core);
priv->can.state = CAN_STATE_STOPPED;
/* Mark pending TX requests to be aborted after controller restart. */
acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
/* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */
acc_clear_bits(core, ACC_CORE_OF_CTRL,
ACC_REG_CTRL_MASK_LOM);
close_candev(netdev);
return 0;
}
netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
struct can_frame *cf = (struct can_frame *)skb->data;
u8 tx_fifo_head = core->tx_fifo_head;
int fifo_usage;
u32 acc_id;
u32 acc_dlc;
	if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* Access core->tx_fifo_tail only once because it may be changed
* from the interrupt level.
*/
fifo_usage = tx_fifo_head - core->tx_fifo_tail;
if (fifo_usage < 0)
fifo_usage += core->tx_fifo_size;
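	/* Example: head = 1, tail = 6, size = 8 -> usage = (1 - 6) + 8 = 3. */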
if (fifo_usage >= core->tx_fifo_size - 1) {
netdev_err(core->netdev,
"BUG: TX ring full when queue awake!\n");
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
if (fifo_usage == core->tx_fifo_size - 2)
netif_stop_queue(netdev);
acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
if (cf->can_id & CAN_RTR_FLAG)
acc_dlc |= ACC_DLC_RTR_FLAG;
if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
acc_dlc |= ACC_DLC_SSTX_FLAG;
if (cf->can_id & CAN_EFF_FLAG) {
acc_id = cf->can_id & CAN_EFF_MASK;
acc_id |= ACC_ID_EFF_FLAG;
} else {
acc_id = cf->can_id & CAN_SFF_MASK;
}
can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);
core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);
acc_txq_put(core, acc_id, acc_dlc, cf->data);
return NETDEV_TX_OK;
}
int acc_get_berr_counter(const struct net_device *netdev,
struct can_berr_counter *bec)
{
struct acc_net_priv *priv = netdev_priv(netdev);
u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);
bec->txerr = (core_status >> 8) & 0xff;
bec->rxerr = core_status & 0xff;
return 0;
}
int acc_set_mode(struct net_device *netdev, enum can_mode mode)
{
struct acc_net_priv *priv = netdev_priv(netdev);
switch (mode) {
case CAN_MODE_START:
/* Paranoid FIFO index check. */
{
const u32 tx_fifo_status =
acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
const u8 hw_fifo_head = tx_fifo_status;
if (hw_fifo_head != priv->core->tx_fifo_head ||
hw_fifo_head != priv->core->tx_fifo_tail) {
netdev_warn(netdev,
"TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
priv->core->tx_fifo_tail,
priv->core->tx_fifo_head,
tx_fifo_status);
}
}
acc_resetmode_leave(priv->core);
		/* To leave the bus-off state, the esdACC controller now begins a
		 * grace period in which it counts 128 "idle conditions" (each of
		 * 11 consecutive recessive bits) on the bus, as required by the
		 * CAN specification.
*
* During this time the TX FIFO may still contain already
* aborted "zombie" frames that are only drained from the FIFO
* at the end of the grace period.
*
		 * So as not to interfere with this drain process, we don't
* call netif_wake_queue() here. When the controller reaches
* the error-active state again, it informs us about that
* with an acc_bmmsg_errstatechange message. Then
* netif_wake_queue() is called from
* handle_core_msg_errstatechange() instead.
*/
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
int acc_set_bittiming(struct net_device *netdev)
{
struct acc_net_priv *priv = netdev_priv(netdev);
const struct can_bittiming *bt = &priv->can.bittiming;
u32 brp;
u32 btr;
if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
u32 fbtr = 0;
netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
bt->brp, bt->prop_seg,
bt->phase_seg1, bt->phase_seg2, bt->sjw);
brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);
btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);
/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
brp, btr, fbtr);
} else {
netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
bt->brp, bt->prop_seg,
bt->phase_seg1, bt->phase_seg2, bt->sjw);
brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);
btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);
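		/* Example: brp = 8, prop_seg = 5, phase_seg1 = 6, phase_seg2 = 4
		 * and sjw = 2 yield the field values BRP = 7, TSEG1 = 10,
		 * TSEG2 = 3 and SJW = 1.
		 */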
/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
}
return 0;
}
static void handle_core_msg_rxtxdone(struct acc_core *core,
const struct acc_bmmsg_rxtxdone *msg)
{
struct acc_net_priv *priv = netdev_priv(core->netdev);
struct net_device_stats *stats = &core->netdev->stats;
struct sk_buff *skb;
if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
u8 tx_fifo_tail = core->tx_fifo_tail;
if (core->tx_fifo_head == tx_fifo_tail) {
netdev_warn(core->netdev,
"TX interrupt, but queue is empty!?\n");
return;
}
		/* Access the echo skb directly to attach the HW timestamp. */
skb = priv->can.echo_skb[tx_fifo_tail];
if (skb) {
skb_hwtstamps(skb)->hwtstamp =
acc_ts2ktime(priv->ov, msg->ts);
}
stats->tx_packets++;
stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
NULL);
core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
netif_wake_queue(core->netdev);
} else {
struct can_frame *cf;
skb = alloc_can_skb(core->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
return;
}
cf->can_id = msg->id & ACC_ID_ID_MASK;
if (msg->id & ACC_ID_EFF_FLAG)
cf->can_id |= CAN_EFF_FLAG;
can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
priv->can.ctrlmode);
if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
cf->can_id |= CAN_RTR_FLAG;
} else {
memcpy(cf->data, msg->data, cf->len);
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
netif_rx(skb);
}
}
static void handle_core_msg_txabort(struct acc_core *core,
const struct acc_bmmsg_txabort *msg)
{
struct net_device_stats *stats = &core->netdev->stats;
u8 tx_fifo_tail = core->tx_fifo_tail;
u32 abort_mask = msg->abort_mask; /* u32 extend to avoid warnings later */
/* The abort_mask shows which frames were aborted in esdACC's FIFO. */
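	/* Example: tail = 3, head = 5, abort_mask = 0x18 -> the echo skbs at
	 * indices 3 and 4 are freed and the tail catches up with the head.
	 */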
while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) {
const u32 tail_mask = (1U << tx_fifo_tail);
if (!(abort_mask & tail_mask))
break;
abort_mask &= ~tail_mask;
can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
stats->tx_dropped++;
stats->tx_aborted_errors++;
tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
}
core->tx_fifo_tail = tx_fifo_tail;
if (abort_mask)
netdev_warn(core->netdev, "Unhandled aborted messages\n");
if (!acc_resetmode_entered(core))
netif_wake_queue(core->netdev);
}
static void handle_core_msg_overrun(struct acc_core *core,
const struct acc_bmmsg_overrun *msg)
{
struct acc_net_priv *priv = netdev_priv(core->netdev);
struct net_device_stats *stats = &core->netdev->stats;
struct can_frame *cf;
struct sk_buff *skb;
/* lost_cnt may be 0 if not supported by esdACC version */
if (msg->lost_cnt) {
stats->rx_errors += msg->lost_cnt;
stats->rx_over_errors += msg->lost_cnt;
} else {
stats->rx_errors++;
stats->rx_over_errors++;
}
skb = alloc_can_err_skb(core->netdev, &cf);
if (!skb)
return;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
netif_rx(skb);
}
static void handle_core_msg_buserr(struct acc_core *core,
const struct acc_bmmsg_buserr *msg)
{
struct acc_net_priv *priv = netdev_priv(core->netdev);
struct net_device_stats *stats = &core->netdev->stats;
struct can_frame *cf;
struct sk_buff *skb;
const u32 reg_status = msg->reg_status;
const u8 rxerr = reg_status;
const u8 txerr = (reg_status >> 8);
u8 can_err_prot_type = 0U;
priv->can.can_stats.bus_error++;
	/* ECC_DIR set means the error occurred during reception,
	 * otherwise during transmission.
	 */
if (msg->ecc & ACC_ECC_DIR) {
stats->rx_errors++;
} else {
can_err_prot_type |= CAN_ERR_PROT_TX;
stats->tx_errors++;
}
/* Determine error type */
switch (msg->ecc & ACC_ECC_MASK) {
case ACC_ECC_BIT:
can_err_prot_type |= CAN_ERR_PROT_BIT;
break;
case ACC_ECC_FORM:
can_err_prot_type |= CAN_ERR_PROT_FORM;
break;
case ACC_ECC_STUFF:
can_err_prot_type |= CAN_ERR_PROT_STUFF;
break;
default:
can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
break;
}
skb = alloc_can_err_skb(core->netdev, &cf);
if (!skb)
return;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
/* Set protocol error type */
cf->data[2] = can_err_prot_type;
/* Set error location */
cf->data[3] = msg->ecc & ACC_ECC_SEG;
/* Insert CAN TX and RX error counters. */
cf->data[6] = txerr;
cf->data[7] = rxerr;
skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
netif_rx(skb);
}
static void
handle_core_msg_errstatechange(struct acc_core *core,
const struct acc_bmmsg_errstatechange *msg)
{
struct acc_net_priv *priv = netdev_priv(core->netdev);
struct can_frame *cf = NULL;
struct sk_buff *skb;
const u32 reg_status = msg->reg_status;
const u8 rxerr = reg_status;
const u8 txerr = (reg_status >> 8);
enum can_state new_state;
if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
new_state = CAN_STATE_BUS_OFF;
} else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
new_state = CAN_STATE_ERROR_PASSIVE;
} else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
new_state = CAN_STATE_ERROR_WARNING;
} else {
new_state = CAN_STATE_ERROR_ACTIVE;
if (priv->can.state == CAN_STATE_BUS_OFF) {
/* See comment in acc_set_mode() for CAN_MODE_START */
netif_wake_queue(core->netdev);
}
}
skb = alloc_can_err_skb(core->netdev, &cf);
if (new_state != priv->can.state) {
enum can_state tx_state, rx_state;
tx_state = (txerr >= rxerr) ?
new_state : CAN_STATE_ERROR_ACTIVE;
rx_state = (rxerr >= txerr) ?
new_state : CAN_STATE_ERROR_ACTIVE;
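		/* E.g. entering error-passive with txerr = 136 and rxerr = 0
		 * yields tx_state = CAN_STATE_ERROR_PASSIVE and
		 * rx_state = CAN_STATE_ERROR_ACTIVE.
		 */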
/* Always call can_change_state() to update the state
* even if alloc_can_err_skb() may have failed.
* can_change_state() can cope with a NULL cf pointer.
*/
can_change_state(core->netdev, cf, tx_state, rx_state);
}
if (skb) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
netif_rx(skb);
}
if (new_state == CAN_STATE_BUS_OFF) {
acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
can_bus_off(core->netdev);
}
}
static void handle_core_interrupt(struct acc_core *core)
{
u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;
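	/* The BM message FIFO ring holds 256 entries, so both the head index
	 * derived from local_irq_cnt and msg_fifo_tail wrap modulo 256.
	 */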
while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
const union acc_bmmsg *msg =
&core->bmfifo.messages[core->bmfifo.msg_fifo_tail];
switch (msg->msg_id) {
case BM_MSG_ID_RXTXDONE:
handle_core_msg_rxtxdone(core, &msg->rxtxdone);
break;
case BM_MSG_ID_TXABORT:
handle_core_msg_txabort(core, &msg->txabort);
break;
case BM_MSG_ID_OVERRUN:
handle_core_msg_overrun(core, &msg->overrun);
break;
case BM_MSG_ID_BUSERR:
handle_core_msg_buserr(core, &msg->buserr);
break;
case BM_MSG_ID_ERRPASSIVE:
case BM_MSG_ID_ERRWARN:
handle_core_msg_errstatechange(core,
&msg->errstatechange);
break;
default:
/* Ignore all other BM messages (like the CAN-FD messages) */
break;
}
core->bmfifo.msg_fifo_tail =
(core->bmfifo.msg_fifo_tail + 1) & 0xff;
}
}
/**
* acc_card_interrupt() - handle the interrupts of an esdACC FPGA
*
* @ov: overview module structure
* @cores: array of core structures
*
* This function handles all interrupts pending for the overview module and the
* CAN cores of the esdACC FPGA.
*
 * For each core (the overview module and the CAN cores) it examines
 * bmfifo.irq_cnt and compares it with the previously saved
 * bmfifo.local_irq_cnt. An IRQ is pending if they differ. The esdACC FPGA
 * updates the bmfifo.irq_cnt values by DMA.
*
 * The pending interrupts are masked by writing to the IRQ mask register at
 * ACC_OV_OF_BM_IRQ_MASK. This register contains a two-bit command field for
 * each core, evaluated as follows:
*
* Define, bit pattern: meaning
* 00: no action
* ACC_BM_IRQ_UNMASK, 01: unmask interrupt
* ACC_BM_IRQ_MASK, 10: mask interrupt
* 11: no action
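 *
 * The two-bit field for CAN core i sits at bit position 2 * (i + 1);
 * field 0 belongs to the overview module. E.g. masking the IRQ of
 * core 0 means writing ACC_BM_IRQ_MASK << 2.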
*
* For each CAN core with a pending IRQ handle_core_interrupt() handles all
* busmaster messages from the message FIFO. The last handled message (FIFO
* index) is written to the CAN core to acknowledge its handling.
*
* Last step is to unmask all interrupts in the FPGA using
* ACC_BM_IRQ_UNMASK_ALL.
*
* Return:
* IRQ_HANDLED, if card generated an interrupt that was handled
* IRQ_NONE, if the interrupt is not ours
*/
irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
{
u32 irqmask;
int i;
	/* First, determine for whom interrupts are pending: the card/overview
	 * module or any of the cores. Two bits in irqmask are used for each;
	 * a two-bit field is set to ACC_BM_IRQ_MASK if an IRQ is pending.
	 */
irqmask = 0U;
if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
irqmask |= ACC_BM_IRQ_MASK;
ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
}
for (i = 0; i < ov->active_cores; i++) {
struct acc_core *core = &cores[i];
if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
}
}
if (!irqmask)
return IRQ_NONE;
	/* Second, tell the card which interrupts we're working on by writing
	 * irqmask, call handle_{ov|core}_interrupt() and then acknowledge the
	 * interrupts by writing irq_cnt:
	 */
acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
if (irqmask & ACC_BM_IRQ_MASK) {
/* handle_ov_interrupt(); - no use yet. */
acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
ov->bmfifo.local_irq_cnt);
}
for (i = 0; i < ov->active_cores; i++) {
struct acc_core *core = &cores[i];
if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
handle_core_interrupt(core);
acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER,
core->bmfifo.local_irq_cnt);
}
}
acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);
return IRQ_HANDLED;
}