/* $NetBSD: if_mvxpevar.h,v 1.9 2022/04/04 19:33:45 andvar Exp $ */
/*
* Copyright (c) 2015 Internet Initiative Japan Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _IF_MVXPEVAR_H_
#define _IF_MVXPEVAR_H_
#include <net/if.h>
#include <dev/marvell/mvxpbmvar.h>
/*
* Limit of packet sizes.
*/
#define MVXPE_HWHEADER_SIZE 2 /* Marvell Header */
#define MVXPE_MRU 2000 /* Max Receive Unit */
#define MVXPE_MTU MVXPE_MRU /* Max Transmit Unit */
/*
* Default limit of queue length
*
* queue 0 is lowest priority and queue 7 is highest priority.
*
 * XXX: packet classifier is not implemented yet
*/
#define MVXPE_RX_QUEUE_LIMIT_0 IFQ_MAXLEN
#define MVXPE_RX_QUEUE_LIMIT_1 8
#define MVXPE_RX_QUEUE_LIMIT_2 8
#define MVXPE_RX_QUEUE_LIMIT_3 8
#define MVXPE_RX_QUEUE_LIMIT_4 8
#define MVXPE_RX_QUEUE_LIMIT_5 8
#define MVXPE_RX_QUEUE_LIMIT_6 8
#define MVXPE_RX_QUEUE_LIMIT_7 8
#define MVXPE_TX_QUEUE_LIMIT_0 IFQ_MAXLEN
#define MVXPE_TX_QUEUE_LIMIT_1 8
#define MVXPE_TX_QUEUE_LIMIT_2 8
#define MVXPE_TX_QUEUE_LIMIT_3 8
#define MVXPE_TX_QUEUE_LIMIT_4 8
#define MVXPE_TX_QUEUE_LIMIT_5 8
#define MVXPE_TX_QUEUE_LIMIT_6 8
#define MVXPE_TX_QUEUE_LIMIT_7 8
/* an interrupt is triggered when crossing (queuelen / RATIO) */
#define MVXPE_RXTH_RATIO 8
#define MVXPE_RXTH_REFILL_RATIO 2
#define MVXPE_TXTH_RATIO 8
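/*
 * Worked example (illustrative, hypothetical queue length): a queue of
 * 64 descriptors with MVXPE_RXTH_RATIO 8 gives an interrupt threshold
 * of 64 / 8 = 8 received frames.
 */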
/*
* Device Register access
*/
#define MVXPE_READ(sc, reg) \
bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVXPE_WRITE(sc, reg, val) \
bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define MVXPE_READ_REGION(sc, reg, val, c) \
bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVXPE_WRITE_REGION(sc, reg, val, c) \
bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVXPE_READ_MIB(sc, reg) \
bus_space_read_4((sc)->sc_iot, (sc)->sc_mibh, (reg))
#define MVXPE_IS_LINKUP(sc) \
(MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP)
#define MVXPE_IS_QUEUE_BUSY(queues, q) \
((((queues) >> (q)) & 0x1))
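/*
 * Usage sketch (illustrative, not verbatim driver code): read the port
 * status register and test a per-queue bitmap with the macros above.
 *
 *	uint32_t psr = MVXPE_READ(sc, MVXPE_PSR);
 *	int linkup = MVXPE_IS_LINKUP(sc);
 *	int busy = MVXPE_IS_QUEUE_BUSY(queues, q);
 */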
/*
 * EEE: Low Power Idle config
 * The default timer is the duration of an MTU-sized frame transmission.
 * The timers can be negotiated via the LLDP protocol, but we have no
 * support for that.
*/
#define MVXPE_LPI_TS (MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_TW (MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_LI (MVXPE_MRU * 8 / 1000) /* [us] */
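/*
 * Worked example: with MVXPE_MRU = 2000 bytes, 2000 * 8 / 1000 = 16,
 * i.e. each timer above defaults to 16 [us] (the time an MTU-sized
 * frame takes on the wire, assuming a 1 Gb/s link, 1 bit per ns).
 */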
/*
* DMA Descriptor
*
 * The ethernet device has 8 rx/tx DMA queues. Each queue has its own
 * descriptor list. Descriptors are simply indexed by a counter inside the device.
*/
#define MVXPE_TX_RING_CNT IFQ_MAXLEN
#define MVXPE_TX_RING_MSK (MVXPE_TX_RING_CNT - 1)
#define MVXPE_TX_RING_NEXT(x) (((x) + 1) & MVXPE_TX_RING_MSK)
#define MVXPE_RX_RING_CNT IFQ_MAXLEN
#define MVXPE_RX_RING_MSK (MVXPE_RX_RING_CNT - 1)
#define MVXPE_RX_RING_NEXT(x) (((x) + 1) & MVXPE_RX_RING_MSK)
#define MVXPE_TX_SEGLIMIT 32
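/*
 * Note: the mask-based wrap in MVXPE_TX_RING_NEXT()/MVXPE_RX_RING_NEXT()
 * is only correct when the ring counts are powers of two.
 * Illustrative use, advancing a CPU-owned index by one slot:
 *
 *	ring->tx_cpu = MVXPE_TX_RING_NEXT(ring->tx_cpu);
 */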
struct mvxpe_rx_ring {
	/* Real descriptor array, shared with RxDMA */
struct mvxpe_rx_desc *rx_descriptors;
bus_dmamap_t rx_descriptors_map;
	/* Management entries, one per descriptor */
struct mvxpe_rx_handle {
struct mvxpe_rx_desc *rxdesc_va;
off_t rxdesc_off; /* from rx_descriptors[0] */
struct mvxpbm_chunk *chunk;
} rx_handle[MVXPE_RX_RING_CNT];
/* locks */
kmutex_t rx_ring_mtx;
/* Index */
int rx_dma;
int rx_cpu;
/* Limit */
int rx_queue_len;
int rx_queue_th_received;
int rx_queue_th_free;
int rx_queue_th_time; /* [Tclk] */
};
struct mvxpe_tx_ring {
	/* Real descriptor array, shared with TxDMA */
struct mvxpe_tx_desc *tx_descriptors;
bus_dmamap_t tx_descriptors_map;
	/* Management entries, one per descriptor */
struct mvxpe_tx_handle {
struct mvxpe_tx_desc *txdesc_va;
off_t txdesc_off; /* from tx_descriptors[0] */
struct mbuf *txdesc_mbuf;
bus_dmamap_t txdesc_mbuf_map;
} tx_handle[MVXPE_TX_RING_CNT];
/* locks */
kmutex_t tx_ring_mtx;
/* Index */
int tx_used;
int tx_dma;
int tx_cpu;
/* Limit */
int tx_queue_len;
int tx_queue_th_free;
};
static __inline int
tx_counter_adv(int ctr, int n)
{
/* XXX: lock or atomic */
ctr += n;
while (ctr >= MVXPE_TX_RING_CNT)
ctr -= MVXPE_TX_RING_CNT;
return ctr;
}
static __inline int
rx_counter_adv(int ctr, int n)
{
/* XXX: lock or atomic */
ctr += n;
	while (ctr >= MVXPE_RX_RING_CNT)
		ctr -= MVXPE_RX_RING_CNT;
return ctr;
}
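/*
 * Illustrative sketch (not verbatim driver code): after handing n new
 * descriptors of a queue to the device, the indices would typically be
 * advanced as
 *
 *	tx->tx_cpu = tx_counter_adv(tx->tx_cpu, n);
 *	tx->tx_used += n;
 *
 * where tx is a struct mvxpe_tx_ring * and n is the segment count.
 */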
/*
* Timeout control
*/
#define MVXPE_PHY_TIMEOUT 10000 /* msec */
#define RX_DISABLE_TIMEOUT 0x1000000 /* times */
#define TX_DISABLE_TIMEOUT 0x1000000 /* times */
#define TX_FIFO_EMPTY_TIMEOUT 0x1000000 /* times */
/*
* Event counter
*/
#ifdef MVXPE_EVENT_COUNTERS
#define MVXPE_EVCNT_INCR(ev) (ev)->ev_count++
#define MVXPE_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
#else
#define MVXPE_EVCNT_INCR(ev) /* nothing */
#define MVXPE_EVCNT_ADD(ev, val) /* nothing */
#endif
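/*
 * Example (illustrative): handlers bump counters through these macros,
 * which compile away when MVXPE_EVENT_COUNTERS is not defined.
 * nrx below is a hypothetical per-queue packet count.
 *
 *	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
 *	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_drv_rxq[q], nrx);
 */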
struct mvxpe_evcnt {
/*
* Master Interrupt Handler
*/
struct evcnt ev_i_rxtxth;
struct evcnt ev_i_rxtx;
struct evcnt ev_i_misc;
/*
* RXTXTH Interrupt
*/
struct evcnt ev_rxtxth_txerr;
/*
* MISC Interrupt
*/
struct evcnt ev_misc_phystatuschng;
struct evcnt ev_misc_linkchange;
struct evcnt ev_misc_iae;
struct evcnt ev_misc_rxoverrun;
struct evcnt ev_misc_rxcrc;
struct evcnt ev_misc_rxlargepacket;
struct evcnt ev_misc_txunderrun;
struct evcnt ev_misc_prbserr;
struct evcnt ev_misc_srse;
struct evcnt ev_misc_txreq;
/*
* RxTx Interrupt
*/
struct evcnt ev_rxtx_rreq;
struct evcnt ev_rxtx_rpq;
struct evcnt ev_rxtx_tbrq;
struct evcnt ev_rxtx_rxtxth;
struct evcnt ev_rxtx_txerr;
struct evcnt ev_rxtx_misc;
/*
* Link
*/
struct evcnt ev_link_up;
struct evcnt ev_link_down;
/*
* Rx Descriptor
*/
struct evcnt ev_rxd_ce;
struct evcnt ev_rxd_or;
struct evcnt ev_rxd_mf;
struct evcnt ev_rxd_re;
struct evcnt ev_rxd_scat;
/*
* Tx Descriptor
*/
struct evcnt ev_txd_lc;
struct evcnt ev_txd_ur;
struct evcnt ev_txd_rl;
struct evcnt ev_txd_oth;
/*
* Status Registers
*/
struct evcnt ev_reg_pdfc; /* Rx Port Discard Frame Counter */
struct evcnt ev_reg_pofc; /* Rx Port Overrun Frame Counter */
struct evcnt ev_reg_txbadfcs; /* Tx BAD FCS Counter */
struct evcnt ev_reg_txdropped; /* Tx Dropped Counter */
struct evcnt ev_reg_lpic;
/* Device Driver Errors */
struct evcnt ev_drv_wdogsoft;
struct evcnt ev_drv_txerr;
struct evcnt ev_drv_rxq[MVXPE_QUEUE_SIZE];
struct evcnt ev_drv_rxqe[MVXPE_QUEUE_SIZE];
struct evcnt ev_drv_txq[MVXPE_QUEUE_SIZE];
struct evcnt ev_drv_txqe[MVXPE_QUEUE_SIZE];
};
/*
* Debug
*/
#ifdef MVXPE_DEBUG
#define DPRINTF(fmt, ...) \
do { \
if (mvxpe_debug >= 1) { \
printf("%s: ", __func__); \
printf((fmt), ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
do { \
if (mvxpe_debug >= (level)) { \
printf("%s: ", __func__); \
printf((fmt), ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
do { \
if (mvxpe_debug >= (level)) { \
device_printf((dev), \
"%s: "fmt , __func__, ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
do { \
device_t dev = (sc)->sc_dev; \
if (mvxpe_debug >= (level)) { \
device_printf(dev, \
"%s: " fmt, __func__, ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#define DPRINTIFNET(ifp, level, fmt, ...) \
do { \
const char *xname = (ifp)->if_xname; \
if (mvxpe_debug >= (level)) { \
printf("%s: %s: " fmt, xname, __func__, ##__VA_ARGS__);\
} \
} while (/*CONSTCOND*/0)
#define DPRINTPRXS(level, q) \
do { \
uint32_t _reg = MVXPE_READ(sc, MVXPE_PRXS(q)); \
if (mvxpe_debug >= (level)) { \
printf("PRXS(queue %d) %#x: Occupied %d, NoOccupied %d.\n", \
q, _reg, MVXPE_PRXS_GET_ODC(_reg), \
MVXPE_PRXS_GET_NODC(_reg)); \
} \
} while (/*CONSTCOND*/0)
#else
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#define DPRINTIFNET(ifp, level, fmt, ...)
#define DPRINTPRXS(level, q)
#endif
#define KASSERT_SC_MTX(sc) \
KASSERT(mutex_owned(&(sc)->sc_mtx))
#define KASSERT_BM_MTX(sc) \
KASSERT(mutex_owned(&(sc)->sc_bm.bm_mtx))
#define KASSERT_RX_MTX(sc, q) \
KASSERT(mutex_owned(&(sc)->sc_rx_ring[(q)].rx_ring_mtx))
#define KASSERT_TX_MTX(sc, q) \
KASSERT(mutex_owned(&(sc)->sc_tx_ring[(q)].tx_ring_mtx))
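/*
 * Illustrative sketch: a per-queue RX routine would typically assert
 * its ring lock on entry before touching sc_rx_ring[q]:
 *
 *	KASSERT_RX_MTX(sc, q);
 */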
/*
* Configuration parameters
*/
struct mvxpe_conf {
int cf_lpi; /* EEE Low Power IDLE enable */
int cf_fc; /* Flow Control enable */
};
/*
* sysctl(9) parameters
*/
struct mvxpe_softc;
struct mvxpe_sysctl_queue {
struct mvxpe_softc *sc;
int rxtx;
int queue;
};
#define MVXPE_SYSCTL_RX 0
#define MVXPE_SYSCTL_TX 1
struct mvxpe_sysctl_mib {
struct mvxpe_softc *sc;
int index;
uint64_t counter;
};
/*
* Ethernet Device main context
*/
struct mvxpe_softc {
device_t sc_dev;
int sc_port;
uint32_t sc_version;
/*
 * sc_mtx must be held by interface functions called to/from
 * other frameworks: the interrupt handler, sysctl handler,
 * ioctl handler, and so on.
*/
kmutex_t sc_mtx;
/*
* Ethernet facilities
*/
struct ethercom sc_ethercom;
struct mii_data sc_mii;
u_int8_t sc_enaddr[ETHER_ADDR_LEN]; /* station addr */
u_short sc_if_flags;
int sc_wdogsoft;
/*
* Configuration Parameters
*/
struct mvxpe_conf sc_cf;
/*
* I/O Spaces
*/
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh; /* all registers handle */
bus_space_handle_t sc_mibh; /* mib counter handle */
/*
* DMA Spaces
*/
bus_dma_tag_t sc_dmat;
struct mvxpe_rx_ring sc_rx_ring[MVXPE_QUEUE_SIZE];
struct mvxpe_tx_ring sc_tx_ring[MVXPE_QUEUE_SIZE];
	int sc_tx_pending; /* total number of pending Tx packets */
/*
* Software Buffer Manager
*/
struct mvxpbm_softc *sc_bm;
/*
* Maintenance clock
*/
callout_t sc_tick_ch; /* tick callout */
/*
* Link State control
*/
uint32_t sc_linkstate;
/*
 * Acts as a random source
*/
krndsource_t sc_rnd_source;
/*
* Sysctl interfaces
*/
struct sysctllog *sc_mvxpe_clog;
struct mvxpe_sysctl_queue sc_sysctl_rx_queue[MVXPE_QUEUE_SIZE];
struct mvxpe_sysctl_queue sc_sysctl_tx_queue[MVXPE_QUEUE_SIZE];
/*
* MIB counter
*/
size_t sc_sysctl_mib_size;
struct mvxpe_sysctl_mib *sc_sysctl_mib;
#ifdef MVXPE_EVENT_COUNTERS
/*
* Event counter
*/
struct mvxpe_evcnt sc_ev;
#endif
};
#define MVXPE_RX_RING_MEM_VA(sc, q) \
((sc)->sc_rx_ring[(q)].rx_descriptors)
#define MVXPE_RX_RING_MEM_PA(sc, q) \
((sc)->sc_rx_ring[(q)].rx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_RX_RING_MEM_MAP(sc, q) \
((sc)->sc_rx_ring[(q)].rx_descriptors_map)
#define MVXPE_RX_RING(sc, q) \
(&(sc)->sc_rx_ring[(q)])
#define MVXPE_RX_HANDLE(sc, q, i) \
(&(sc)->sc_rx_ring[(q)].rx_handle[(i)])
#define MVXPE_RX_DESC(sc, q, i) \
((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_va)
#define MVXPE_RX_DESC_OFF(sc, q, i) \
((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_off)
#define MVXPE_RX_PKTBUF(sc, q, i) \
((sc)->sc_rx_ring[(q)].rx_handle[(i)].chunk)
#define MVXPE_TX_RING_MEM_VA(sc, q) \
((sc)->sc_tx_ring[(q)].tx_descriptors)
#define MVXPE_TX_RING_MEM_PA(sc, q) \
((sc)->sc_tx_ring[(q)].tx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_TX_RING_MEM_MAP(sc, q) \
((sc)->sc_tx_ring[(q)].tx_descriptors_map)
#define MVXPE_TX_RING(sc, q) \
(&(sc)->sc_tx_ring[(q)])
#define MVXPE_TX_HANDLE(sc, q, i) \
(&(sc)->sc_tx_ring[(q)].tx_handle[(i)])
#define MVXPE_TX_DESC(sc, q, i) \
((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_va)
#define MVXPE_TX_DESC_OFF(sc, q, i) \
((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_off)
#define MVXPE_TX_MBUF(sc, q, i) \
((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf)
#define MVXPE_TX_MAP(sc, q, i) \
((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf_map)
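/*
 * Usage sketch (illustrative only): a TX completion walk on queue q
 * combines the accessors above, e.g.
 *
 *	int i = MVXPE_TX_RING(sc, q)->tx_dma;
 *	struct mvxpe_tx_desc *txd = MVXPE_TX_DESC(sc, q, i);
 *	struct mbuf *m = MVXPE_TX_MBUF(sc, q, i);
 *	bus_dmamap_t map = MVXPE_TX_MAP(sc, q, i);
 */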
#endif /* _IF_MVXPEVAR_H_ */