GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/powerpc/pseries/phyp_llan.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2013 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <powerpc/pseries/phyp-hvcall.h>

#define LLAN_MAX_RX_PACKETS 100
#define LLAN_MAX_TX_PACKETS 100
#define LLAN_RX_BUF_LEN 8*PAGE_SIZE

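/*
 * Buffer descriptors handed to the hypervisor are assembled by the code
 * below as LLAN_BUFDESC_VALID | (length << 32) | physical address; see
 * llan_init(), llan_add_rxbuf() and llan_send_packet().
 */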
#define LLAN_BUFDESC_VALID (1ULL << 63)
#define LLAN_ADD_MULTICAST 0x1
#define LLAN_DEL_MULTICAST 0x2
#define LLAN_CLEAR_MULTICAST 0x3

struct llan_xfer {
        struct mbuf *rx_mbuf;
        bus_dmamap_t rx_dmamap;
        uint64_t rx_bufdesc;
};

struct llan_receive_queue_entry { /* PAPR page 539 */
        uint8_t control;
        uint8_t reserved;
        uint16_t offset;
        uint32_t length;
        uint64_t handle;
} __packed;

struct llan_softc {
        device_t dev;
        struct mtx io_lock;

        cell_t unit;
        uint8_t mac_address[8];

        struct ifmedia media;

        int irqid;
        struct resource *irq;
        void *irq_cookie;

        bus_dma_tag_t rx_dma_tag;
        bus_dma_tag_t rxbuf_dma_tag;
        bus_dma_tag_t tx_dma_tag;

        bus_dmamap_t tx_dma_map;

        struct llan_receive_queue_entry *rx_buf;
        int rx_dma_slot;
        int rx_valid_val;
        bus_dmamap_t rx_buf_map;
        bus_addr_t rx_buf_phys;
        bus_size_t rx_buf_len;
        bus_addr_t input_buf_phys;
        bus_addr_t filter_buf_phys;
        struct llan_xfer rx_xfer[LLAN_MAX_RX_PACKETS];

        struct ifnet *ifp;
};

static int llan_probe(device_t);
static int llan_attach(device_t);
static void llan_intr(void *xsc);
static void llan_init(void *xsc);
static void llan_start(struct ifnet *ifp);
static int llan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void llan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static int llan_media_change(struct ifnet *ifp);
static void llan_rx_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs,
    int err);
static int llan_add_rxbuf(struct llan_softc *sc, struct llan_xfer *rx);
static int llan_set_multicast(struct llan_softc *sc);

static device_method_t llan_methods[] = {
        DEVMETHOD(device_probe, llan_probe),
        DEVMETHOD(device_attach, llan_attach),

        DEVMETHOD_END
};

static driver_t llan_driver = {
        "llan",
        llan_methods,
        sizeof(struct llan_softc)
};

DRIVER_MODULE(llan, vdevice, llan_driver, 0, 0);

static int
llan_probe(device_t dev)
{
        if (!ofw_bus_is_compatible(dev,"IBM,l-lan"))
                return (ENXIO);

        device_set_desc(dev, "POWER Hypervisor Virtual Ethernet");
        return (0);
}

static int
llan_attach(device_t dev)
{
        struct llan_softc *sc;
        phandle_t node;
        int i;
        ssize_t len;

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* Get firmware properties */
        node = ofw_bus_get_node(dev);
        len = OF_getprop(node, "local-mac-address", sc->mac_address,
            sizeof(sc->mac_address));
        /* If local-mac-address property has only 6 bytes (ETHER_ADDR_LEN)
         * instead of 8 (sizeof(sc->mac_address)), then its value must be
         * shifted 2 bytes to the right. */
        if (len == ETHER_ADDR_LEN) {
                bcopy(sc->mac_address, &sc->mac_address[2], len);
                /* Zero out the first 2 bytes. */
                bzero(sc->mac_address, 2);
        }
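        /*
         * The address stays right-justified in the 8-byte buffer:
         * llan_init() passes it to H_REGISTER_LOGICAL_LAN as a single
         * 64-bit value, while ether_ifattach() below is handed
         * &sc->mac_address[2].
         */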
        OF_getencprop(node, "reg", &sc->unit, sizeof(sc->unit));

        mtx_init(&sc->io_lock, "llan", NULL, MTX_DEF);

        /* Setup interrupt */
        sc->irqid = 0;
        sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
            RF_ACTIVE);

        if (!sc->irq) {
                device_printf(dev, "Could not allocate IRQ\n");
                mtx_destroy(&sc->io_lock);
                return (ENXIO);
        }

        bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE |
            INTR_ENTROPY, NULL, llan_intr, sc, &sc->irq_cookie);

        /* Setup DMA */
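        /*
         * Three tags: rx_dma_tag maps the single contiguous receive queue /
         * buffer area, rxbuf_dma_tag maps individual receive mbufs, and
         * tx_dma_tag maps outgoing mbuf chains of up to six segments
         * (matching the six descriptors passed to H_SEND_LOGICAL_LAN),
         * serialized by io_lock.
         */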
        bus_dma_tag_create(bus_get_dma_tag(dev), 16, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            LLAN_RX_BUF_LEN, 1, BUS_SPACE_MAXSIZE_32BIT,
            0, NULL, NULL, &sc->rx_dma_tag);
        bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE, 1, BUS_SPACE_MAXSIZE_32BIT,
            0, NULL, NULL, &sc->rxbuf_dma_tag);
        bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE, 6, BUS_SPACE_MAXSIZE_32BIT, 0,
            busdma_lock_mutex, &sc->io_lock, &sc->tx_dma_tag);

        bus_dmamem_alloc(sc->rx_dma_tag, (void **)&sc->rx_buf,
            BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx_buf_map);
        bus_dmamap_load(sc->rx_dma_tag, sc->rx_buf_map, sc->rx_buf,
            LLAN_RX_BUF_LEN, llan_rx_load_cb, sc, 0);

        /* TX DMA maps */
        bus_dmamap_create(sc->tx_dma_tag, 0, &sc->tx_dma_map);

        /* RX DMA */
        for (i = 0; i < LLAN_MAX_RX_PACKETS; i++) {
                bus_dmamap_create(sc->rxbuf_dma_tag, 0,
                    &sc->rx_xfer[i].rx_dmamap);
                sc->rx_xfer[i].rx_mbuf = NULL;
        }

        /* Attach to network stack */
        sc->ifp = if_alloc(IFT_ETHER);
        if_setsoftc(sc->ifp, sc);

        if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
        if_setmtu(sc->ifp, ETHERMTU); /* XXX max-frame-size from OF? */
        if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
        if_sethwassist(sc->ifp, 0); /* XXX: ibm,illan-options */
        if_setcapabilities(sc->ifp, 0);
        if_setcapenable(sc->ifp, 0);
        if_setstartfn(sc->ifp, llan_start);
        if_setioctlfn(sc->ifp, llan_ioctl);
        if_setinitfn(sc->ifp, llan_init);

        ifmedia_init(&sc->media, IFM_IMASK, llan_media_change,
            llan_media_status);
        ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

        if_setsendqlen(sc->ifp, LLAN_MAX_RX_PACKETS);
        if_setsendqready(sc->ifp);

        ether_ifattach(sc->ifp, &sc->mac_address[2]);

        /* We don't have link state reporting, so make it always up */
        if_link_state_change(sc->ifp, LINK_STATE_UP);

        return (0);
}

static int
llan_media_change(struct ifnet *ifp)
{
        struct llan_softc *sc = if_getsoftc(ifp);

        if (IFM_TYPE(sc->media.ifm_media) != IFM_ETHER)
                return (EINVAL);

        if (IFM_SUBTYPE(sc->media.ifm_media) != IFM_AUTO)
                return (EINVAL);

        return (0);
}

static void
llan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{

        ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE | IFM_UNKNOWN | IFM_FDX;
        ifmr->ifm_active = IFM_ETHER;
}

static void
llan_rx_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
{
        struct llan_softc *sc = xsc;

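        /*
         * Carve up the single LLAN_RX_BUF_LEN allocation: everything except
         * the last two pages is the receive queue itself, the final page is
         * the input buffer area passed to H_REGISTER_LOGICAL_LAN, and the
         * page before it holds the multicast filter list.
         */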
        sc->rx_buf_phys = segs[0].ds_addr;
        sc->rx_buf_len = segs[0].ds_len - 2*PAGE_SIZE;
        sc->input_buf_phys = segs[0].ds_addr + segs[0].ds_len - PAGE_SIZE;
        sc->filter_buf_phys = segs[0].ds_addr + segs[0].ds_len - 2*PAGE_SIZE;
}

static void
llan_init(void *xsc)
{
        struct llan_softc *sc = xsc;
        uint64_t rx_buf_desc;
        uint64_t macaddr;
        int i;

        mtx_lock(&sc->io_lock);

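        /*
         * Drop any existing logical LAN registration before registering the
         * receive queue and buffers again; llan_init() may run more than
         * once over the life of the interface.
         */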
        phyp_hcall(H_FREE_LOGICAL_LAN, sc->unit);

        /* Create buffers (page 539) */
        sc->rx_dma_slot = 0;
        sc->rx_valid_val = 1;

        rx_buf_desc = LLAN_BUFDESC_VALID;
        rx_buf_desc |= (sc->rx_buf_len << 32);
        rx_buf_desc |= sc->rx_buf_phys;
        memcpy(&macaddr, sc->mac_address, 8);
        phyp_hcall(H_REGISTER_LOGICAL_LAN, sc->unit, sc->input_buf_phys,
            rx_buf_desc, sc->filter_buf_phys, macaddr);

        for (i = 0; i < LLAN_MAX_RX_PACKETS; i++)
                llan_add_rxbuf(sc, &sc->rx_xfer[i]);

        phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */

        /* Tell stack we're up */
        if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

        mtx_unlock(&sc->io_lock);

        /* Check for pending receives scheduled before interrupt enable */
        llan_intr(sc);
}

static int
llan_add_rxbuf(struct llan_softc *sc, struct llan_xfer *rx)
{
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        int error, nsegs;

        mtx_assert(&sc->io_lock, MA_OWNED);

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);

        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
        if (rx->rx_mbuf != NULL) {
                bus_dmamap_sync(sc->rxbuf_dma_tag, rx->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->rxbuf_dma_tag, rx->rx_dmamap);
        }

        /* Save pointer to buffer structure */
        m_copyback(m, 0, 8, (void *)&rx);
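        /*
         * These first 8 bytes come back as the 'handle' field of the
         * receive queue entry, which llan_intr() casts back to the owning
         * llan_xfer.
         */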

        error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dma_tag, rx->rx_dmamap, m,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->dev,
                    "cannot load RX DMA map %p, error = %d\n", rx, error);
                m_freem(m);
                return (error);
        }

        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs == 1,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        rx->rx_mbuf = m;

        bus_dmamap_sync(sc->rxbuf_dma_tag, rx->rx_dmamap, BUS_DMASYNC_PREREAD);

        rx->rx_bufdesc = LLAN_BUFDESC_VALID;
        rx->rx_bufdesc |= (((uint64_t)segs[0].ds_len) << 32);
        rx->rx_bufdesc |= segs[0].ds_addr;
        error = phyp_hcall(H_ADD_LOGICAL_LAN_BUFFER, sc->unit, rx->rx_bufdesc);
        if (error != 0) {
                m_freem(m);
                rx->rx_mbuf = NULL;
                return (ENOBUFS);
        }

        return (0);
}

static void
llan_intr(void *xsc)
{
        struct llan_softc *sc = xsc;
        struct llan_xfer *rx;
        struct mbuf *m;

        mtx_lock(&sc->io_lock);
restart:
        phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);

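        /*
         * The receive queue is consumed as a ring: the top bit of each
         * entry's control byte acts as a valid toggle, so an entry is ready
         * when that bit matches rx_valid_val, and rx_valid_val is flipped
         * every time the ring wraps (see the wrap handling below).
         */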
        while ((sc->rx_buf[sc->rx_dma_slot].control >> 7) == sc->rx_valid_val) {
                rx = (struct llan_xfer *)sc->rx_buf[sc->rx_dma_slot].handle;
                m = rx->rx_mbuf;
                m_adj(m, sc->rx_buf[sc->rx_dma_slot].offset - 8);
                m->m_len = sc->rx_buf[sc->rx_dma_slot].length;

                /* llan_add_rxbuf does DMA sync and unload as well as requeue */
                if (llan_add_rxbuf(sc, rx) != 0) {
                        if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
                        continue;
                }

                if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
                m_adj(m, sc->rx_buf[sc->rx_dma_slot].offset);
                m->m_len = sc->rx_buf[sc->rx_dma_slot].length;
                m->m_pkthdr.rcvif = sc->ifp;
                m->m_pkthdr.len = m->m_len;
                sc->rx_dma_slot++;

                if (sc->rx_dma_slot >= sc->rx_buf_len/sizeof(sc->rx_buf[0])) {
                        sc->rx_dma_slot = 0;
                        sc->rx_valid_val = !sc->rx_valid_val;
                }

                mtx_unlock(&sc->io_lock);
                if_input(sc->ifp, m);
                mtx_lock(&sc->io_lock);
        }

        phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);

        /*
         * H_VIO_SIGNAL enables interrupts for future packets only.
         * Make sure none were queued between the end of the loop and the
         * enable interrupts call.
         */
        if ((sc->rx_buf[sc->rx_dma_slot].control >> 7) == sc->rx_valid_val)
                goto restart;

        mtx_unlock(&sc->io_lock);
}

static void
llan_send_packet(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
        struct llan_softc *sc = xsc;
        uint64_t bufdescs[6];
        int i, err;

        bzero(bufdescs, sizeof(bufdescs));

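        /*
         * H_SEND_LOGICAL_LAN is passed six buffer descriptor arguments;
         * unused slots stay zeroed and thus lack LLAN_BUFDESC_VALID.
         */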
        for (i = 0; i < nsegs; i++) {
                bufdescs[i] = LLAN_BUFDESC_VALID;
                bufdescs[i] |= (((uint64_t)segs[i].ds_len) << 32);
                bufdescs[i] |= segs[i].ds_addr;
        }

        err = phyp_hcall(H_SEND_LOGICAL_LAN, sc->unit, bufdescs[0],
            bufdescs[1], bufdescs[2], bufdescs[3], bufdescs[4], bufdescs[5], 0);
        /*
         * The hypercall returning implies completion -- or that the call will
         * not complete. In principle, we should try a few times if we get back
         * H_BUSY based on the continuation token in R4. For now, just drop
         * the packet in such cases.
         */
        if (err == H_SUCCESS)
                if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
        else
                if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
}

static void
llan_start_locked(struct ifnet *ifp)
{
        struct llan_softc *sc = if_getsoftc(ifp);
        int nsegs;
        struct mbuf *mb_head, *m;

        mtx_assert(&sc->io_lock, MA_OWNED);

        if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;

        while (!if_sendq_empty(ifp)) {
                mb_head = if_dequeue(ifp);

                if (mb_head == NULL)
                        break;

                BPF_MTAP(ifp, mb_head);

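                /*
                 * The send hypercall (and the TX DMA tag) handles at most
                 * six segments, so collapse longer chains before loading
                 * the map.
                 */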
                for (m = mb_head, nsegs = 0; m != NULL; m = m->m_next)
                        nsegs++;
                if (nsegs > 6) {
                        m = m_collapse(mb_head, M_NOWAIT, 6);
                        if (m == NULL) {
                                m_freem(mb_head);
                                continue;
                        }
                }

                bus_dmamap_load_mbuf(sc->tx_dma_tag, sc->tx_dma_map,
                    mb_head, llan_send_packet, sc, 0);
                bus_dmamap_unload(sc->tx_dma_tag, sc->tx_dma_map);
                m_freem(mb_head);
        }
}

static void
llan_start(struct ifnet *ifp)
{
        struct llan_softc *sc = if_getsoftc(ifp);

        mtx_lock(&sc->io_lock);
        llan_start_locked(ifp);
        mtx_unlock(&sc->io_lock);
}

static u_int
llan_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
        struct llan_softc *sc = arg;
        uint64_t macaddr = 0;

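        /*
         * As with the unicast address, the multicast MAC is passed to the
         * hypervisor right-justified in a 64-bit argument.
         */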
        memcpy((uint8_t *)&macaddr + 2, LLADDR(sdl), 6);
        phyp_hcall(H_MULTICAST_CTRL, sc->unit, LLAN_ADD_MULTICAST, macaddr);

        return (1);
}

static int
llan_set_multicast(struct llan_softc *sc)
{
        struct ifnet *ifp = sc->ifp;

        mtx_assert(&sc->io_lock, MA_OWNED);

        phyp_hcall(H_MULTICAST_CTRL, sc->unit, LLAN_CLEAR_MULTICAST, 0);

        if_foreach_llmaddr(ifp, llan_set_maddr, sc);

        return (0);
}

static int
llan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        int err = 0;
        struct llan_softc *sc = if_getsoftc(ifp);

        switch (cmd) {
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                mtx_lock(&sc->io_lock);
                if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
                        llan_set_multicast(sc);
                mtx_unlock(&sc->io_lock);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                err = ifmedia_ioctl(ifp, (struct ifreq *)data, &sc->media, cmd);
                break;
        case SIOCSIFFLAGS:
        default:
                err = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (err);
}