/******************************************************************************
2
* netif.h
3
*
4
* Unified network-device I/O interface for Xen guest OSes.
5
*
6
* Permission is hereby granted, free of charge, to any person obtaining a copy
7
* of this software and associated documentation files (the "Software"), to
8
* deal in the Software without restriction, including without limitation the
9
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10
* sell copies of the Software, and to permit persons to whom the Software is
11
* furnished to do so, subject to the following conditions:
12
*
13
* The above copyright notice and this permission notice shall be included in
14
* all copies or substantial portions of the Software.
15
*
16
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22
* DEALINGS IN THE SOFTWARE.
23
*
24
* Copyright (c) 2003-2004, Keir Fraser
25
*/
26
27
#ifndef __XEN_PUBLIC_IO_NETIF_H__
#define __XEN_PUBLIC_IO_NETIF_H__

#include "ring.h"
#include "../grant_table.h"
32
33
/*
 * Older implementations of the Xen network frontend / backend have an
 * implicit dependency on MAX_SKB_FRAGS as the maximum number of ring
 * slots an skb can use. Netfront / netback may not work as expected
 * when frontend and backend have different MAX_SKB_FRAGS.
 *
 * A better approach is to add a mechanism for netfront / netback to
 * negotiate this value. However we cannot fix all possible frontends,
 * so we need to define a value which states the minimum number of
 * slots a backend must support.
 *
 * The minimum value derives from an older Linux kernel's MAX_SKB_FRAGS
 * (18), which is proven to work with most frontends. Any new backend
 * which doesn't negotiate with the frontend should expect the frontend
 * to send a valid packet using slots up to this value.
 */
#define XEN_NETIF_NR_SLOTS_MIN 18
50
51
/*
52
* Notifications after enqueuing any type of message should be conditional on
53
* the appropriate req_event or rsp_event field in the shared ring.
54
* If the client sends notification for rx requests then it should specify
55
* feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
56
* that it cannot safely queue packets (as it may not be kicked to send them).
57
*/
58
59
/*
60
* "feature-split-event-channels" is introduced to separate guest TX
61
* and RX notification. Backend either doesn't support this feature or
62
* advertises it via xenstore as 0 (disabled) or 1 (enabled).
63
*
64
* To make use of this feature, frontend should allocate two event
65
* channels for TX and RX, advertise them to backend as
66
* "event-channel-tx" and "event-channel-rx" respectively. If frontend
67
* doesn't want to use this feature, it just writes "event-channel"
68
* node as before.
69
*/
70
71
/*
72
* Multiple transmit and receive queues:
73
* If supported, the backend will write the key "multi-queue-max-queues" to
74
* the directory for that vif, and set its value to the maximum supported
75
* number of queues.
76
* Frontends that are aware of this feature and wish to use it can write the
77
* key "multi-queue-num-queues", set to the number they wish to use, which
78
* must be greater than zero, and no more than the value reported by the backend
79
* in "multi-queue-max-queues".
80
*
81
* Queues replicate the shared rings and event channels.
82
* "feature-split-event-channels" may optionally be used when using
83
* multiple queues, but is not mandatory.
84
*
85
* Each queue consists of one shared ring pair, i.e. there must be the same
86
* number of tx and rx rings.
87
*
88
* For frontends requesting just one queue, the usual event-channel and
89
* ring-ref keys are written as before, simplifying the backend processing
90
* to avoid distinguishing between a frontend that doesn't understand the
91
* multi-queue feature, and one that does, but requested only one queue.
92
*
93
* Frontends requesting two or more queues must not write the toplevel
94
* event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys,
95
* instead writing those keys under sub-keys having the name "queue-N" where
96
* N is the integer ID of the queue for which those keys belong. Queues
97
* are indexed from zero. For example, a frontend with two queues and split
98
* event channels must write the following set of queue-related keys:
99
*
100
* /local/domain/1/device/vif/0/multi-queue-num-queues = "2"
101
* /local/domain/1/device/vif/0/queue-0 = ""
102
* /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
103
* /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
104
* /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>"
105
* /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>"
106
* /local/domain/1/device/vif/0/queue-1 = ""
107
* /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
108
 * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1>"
109
* /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
110
* /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>"
111
*
112
* If there is any inconsistency in the XenStore data, the backend may
113
* choose not to connect any queues, instead treating the request as an
114
* error. This includes scenarios where more (or fewer) queues were
115
* requested than the frontend provided details for.
116
*
117
* Mapping of packets to queues is considered to be a function of the
118
* transmitting system (backend or frontend) and is not negotiated
119
* between the two. Guests are free to transmit packets on any queue
120
* they choose, provided it has been set up correctly. Guests must be
121
* prepared to receive packets on any queue they have requested be set up.
122
*/
123
124
/*
125
* "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
126
* offload off or on. If it is missing then the feature is assumed to be on.
127
* "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
128
* offload on or off. If it is missing then the feature is assumed to be off.
129
*/
130
131
/*
132
* "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
133
* handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
134
* frontends nor backends are assumed to be capable unless the flags are
135
* present.
136
*/
137
138
/*
139
* "feature-multicast-control" and "feature-dynamic-multicast-control"
140
* advertise the capability to filter ethernet multicast packets in the
141
* backend. If the frontend wishes to take advantage of this feature then
142
* it may set "request-multicast-control". If the backend only advertises
143
* "feature-multicast-control" then "request-multicast-control" must be set
144
* before the frontend moves into the connected state. The backend will
145
* sample the value on this state transition and any subsequent change in
146
* value will have no effect. However, if the backend also advertises
147
* "feature-dynamic-multicast-control" then "request-multicast-control"
148
* may be set by the frontend at any time. In this case, the backend will
149
* watch the value and re-sample on watch events.
150
*
151
* If the sampled value of "request-multicast-control" is set then the
152
* backend transmit side should no longer flood multicast packets to the
153
* frontend, it should instead drop any multicast packet that does not
154
* match in a filter list.
155
* The list is amended by the frontend by sending dummy transmit requests
156
* containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as
157
* specified below.
158
* Note that the filter list may be amended even if the sampled value of
159
* "request-multicast-control" is not set, however the filter should only
160
* be applied if it is set.
161
*/
162
163
/*
164
* Control ring
165
* ============
166
*
167
* Some features, such as hashing (detailed below), require a
168
* significant amount of out-of-band data to be passed from frontend to
169
* backend. Use of xenstore is not suitable for large quantities of data
170
* because of quota limitations and so a dedicated 'control ring' is used.
171
* The ability of the backend to use a control ring is advertised by
172
* setting:
173
*
174
* /local/domain/X/backend/vif/<domid>/<vif>/feature-ctrl-ring = "1"
175
*
176
* The frontend provides a control ring to the backend by setting:
177
*
178
* /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref>
179
* /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port>
180
*
181
* where <gref> is the grant reference of the shared page used to
182
* implement the control ring and <port> is an event channel to be used
183
* as a mailbox interrupt. These keys must be set before the frontend
184
* moves into the connected state.
185
*
186
* The control ring uses a fixed request/response message size and is
187
* balanced (i.e. one request to one response), so operationally it is much
188
* the same as a transmit or receive ring.
189
* Note that there is no requirement that responses are issued in the same
190
* order as requests.
191
*/
192
193
/*
194
* Link state
195
* ==========
196
*
197
* The backend can advertise its current link (carrier) state to the
198
* frontend using the /local/domain/X/backend/vif/<domid>/<vif>/carrier
199
* node. If this node is not present, then the frontend should assume that
200
* the link is up (for compatibility with backends that do not implement
201
* this feature). If this node is present, then a value of "0" should be
202
* interpreted by the frontend as the link being down (no carrier) and a
203
* value of "1" should be interpreted as the link being up (carrier
204
* present).
205
*/
206
207
/*
208
* MTU
209
* ===
210
*
211
* The toolstack may set a value of MTU for the frontend by setting the
212
* /local/domain/<domid>/device/vif/<vif>/mtu node with the MTU value in
213
* octets. If this node is absent the frontend should assume an MTU value
214
* of 1500 octets. A frontend is also at liberty to ignore this value so
215
* it is only suitable for informing the frontend that a packet payload
216
* >1500 octets is permitted.
217
*/
218
219
/*
220
* Hash types
221
* ==========
222
*
223
* For the purposes of the definitions below, 'Packet[]' is an array of
224
* octets containing an IP packet without options, 'Array[X..Y]' means a
225
* sub-array of 'Array' containing bytes X thru Y inclusive, and '+' is
226
* used to indicate concatenation of arrays.
227
*/
228
229
/*
 * A hash calculated over an IP version 4 header as follows:
 *
 * Buffer[0..8] = Packet[12..15] (source address) +
 *                Packet[16..19] (destination address)
 *
 * Result = Hash(Buffer, 8)
 */
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0
#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \
    (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4)

/*
 * A hash calculated over an IP version 4 header and TCP header as
 * follows:
 *
 * Buffer[0..12] = Packet[12..15] (source address) +
 *                 Packet[16..19] (destination address) +
 *                 Packet[20..21] (source port) +
 *                 Packet[22..23] (destination port)
 *
 * Result = Hash(Buffer, 12)
 */
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1
#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \
    (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)

/*
 * A hash calculated over an IP version 6 header as follows:
 *
 * Buffer[0..32] = Packet[8..23]  (source address) +
 *                 Packet[24..39] (destination address)
 *
 * Result = Hash(Buffer, 32)
 */
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2
#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \
    (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6)

/*
 * A hash calculated over an IP version 6 header and TCP header as
 * follows:
 *
 * Buffer[0..36] = Packet[8..23]  (source address) +
 *                 Packet[24..39] (destination address) +
 *                 Packet[40..41] (source port) +
 *                 Packet[42..43] (destination port)
 *
 * Result = Hash(Buffer, 36)
 */
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3
#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \
    (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
282
283
/*
 * Hash algorithms
 * ===============
 */

/* No hash selected: backend steers packets however it chooses. */
#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0

/*
 * Toeplitz hash:
 */
#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
295
296
/*
 * This algorithm uses a 'key' as well as the data buffer itself.
 * (Buffer[] and Key[] are treated as shift-registers where the MSB of
 * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
 * is the 'right-most').
 *
 * Value = 0
 * For number of bits in Buffer[]
 *     If (left-most bit of Buffer[] is 1)
 *         Value ^= left-most 32 bits of Key[]
 *     Key[] << 1
 *     Buffer[] << 1
 *
 * The code below is provided for convenience where an operating system
 * does not already provide an implementation.
 */
#ifdef XEN_NETIF_DEFINE_TOEPLITZ
/*
 * Compute the Toeplitz hash of buf[0..buflen-1] using key[0..keylen-1].
 * Key octets beyond keylen are treated as zero. The 64-bit 'prefix'
 * holds a sliding 8-octet window of the key so that the "left-most 32
 * bits of Key[]" always occupy the upper half of 'hash'; the valid
 * 32-bit result is therefore the upper half of the accumulator.
 */
static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
                                        unsigned int keylen,
                                        const uint8_t *buf,
                                        unsigned int buflen)
{
    unsigned int keyi, bufi;
    uint64_t prefix = 0;
    uint64_t hash = 0;

    /* Pre-load prefix with the first 8 bytes of the key */
    for (keyi = 0; keyi < 8; keyi++) {
        prefix <<= 8;
        prefix |= (keyi < keylen) ? key[keyi] : 0;
    }

    for (bufi = 0; bufi < buflen; bufi++) {
        uint8_t byte = buf[bufi];
        unsigned int bit;

        for (bit = 0; bit < 8; bit++) {
            if (byte & 0x80)
                hash ^= prefix;
            prefix <<= 1;
            byte <<= 1;
        }

        /*
         * 'prefix' has now been left-shifted by 8, so
         * OR in the next byte.
         */
        prefix |= (keyi < keylen) ? key[keyi] : 0;
        keyi++;
    }

    /* The valid part of the hash is in the upper 32 bits. */
    return hash >> 32;
}
#endif /* XEN_NETIF_DEFINE_TOEPLITZ */
351
352
/*
 * Control requests (struct xen_netif_ctrl_request)
 * ================================================
 *
 * All requests have the following format:
 *
 *    0     1     2     3     4     5     6     7  octet
 * +-----+-----+-----+-----+-----+-----+-----+-----+
 * |    id     |   type    |        data[0]        |
 * +-----+-----+-----+-----+-----+-----+-----+-----+
 * |        data[1]        |        data[2]        |
 * +-----+-----+-----+-----+-----------------------+
 *
 * id: the request identifier, echoed in response.
 * type: the type of request (see below)
 * data[]: any data associated with the request (determined by type)
 */

struct xen_netif_ctrl_request {
    uint16_t id;
    uint16_t type;

#define XEN_NETIF_CTRL_TYPE_INVALID               0
#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS        1
#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS        2
#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY          3
#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4
#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5
#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING      6
#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM    7
#define XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE 8
#define XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING      9
#define XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING     10

    uint32_t data[3];
};
388
389
/*
 * Control responses (struct xen_netif_ctrl_response)
 * ==================================================
 *
 * All responses have the following format:
 *
 *    0     1     2     3     4     5     6     7  octet
 * +-----+-----+-----+-----+-----+-----+-----+-----+
 * |    id     |   type    |         status        |
 * +-----+-----+-----+-----+-----+-----+-----+-----+
 * |         data          |
 * +-----+-----+-----+-----+
 *
 * id: the corresponding request identifier
 * type: the type of the corresponding request
 * status: the status of request processing
 * data: any data associated with the response (determined by type and
 *       status)
 */

struct xen_netif_ctrl_response {
    uint16_t id;
    uint16_t type;
    uint32_t status;

#define XEN_NETIF_CTRL_STATUS_SUCCESS           0
#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED     1
#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2
#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW   3

    uint32_t data;
};
421
422
/*
 * Static Grants (struct xen_netif_gref)
 * =====================================
 *
 * A frontend may provide a fixed set of grant references to be mapped on
 * the backend. The message of type XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING,
 * sent prior to their usage in the command ring, allows for creation of
 * these mappings. The backend will maintain a fixed amount of these
 * mappings.
 *
 * XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE lets a frontend query how many
 * of these mappings can be kept.
 *
 * Each entry in the XEN_NETIF_CTRL_TYPE_{ADD,DEL}_GREF_MAPPING input table
 * has the following format:
 *
 *    0     1     2     3     4     5     6     7  octet
 * +-----+-----+-----+-----+-----+-----+-----+-----+
 * |       grant ref       |  flags    |  status   |
 * +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 * grant ref: grant reference (IN)
 * flags: flags describing the control operation (IN)
 * status: XEN_NETIF_CTRL_STATUS_* (OUT)
 *
 * 'status' is an output parameter which does not need to be set to zero
 * prior to its usage in the corresponding control messages.
 */

struct xen_netif_gref {
    grant_ref_t ref;
    uint16_t flags;

/* Map the grant read-only in the backend. */
#define _XEN_NETIF_CTRLF_GREF_readonly 0
#define XEN_NETIF_CTRLF_GREF_readonly (1U << _XEN_NETIF_CTRLF_GREF_readonly)

    uint16_t status;
};
459
460
/*
461
* Control messages
462
* ================
463
*
464
* XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
465
* --------------------------------------
466
*
467
* This is sent by the frontend to set the desired hash algorithm.
468
*
469
* Request:
470
*
471
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
472
* data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value
473
* data[1] = 0
474
* data[2] = 0
475
*
476
* Response:
477
*
478
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
479
* supported
480
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not
481
* supported
482
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
483
*
484
* NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables
485
* hashing and the backend is free to choose how it steers packets
486
* to queues (which is the default behaviour).
487
*
488
* XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
489
* ----------------------------------
490
*
491
* This is sent by the frontend to query the types of hash supported by
492
* the backend.
493
*
494
* Request:
495
*
496
* type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
497
* data[0] = 0
498
* data[1] = 0
499
* data[2] = 0
500
*
501
* Response:
502
*
503
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
504
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
505
* data = supported hash types (if operation was successful)
506
*
507
* NOTE: A valid hash algorithm must be selected before this operation can
508
* succeed.
509
*
510
* XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
511
* ----------------------------------
512
*
513
* This is sent by the frontend to set the types of hash that the backend
514
* should calculate. (See above for hash type definitions).
515
* Note that the 'maximal' type of hash should always be chosen. For
516
* example, if the frontend sets both IPV4 and IPV4_TCP hash types then
517
* the latter hash type should be calculated for any TCP packet and the
518
* former only calculated for non-TCP packets.
519
*
520
* Request:
521
*
522
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
523
* data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values
524
* data[1] = 0
525
* data[2] = 0
526
*
527
* Response:
528
*
529
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
530
* supported
531
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag
532
* value is invalid or
533
* unsupported
534
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
535
* data = 0
536
*
537
* NOTE: A valid hash algorithm must be selected before this operation can
538
* succeed.
539
* Also, setting data[0] to zero disables hashing and the backend
540
* is free to choose how it steers packets to queues.
541
*
542
* XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
543
* --------------------------------
544
*
545
* This is sent by the frontend to set the key of the hash if the algorithm
546
* requires it. (See hash algorithms above).
547
*
548
* Request:
549
*
550
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
551
* data[0] = grant reference of page containing the key (assumed to
552
* start at beginning of grant)
553
* data[1] = size of key in octets
554
* data[2] = 0
555
*
556
* Response:
557
*
558
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
559
* supported
560
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid
561
* XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger
562
* than the backend
563
* supports
564
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
565
* data = 0
566
*
567
* NOTE: Any key octets not specified are assumed to be zero (the key
568
* is assumed to be empty by default) and specifying a new key
569
* invalidates any previous key, hence specifying a key size of
570
* zero will clear the key (which ensures that the calculated hash
571
* will always be zero).
572
* The maximum size of key is algorithm and backend specific, but
573
* is also limited by the single grant reference.
574
* The grant reference may be read-only and must remain valid until
575
* the response has been processed.
576
*
577
* XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
578
* -----------------------------------------
579
*
580
* This is sent by the frontend to query the maximum size of mapping
581
* table supported by the backend. The size is specified in terms of
582
* table entries.
583
*
584
* Request:
585
*
586
* type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
587
* data[0] = 0
588
* data[1] = 0
589
* data[2] = 0
590
*
591
* Response:
592
*
593
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
594
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
595
* data = maximum number of entries allowed in the mapping table
596
* (if operation was successful) or zero if a mapping table is
597
* not supported (i.e. hash mapping is done only by modular
598
* arithmetic).
599
*
600
* XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
601
* -------------------------------------
602
*
603
* This is sent by the frontend to set the actual size of the mapping
604
* table to be used by the backend. The size is specified in terms of
605
* table entries.
606
* Any previous table is invalidated by this message and any new table
607
* is assumed to be zero filled.
608
*
609
* Request:
610
*
611
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
612
* data[0] = number of entries in mapping table
613
* data[1] = 0
614
* data[2] = 0
615
*
616
* Response:
617
*
618
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
619
* supported
620
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid
621
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
622
* data = 0
623
*
624
* NOTE: Setting data[0] to 0 means that hash mapping should be done
625
* using modular arithmetic.
626
*
627
* XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
628
* ------------------------------------
629
*
630
* This is sent by the frontend to set the content of the table mapping
631
* hash value to queue number. The backend should calculate the hash from
632
* the packet header, use it as an index into the table (modulo the size
633
* of the table) and then steer the packet to the queue number found at
634
* that index.
635
*
636
* Request:
637
*
638
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
639
* data[0] = grant reference of page containing the mapping (sub-)table
640
* (assumed to start at beginning of grant)
641
* data[1] = size of (sub-)table in entries
642
* data[2] = offset, in entries, of sub-table within overall table
643
*
644
* Response:
645
*
646
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
647
* supported
648
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content
649
* is invalid
650
* XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger
651
* than the backend
652
* supports
653
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
654
* data = 0
655
*
656
* NOTE: The overall table has the following format:
657
*
658
* 0 1 2 3 4 5 6 7 octet
659
* +-----+-----+-----+-----+-----+-----+-----+-----+
660
* | mapping[0] | mapping[1] |
661
* +-----+-----+-----+-----+-----+-----+-----+-----+
662
* | . |
663
* | . |
664
* | . |
665
* +-----+-----+-----+-----+-----+-----+-----+-----+
666
* | mapping[N-2] | mapping[N-1] |
667
* +-----+-----+-----+-----+-----+-----+-----+-----+
668
*
669
* where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
670
 * message and each mapping must specify a queue between 0 and
671
* "multi-queue-num-queues" (see above).
672
* The backend may support a mapping table larger than can be
673
* mapped by a single grant reference. Thus sub-tables within a
674
* larger table can be individually set by sending multiple messages
675
* with differing offset values. Specifying a new sub-table does not
676
* invalidate any table data outside that range.
677
* The grant reference may be read-only and must remain valid until
678
* the response has been processed.
679
*
680
* XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE
681
* -----------------------------------------
682
*
683
* This is sent by the frontend to fetch the number of grefs that can be kept
684
* mapped in the backend.
685
*
686
* Request:
687
*
688
* type = XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE
689
* data[0] = queue index (assumed 0 for single queue)
690
* data[1] = 0
691
* data[2] = 0
692
*
693
* Response:
694
*
695
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
696
* supported
697
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The queue index is
698
* out of range
699
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
700
* data = maximum number of entries allowed in the gref mapping table
701
* (if operation was successful) or zero if it is not supported.
702
*
703
* XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
704
* ------------------------------------
705
*
706
* This is sent by the frontend for backend to map a list of grant
707
* references.
708
*
709
* Request:
710
*
711
* type = XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
712
* data[0] = queue index
713
* data[1] = grant reference of page containing the mapping list
714
* (r/w and assumed to start at beginning of page)
715
* data[2] = size of list in entries
716
*
717
* Response:
718
*
719
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
720
* supported
721
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Operation failed
722
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
723
*
724
* NOTE: Each entry in the input table has the format outlined
725
* in struct xen_netif_gref.
726
* Contrary to XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING, the struct
727
* xen_netif_gref 'status' field is not used and therefore the response
728
* 'status' determines the success of this operation. In case of
729
* failure none of grants mappings get added in the backend.
730
*
731
* XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING
732
* ------------------------------------
733
*
734
* This is sent by the frontend for backend to unmap a list of grant
735
* references.
736
*
737
* Request:
738
*
739
* type = XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING
740
* data[0] = queue index
741
* data[1] = grant reference of page containing the mapping list
742
* (r/w and assumed to start at beginning of page)
743
* data[2] = size of list in entries
744
*
745
* Response:
746
*
747
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
748
* supported
749
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Operation failed
750
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
751
* data = number of entries that were unmapped
752
*
753
* NOTE: Each entry in the input table has the format outlined in struct
754
* xen_netif_gref.
755
* The struct xen_netif_gref 'status' field determines if the entry
756
* was successfully removed.
757
* The entries used are only the ones representing grant references that
758
* were previously the subject of a XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
759
* operation. Any other entries will have their status set to
760
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER upon completion.
761
*/
762
763
/*
 * Instantiate the shared-ring types (sring, front/back ring accessors)
 * for the control channel request/response pairs documented above.
 */
DEFINE_RING_TYPES(xen_netif_ctrl,
                  struct xen_netif_ctrl_request,
                  struct xen_netif_ctrl_response);
/*
768
* Guest transmit
769
* ==============
770
*
771
* This is the 'wire' format for transmit (frontend -> backend) packets:
772
*
773
* Fragment 1: netif_tx_request_t - flags = NETTXF_*
774
* size = total packet size
775
* [Extra 1: netif_extra_info_t] - (only if fragment 1 flags include
776
* NETTXF_extra_info)
777
* ...
778
* [Extra N: netif_extra_info_t] - (only if extra N-1 flags include
779
* XEN_NETIF_EXTRA_MORE)
780
* ...
781
* Fragment N: netif_tx_request_t - (only if fragment N-1 flags include
782
* NETTXF_more_data - flags on preceding
783
* extras are not relevant here)
784
* flags = 0
785
* size = fragment size
786
*
787
* NOTE:
788
*
789
* This format is slightly different from that used for receive
790
* (backend -> frontend) packets. Specifically, in a multi-fragment
791
* packet the actual size of fragment 1 can only be determined by
792
* subtracting the sizes of fragments 2..N from the total packet size.
793
*
794
* Ring slot size is 12 octets, however not all request/response
795
* structs use the full size.
796
*
797
* tx request data (netif_tx_request_t)
798
* ------------------------------------
799
*
800
* 0 1 2 3 4 5 6 7 octet
801
* +-----+-----+-----+-----+-----+-----+-----+-----+
802
* | grant ref | offset | flags |
803
* +-----+-----+-----+-----+-----+-----+-----+-----+
804
* | id | size |
805
* +-----+-----+-----+-----+
806
*
807
* grant ref: Reference to buffer page.
808
* offset: Offset within buffer page.
809
* flags: NETTXF_*.
810
* id: request identifier, echoed in response.
811
* size: packet size in bytes.
812
*
813
* tx response (netif_tx_response_t)
814
* ---------------------------------
815
*
816
* 0 1 2 3 4 5 6 7 octet
817
* +-----+-----+-----+-----+-----+-----+-----+-----+
818
* | id | status | unused |
819
* +-----+-----+-----+-----+-----+-----+-----+-----+
820
* | unused |
821
* +-----+-----+-----+-----+
822
*
823
* id: reflects id in transmit request
824
* status: NETIF_RSP_*
825
*
826
* Guest receive
827
* =============
828
*
829
* This is the 'wire' format for receive (backend -> frontend) packets:
830
*
831
* Fragment 1: netif_rx_request_t - flags = NETRXF_*
832
* size = fragment size
833
* [Extra 1: netif_extra_info_t] - (only if fragment 1 flags include
834
* NETRXF_extra_info)
835
* ...
836
* [Extra N: netif_extra_info_t] - (only if extra N-1 flags include
837
* XEN_NETIF_EXTRA_MORE)
838
* ...
839
* Fragment N: netif_rx_request_t - (only if fragment N-1 flags include
840
* NETRXF_more_data - flags on preceding
841
* extras are not relevant here)
842
* flags = 0
843
* size = fragment size
844
*
845
* NOTE:
846
*
847
* This format is slightly different from that used for transmit
848
* (frontend -> backend) packets. Specifically, in a multi-fragment
849
* packet the size of the packet can only be determined by summing the
850
* sizes of fragments 1..N.
851
*
852
* Ring slot size is 8 octets.
853
*
854
* rx request (netif_rx_request_t)
855
* -------------------------------
856
*
857
* 0 1 2 3 4 5 6 7 octet
858
* +-----+-----+-----+-----+-----+-----+-----+-----+
859
* | id | pad | gref |
860
* +-----+-----+-----+-----+-----+-----+-----+-----+
861
*
862
* id: request identifier, echoed in response.
863
* gref: reference to incoming granted frame.
864
*
865
* rx response (netif_rx_response_t)
866
* ---------------------------------
867
*
868
* 0 1 2 3 4 5 6 7 octet
869
* +-----+-----+-----+-----+-----+-----+-----+-----+
870
* | id | offset | flags | status |
871
* +-----+-----+-----+-----+-----+-----+-----+-----+
872
*
873
* id: reflects id in receive request
874
* offset: offset in page of start of received packet
875
* flags: NETRXF_*
876
* status: -ve: NETIF_RSP_*; +ve: Rx'ed pkt size.
877
*
878
* NOTE: Historically, to support GSO on the frontend receive side, Linux
879
* netfront does not make use of the rx response id (because, as
880
* described below, extra info structures overlay the id field).
881
* Instead it assumes that responses always appear in the same ring
882
* slot as their corresponding request. Thus, to maintain
883
* compatibility, backends must make sure this is the case.
884
*
885
* Extra Info
886
* ==========
887
*
888
* Can be present if initial request or response has NET{T,R}XF_extra_info,
889
* or previous extra request has XEN_NETIF_EXTRA_MORE.
890
*
891
* The struct therefore needs to fit into either a tx or rx slot and
892
* is therefore limited to 8 octets.
893
*
894
* NOTE: Because extra info data overlays the usual request/response
895
* structures, there is no id information in the opposite direction.
896
* So, if an extra info overlays an rx response the frontend can
897
* assume that it is in the same ring slot as the request that was
898
* consumed to make the slot available, and the backend must ensure
899
* this assumption is true.
900
*
901
* extra info (netif_extra_info_t)
902
* -------------------------------
903
*
904
* General format:
905
*
906
* 0 1 2 3 4 5 6 7 octet
907
* +-----+-----+-----+-----+-----+-----+-----+-----+
908
* |type |flags| type specific data |
909
* +-----+-----+-----+-----+-----+-----+-----+-----+
910
* | padding for tx |
911
* +-----+-----+-----+-----+
912
*
913
* type: XEN_NETIF_EXTRA_TYPE_*
914
* flags: XEN_NETIF_EXTRA_FLAG_*
915
* padding for tx: present only in the tx case due to 8 octet limit
916
* from rx case. Not shown in type specific entries
917
* below.
918
*
919
* XEN_NETIF_EXTRA_TYPE_GSO:
920
*
921
* 0 1 2 3 4 5 6 7 octet
922
* +-----+-----+-----+-----+-----+-----+-----+-----+
923
* |type |flags| size |type | pad | features |
924
* +-----+-----+-----+-----+-----+-----+-----+-----+
925
*
926
* type: Must be XEN_NETIF_EXTRA_TYPE_GSO
927
* flags: XEN_NETIF_EXTRA_FLAG_*
928
* size: Maximum payload size of each segment. For example,
929
* for TCP this is just the path MSS.
930
* type: XEN_NETIF_GSO_TYPE_*: This determines the protocol of
931
* the packet and any extra features required to segment the
932
* packet properly.
933
* features: XEN_NETIF_GSO_FEAT_*: This specifies any extra GSO
934
* features required to process this packet, such as ECN
935
* support for TCPv4.
936
*
937
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
938
*
939
* 0 1 2 3 4 5 6 7 octet
940
* +-----+-----+-----+-----+-----+-----+-----+-----+
941
* |type |flags| addr |
942
* +-----+-----+-----+-----+-----+-----+-----+-----+
943
*
944
* type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}
945
* flags: XEN_NETIF_EXTRA_FLAG_*
946
* addr: address to add/remove
947
*
948
* XEN_NETIF_EXTRA_TYPE_HASH:
949
*
950
* A backend that supports Toeplitz hashing is assumed to accept
951
* this type of extra info in transmit packets.
952
* A frontend that enables hashing is assumed to accept
953
* this type of extra info in receive packets.
954
*
955
* 0 1 2 3 4 5 6 7 octet
956
* +-----+-----+-----+-----+-----+-----+-----+-----+
957
* |type |flags|htype| alg |LSB ---- value ---- MSB|
958
* +-----+-----+-----+-----+-----+-----+-----+-----+
959
*
960
* type: Must be XEN_NETIF_EXTRA_TYPE_HASH
961
* flags: XEN_NETIF_EXTRA_FLAG_*
962
* htype: Hash type (one of _XEN_NETIF_CTRL_HASH_TYPE_* - see above)
963
* alg: The algorithm used to calculate the hash (one of
964
* XEN_NETIF_CTRL_HASH_TYPE_ALGORITHM_* - see above)
965
* value: Hash value
966
*/
967
968
/*
 * Bits of the netif_tx_request_t 'flags' field (first fragment only;
 * see the 'Guest transmit' wire format above).
 */

/* Protocol checksum field is blank in the packet (hardware offload)? */
#define _NETTXF_csum_blank     (0)
#define  NETTXF_csum_blank     (1U<<_NETTXF_csum_blank)

/* Packet data has been validated against protocol checksum. */
#define _NETTXF_data_validated (1)
#define  NETTXF_data_validated (1U<<_NETTXF_data_validated)

/* Packet continues in the next request descriptor. */
#define _NETTXF_more_data      (2)
#define  NETTXF_more_data      (1U<<_NETTXF_more_data)

/* Packet to be followed by extra descriptor(s). */
#define _NETTXF_extra_info     (3)
#define  NETTXF_extra_info     (1U<<_NETTXF_extra_info)

/* Maximum value representable in the 16-bit 'size' field below. */
#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
/*
 * Transmit request descriptor; layout matches the 'tx request data'
 * diagram above (12-octet ring slot).
 */
struct netif_tx_request {
    grant_ref_t gref;    /* Reference to buffer page. */
    uint16_t offset;     /* Offset within buffer page. */
    uint16_t flags;      /* NETTXF_* (first fragment only). */
    uint16_t id;         /* Request identifier, echoed in response. */
    uint16_t size;       /* Fragment 1: total packet size in bytes;
                          * fragments 2..N: fragment size in bytes. */
};
typedef struct netif_tx_request netif_tx_request_t;
/* Types of netif_extra_info descriptors (selects the union member used). */
#define XEN_NETIF_EXTRA_TYPE_NONE      (0)  /* Never used - invalid */
#define XEN_NETIF_EXTRA_TYPE_GSO       (1)  /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2)  /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3)  /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_HASH      (4)  /* u.hash */
#define XEN_NETIF_EXTRA_TYPE_MAX       (5)  /* One past the last valid type. */

/* netif_extra_info_t flags. */
/* Another extra info descriptor follows this one. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_EXTRA_FLAG_MORE  (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)

/* GSO types (protocol of the packet being segmented). */
#define XEN_NETIF_GSO_TYPE_NONE  (0)
#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
/*
 * This structure needs to fit within both netif_tx_request_t and
 * netif_rx_response_t for compatibility.  See the 'Extra Info' section
 * above for the on-the-wire layouts of each variant.
 */
struct netif_extra_info {
    uint8_t type;  /* XEN_NETIF_EXTRA_TYPE_* */
    uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
    union {
        /* XEN_NETIF_EXTRA_TYPE_GSO: segmentation offload parameters. */
        struct {
            uint16_t size;     /* Maximum payload size of each segment
                                * (e.g. the path MSS for TCP). */
            uint8_t type;      /* XEN_NETIF_GSO_TYPE_* */
            uint8_t pad;       /* Unused; see wire diagram above. */
            uint16_t features; /* Extra GSO features required, e.g. ECN
                                * support for TCPv4. */
        } gso;
        /* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: multicast filtering. */
        struct {
            uint8_t addr[6];   /* Address to add/remove. */
        } mcast;
        /* XEN_NETIF_EXTRA_TYPE_HASH: hash metadata for the packet. */
        struct {
            uint8_t type;      /* Hash type (_XEN_NETIF_CTRL_HASH_TYPE_*). */
            uint8_t algorithm; /* Algorithm used to calculate the hash
                                * (see control-channel section above). */
            uint8_t value[4];  /* Hash value, LSB first per diagram above. */
        } hash;
        uint16_t pad[3];       /* Pads the union to 6 octets (8 total). */
    } u;
};
typedef struct netif_extra_info netif_extra_info_t;
/*
 * Transmit completion; layout matches the 'tx response' diagram above
 * (remainder of the 12-octet slot is unused).
 */
struct netif_tx_response {
    uint16_t id;     /* Reflects id in the corresponding transmit request. */
    int16_t  status; /* NETIF_RSP_* */
};
typedef struct netif_tx_response netif_tx_response_t;
/*
 * Receive buffer post; layout matches the 'rx request' diagram above
 * (8-octet ring slot).
 */
struct netif_rx_request {
    uint16_t    id;   /* Echoed in response message. */
    uint16_t    pad;  /* Unused; aligns 'gref' per the wire diagram. */
    grant_ref_t gref; /* Reference to incoming granted frame. */
};
typedef struct netif_rx_request netif_rx_request_t;
/* Bits of the netif_rx_response_t 'flags' field. */

/* Packet data has been validated against protocol checksum. */
#define _NETRXF_data_validated (0)
#define  NETRXF_data_validated (1U<<_NETRXF_data_validated)

/* Protocol checksum field is blank in the packet (hardware offload)? */
#define _NETRXF_csum_blank     (1)
#define  NETRXF_csum_blank     (1U<<_NETRXF_csum_blank)

/* Packet continues in the next request descriptor. */
#define _NETRXF_more_data      (2)
#define  NETRXF_more_data      (1U<<_NETRXF_more_data)

/* Packet to be followed by extra descriptor(s). */
#define _NETRXF_extra_info     (3)
#define  NETRXF_extra_info     (1U<<_NETRXF_extra_info)

/* Packet has GSO prefix. Deprecated but included for compatibility */
#define _NETRXF_gso_prefix     (4)
#define  NETRXF_gso_prefix     (1U<<_NETRXF_gso_prefix)
/*
 * Receive completion; layout matches the 'rx response' diagram above.
 * NOTE: historically Linux netfront ignores 'id' and assumes responses
 * occupy the same ring slot as their request (see note above).
 */
struct netif_rx_response {
    uint16_t id;     /* Reflects id in the corresponding receive request. */
    uint16_t offset; /* Offset in page of start of received packet. */
    uint16_t flags;  /* NETRXF_* */
    int16_t  status; /* -ve: NETIF_RSP_*; +ve: Rx'ed packet size. */
};
typedef struct netif_rx_response netif_rx_response_t;
/*
 * Generate netif ring structures and types (sring plus front/back ring
 * accessors) for the transmit and receive data paths described above.
 */

DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
/* Values of the 'status' field in tx/rx responses (negative = error). */
#define NETIF_RSP_DROPPED         -2
#define NETIF_RSP_ERROR           -1
#define NETIF_RSP_OKAY             0
/* No response: used for auxiliary requests (e.g., netif_extra_info_t). */
#define NETIF_RSP_NULL             1
#endif
1093
1094
/*
1095
* Local variables:
1096
* mode: C
1097
* c-file-style: "BSD"
1098
* c-basic-offset: 4
1099
* tab-width: 4
1100
* indent-tabs-mode: nil
1101
* End:
1102
*/
1103
1104