GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/aac/aac.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001 Adaptec, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
 */
#define AAC_DRIVERNAME	"aac"

#include "opt_aac.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/poll.h>
#include <sys/ioccom.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/aac/aacreg.h>
#include <sys/aac_ioctl.h>
#include <dev/aac/aacvar.h>
#include <dev/aac/aac_tables.h>

static void aac_startup(void *arg);
static void aac_add_container(struct aac_softc *sc,
    struct aac_mntinforesp *mir, int f);
static void aac_get_bus_info(struct aac_softc *sc);
static void aac_daemon(void *arg);

/* Command Processing */
static void aac_timeout(struct aac_softc *sc);
static void aac_complete(void *context, int pending);
static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
static void aac_bio_complete(struct aac_command *cm);
static int aac_wait_command(struct aac_command *cm);
static void aac_command_thread(struct aac_softc *sc);

/* Command Buffer Management */
static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static int aac_alloc_commands(struct aac_softc *sc);
static void aac_free_commands(struct aac_softc *sc);
static void aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int aac_alloc(struct aac_softc *sc);
static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static int aac_check_firmware(struct aac_softc *sc);
static int aac_init(struct aac_softc *sc);
static int aac_sync_command(struct aac_softc *sc, u_int32_t command,
    u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
    u_int32_t arg3, u_int32_t *sp);
static int aac_setup_intr(struct aac_softc *sc);
static int aac_enqueue_fib(struct aac_softc *sc, int queue,
    struct aac_command *cm);
static int aac_dequeue_fib(struct aac_softc *sc, int queue,
    u_int32_t *fib_size, struct aac_fib **fib_addr);
static int aac_enqueue_response(struct aac_softc *sc, int queue,
    struct aac_fib *fib);

/* StrongARM interface */
static int aac_sa_get_fwstatus(struct aac_softc *sc);
static void aac_sa_qnotify(struct aac_softc *sc, int qbit);
static int aac_sa_get_istatus(struct aac_softc *sc);
static void aac_sa_clear_istatus(struct aac_softc *sc, int mask);
static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
    u_int32_t arg0, u_int32_t arg1,
    u_int32_t arg2, u_int32_t arg3);
static int aac_sa_get_mailbox(struct aac_softc *sc, int mb);
static void aac_sa_set_interrupts(struct aac_softc *sc, int enable);

const struct aac_interface aac_sa_interface = {
	aac_sa_get_fwstatus,
	aac_sa_qnotify,
	aac_sa_get_istatus,
	aac_sa_clear_istatus,
	aac_sa_set_mailbox,
	aac_sa_get_mailbox,
	aac_sa_set_interrupts,
	NULL, NULL, NULL
};

/* i960Rx interface */
static int aac_rx_get_fwstatus(struct aac_softc *sc);
static void aac_rx_qnotify(struct aac_softc *sc, int qbit);
static int aac_rx_get_istatus(struct aac_softc *sc);
static void aac_rx_clear_istatus(struct aac_softc *sc, int mask);
static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
    u_int32_t arg0, u_int32_t arg1,
    u_int32_t arg2, u_int32_t arg3);
static int aac_rx_get_mailbox(struct aac_softc *sc, int mb);
static void aac_rx_set_interrupts(struct aac_softc *sc, int enable);
static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
static int aac_rx_get_outb_queue(struct aac_softc *sc);
static void aac_rx_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_qnotify,
	aac_rx_get_istatus,
	aac_rx_clear_istatus,
	aac_rx_set_mailbox,
	aac_rx_get_mailbox,
	aac_rx_set_interrupts,
	aac_rx_send_command,
	aac_rx_get_outb_queue,
	aac_rx_set_outb_queue
};

/* Rocket/MIPS interface */
static int aac_rkt_get_fwstatus(struct aac_softc *sc);
static void aac_rkt_qnotify(struct aac_softc *sc, int qbit);
static int aac_rkt_get_istatus(struct aac_softc *sc);
static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
    u_int32_t arg0, u_int32_t arg1,
    u_int32_t arg2, u_int32_t arg3);
static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
static int aac_rkt_get_outb_queue(struct aac_softc *sc);
static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_qnotify,
	aac_rkt_get_istatus,
	aac_rkt_clear_istatus,
	aac_rkt_set_mailbox,
	aac_rkt_get_mailbox,
	aac_rkt_set_interrupts,
	aac_rkt_send_command,
	aac_rkt_get_outb_queue,
	aac_rkt_set_outb_queue
};

/* Debugging and Diagnostics */
static void aac_describe_controller(struct aac_softc *sc);
static const char *aac_describe_code(const struct aac_code_lookup *table,
    u_int32_t code);

/* Management Interface */
static d_open_t aac_open;
static d_ioctl_t aac_ioctl;
static d_poll_t aac_poll;
static void aac_cdevpriv_dtor(void *arg);
static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void aac_handle_aif(struct aac_softc *sc,
    struct aac_fib *fib);
static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int aac_return_aif(struct aac_softc *sc,
    struct aac_fib_context *ctx, caddr_t uptr);
static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void aac_ioctl_event(struct aac_softc *sc,
    struct aac_event *event, void *arg);
static struct aac_mntinforesp *
    aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

static struct cdevsw aac_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = aac_open,
	.d_ioctl = aac_ioctl,
	.d_poll = aac_poll,
	.d_name = "aac",
};

static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task.
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout. */
	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Add sysctls.
	 */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)),
	    OID_AUTO, "firmware_build", CTLFLAG_RD,
	    &sc->aac_revision.buildNumber, 0,
	    "firmware build number");

	/*
	 * Register to probe our containers later.
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
		    "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kproc_create((void(*)(void *))aac_command_thread, sc,
	    &sc->aifthread, 0, 0, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
		    "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	mtx_lock(&sc->aac_io_lock);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

static void
aac_daemon(void *arg)
{
	struct timeval tv;
	struct aac_softc *sc;
	struct aac_fib *fib;

	sc = arg;
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0)
		return;
	getmicrotime(&tv);
	aac_alloc_sync_fib(sc, &fib);
	*(uint32_t *)fib->data = tv.tv_sec;
	aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
	aac_release_sync_fib(sc);
	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
}

void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
		    event->ev_type);
		break;
	}
}

/*
 * Request information of container #cid
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_mntinfo))) {
		device_printf(sc->aac_dev, "Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}

/*
 * Probe for containers, create disks.
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinforesp *mir;
	int count = 0, i = 0;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/* loop over possible containers */
	do {
		if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
			continue;
		if (i == 0)
			count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* poke the bus to actually attach the child devices */
	bus_attach_children(sc->aac_dev);

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
{
	struct aac_container *co;
	device_t child;

	/*
	 * Check container volume type for validity. Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
		    M_NOWAIT | M_ZERO);
		if (co == NULL)
			panic("Out of memory?!");
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d",
		    mir->MntTable[0].ObjectId,
		    mir->MntTable[0].FileSystemName,
		    mir->MntTable[0].Capacity, mir->MntTable[0].VolType);

		if ((child = device_add_child(sc->aac_dev, "aacd", DEVICE_UNIT_ANY)) == NULL)
			device_printf(sc->aac_dev, "device_add_child failed\n");
		else
			device_set_ivars(child, co);
		device_set_desc(child, aac_describe_code(aac_container_types,
		    mir->MntTable[0].VolType));
		co->co_disk = child;
		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		    sizeof(struct aac_mntobj));
		mtx_lock(&sc->aac_container_lock);
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
		mtx_unlock(&sc->aac_container_lock);
	}
}

/*
 * Allocate resources associated with (sc)
 */
static int
aac_alloc(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
	    1, 0,					/* algnmnt, boundary */
	    (sc->flags & AAC_FLAGS_SG_64BIT) ?
	    BUS_SPACE_MAXADDR :
	    BUS_SPACE_MAXADDR_32BIT,			/* lowaddr */
	    BUS_SPACE_MAXADDR,				/* highaddr */
	    NULL, NULL,					/* filter, filterarg */
	    sc->aac_max_sectors << 9,			/* maxsize */
	    sc->aac_sg_tablesize,			/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,				/* flags */
	    busdma_lock_mutex,				/* lockfunc */
	    &sc->aac_io_lock,				/* lockfuncarg */
	    &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space..
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
	    1, 0,					/* algnmnt, boundary */
	    (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
	    BUS_SPACE_MAXADDR_32BIT :
	    0x7fffffff,					/* lowaddr */
	    BUS_SPACE_MAXADDR,				/* highaddr */
	    NULL, NULL,					/* filter, filterarg */
	    sc->aac_max_fibs_alloc *
	    sc->aac_max_fib_size,			/* maxsize */
	    1,						/* nsegments */
	    sc->aac_max_fibs_alloc *
	    sc->aac_max_fib_size,			/* maxsegsize */
	    0,						/* flags */
	    NULL, NULL,					/* No locking needed */
	    &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
	    1, 0,					/* algnmnt, boundary */
	    (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
	    BUS_SPACE_MAXADDR_32BIT :
	    0x7fffffff,					/* lowaddr */
	    BUS_SPACE_MAXADDR,				/* highaddr */
	    NULL, NULL,					/* filter, filterarg */
	    8192 + sizeof(struct aac_common),		/* maxsize */
	    1,						/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,			/* maxsegsize */
	    0,						/* flags */
	    NULL, NULL,					/* No locking needed */
	    &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
		    "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
	    BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/*
	 * Work around a bug in the 2120 and 2200 that cannot DMA commands
	 * below address 8192 in physical memory.
	 * XXX If the padding is not needed, can it be put to use instead
	 * of ignored?
	 */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
	    sc->aac_common, 8192 + sizeof(*sc->aac_common),
	    aac_common_map, sc, 0);

	if (sc->aac_common_busaddr < 8192) {
		sc->aac_common = (struct aac_common *)
		    ((uint8_t *)sc->aac_common + 8192);
		sc->aac_common_busaddr += 8192;
	}
	bzero(sc->aac_common, sizeof(*sc->aac_common));

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
	    M_AACBUF, M_WAITOK|M_ZERO);
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
		    sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL) {
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->aac_irq), sc->aac_irq);
		pci_release_msi(sc->aac_dev);
	}

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
}
657
658
/*
659
* Disconnect from the controller completely, in preparation for unload.
660
*/
661
int
662
aac_detach(device_t dev)
663
{
664
struct aac_softc *sc;
665
struct aac_container *co;
666
struct aac_sim *sim;
667
int error;
668
669
sc = device_get_softc(dev);
670
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
671
672
error = bus_generic_detach(dev);
673
if (error != 0)
674
return (error);
675
676
callout_drain(&sc->aac_daemontime);
677
678
mtx_lock(&sc->aac_io_lock);
679
while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
680
sc->aifflags |= AAC_AIFFLAGS_EXIT;
681
wakeup(sc->aifthread);
682
msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
683
}
684
mtx_unlock(&sc->aac_io_lock);
685
KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
686
("%s: invalid detach state", __func__));
687
688
/* Remove the child containers */
689
while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
690
TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
691
free(co, M_AACBUF);
692
}
693
694
/* Remove the CAM SIMs */
695
while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
696
TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
697
free(sim, M_AACBUF);
698
}
699
700
if ((error = aac_shutdown(dev)))
701
return(error);
702
703
EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
704
705
aac_free(sc);
706
707
mtx_destroy(&sc->aac_aifq_lock);
708
mtx_destroy(&sc->aac_io_lock);
709
mtx_destroy(&sc->aac_container_lock);
710
711
return(0);
712
}
713
714
/*
715
* Bring the controller down to a dormant state and detach all child devices.
716
*
717
* This function is called before detach or system shutdown.
718
*
719
* Note that we can assume that the bioq on the controller is empty, as we won't
720
* allow shutdown if any device is open.
721
*/
722
int
723
aac_shutdown(device_t dev)
724
{
725
struct aac_softc *sc;
726
struct aac_fib *fib;
727
struct aac_close_command *cc;
728
729
sc = device_get_softc(dev);
730
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
731
732
sc->aac_state |= AAC_STATE_SUSPEND;
733
734
/*
735
* Send a Container shutdown followed by a HostShutdown FIB to the
736
* controller to convince it that we don't want to talk to it anymore.
737
* We've been closed and all I/O completed already
738
*/
739
device_printf(sc->aac_dev, "shutting down controller...");
740
741
mtx_lock(&sc->aac_io_lock);
742
aac_alloc_sync_fib(sc, &fib);
743
cc = (struct aac_close_command *)&fib->data[0];
744
745
bzero(cc, sizeof(struct aac_close_command));
746
cc->Command = VM_CloseAll;
747
cc->ContainerId = 0xffffffff;
748
if (aac_sync_fib(sc, ContainerCommand, 0, fib,
749
sizeof(struct aac_close_command)))
750
printf("FAILED.\n");
751
else
752
printf("done\n");
753
#if 0
754
else {
755
fib->data[0] = 0;
756
/*
757
* XXX Issuing this command to the controller makes it shut down
758
* but also keeps it from coming back up without a reset of the
759
* PCI bus. This is not desirable if you are just unloading the
760
* driver module with the intent to reload it later.
761
*/
762
if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
763
fib, 1)) {
764
printf("FAILED.\n");
765
} else {
766
printf("done.\n");
767
}
768
}
769
#endif
770
771
AAC_MASK_INTERRUPTS(sc);
772
aac_release_sync_fib(sc);
773
mtx_unlock(&sc->aac_io_lock);
774
775
return(0);
776
}
777
778
/*
779
* Bring the controller to a quiescent state, ready for system suspend.
780
*/
781
int
782
aac_suspend(device_t dev)
783
{
784
struct aac_softc *sc;
785
786
sc = device_get_softc(dev);
787
788
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
789
sc->aac_state |= AAC_STATE_SUSPEND;
790
791
AAC_MASK_INTERRUPTS(sc);
792
return(0);
793
}
794
795
/*
796
* Bring the controller back to a state ready for operation.
797
*/
798
int
799
aac_resume(device_t dev)
800
{
801
struct aac_softc *sc;
802
803
sc = device_get_softc(dev);
804
805
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
806
sc->aac_state &= ~AAC_STATE_SUSPEND;
807
AAC_UNMASK_INTERRUPTS(sc);
808
return(0);
809
}
810
811
/*
812
* Interrupt handler for NEW_COMM interface.
813
*/
814
void
815
aac_new_intr(void *arg)
816
{
817
struct aac_softc *sc;
818
u_int32_t index, fast;
819
struct aac_command *cm;
820
struct aac_fib *fib;
821
int i;
822
823
sc = (struct aac_softc *)arg;
824
825
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
826
mtx_lock(&sc->aac_io_lock);
827
while (1) {
828
index = AAC_GET_OUTB_QUEUE(sc);
829
if (index == 0xffffffff)
830
index = AAC_GET_OUTB_QUEUE(sc);
831
if (index == 0xffffffff)
832
break;
833
if (index & 2) {
834
if (index == 0xfffffffe) {
835
/* XXX This means that the controller wants
836
* more work. Ignore it for now.
837
*/
838
continue;
839
}
840
/* AIF */
841
fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
842
M_NOWAIT | M_ZERO);
843
if (fib == NULL) {
844
/* If we're really this short on memory,
845
* hopefully breaking out of the handler will
846
* allow something to get freed. This
847
* actually sucks a whole lot.
848
*/
849
break;
850
}
851
index &= ~2;
852
for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
853
((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
854
aac_handle_aif(sc, fib);
855
free(fib, M_AACBUF);
856
857
/*
858
* AIF memory is owned by the adapter, so let it
859
* know that we are done with it.
860
*/
861
AAC_SET_OUTB_QUEUE(sc, index);
862
AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
863
} else {
864
fast = index & 1;
865
cm = sc->aac_commands + (index >> 2);
866
fib = cm->cm_fib;
867
if (fast) {
868
fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
869
*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
870
}
871
aac_remove_busy(cm);
872
aac_unmap_command(cm);
873
cm->cm_flags |= AAC_CMD_COMPLETED;
874
875
/* is there a completion handler? */
876
if (cm->cm_complete != NULL) {
877
cm->cm_complete(cm);
878
} else {
879
/* assume that someone is sleeping on this
880
* command
881
*/
882
wakeup(cm);
883
}
884
sc->flags &= ~AAC_QUEUE_FRZN;
885
}
886
}
887
/* see if we can start some more I/O */
888
if ((sc->flags & AAC_QUEUE_FRZN) == 0)
889
aac_startio(sc);
890
891
mtx_unlock(&sc->aac_io_lock);
892
}
893
894
/*
895
* Interrupt filter for !NEW_COMM interface.
896
*/
897
int
898
aac_filter(void *arg)
899
{
900
struct aac_softc *sc;
901
u_int16_t reason;
902
903
sc = (struct aac_softc *)arg;
904
905
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
906
/*
907
* Read the status register directly. This is faster than taking the
908
* driver lock and reading the queues directly. It also saves having
909
* to turn parts of the driver lock into a spin mutex, which would be
910
* ugly.
911
*/
912
reason = AAC_GET_ISTATUS(sc);
913
AAC_CLEAR_ISTATUS(sc, reason);
914
915
/* handle completion processing */
916
if (reason & AAC_DB_RESPONSE_READY)
917
taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete);
918
919
/* controller wants to talk to us */
920
if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
921
/*
922
* XXX Make sure that we don't get fooled by strange messages
923
* that start with a NULL.
924
*/
925
if ((reason & AAC_DB_PRINTF) &&
926
(sc->aac_common->ac_printf[0] == 0))
927
sc->aac_common->ac_printf[0] = 32;
928
929
/*
930
* This might miss doing the actual wakeup. However, the
931
* msleep that this is waking up has a timeout, so it will
932
* wake up eventually. AIFs and printfs are low enough
933
* priority that they can handle hanging out for a few seconds
934
* if needed.
935
*/
936
wakeup(sc->aifthread);
937
}
938
return (FILTER_HANDLED);
939
}
940
941
/*
942
* Command Processing
943
*/
944
945
/*
946
* Start as much queued I/O as possible on the controller
947
*/
948
void
949
aac_startio(struct aac_softc *sc)
950
{
951
struct aac_command *cm;
952
int error;
953
954
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
955
956
for (;;) {
957
/*
958
* This flag might be set if the card is out of resources.
959
* Checking it here prevents an infinite loop of deferrals.
960
*/
961
if (sc->flags & AAC_QUEUE_FRZN)
962
break;
963
964
/*
965
* Try to get a command that's been put off for lack of
966
* resources
967
*/
968
cm = aac_dequeue_ready(sc);
969
970
/*
971
* Try to build a command off the bio queue (ignore error
972
* return)
973
*/
974
if (cm == NULL)
975
aac_bio_command(sc, &cm);
976
977
/* nothing to do? */
978
if (cm == NULL)
979
break;
980
981
/* don't map more than once */
982
if (cm->cm_flags & AAC_CMD_MAPPED)
983
panic("aac: command %p already mapped", cm);
984
985
/*
986
* Set up the command to go to the controller. If there are no
987
* data buffers associated with the command then it can bypass
988
* busdma.
989
*/
990
if (cm->cm_datalen != 0) {
991
if (cm->cm_flags & AAC_REQ_BIO)
992
error = bus_dmamap_load_bio(
993
sc->aac_buffer_dmat, cm->cm_datamap,
994
(struct bio *)cm->cm_private,
995
aac_map_command_sg, cm, 0);
996
else
997
error = bus_dmamap_load(sc->aac_buffer_dmat,
998
cm->cm_datamap, cm->cm_data,
999
cm->cm_datalen, aac_map_command_sg, cm, 0);
1000
if (error == EINPROGRESS) {
1001
fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
1002
sc->flags |= AAC_QUEUE_FRZN;
1003
} else if (error != 0)
1004
panic("aac_startio: unexpected error %d from "
1005
"busdma", error);
1006
} else
1007
aac_map_command_sg(cm, NULL, 0, 0);
1008
}
1009
}
1010
1011
/*
1012
* Handle notification of one or more FIBs coming from the controller.
1013
*/
1014
static void
1015
aac_command_thread(struct aac_softc *sc)
1016
{
1017
struct aac_fib *fib;
1018
u_int32_t fib_size;
1019
int size, retval;
1020
1021
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1022
1023
mtx_lock(&sc->aac_io_lock);
1024
sc->aifflags = AAC_AIFFLAGS_RUNNING;
1025
1026
while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1027
retval = 0;
1028
if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1029
retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1030
"aifthd", AAC_PERIODIC_INTERVAL * hz);
1031
1032
/*
1033
* First see if any FIBs need to be allocated. This needs
1034
* to be called without the driver lock because contigmalloc
1035
* can sleep.
1036
*/
1037
if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1038
mtx_unlock(&sc->aac_io_lock);
1039
aac_alloc_commands(sc);
1040
mtx_lock(&sc->aac_io_lock);
1041
sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1042
aac_startio(sc);
1043
}
1044
1045
/*
1046
* While we're here, check to see if any commands are stuck.
1047
* This is pretty low-priority, so it's ok if it doesn't
1048
* always fire.
1049
*/
1050
if (retval == EWOULDBLOCK)
1051
aac_timeout(sc);
1052
1053
/* Check the hardware printf message buffer */
1054
if (sc->aac_common->ac_printf[0] != 0)
1055
aac_print_printf(sc);
1056
1057
/* Also check to see if the adapter has a command for us. */
1058
if (sc->flags & AAC_FLAGS_NEW_COMM)
1059
continue;
1060
for (;;) {
1061
if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
1062
&fib_size, &fib))
1063
break;
1064
1065
AAC_PRINT_FIB(sc, fib);
1066
1067
switch (fib->Header.Command) {
1068
case AifRequest:
1069
aac_handle_aif(sc, fib);
1070
break;
1071
default:
1072
device_printf(sc->aac_dev, "unknown command "
1073
"from controller\n");
1074
break;
1075
}
1076
1077
if ((fib->Header.XferState == 0) ||
1078
(fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
1079
break;
1080
}
1081
1082
/* Return the AIF to the controller. */
1083
if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
1084
fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
1085
*(AAC_FSAStatus*)fib->data = ST_OK;
1086
1087
/* XXX Compute the Size field? */
1088
size = fib->Header.Size;
1089
if (size > sizeof(struct aac_fib)) {
1090
size = sizeof(struct aac_fib);
1091
fib->Header.Size = size;
1092
}
1093
/*
1094
* Since we did not generate this command, it
1095
* cannot go through the normal
1096
* enqueue->startio chain.
1097
*/
1098
aac_enqueue_response(sc,
1099
AAC_ADAP_NORM_RESP_QUEUE,
1100
fib);
1101
}
1102
}
1103
}
1104
sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1105
mtx_unlock(&sc->aac_io_lock);
1106
wakeup(sc->aac_dev);
1107
1108
kproc_exit(0);
1109
}
1110
1111
/*
1112
* Process completed commands.
1113
*/
1114
static void
1115
aac_complete(void *context, int pending)
1116
{
1117
struct aac_softc *sc;
1118
struct aac_command *cm;
1119
struct aac_fib *fib;
1120
u_int32_t fib_size;
1121
1122
sc = (struct aac_softc *)context;
1123
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1124
1125
mtx_lock(&sc->aac_io_lock);
1126
1127
/* pull completed commands off the queue */
1128
for (;;) {
1129
/* look for completed FIBs on our queue */
1130
if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
1131
&fib))
1132
break; /* nothing to do */
1133
1134
/* get the command, unmap and hand off for processing */
1135
cm = sc->aac_commands + fib->Header.SenderData;
1136
if (cm == NULL) {
1137
AAC_PRINT_FIB(sc, fib);
1138
break;
1139
}
1140
if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
1141
device_printf(sc->aac_dev,
1142
"COMMAND %p COMPLETED AFTER %d SECONDS\n",
1143
cm, (int)(time_uptime-cm->cm_timestamp));
1144
1145
aac_remove_busy(cm);
1146
1147
aac_unmap_command(cm);
1148
cm->cm_flags |= AAC_CMD_COMPLETED;
1149
1150
/* is there a completion handler? */
1151
if (cm->cm_complete != NULL) {
1152
cm->cm_complete(cm);
1153
} else {
1154
/* assume that someone is sleeping on this command */
1155
wakeup(cm);
1156
}
1157
}
1158
1159
/* see if we can start some more I/O */
1160
sc->flags &= ~AAC_QUEUE_FRZN;
1161
aac_startio(sc);
1162
1163
mtx_unlock(&sc->aac_io_lock);
1164
}
1165
1166
/*
1167
* Handle a bio submitted from a disk device.
1168
*/
1169
void
1170
aac_submit_bio(struct bio *bp)
1171
{
1172
struct aac_disk *ad;
1173
struct aac_softc *sc;
1174
1175
ad = (struct aac_disk *)bp->bio_disk->d_drv1;
1176
sc = ad->ad_controller;
1177
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1178
1179
/* queue the BIO and try to get some work done */
1180
aac_enqueue_bio(sc, bp);
1181
aac_startio(sc);
1182
}
1183
1184
/*
1185
* Get a bio and build a command to go with it.
1186
*/
1187
static int
1188
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
1189
{
1190
struct aac_command *cm;
1191
struct aac_fib *fib;
1192
struct aac_disk *ad;
1193
struct bio *bp;
1194
1195
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1196
1197
/* get the resources we will need */
1198
cm = NULL;
1199
bp = NULL;
1200
if (aac_alloc_command(sc, &cm)) /* get a command */
1201
goto fail;
1202
if ((bp = aac_dequeue_bio(sc)) == NULL)
1203
goto fail;
1204
1205
/* fill out the command */
1206
cm->cm_datalen = bp->bio_bcount;
1207
cm->cm_complete = aac_bio_complete;
1208
cm->cm_flags = AAC_REQ_BIO;
1209
cm->cm_private = bp;
1210
cm->cm_timestamp = time_uptime;
1211
1212
/* build the FIB */
1213
fib = cm->cm_fib;
1214
fib->Header.Size = sizeof(struct aac_fib_header);
1215
fib->Header.XferState =
1216
AAC_FIBSTATE_HOSTOWNED |
1217
AAC_FIBSTATE_INITIALISED |
1218
AAC_FIBSTATE_EMPTY |
1219
AAC_FIBSTATE_FROMHOST |
1220
AAC_FIBSTATE_REXPECTED |
1221
AAC_FIBSTATE_NORM |
1222
AAC_FIBSTATE_ASYNC |
1223
AAC_FIBSTATE_FAST_RESPONSE;
1224
1225
/* build the read/write request */
1226
ad = (struct aac_disk *)bp->bio_disk->d_drv1;
1227
1228
if (sc->flags & AAC_FLAGS_RAW_IO) {
1229
struct aac_raw_io *raw;
1230
raw = (struct aac_raw_io *)&fib->data[0];
1231
fib->Header.Command = RawIo;
1232
raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
1233
raw->ByteCount = bp->bio_bcount;
1234
raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1235
raw->BpTotal = 0;
1236
raw->BpComplete = 0;
1237
fib->Header.Size += sizeof(struct aac_raw_io);
1238
cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
1239
if (bp->bio_cmd == BIO_READ) {
1240
raw->Flags = 1;
1241
cm->cm_flags |= AAC_CMD_DATAIN;
1242
} else {
1243
raw->Flags = 0;
1244
cm->cm_flags |= AAC_CMD_DATAOUT;
1245
}
1246
} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1247
fib->Header.Command = ContainerCommand;
1248
if (bp->bio_cmd == BIO_READ) {
1249
struct aac_blockread *br;
1250
br = (struct aac_blockread *)&fib->data[0];
1251
br->Command = VM_CtBlockRead;
1252
br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1253
br->BlockNumber = bp->bio_pblkno;
1254
br->ByteCount = bp->bio_bcount;
1255
fib->Header.Size += sizeof(struct aac_blockread);
1256
cm->cm_sgtable = &br->SgMap;
1257
cm->cm_flags |= AAC_CMD_DATAIN;
1258
} else {
1259
struct aac_blockwrite *bw;
1260
bw = (struct aac_blockwrite *)&fib->data[0];
1261
bw->Command = VM_CtBlockWrite;
1262
bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1263
bw->BlockNumber = bp->bio_pblkno;
1264
bw->ByteCount = bp->bio_bcount;
1265
bw->Stable = CUNSTABLE;
1266
fib->Header.Size += sizeof(struct aac_blockwrite);
1267
cm->cm_flags |= AAC_CMD_DATAOUT;
1268
cm->cm_sgtable = &bw->SgMap;
1269
}
1270
} else {
1271
fib->Header.Command = ContainerCommand64;
1272
if (bp->bio_cmd == BIO_READ) {
1273
struct aac_blockread64 *br;
1274
br = (struct aac_blockread64 *)&fib->data[0];
1275
br->Command = VM_CtHostRead64;
1276
br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1277
br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
1278
br->BlockNumber = bp->bio_pblkno;
1279
br->Pad = 0;
1280
br->Flags = 0;
1281
fib->Header.Size += sizeof(struct aac_blockread64);
1282
cm->cm_flags |= AAC_CMD_DATAIN;
1283
cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
1284
} else {
1285
struct aac_blockwrite64 *bw;
1286
bw = (struct aac_blockwrite64 *)&fib->data[0];
1287
bw->Command = VM_CtHostWrite64;
1288
bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1289
bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
1290
bw->BlockNumber = bp->bio_pblkno;
1291
bw->Pad = 0;
1292
bw->Flags = 0;
1293
fib->Header.Size += sizeof(struct aac_blockwrite64);
1294
cm->cm_flags |= AAC_CMD_DATAOUT;
1295
cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
1296
}
1297
}
1298
1299
*cmp = cm;
1300
return(0);
1301
1302
fail:
1303
if (bp != NULL)
1304
aac_enqueue_bio(sc, bp);
1305
if (cm != NULL)
1306
aac_release_command(cm);
1307
return(ENOMEM);
1308
}
1309
1310
/*
1311
* Handle a bio-instigated command that has been completed.
1312
*/
1313
static void
1314
aac_bio_complete(struct aac_command *cm)
1315
{
1316
struct aac_blockread_response *brr;
1317
struct aac_blockwrite_response *bwr;
1318
struct bio *bp;
1319
AAC_FSAStatus status;
1320
1321
/* fetch relevant status and then release the command */
1322
bp = (struct bio *)cm->cm_private;
1323
if (bp->bio_cmd == BIO_READ) {
1324
brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
1325
status = brr->Status;
1326
} else {
1327
bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
1328
status = bwr->Status;
1329
}
1330
aac_release_command(cm);
1331
1332
/* fix up the bio based on status */
1333
if (status == ST_OK) {
1334
bp->bio_resid = 0;
1335
} else {
1336
bp->bio_error = EIO;
1337
bp->bio_flags |= BIO_ERROR;
1338
}
1339
aac_biodone(bp);
1340
}
1341
1342
/*
1343
* Submit a command to the controller, return when it completes.
1344
* XXX This is very dangerous! If the card has gone out to lunch, we could
1345
* be stuck here forever. At the same time, signals are not caught
1346
* because there is a risk that a signal could wakeup the sleep before
1347
* the card has a chance to complete the command. Since there is no way
1348
* to cancel a command that is in progress, we can't protect against the
1349
* card completing a command late and spamming the command and data
1350
* memory. So, we are held hostage until the command completes.
1351
*/
1352
static int
1353
aac_wait_command(struct aac_command *cm)
1354
{
1355
struct aac_softc *sc;
1356
int error;
1357
1358
sc = cm->cm_sc;
1359
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1360
1361
/* Put the command on the ready queue and get things going */
1362
aac_enqueue_ready(cm);
1363
aac_startio(sc);
1364
error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
1365
return(error);
1366
}
1367
1368
/*
1369
* Command Buffer Management
1370
*/
1371
1372
/*
1373
* Allocate a command.
1374
*/
1375
int
1376
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1377
{
1378
struct aac_command *cm;
1379
1380
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1381
1382
if ((cm = aac_dequeue_free(sc)) == NULL) {
1383
if (sc->total_fibs < sc->aac_max_fibs) {
1384
mtx_lock(&sc->aac_io_lock);
1385
sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1386
mtx_unlock(&sc->aac_io_lock);
1387
wakeup(sc->aifthread);
1388
}
1389
return (EBUSY);
1390
}
1391
1392
*cmp = cm;
1393
return(0);
1394
}
1395
1396
/*
1397
* Release a command back to the freelist.
1398
*/
1399
void
1400
aac_release_command(struct aac_command *cm)
1401
{
1402
struct aac_event *event;
1403
struct aac_softc *sc;
1404
1405
sc = cm->cm_sc;
1406
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1407
1408
/* (re)initialize the command/FIB */
1409
cm->cm_datalen = 0;
1410
cm->cm_sgtable = NULL;
1411
cm->cm_flags = 0;
1412
cm->cm_complete = NULL;
1413
cm->cm_private = NULL;
1414
cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1415
cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1416
cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1417
cm->cm_fib->Header.Flags = 0;
1418
cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1419
1420
/*
1421
* These are duplicated in aac_start to cover the case where an
1422
* intermediate stage may have destroyed them. They're left
1423
* initialized here for debugging purposes only.
1424
*/
1425
cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1426
cm->cm_fib->Header.SenderData = 0;
1427
1428
aac_enqueue_free(cm);
1429
1430
if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1431
TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1432
event->ev_callback(sc, event, event->ev_arg);
1433
}
1434
}
1435
1436
/*
1437
* Map helper for command/FIB allocation.
1438
*/
1439
static void
1440
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1441
{
1442
uint64_t *fibphys;
1443
1444
fibphys = (uint64_t *)arg;
1445
1446
*fibphys = segs[0].ds_addr;
1447
}
1448
1449
/*
1450
* Allocate and initialize commands/FIBs for this adapter.
1451
*/
1452
static int
1453
aac_alloc_commands(struct aac_softc *sc)
1454
{
1455
struct aac_command *cm;
1456
struct aac_fibmap *fm;
1457
uint64_t fibphys;
1458
int i, error;
1459
1460
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1461
1462
if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1463
return (ENOMEM);
1464
1465
fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
1466
if (fm == NULL)
1467
return (ENOMEM);
1468
1469
/* allocate the FIBs in DMAable memory and load them */
1470
if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1471
BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1472
device_printf(sc->aac_dev,
1473
"Not enough contiguous memory available.\n");
1474
free(fm, M_AACBUF);
1475
return (ENOMEM);
1476
}
1477
1478
/* Ignore errors since this doesn't bounce */
1479
(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1480
sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
1481
aac_map_command_helper, &fibphys, 0);
1482
1483
/* initialize constant fields in the command structure */
1484
bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
1485
for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1486
cm = sc->aac_commands + sc->total_fibs;
1487
fm->aac_commands = cm;
1488
cm->cm_sc = sc;
1489
cm->cm_fib = (struct aac_fib *)
1490
((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
1491
cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
1492
cm->cm_index = sc->total_fibs;
1493
1494
if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1495
&cm->cm_datamap)) != 0)
1496
break;
1497
mtx_lock(&sc->aac_io_lock);
1498
aac_release_command(cm);
1499
sc->total_fibs++;
1500
mtx_unlock(&sc->aac_io_lock);
1501
}
1502
1503
if (i > 0) {
1504
mtx_lock(&sc->aac_io_lock);
1505
TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1506
fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1507
mtx_unlock(&sc->aac_io_lock);
1508
return (0);
1509
}
1510
1511
bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1512
bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1513
free(fm, M_AACBUF);
1514
return (ENOMEM);
1515
}
1516
1517
/*
1518
* Free FIBs owned by this adapter.
1519
*/
1520
static void
1521
aac_free_commands(struct aac_softc *sc)
1522
{
1523
struct aac_fibmap *fm;
1524
struct aac_command *cm;
1525
int i;
1526
1527
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1528
1529
while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1530
TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1531
/*
1532
* We check against total_fibs to handle partially
1533
* allocated blocks.
1534
*/
1535
for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1536
cm = fm->aac_commands + i;
1537
bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1538
}
1539
bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1540
bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1541
free(fm, M_AACBUF);
1542
}
1543
}
1544
1545
/*
1546
* Command-mapping helper function - populate this command's s/g table.
1547
*/
1548
static void
1549
aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1550
{
1551
struct aac_softc *sc;
1552
struct aac_command *cm;
1553
struct aac_fib *fib;
1554
int i;
1555
1556
cm = (struct aac_command *)arg;
1557
sc = cm->cm_sc;
1558
fib = cm->cm_fib;
1559
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1560
1561
/* copy into the FIB */
1562
if (cm->cm_sgtable != NULL) {
1563
if (fib->Header.Command == RawIo) {
1564
struct aac_sg_tableraw *sg;
1565
sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1566
sg->SgCount = nseg;
1567
for (i = 0; i < nseg; i++) {
1568
sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1569
sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1570
sg->SgEntryRaw[i].Next = 0;
1571
sg->SgEntryRaw[i].Prev = 0;
1572
sg->SgEntryRaw[i].Flags = 0;
1573
}
1574
/* update the FIB size for the s/g count */
1575
fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1576
} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1577
struct aac_sg_table *sg;
1578
sg = cm->cm_sgtable;
1579
sg->SgCount = nseg;
1580
for (i = 0; i < nseg; i++) {
1581
sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1582
sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1583
}
1584
/* update the FIB size for the s/g count */
1585
fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1586
} else {
1587
struct aac_sg_table64 *sg;
1588
sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1589
sg->SgCount = nseg;
1590
for (i = 0; i < nseg; i++) {
1591
sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1592
sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1593
}
1594
/* update the FIB size for the s/g count */
1595
fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1596
}
1597
}
1598
1599
/* Fix up the address values in the FIB. Use the command array index
1600
* instead of a pointer since these fields are only 32 bits. Shift
1601
* the SenderFibAddress over to make room for the fast response bit
1602
* and for the AIF bit
1603
*/
1604
cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1605
cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1606
1607
/* save a pointer to the command for speedy reverse-lookup */
1608
cm->cm_fib->Header.SenderData = cm->cm_index;
1609
1610
if (cm->cm_flags & AAC_CMD_DATAIN)
1611
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1612
BUS_DMASYNC_PREREAD);
1613
if (cm->cm_flags & AAC_CMD_DATAOUT)
1614
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1615
BUS_DMASYNC_PREWRITE);
1616
cm->cm_flags |= AAC_CMD_MAPPED;
1617
1618
if (sc->flags & AAC_FLAGS_NEW_COMM) {
1619
int count = 10000000L;
1620
while (AAC_SEND_COMMAND(sc, cm) != 0) {
1621
if (--count == 0) {
1622
aac_unmap_command(cm);
1623
sc->flags |= AAC_QUEUE_FRZN;
1624
aac_requeue_ready(cm);
1625
}
1626
DELAY(5); /* wait 5 usec. */
1627
}
1628
} else {
1629
/* Put the FIB on the outbound queue */
1630
if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
1631
aac_unmap_command(cm);
1632
sc->flags |= AAC_QUEUE_FRZN;
1633
aac_requeue_ready(cm);
1634
}
1635
}
1636
}
1637
1638
/*
1639
* Unmap a command from controller-visible space.
1640
*/
1641
static void
1642
aac_unmap_command(struct aac_command *cm)
1643
{
1644
struct aac_softc *sc;
1645
1646
sc = cm->cm_sc;
1647
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1648
1649
if (!(cm->cm_flags & AAC_CMD_MAPPED))
1650
return;
1651
1652
if (cm->cm_datalen != 0) {
1653
if (cm->cm_flags & AAC_CMD_DATAIN)
1654
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1655
BUS_DMASYNC_POSTREAD);
1656
if (cm->cm_flags & AAC_CMD_DATAOUT)
1657
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1658
BUS_DMASYNC_POSTWRITE);
1659
1660
bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1661
}
1662
cm->cm_flags &= ~AAC_CMD_MAPPED;
1663
}
1664
1665
/*
1666
* Hardware Interface
1667
*/
1668
1669
/*
1670
* Initialize the adapter.
1671
*/
1672
static void
1673
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1674
{
1675
struct aac_softc *sc;
1676
1677
sc = (struct aac_softc *)arg;
1678
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1679
1680
sc->aac_common_busaddr = segs[0].ds_addr;
1681
}
1682
1683
static int
1684
aac_check_firmware(struct aac_softc *sc)
1685
{
1686
u_int32_t code, major, minor, options = 0, atu_size = 0;
1687
int rid, status;
1688
time_t then;
1689
1690
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1691
/*
1692
* Wait for the adapter to come ready.
1693
*/
1694
then = time_uptime;
1695
do {
1696
code = AAC_GET_FWSTATUS(sc);
1697
if (code & AAC_SELF_TEST_FAILED) {
1698
device_printf(sc->aac_dev, "FATAL: selftest failed\n");
1699
return(ENXIO);
1700
}
1701
if (code & AAC_KERNEL_PANIC) {
1702
device_printf(sc->aac_dev,
1703
"FATAL: controller kernel panic");
1704
return(ENXIO);
1705
}
1706
if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1707
device_printf(sc->aac_dev,
1708
"FATAL: controller not coming ready, "
1709
"status %x\n", code);
1710
return(ENXIO);
1711
}
1712
} while (!(code & AAC_UP_AND_RUNNING));
1713
1714
/*
1715
* Retrieve the firmware version numbers. Dell PERC2/QC cards with
1716
* firmware version 1.x are not compatible with this driver.
1717
*/
1718
if (sc->flags & AAC_FLAGS_PERC2QC) {
1719
if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1720
NULL)) {
1721
device_printf(sc->aac_dev,
1722
"Error reading firmware version\n");
1723
return (EIO);
1724
}
1725
1726
/* These numbers are stored as ASCII! */
1727
major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1728
minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1729
if (major == 1) {
1730
device_printf(sc->aac_dev,
1731
"Firmware version %d.%d is not supported.\n",
1732
major, minor);
1733
return (EINVAL);
1734
}
1735
}
1736
1737
/*
1738
* Retrieve the capabilities/supported options word so we know what
1739
* work-arounds to enable. Some firmware revs don't support this
1740
* command.
1741
*/
1742
if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
1743
if (status != AAC_SRB_STS_INVALID_REQUEST) {
1744
device_printf(sc->aac_dev,
1745
"RequestAdapterInfo failed\n");
1746
return (EIO);
1747
}
1748
} else {
1749
options = AAC_GET_MAILBOX(sc, 1);
1750
atu_size = AAC_GET_MAILBOX(sc, 2);
1751
sc->supported_options = options;
1752
1753
if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1754
(sc->flags & AAC_FLAGS_NO4GB) == 0)
1755
sc->flags |= AAC_FLAGS_4GB_WINDOW;
1756
if (options & AAC_SUPPORTED_NONDASD)
1757
sc->flags |= AAC_FLAGS_ENABLE_CAM;
1758
if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1759
&& (sizeof(bus_addr_t) > 4)) {
1760
device_printf(sc->aac_dev,
1761
"Enabling 64-bit address support\n");
1762
sc->flags |= AAC_FLAGS_SG_64BIT;
1763
}
1764
if ((options & AAC_SUPPORTED_NEW_COMM)
1765
&& sc->aac_if->aif_send_command)
1766
sc->flags |= AAC_FLAGS_NEW_COMM;
1767
if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1768
sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1769
}
1770
1771
/* Check for broken hardware that does a lower number of commands */
1772
sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1773
1774
/* Remap mem. resource, if required */
1775
if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
1776
atu_size > rman_get_size(sc->aac_regs_res1)) {
1777
rid = rman_get_rid(sc->aac_regs_res1);
1778
bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid,
1779
sc->aac_regs_res1);
1780
sc->aac_regs_res1 = bus_alloc_resource_anywhere(sc->aac_dev,
1781
SYS_RES_MEMORY, &rid, atu_size, RF_ACTIVE);
1782
if (sc->aac_regs_res1 == NULL) {
1783
sc->aac_regs_res1 = bus_alloc_resource_any(
1784
sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1785
if (sc->aac_regs_res1 == NULL) {
1786
device_printf(sc->aac_dev,
1787
"couldn't allocate register window\n");
1788
return (ENXIO);
1789
}
1790
sc->flags &= ~AAC_FLAGS_NEW_COMM;
1791
}
1792
sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
1793
sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);
1794
1795
if (sc->aac_hwif == AAC_HWIF_NARK) {
1796
sc->aac_regs_res0 = sc->aac_regs_res1;
1797
sc->aac_btag0 = sc->aac_btag1;
1798
sc->aac_bhandle0 = sc->aac_bhandle1;
1799
}
1800
}
1801
1802
/* Read preferred settings */
1803
sc->aac_max_fib_size = sizeof(struct aac_fib);
1804
sc->aac_max_sectors = 128; /* 64KB */
1805
if (sc->flags & AAC_FLAGS_SG_64BIT)
1806
sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1807
- sizeof(struct aac_blockwrite64))
1808
/ sizeof(struct aac_sg_entry64);
1809
else
1810
sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1811
- sizeof(struct aac_blockwrite))
1812
/ sizeof(struct aac_sg_entry);
1813
1814
if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
1815
options = AAC_GET_MAILBOX(sc, 1);
1816
sc->aac_max_fib_size = (options & 0xFFFF);
1817
sc->aac_max_sectors = (options >> 16) << 1;
1818
options = AAC_GET_MAILBOX(sc, 2);
1819
sc->aac_sg_tablesize = (options >> 16);
1820
options = AAC_GET_MAILBOX(sc, 3);
1821
sc->aac_max_fibs = (options & 0xFFFF);
1822
}
1823
if (sc->aac_max_fib_size > PAGE_SIZE)
1824
sc->aac_max_fib_size = PAGE_SIZE;
1825
sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;
1826
1827
if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1828
sc->flags |= AAC_FLAGS_RAW_IO;
1829
device_printf(sc->aac_dev, "Enable Raw I/O\n");
1830
}
1831
if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1832
(sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1833
sc->flags |= AAC_FLAGS_LBA_64BIT;
1834
device_printf(sc->aac_dev, "Enable 64-bit array\n");
1835
}
1836
1837
return (0);
1838
}
1839
1840
static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init *ip;
	u_int32_t qoffset;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Fill in the init structure. This tells the adapter about the
	 * physical location of various important shared data structures.
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
	    offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
	    offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
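	/*
	 * As a rough example, 4GB of host memory is 1048576 4K pages; on
	 * firmware with the broken map the value is divided down again
	 * (to about 257, i.e. the same amount expressed in 16MB units) so
	 * the adapter's double conversion still lands near the true size.
	 */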
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = 0;
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
		device_printf(sc->aac_dev, "New comm. interface enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Initialize FIB queues. Note that it appears that the layout of the
	 * indexes and the segmentation of the entries may be mandated by the
	 * adapter, which is only told about the base of the queue index fields.
	 *
	 * The initial values of the indices are assumed to inform the adapter
	 * of the sizes of the respective queues, and theoretically it could
	 * work out the entire layout of the queue structures from this. We
	 * take the easy route and just lay this area out like everyone else
	 * does.
	 *
	 * The Linux driver uses a much more complex scheme whereby several
	 * header records are kept for each queue. We use a couple of generic
	 * list manipulation functions which 'know' the size of each list by
	 * virtue of a table.
	 */
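	/*
	 * Adding AAC_QUEUE_ALIGN before masking off the low bits rounds the
	 * queue-table offset up to the next AAC_QUEUE_ALIGN boundary past
	 * ac_qbuf.
	 */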
	qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
	qoffset &= ~(AAC_QUEUE_ALIGN - 1);
	sc->aac_queues =
	    (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
	ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;

	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
	    AAC_HOST_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
	    AAC_HOST_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
	    AAC_HOST_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
	    AAC_HOST_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
	    AAC_ADAP_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
	    AAC_ADAP_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
	    AAC_ADAP_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
	    AAC_ADAP_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
	    AAC_HOST_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
	    AAC_HOST_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
	    AAC_HOST_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
	    AAC_HOST_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
	    AAC_ADAP_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
	    AAC_ADAP_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
	    AAC_ADAP_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
	    AAC_ADAP_HIGH_RESP_ENTRIES;
	sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
	    &sc->aac_queues->qt_HostNormCmdQueue[0];
	sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
	    &sc->aac_queues->qt_HostHighCmdQueue[0];
	sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
	    &sc->aac_queues->qt_AdapNormCmdQueue[0];
	sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
	    &sc->aac_queues->qt_AdapHighCmdQueue[0];
	sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
	    &sc->aac_queues->qt_HostNormRespQueue[0];
	sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
	    &sc->aac_queues->qt_HostHighRespQueue[0];
	sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
	    &sc->aac_queues->qt_AdapNormRespQueue[0];
	sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
	    &sc->aac_queues->qt_AdapHighRespQueue[0];

	/*
	 * Do controller-type-specific initialisation
	 */
	switch (sc->aac_hwif) {
	case AAC_HWIF_I960RX:
		AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0);
		break;
	case AAC_HWIF_RKT:
		AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0);
		break;
	default:
		break;
	}

	/*
	 * Give the init structure to the controller.
	 */
	if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
	    sc->aac_common_busaddr +
	    offsetof(struct aac_common, ac_init), 0, 0, 0,
	    NULL)) {
		device_printf(sc->aac_dev,
		    "error establishing init structure\n");
		error = EIO;
		goto out;
	}

	error = 0;
out:
	return(error);
}

static int
aac_setup_intr(struct aac_softc *sc)
{

	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL,
		    aac_new_intr, sc, &sc->aac_intr)) {
			device_printf(sc->aac_dev, "can't set up interrupt\n");
			return (EINVAL);
		}
	} else {
		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
		    INTR_TYPE_BIO, aac_filter, NULL,
		    sc, &sc->aac_intr)) {
			device_printf(sc->aac_dev,
			    "can't set up interrupt filter\n");
			return (EINVAL);
		}
	}
	return (0);
}

/*
 * Send a synchronous command to the controller and wait for a result.
 * Indicate if the controller completed the command with an error status.
 */
static int
aac_sync_command(struct aac_softc *sc, u_int32_t command,
    u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
    u_int32_t *sp)
{
	time_t then;
	u_int32_t status;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* populate the mailbox */
	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);

	/* ensure the sync command doorbell flag is cleared */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* then set it to signal the adapter */
	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);

	/* spin waiting for the command to complete */
	then = time_uptime;
	do {
		if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) {
			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
			return(EIO);
		}
	} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));

	/* clear the completion flag */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* get the command status */
	status = AAC_GET_MAILBOX(sc, 0);
	if (sp != NULL)
		*sp = status;

	if (status != AAC_SRB_STS_SUCCESS)
		return (-1);
	return(0);
}

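/*
 * A typical call, as in aac_init() above, passes the bus address of the
 * argument structure in arg0 and a NULL status pointer when only
 * success or failure matters (busaddr below is simply a stand-in for
 * whatever physical address is being handed to the firmware):
 *
 *	error = aac_sync_command(sc, AAC_MONKER_INITSTRUCT, busaddr,
 *	    0, 0, 0, NULL);
 */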
int
aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
    struct aac_fib *fib, u_int16_t datasize)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (datasize > AAC_FIB_DATASIZE)
		return(EINVAL);

	/*
	 * Set up the sync FIB
	 */
	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
	    AAC_FIBSTATE_INITIALISED |
	    AAC_FIBSTATE_EMPTY;
	fib->Header.XferState |= xferstate;
	fib->Header.Command = command;
	fib->Header.StructType = AAC_FIBTYPE_TFIB;
	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
	fib->Header.SenderSize = sizeof(struct aac_fib);
	fib->Header.SenderFibAddress = 0;	/* Not needed */
	fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
	    offsetof(struct aac_common,
	    ac_sync_fib);

	/*
	 * Give the FIB to the controller, wait for a response.
	 */
	if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
	    fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
		return(EIO);
	}

	return (0);
}

/*
 * Adapter-space FIB queue manipulation
 *
 * Note that the queue implementation here is a little funky; neither the PI
 * nor the CI will ever be zero. This behaviour is a controller feature.
 */
static const struct {
	int		size;
	int		notify;
} aac_qinfo[] = {
	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};

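/*
 * aac_qinfo[] is indexed by the same queue constants used for qt_qindex[]
 * above: 'size' is the number of entries in that queue and 'notify' is the
 * doorbell rung when the queue changes (0 for the high-priority queues,
 * which are never notified).
 */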
/*
 * Atomically insert an entry into the nominated queue, returns 0 on success or
 * EBUSY if the queue is full.
 *
 * Note: it would be more efficient to defer notifying the controller in
 * the case where we may be inserting several entries in rapid succession,
 * but implementing this usefully may be difficult (it would involve a
 * separate queue/notify interface).
 */
static int
aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	fib_size = cm->cm_fib->Header.Size;
	fib_addr = cm->cm_fib->Header.ReceiverFibAddress;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

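	/*
	 * The stored indices run from 1 to the queue size (never 0, see the
	 * note above), so the local pi is wrapped before use and the queue
	 * is treated as full when advancing the producer would make it meet
	 * the consumer; one entry is effectively left unused.
	 */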
	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/*
	 * To avoid a race with its completion interrupt, place this command on
	 * the busy queue prior to advertising it to the controller.
	 */
	aac_enqueue_busy(cm);

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}

/*
 * Atomically remove one entry from the nominated queue, returns 0 on
 * success or ENOENT if the queue is empty.
 */
static int
aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
    struct aac_fib **fib_addr)
{
	u_int32_t pi, ci;
	u_int32_t fib_index;
	int error;
	int notify;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* check for queue empty */
	if (ci == pi) {
		error = ENOENT;
		goto out;
	}

	/* wrap the pi so the following test works */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	notify = 0;
	if (ci == pi + 1)
		notify++;

	/* wrap the queue? */
	if (ci >= aac_qinfo[queue].size)
		ci = 0;

	/* fetch the entry */
	*fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;

	switch (queue) {
	case AAC_HOST_NORM_CMD_QUEUE:
	case AAC_HOST_HIGH_CMD_QUEUE:
		/*
		 * The aq_fib_addr is only 32 bits wide so it can't be counted
		 * on to hold an address. For AIF's, the adapter assumes
		 * that it's giving us an address into the array of AIF fibs.
		 * Therefore, we have to convert it to an index.
		 */
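		/*
		 * For the response queues below, the stored value is the
		 * command index multiplied by four, with bit 0 doubling as
		 * the adapter's fast-response flag; hence the test of bit 0
		 * and the shift right by two.
		 */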
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
		    sizeof(struct aac_fib);
		*fib_addr = &sc->aac_common->ac_fibs[fib_index];
		break;

	case AAC_HOST_NORM_RESP_QUEUE:
	case AAC_HOST_HIGH_RESP_QUEUE:
	{
		struct aac_command *cm;

		/*
		 * As above, an index is used instead of an actual address.
		 * Gotta shift the index to account for the fast response
		 * bit. No other correction is needed since this value was
		 * originally provided by the driver via the SenderFibAddress
		 * field.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
		cm = sc->aac_commands + (fib_index >> 2);
		*fib_addr = cm->cm_fib;

		/*
		 * Is this a fast response? If it is, update the fib fields in
		 * local memory since the whole fib isn't DMA'd back up.
		 */
		if (fib_index & 0x01) {
			(*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
			*((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
		}
		break;
	}
	default:
		panic("Invalid queue in aac_dequeue_fib()");
		break;
	}

	/* update consumer index */
	sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;

	/* if we have made the queue un-full, notify the adapter */
	if (notify && (aac_qinfo[queue].notify != 0))
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
	error = 0;

out:
	return(error);
}

/*
 * Put our response to an Adapter Initiated Fib (AIF) on the response queue
 */
static int
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Tell the adapter where the FIB is */
	fib_size = fib->Header.Size;
	fib_addr = fib->Header.SenderFibAddress;
	fib->Header.ReceiverFibAddress = fib_addr;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}

/*
 * Check for commands that have been outstanding for a suspiciously long time,
 * and complain about them.
 */
static void
aac_timeout(struct aac_softc *sc)
{
	struct aac_command *cm;
	time_t deadline;
	int timedout, code;

	/*
	 * Traverse the busy command list, bitch about late commands once
	 * only.
	 */
	timedout = 0;
	deadline = time_uptime - AAC_CMD_TIMEOUT;
	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
		if ((cm->cm_timestamp < deadline)
		    && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
			cm->cm_flags |= AAC_CMD_TIMEDOUT;
			device_printf(sc->aac_dev,
			    "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
			    cm, cm->cm_fib->Header.Command,
			    (int)(time_uptime-cm->cm_timestamp));
			AAC_PRINT_FIB(sc, cm->cm_fib);
			timedout++;
		}
	}

	if (timedout) {
		code = AAC_GET_FWSTATUS(sc);
		if (code != AAC_UP_AND_RUNNING) {
			device_printf(sc->aac_dev, "WARNING! Controller is no "
			    "longer running! code= 0x%x\n", code);
		}
	}
}

/*
 * Interface Function Vectors
 */

/*
 * Read the current firmware status word.
 */
static int
aac_sa_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS));
}

static int
aac_rx_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
	    AAC_RX_OMR0 : AAC_RX_FWSTATUS));
}

static int
aac_rkt_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
	    AAC_RKT_OMR0 : AAC_RKT_FWSTATUS));
}

/*
 * Notify the controller of a change in a given queue
 */

static void
aac_sa_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
}

static void
aac_rx_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit);
}

static void
aac_rkt_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit);
}

/*
 * Get the interrupt reason bits
 */
static int
aac_sa_get_istatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0));
}

static int
aac_rx_get_istatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR));
}

static int
aac_rkt_get_istatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR));
}

/*
 * Clear some interrupt reason bits
 */
static void
aac_sa_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
}

static void
aac_rx_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask);
}

static void
aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask);
}

/*
 * Populate the mailbox and set the command word
 */
static void
aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
    u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
}

static void
aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
    u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
}

static void
aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
}

/*
 * Fetch the immediate command status word
 */
static int
aac_sa_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
}

static int
aac_rx_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
}

static int
aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
}

/*
 * Set/clear interrupt masks
 */
static void
aac_sa_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
	}
}

static void
aac_rx_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
		else
			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0);
	}
}

static void
aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
		else
			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0);
	}
}

/*
 * New comm. interface: Send command functions
 */
static int
aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

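	/*
	 * AAC_RX_IQUE returns the offset of a free inbound queue entry;
	 * 0xffffffff means none is currently available, so the register is
	 * read a second time before the failure is returned to the caller.
	 */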
	index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		return index;
	aac_enqueue_busy(cm);
	device = index;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index);
	return 0;
}

static int
aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

	index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		return index;
	aac_enqueue_busy(cm);
	device = index;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index);
	return 0;
}

/*
 * New comm. interface: get, set outbound queue index
 */
static int
aac_rx_get_outb_queue(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE));
}

static int
aac_rkt_get_outb_queue(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE));
}

static void
aac_rx_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index);
}

static void
aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index);
}

/*
 * Debugging and Diagnostics
 */

/*
 * Print some information about the controller.
 */
static void
aac_describe_controller(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_adapter_info *info;
	char *adapter_type = "Adaptec RAID controller";

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	fib->data[0] = 0;
	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	/* save the kernel revision structure for later use */
	info = (struct aac_adapter_info *)&fib->data[0];
	sc->aac_revision = info->KernelRevision;

	if (bootverbose) {
		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
		    "(%dMB cache, %dMB execution), %s\n",
		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
		    info->BufferMem / (1024 * 1024),
		    info->ExecutionMem / (1024 * 1024),
		    aac_describe_code(aac_battery_platform,
		    info->batteryPlatform));

		device_printf(sc->aac_dev,
		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
		    info->KernelRevision.external.comp.major,
		    info->KernelRevision.external.comp.minor,
		    info->KernelRevision.external.comp.dash,
		    info->KernelRevision.buildNumber,
		    (u_int32_t)(info->SerialNumber & 0xffffff));

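		/*
		 * The kernel's %b printf format is used below: the leading
		 * "\20" prints the value in hex and each following
		 * bit-number/name pair labels a set option bit.
		 */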
device_printf(sc->aac_dev, "Supported Options=%b\n",
2730
sc->supported_options,
2731
"\20"
2732
"\1SNAPSHOT"
2733
"\2CLUSTERS"
2734
"\3WCACHE"
2735
"\4DATA64"
2736
"\5HOSTTIME"
2737
"\6RAID50"
2738
"\7WINDOW4GB"
2739
"\10SCSIUPGD"
2740
"\11SOFTERR"
2741
"\12NORECOND"
2742
"\13SGMAP64"
2743
"\14ALARM"
2744
"\15NONDASD"
2745
"\16SCSIMGT"
2746
"\17RAIDSCSI"
2747
"\21ADPTINFO"
2748
"\22NEWCOMM"
2749
"\23ARRAY64BIT"
2750
"\24HEATSENSOR");
2751
}
2752
2753
if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2754
fib->data[0] = 0;
2755
if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2756
device_printf(sc->aac_dev,
2757
"RequestSupplementAdapterInfo failed\n");
2758
else
2759
adapter_type = ((struct aac_supplement_adapter_info *)
2760
&fib->data[0])->AdapterTypeText;
2761
}
2762
device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n",
2763
adapter_type,
2764
AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2765
AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2766
2767
aac_release_sync_fib(sc);
2768
mtx_unlock(&sc->aac_io_lock);
2769
}
2770
2771
/*
2772
* Look up a text description of a numeric error code and return a pointer to
2773
* same.
2774
*/
2775
static const char *
2776
aac_describe_code(const struct aac_code_lookup *table, u_int32_t code)
2777
{
2778
int i;
2779
2780
for (i = 0; table[i].string != NULL; i++)
2781
if (table[i].code == code)
2782
return(table[i].string);
2783
return(table[i + 1].string);
2784
}
2785
2786
/*
 * Management Interface
 */

static int
aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct aac_softc *sc;

	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	device_busy(sc->aac_dev);
	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);

	return 0;
}

static int
aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	union aac_statrequest *as;
	struct aac_softc *sc;
	int error = 0;

	as = (union aac_statrequest *)arg;
	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	switch (cmd) {
	case AACIO_STATS:
		switch (as->as_item) {
		case AACQ_FREE:
		case AACQ_BIO:
		case AACQ_READY:
		case AACQ_BUSY:
			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
			    sizeof(struct aac_qstat));
			break;
		default:
			error = ENOENT;
			break;
		}
		break;

	case FSACTL_SENDFIB:
	case FSACTL_SEND_LARGE_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_SENDFIB:
	case FSACTL_LNX_SEND_LARGE_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
		error = aac_ioctl_sendfib(sc, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_SEND_RAW_SRB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
		error = aac_ioctl_send_raw_srb(sc, arg);
		break;
	case FSACTL_AIF_THREAD:
	case FSACTL_LNX_AIF_THREAD:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
		error = EINVAL;
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
		error = aac_open_aif(sc, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
		error = aac_getnext_aif(sc, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
		error = aac_close_aif(sc, arg);
		break;
	case FSACTL_MINIPORT_REV_CHECK:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_MINIPORT_REV_CHECK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
		error = aac_rev_check(sc, arg);
		break;
	case FSACTL_QUERY_DISK:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_QUERY_DISK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
		error = aac_query_disk(sc, arg);
		break;
	case FSACTL_DELETE_DISK:
	case FSACTL_LNX_DELETE_DISK:
		/*
		 * We don't trust the underland to tell us when to delete a
		 * container, rather we rely on an AIF coming from the
		 * controller
		 */
		error = 0;
		break;
	case FSACTL_GET_PCI_INFO:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_PCI_INFO:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
		error = aac_get_pci_info(sc, arg);
		break;
	case FSACTL_GET_FEATURES:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_FEATURES:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
		error = aac_supported_features(sc, arg);
		break;
	default:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
		error = EINVAL;
		break;
	}
	return(error);
}

static int
aac_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct aac_softc *sc;
	struct aac_fib_context *ctx;
	int revents;

	sc = dev->si_drv1;
	revents = 0;

	mtx_lock(&sc->aac_aifq_lock);
	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
				revents |= poll_events & (POLLIN | POLLRDNORM);
				break;
			}
		}
	}
	mtx_unlock(&sc->aac_aifq_lock);

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &sc->rcv_select);
	}

	return (revents);
}

static void
aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
{

	switch (event->ev_type) {
	case AAC_EVENT_CMFREE:
		mtx_assert(&sc->aac_io_lock, MA_OWNED);
		if (aac_alloc_command(sc, (struct aac_command **)arg)) {
			aac_add_event(sc, event);
			return;
		}
		free(event, M_AACBUF);
		wakeup(arg);
		break;
	default:
		break;
	}
}

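/*
 * The ioctl paths below (aac_ioctl_sendfib() and aac_ioctl_send_raw_srb())
 * queue an AAC_EVENT_CMFREE event with ev_arg pointing at their local
 * command pointer when no command is available; the callback above then
 * allocates a command on their behalf and wakes up the event argument.
 */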
/*
 * Send a FIB supplied from userspace
 */
static int
aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
{
	struct aac_command *cm;
	int size, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	cm = NULL;

	/*
	 * Get a command
	 */
	mtx_lock(&sc->aac_io_lock);
	if (aac_alloc_command(sc, &cm)) {
		struct aac_event *event;

		event = malloc(sizeof(struct aac_event), M_AACBUF,
		    M_NOWAIT | M_ZERO);
		if (event == NULL) {
			error = EBUSY;
			mtx_unlock(&sc->aac_io_lock);
			goto out;
		}
		event->ev_type = AAC_EVENT_CMFREE;
		event->ev_callback = aac_ioctl_event;
		event->ev_arg = &cm;
		aac_add_event(sc, event);
		msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0);
	}
	mtx_unlock(&sc->aac_io_lock);

	/*
	 * Fetch the FIB header, then re-copy to get data as well.
	 */
	if ((error = copyin(ufib, cm->cm_fib,
	    sizeof(struct aac_fib_header))) != 0)
		goto out;
	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
		    size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
		goto out;
	cm->cm_fib->Header.Size = size;
	cm->cm_timestamp = time_uptime;

	/*
	 * Pass the FIB to the controller, wait for it to complete.
	 */
	mtx_lock(&sc->aac_io_lock);
	error = aac_wait_command(cm);
	mtx_unlock(&sc->aac_io_lock);
	if (error != 0) {
		device_printf(sc->aac_dev,
		    "aac_wait_command return %d\n", error);
		goto out;
	}

	/*
	 * Copy the FIB and data back out to the caller.
	 */
	size = cm->cm_fib->Header.Size;
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
		    size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	error = copyout(cm->cm_fib, ufib, size);

out:
	if (cm != NULL) {
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		mtx_unlock(&sc->aac_io_lock);
	}
	return(error);
}

/*
 * Send a passthrough FIB supplied from userspace
 */
static int
aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
{
	struct aac_command *cm;
	struct aac_event *event;
	struct aac_fib *fib;
	struct aac_srb *srbcmd, *user_srb;
	struct aac_sg_entry *sge;
	void *srb_sg_address, *ureply;
	uint32_t fibsize, srb_sg_bytecount;
	int error, transfer_data;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	cm = NULL;
	transfer_data = 0;
	fibsize = 0;
	user_srb = (struct aac_srb *)arg;

	mtx_lock(&sc->aac_io_lock);
	if (aac_alloc_command(sc, &cm)) {
		event = malloc(sizeof(struct aac_event), M_AACBUF,
		    M_NOWAIT | M_ZERO);
		if (event == NULL) {
			error = EBUSY;
			mtx_unlock(&sc->aac_io_lock);
			goto out;
		}
		event->ev_type = AAC_EVENT_CMFREE;
		event->ev_callback = aac_ioctl_event;
		event->ev_arg = &cm;
		aac_add_event(sc, event);
		msleep(cm, &sc->aac_io_lock, 0, "aacraw", 0);
	}
	mtx_unlock(&sc->aac_io_lock);

	cm->cm_data = NULL;
	fib = cm->cm_fib;
	srbcmd = (struct aac_srb *)fib->data;
	error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t));
	if (error != 0)
		goto out;
	if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) {
		error = EINVAL;
		goto out;
	}
	error = copyin(user_srb, srbcmd, fibsize);
	if (error != 0)
		goto out;
	srbcmd->function = 0;
	srbcmd->retry_limit = 0;
	if (srbcmd->sg_map.SgCount > 1) {
		error = EINVAL;
		goto out;
	}

	/* Retrieve correct SG entries. */
	if (fibsize == (sizeof(struct aac_srb) +
	    srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
		struct aac_sg_entry sg;

		sge = srbcmd->sg_map.SgEntry;

		if ((error = copyin(sge, &sg, sizeof(sg))) != 0)
			goto out;

		srb_sg_bytecount = sg.SgByteCount;
		srb_sg_address = (void *)(uintptr_t)sg.SgAddress;
	}
#ifdef __amd64__
	else if (fibsize == (sizeof(struct aac_srb) +
	    srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
		struct aac_sg_entry64 *sge64;
		struct aac_sg_entry64 sg;

		sge = NULL;
		sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;

		if ((error = copyin(sge64, &sg, sizeof(sg))) != 0)
			goto out;

		srb_sg_bytecount = sg.SgByteCount;
		srb_sg_address = (void *)sg.SgAddress;
		if (sge64->SgAddress > 0xffffffffull &&
		    (sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			error = EINVAL;
			goto out;
		}
	}
#endif
	else {
		error = EINVAL;
		goto out;
	}
	ureply = (char *)arg + fibsize;
	srbcmd->data_len = srb_sg_bytecount;
	if (srbcmd->sg_map.SgCount == 1)
		transfer_data = 1;

	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
	if (transfer_data) {
		cm->cm_datalen = srb_sg_bytecount;
		cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT);
		if (cm->cm_data == NULL) {
			error = ENOMEM;
			goto out;
		}
		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
			cm->cm_flags |= AAC_CMD_DATAIN;
		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
			cm->cm_flags |= AAC_CMD_DATAOUT;
			error = copyin(srb_sg_address, cm->cm_data,
			    cm->cm_datalen);
			if (error != 0)
				goto out;
		}
	}

	fib->Header.Size = sizeof(struct aac_fib_header) +
	    sizeof(struct aac_srb);
	fib->Header.XferState =
	    AAC_FIBSTATE_HOSTOWNED |
	    AAC_FIBSTATE_INITIALISED |
	    AAC_FIBSTATE_EMPTY |
	    AAC_FIBSTATE_FROMHOST |
	    AAC_FIBSTATE_REXPECTED |
	    AAC_FIBSTATE_NORM |
	    AAC_FIBSTATE_ASYNC |
	    AAC_FIBSTATE_FAST_RESPONSE;
	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ?
	    ScsiPortCommandU64 : ScsiPortCommand;

	mtx_lock(&sc->aac_io_lock);
	aac_wait_command(cm);
	mtx_unlock(&sc->aac_io_lock);

	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) {
		error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen);
		if (error != 0)
			goto out;
	}
	error = copyout(fib->data, ureply, sizeof(struct aac_srb_response));
out:
	if (cm != NULL) {
		if (cm->cm_data != NULL)
			free(cm->cm_data, M_AACBUF);
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		mtx_unlock(&sc->aac_io_lock);
	}
	return(error);
}

/*
 * cdevpriv interface private destructor.
 */
static void
aac_cdevpriv_dtor(void *arg)
{
	struct aac_softc *sc;

	sc = arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	device_unbusy(sc->aac_dev);
}

/*
 * Handle an AIF sent to us by the controller; queue it for later reference.
 * If the queue fills up, then drop the older entries.
 */
static void
aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
{
	struct aac_aif_command *aif;
	struct aac_container *co, *co_next;
	struct aac_fib_context *ctx;
	struct aac_mntinforesp *mir;
	int next, current, found;
	int count = 0, added = 0, i = 0;
	uint32_t channel;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	aif = (struct aac_aif_command*)&fib->data[0];
	aac_print_aif(sc, aif);

	/* Is it an event that we should care about? */
	switch (aif->command) {
	case AifCmdEventNotify:
		switch (aif->data.EN.type) {
		case AifEnAddContainer:
		case AifEnDeleteContainer:
			/*
			 * A container was added or deleted, but the message
			 * doesn't tell us anything else! Re-enumerate the
			 * containers and sort things out.
			 */
			aac_alloc_sync_fib(sc, &fib);
			do {
				/*
				 * Ask the controller for its containers one at
				 * a time.
				 * XXX What if the controller's list changes
				 * midway through this enumeration?
				 * XXX This should be done async.
				 */
				if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
					continue;
				if (i == 0)
					count = mir->MntRespCount;
				/*
				 * Check the container against our list.
				 * co->co_found was already set to 0 in a
				 * previous run.
				 */
				if ((mir->Status == ST_OK) &&
				    (mir->MntTable[0].VolType != CT_NONE)) {
					found = 0;
					TAILQ_FOREACH(co,
					    &sc->aac_container_tqh,
					    co_link) {
						if (co->co_mntobj.ObjectId ==
						    mir->MntTable[0].ObjectId) {
							co->co_found = 1;
							found = 1;
							break;
						}
					}
					/*
					 * If the container matched, continue
					 * in the list.
					 */
					if (found) {
						i++;
						continue;
					}

					/*
					 * This is a new container. Do all the
					 * appropriate things to set it up.
					 */
					aac_add_container(sc, mir, 1);
					added = 1;
				}
				i++;
			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
			aac_release_sync_fib(sc);

			/*
			 * Go through our list of containers and see which ones
			 * were not marked 'found'. Since the controller didn't
			 * list them they must have been deleted. Do the
			 * appropriate steps to destroy the device. Also reset
			 * the co->co_found field.
			 */
			co = TAILQ_FIRST(&sc->aac_container_tqh);
			while (co != NULL) {
				if (co->co_found == 0) {
					mtx_unlock(&sc->aac_io_lock);
					bus_topo_lock();
					device_delete_child(sc->aac_dev,
					    co->co_disk);
					bus_topo_unlock();
					mtx_lock(&sc->aac_io_lock);
					co_next = TAILQ_NEXT(co, co_link);
					mtx_lock(&sc->aac_container_lock);
					TAILQ_REMOVE(&sc->aac_container_tqh, co,
					    co_link);
					mtx_unlock(&sc->aac_container_lock);
					free(co, M_AACBUF);
					co = co_next;
				} else {
					co->co_found = 0;
					co = TAILQ_NEXT(co, co_link);
				}
			}

			/* Attach the newly created containers */
			if (added) {
				mtx_unlock(&sc->aac_io_lock);
				bus_topo_lock();
				bus_attach_children(sc->aac_dev);
				bus_topo_unlock();
				mtx_lock(&sc->aac_io_lock);
			}

			break;

		case AifEnEnclosureManagement:
			switch (aif->data.EN.data.EEE.eventType) {
			case AIF_EM_DRIVE_INSERTION:
			case AIF_EM_DRIVE_REMOVAL:
				channel = aif->data.EN.data.EEE.unitID;
				if (sc->cam_rescan_cb != NULL)
					sc->cam_rescan_cb(sc,
					    (channel >> 24) & 0xF,
					    (channel & 0xFFFF));
				break;
			}
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			channel = aif->data.EN.data.ECE.container;
			if (sc->cam_rescan_cb != NULL)
				sc->cam_rescan_cb(sc, (channel >> 24) & 0xF,
				    AAC_CAM_TARGET_WILDCARD);
			break;

		default:
			break;
		}

	default:
		break;
	}

	/* Copy the AIF data to the AIF queue for ioctl retrieval */
	mtx_lock(&sc->aac_aifq_lock);
	current = sc->aifq_idx;
	next = (current + 1) % AAC_AIFQ_LENGTH;
	if (next == 0)
		sc->aifq_filled = 1;
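	/*
	 * aac_aifq[] is a fixed ring of AAC_AIFQ_LENGTH FIBs indexed by
	 * aifq_idx; aifq_filled is set once the index has wrapped so that
	 * the per-context read pointers below can be pushed forward when
	 * they are about to be overtaken.
	 */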
	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
	/* modify AIF contexts */
	if (sc->aifq_filled) {
		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			if (next == ctx->ctx_idx)
				ctx->ctx_wrap = 1;
			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
				ctx->ctx_idx = next;
		}
	}
	sc->aifq_idx = next;
	/* On the off chance that someone is sleeping for an aif... */
	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
		wakeup(sc->aac_aifq);
	/* Wakeup any poll()ers */
	selwakeuppri(&sc->rcv_select, PRIBIO);
	mtx_unlock(&sc->aac_aifq_lock);
}

/*
 * Return the Revision of the driver to userspace and check to see if the
 * userspace app is possibly compatible. This is extremely bogus since
 * our driver doesn't follow Adaptec's versioning system. Cheat by just
 * returning what the card reported.
 */
static int
aac_rev_check(struct aac_softc *sc, caddr_t udata)
{
	struct aac_rev_check rev_check;
	struct aac_rev_check_resp rev_check_resp;
	int error = 0;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Copyin the revision struct from userspace
	 */
	if ((error = copyin(udata, (caddr_t)&rev_check,
	    sizeof(struct aac_rev_check))) != 0) {
		return error;
	}

	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
	    rev_check.callingRevision.buildNumber);

	/*
	 * Doctor up the response struct.
	 */
	rev_check_resp.possiblyCompatible = 1;
	rev_check_resp.adapterSWRevision.external.comp.major =
	    AAC_DRIVER_MAJOR_VERSION;
	rev_check_resp.adapterSWRevision.external.comp.minor =
	    AAC_DRIVER_MINOR_VERSION;
	rev_check_resp.adapterSWRevision.external.comp.type =
	    AAC_DRIVER_TYPE;
	rev_check_resp.adapterSWRevision.external.comp.dash =
	    AAC_DRIVER_BUGFIX_LEVEL;
	rev_check_resp.adapterSWRevision.buildNumber =
	    AAC_DRIVER_BUILD;

	return(copyout((caddr_t)&rev_check_resp, udata,
	    sizeof(struct aac_rev_check_resp)));
}

/*
 * Pass the fib context to the caller
 */
static int
aac_open_aif(struct aac_softc *sc, caddr_t arg)
{
	struct aac_fib_context *fibctx, *ctx;
	int error = 0;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO);
	if (fibctx == NULL)
		return (ENOMEM);

	mtx_lock(&sc->aac_aifq_lock);
	/* all elements are already 0, add to queue */
	if (sc->fibctx == NULL)
		sc->fibctx = fibctx;
	else {
		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
			;
		ctx->next = fibctx;
		fibctx->prev = ctx;
	}

	/* evaluate unique value */
	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
	ctx = sc->fibctx;
	while (ctx != fibctx) {
		if (ctx->unique == fibctx->unique) {
			fibctx->unique++;
			ctx = sc->fibctx;
		} else {
			ctx = ctx->next;
		}
	}
	mtx_unlock(&sc->aac_aifq_lock);

	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
	if (error)
		aac_close_aif(sc, (caddr_t)ctx);
	return error;
}

/*
 * Close the caller's fib context
 */
static int
aac_close_aif(struct aac_softc *sc, caddr_t arg)
{
	struct aac_fib_context *ctx;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_aifq_lock);
	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
		if (ctx->unique == *(uint32_t *)&arg) {
			if (ctx == sc->fibctx)
				sc->fibctx = NULL;
			else {
				ctx->prev->next = ctx->next;
				if (ctx->next)
					ctx->next->prev = ctx->prev;
			}
			break;
		}
	}
	mtx_unlock(&sc->aac_aifq_lock);
	if (ctx)
		free(ctx, M_AACBUF);

	return 0;
}

/*
 * Pass the caller the next AIF in their queue
 */
static int
aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
{
	struct get_adapter_fib_ioctl agf;
	struct aac_fib_context *ctx;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		struct get_adapter_fib_ioctl32 agf32;
		error = copyin(arg, &agf32, sizeof(agf32));
		if (error == 0) {
			agf.AdapterFibContext = agf32.AdapterFibContext;
			agf.Wait = agf32.Wait;
			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
		}
	} else
#endif
		error = copyin(arg, &agf, sizeof(agf));
	if (error == 0) {
		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			if (agf.AdapterFibContext == ctx->unique)
				break;
		}
		if (!ctx)
			return (EFAULT);

		error = aac_return_aif(sc, ctx, agf.AifFib);
		if (error == EAGAIN && agf.Wait) {
			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
			while (error == EAGAIN) {
				error = tsleep(sc->aac_aifq, PRIBIO |
				    PCATCH, "aacaif", 0);
				if (error == 0)
					error = aac_return_aif(sc, ctx, agf.AifFib);
			}
			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
		}
	}
	return(error);
}

/*
 * Hand the next AIF off the top of the queue out to userspace.
 */
static int
aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
{
	int current, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_aifq_lock);
	current = ctx->ctx_idx;
	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
		/* empty */
		mtx_unlock(&sc->aac_aifq_lock);
		return (EAGAIN);
	}
	error =
	    copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
	if (error)
		device_printf(sc->aac_dev,
		    "aac_return_aif: copyout returned %d\n", error);
	else {
		ctx->ctx_wrap = 0;
		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
	}
	mtx_unlock(&sc->aac_aifq_lock);
	return(error);
}

static int
aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_pci_info {
		u_int32_t bus;
		u_int32_t slot;
	} pciinf;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	pciinf.bus = pci_get_bus(sc->aac_dev);
	pciinf.slot = pci_get_slot(sc->aac_dev);

	error = copyout((caddr_t)&pciinf, uptr,
	    sizeof(struct aac_pci_info));

	return (error);
}

static int
aac_supported_features(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_features f;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
		return (error);

	/*
	 * When the management driver issues the FSACTL_GET_FEATURES ioctl
	 * with featuresState all zero, the driver returns the current state
	 * of all supported features; the data field is not valid in that
	 * case.
	 * When a specific bit is set in featuresState, the driver returns
	 * the current state of that feature, along with any data associated
	 * with it in the data field, or performs whatever action the data
	 * field indicates.
	 */
	if (f.feat.fValue == 0) {
		f.feat.fBits.largeLBA =
		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: In the future, add other features state here as well */
	} else {
		if (f.feat.fBits.largeLBA)
			f.feat.fBits.largeLBA =
			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: Add other features state and data in the future */
	}

	error = copyout(&f, uptr, sizeof (f));
	return (error);
}

/*
 * Give the userland some information about the container. The AAC arch
 * expects the driver to be a SCSI passthrough type driver, so it expects
 * the containers to have b:t:l numbers. Fake it.
 */
static int
aac_query_disk(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_query_disk query_disk;
	struct aac_container *co;
	struct aac_disk *disk;
	int error, id;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	disk = NULL;

	error = copyin(uptr, (caddr_t)&query_disk,
	    sizeof(struct aac_query_disk));
	if (error)
		return (error);

	id = query_disk.ContainerNumber;
	if (id == -1)
		return (EINVAL);

	mtx_lock(&sc->aac_container_lock);
	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
		if (co->co_mntobj.ObjectId == id)
			break;
	}

	if (co == NULL) {
		query_disk.Valid = 0;
		query_disk.Locked = 0;
		query_disk.Deleted = 1;	/* XXX is this right? */
	} else {
		disk = device_get_softc(co->co_disk);
		query_disk.Valid = 1;
		query_disk.Locked =
		    (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
		query_disk.Deleted = 0;
		query_disk.Bus = device_get_unit(sc->aac_dev);
		query_disk.Target = disk->unit;
		query_disk.Lun = 0;
		query_disk.UnMapped = 0;
		sprintf(&query_disk.diskDeviceName[0], "%s%d",
		    disk->ad_disk->d_name, disk->ad_disk->d_unit);
	}
	mtx_unlock(&sc->aac_container_lock);

	error = copyout((caddr_t)&query_disk, uptr,
	    sizeof(struct aac_query_disk));

	return (error);
}

static void
aac_get_bus_info(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_ctcfg *c_cmd;
	struct aac_ctcfg_resp *c_resp;
	struct aac_vmioctl *vmi;
	struct aac_vmi_businf_resp *vmi_resp;
	struct aac_getbusinf businfo;
	struct aac_sim *caminf;
	device_t child;
	int i, found, error;

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	c_cmd = (struct aac_ctcfg *)&fib->data[0];
	bzero(c_cmd, sizeof(struct aac_ctcfg));

	c_cmd->Command = VM_ContainerConfig;
	c_cmd->cmd = CT_GET_SCSI_METHOD;
	c_cmd->param = 0;

	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_ctcfg));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending "
		    "VM_ContainerConfig command\n", error);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
	if (c_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
		    c_resp->Status);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	sc->scsi_method_id = c_resp->param;

	vmi = (struct aac_vmioctl *)&fib->data[0];
	bzero(vmi, sizeof(struct aac_vmioctl));

	vmi->Command = VM_Ioctl;
	vmi->ObjType = FT_DRIVE;
	vmi->MethId = sc->scsi_method_id;
	vmi->ObjId = 0;
	vmi->IoctlCmd = GetBusInfo;

	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_vmi_businf_resp));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
		    error);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
	if (vmi_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
		    vmi_resp->Status);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	found = 0;
	for (i = 0; i < businfo.BusCount; i++) {
		if (businfo.BusValid[i] != AAC_BUS_VALID)
			continue;

		caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
		    M_AACBUF, M_NOWAIT | M_ZERO);
		if (caminf == NULL) {
			device_printf(sc->aac_dev,
			    "No memory to add passthrough bus %d\n", i);
			break;
		}

		child = device_add_child(sc->aac_dev, "aacp", DEVICE_UNIT_ANY);
		if (child == NULL) {
			device_printf(sc->aac_dev,
			    "device_add_child failed for passthrough bus %d\n",
			    i);
			free(caminf, M_AACBUF);
			break;
		}

		caminf->TargetsPerBus = businfo.TargetsPerBus;
		caminf->BusNumber = i;
		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
		caminf->aac_sc = sc;
		caminf->sim_dev = child;

		device_set_ivars(child, caminf);
		device_set_desc(child, "SCSI Passthrough Bus");
		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);

		found = 1;
	}

	if (found)
		bus_attach_children(sc->aac_dev);
}
