GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/aic7xxx/aic79xx_osm.c
/*-
 * Bus independent FreeBSD shim for the aic79xx based Adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#35 $
 */

#include <dev/aic7xxx/aic79xx_osm.h>
#include <dev/aic7xxx/aic79xx_inline.h>

#include <sys/kthread.h>

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif

#include <dev/aic7xxx/aic_osm_lib.c>

#define ccb_scb_ptr spriv_ptr0

#if 0
static void	ahd_dump_targcmd(struct target_cmd *cmd);
#endif
static int	ahd_modevent(module_t mod, int type, void *data);
static void	ahd_action(struct cam_sim *sim, union ccb *ccb);
static void	ahd_set_tran_settings(struct ahd_softc *ahd,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahd_get_tran_settings(struct ahd_softc *ahd,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahd_async(void *callback_arg, uint32_t code,
			  struct cam_path *path, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	ahd_poll(struct cam_sim *sim);
static void	ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
			       struct ccb_scsiio *csio, struct scb *scb);
static void	ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
			      union ccb *ccb);
static int	ahd_create_path(struct ahd_softc *ahd,
				char channel, u_int target, u_int lun,
				struct cam_path **path);

static const char *ahd_sysctl_node_elements[] = {
	"root",
	"summary",
	"debug"
};

#ifndef NO_SYSCTL_DESCR
static const char *ahd_sysctl_node_descriptions[] = {
	"root error collection for aic79xx controllers",
	"summary collection for aic79xx controllers",
	"debug collection for aic79xx controllers"
};
#endif

static const char *ahd_sysctl_errors_elements[] = {
	"Cerrors",
	"Uerrors",
	"Ferrors"
};

#ifndef NO_SYSCTL_DESCR
static const char *ahd_sysctl_errors_descriptions[] = {
	"Correctable errors",
	"Uncorrectable errors",
	"Fatal errors"
};
#endif

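/*
 * Sysctl handlers used below: the "debug counter" handler stores a
 * user supplied value in the summerr[] slot selected by arg2, and the
 * "clear" handler zeroes every counter when a non-zero value is
 * written to it.
 */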
static int
ahd_set_debugcounters(SYSCTL_HANDLER_ARGS)
{
	struct ahd_softc *sc;
	int error, tmpv;

	tmpv = 0;
	sc = arg1;
	error = sysctl_handle_int(oidp, &tmpv, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (tmpv < 0 || tmpv >= AHD_ERRORS_NUMBER)
		return (EINVAL);
	sc->summerr[arg2] = tmpv;
	return (0);
}

static int
ahd_clear_allcounters(SYSCTL_HANDLER_ARGS)
{
	struct ahd_softc *sc;
	int error, tmpv;

	tmpv = 0;
	sc = arg1;
	error = sysctl_handle_int(oidp, &tmpv, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (tmpv != 0)
		bzero(sc->summerr, sizeof(sc->summerr));
	return (0);
}

static int
ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
	        u_int lun, struct cam_path **path)
{
	path_id_t path_id;

	path_id = cam_sim_path(ahd->platform_data->sim);
	return (xpt_create_path(path, /*periph*/NULL,
				path_id, target, lun));
}

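/*
 * Build the sysctl tree for this controller: a root node named after
 * the device, a "clear" knob wired to ahd_clear_allcounters(), and
 * summary/debug child nodes exposing the correctable, uncorrectable
 * and fatal error counters.
 */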
void
ahd_sysctl(struct ahd_softc *ahd)
{
	u_int i;

	for (i = 0; i < AHD_SYSCTL_NUMBER; i++)
		sysctl_ctx_init(&ahd->sysctl_ctx[i]);

	ahd->sysctl_tree[AHD_SYSCTL_ROOT] =
	    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
			    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
			    device_get_nameunit(ahd->dev_softc),
			    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
			    ahd_sysctl_node_descriptions[AHD_SYSCTL_ROOT]);
	SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
	    SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]), OID_AUTO,
	    "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ahd,
	    0, ahd_clear_allcounters, "IU", "Clear all counters");

	for (i = AHD_SYSCTL_SUMMARY; i < AHD_SYSCTL_NUMBER; i++)
		ahd->sysctl_tree[i] =
		    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[i],
				    SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]),
				    OID_AUTO, ahd_sysctl_node_elements[i],
				    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
				    ahd_sysctl_node_descriptions[i]);

	for (i = AHD_ERRORS_CORRECTABLE; i < AHD_ERRORS_NUMBER; i++) {
		SYSCTL_ADD_UINT(&ahd->sysctl_ctx[AHD_SYSCTL_SUMMARY],
				SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_SUMMARY]),
				OID_AUTO, ahd_sysctl_errors_elements[i],
				CTLFLAG_RD, &ahd->summerr[i], i,
				ahd_sysctl_errors_descriptions[i]);
		SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_DEBUG],
		    SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_DEBUG]),
		    OID_AUTO, ahd_sysctl_errors_elements[i],
		    CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, ahd, i,
		    ahd_set_debugcounters, "IU",
		    ahd_sysctl_errors_descriptions[i]);
	}
}

int
ahd_map_int(struct ahd_softc *ahd)
{
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
			       INTR_TYPE_CAM|INTR_MPSAFE, NULL,
			       ahd_platform_intr, ahd, &ahd->platform_data->ih);
	if (error != 0)
		device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
			      error);
	return (error);
}

/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	char ahd_info[256];
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;
	int count;

	count = 0;
	devq = NULL;
	sim = NULL;
	path = NULL;

	/*
	 * Create a thread to perform all recovery.
	 */
	if (ahd_spawn_recovery_thread(ahd) != 0)
		goto fail;

	ahd_controller_info(ahd, ahd_info);
	printf("%s\n", ahd_info);
	ahd_lock(ahd);

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(AHD_MAX_QUEUE);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our SIM entry
	 */
	sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
			    device_get_unit(ahd->dev_softc),
			    &ahd->platform_data->mtx, 1, /*XXX*/256, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	if (xpt_bus_register(sim, ahd->dev_softc, /*bus_id*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahd_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

fail:
	ahd->platform_data->sim = sim;
	ahd->platform_data->path = path;
	ahd_unlock(ahd);
	if (count != 0) {
		/* We have to wait until after any system dumps... */
		ahd->platform_data->eh =
		    EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
					  ahd, SHUTDOWN_PRI_DEFAULT);
		ahd_intr_enable(ahd, TRUE);
	}

	return (count);
}

/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;
	ahd_lock(ahd);
	ahd_intr(ahd);
	ahd_unlock(ahd);
}

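/*
 * Synchronize the data buffer DMA map for a transaction before
 * (post == false) or after (post == true) the transfer, choosing the
 * READ or WRITE sync op from the CCB's data direction (which is
 * interpreted in the opposite sense for target mode continue I/O).
 */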
static void
ahd_sync_ccb(struct ahd_softc *ahd, struct scb *scb, union ccb *ccb, bool post)
{
	bus_dmasync_op_t op;
	uint32_t rdmask;

	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
		rdmask = CAM_DIR_OUT;
	else
		rdmask = CAM_DIR_IN;

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == rdmask)
		op = post ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_PREREAD;
	else
		op = post ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
}

/*
 * We have an scb which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_TIMEDOUT) != 0)
		LIST_REMOVE(scb, timedout_links);

	callout_stop(&scb->io_timer);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		ahd_sync_ccb(ahd, scb, ccb, true);
		bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
	}

#ifdef AHD_TARGET_MODE
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states in which
		 * we will remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahd->pending_device != NULL
		 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahd->pending_device = NULL;
			} else {
				xpt_print_path(ccb->ccb_h.path);
				printf("Still disconnected\n");
				ahd_freeze_ccb(ccb);
			}
		}

		if (aic_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}
#endif

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		ahd->scb_data.recovery_scbs--;

		if (aic_get_transaction_status(scb) == CAM_BDR_SENT
		 || aic_get_transaction_status(scb) == CAM_REQ_ABORTED)
			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);

		if (ahd->scb_data.recovery_scbs == 0) {
			/*
			 * All recovery actions have completed successfully,
			 * so reinstate the timeouts for all other pending
			 * commands.
			 */
			LIST_FOREACH(list_scb,
				     &ahd->pending_scbs, pending_links) {
				aic_scb_timer_reset(list_scb,
						    aic_get_timeout(scb));
			}

			ahd_print_path(ahd, scb);
			printf("no longer in timeout, status = %x\n",
			       ccb->ccb_h.status);
		}
	}

	/* Don't clobber any existing error state */
	if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device. The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero. Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahd_get_sense_buf(ahd, scb),
		       /* XXX What size do we want to use??? */
		       sizeof(ccb->csio.sense_data)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;

		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
				sizeof(ccb->csio.sense_data));
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
		       sense_len);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
			uint8_t *sense_data = (uint8_t *)&ccb->csio.sense_data;
			u_int i;

			printf("Copied %d bytes of sense data offset %d:",
			       sense_len, SIU_SENSE_OFFSET(siu));
			for (i = 0; i < sense_len; i++)
				printf(" 0x%x", *sense_data++);
			printf("\n");
		}
#endif
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahd_free_scb(ahd, scb);
	xpt_done(ccb);
}

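/*
 * CAM SIM action entry point.  Dispatch on the CCB function code:
 * build and map SCBs for SCSI I/O and device resets, service transfer
 * setting, geometry, bus reset, abort and path inquiry requests, and,
 * when target mode is compiled in, queue accept/continue target I/O
 * and immediate notify CCBs.
 */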
static void
ahd_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ahd_softc *ahd;
#ifdef AHD_TARGET_MODE
	struct ahd_tmode_lstate *lstate;
#endif
	u_int target_id;
	u_int our_id;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));

	ahd = (struct ahd_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahd, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
#ifdef AHD_TARGET_MODE
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	{
		struct ahd_tmode_tstate *tstate;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahd->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
				ahd_run_tqinfifo(ahd, /*paused*/FALSE);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select. In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
#endif
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct scb *scb;
		struct hardware_scb *hscb;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;
		u_int col_idx;

		if ((ahd->flags & AHD_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
					    target_id, &tstate);
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
		 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			col_idx = AHD_NEVER_COL_IDX;
		} else {
			col_idx = AHD_BUILD_COL_IDX(target_id,
						    ccb->ccb_h.target_lun);
		}
		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			xpt_freeze_simq(sim, /*count*/1);
			ahd->flags |= AHD_RESOURCE_SHORTAGE;
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0, 0);
		} else {
#ifdef AHD_TARGET_MODE
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahd->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				tdata->target_phases = 0;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag =
				    ahd_htole16(ccb->csio.tag_id);
			}
#endif
			hscb->task_management = 0;
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			ahd_setup_data(ahd, sim, &ccb->csio, scb);
		}
		break;
	}
#ifdef AHD_TARGET_MODE
	case XPT_NOTIFY_ACKNOWLEDGE:
	case XPT_IMMEDIATE_NOTIFY:
	{
		struct ahd_tmode_tstate *tstate;
		struct ahd_tmode_lstate *lstate;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahd_send_lstate_events(ahd, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahd_handle_en_lun(ahd, sim, ccb);
		xpt_done(ccb);
		break;
#endif
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahd_abort_ccb(ahd, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
				      SIM_CHANNEL(ahd, sim), &ccb->cts);
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
				      SIM_CHANNEL(ahd, sim), &ccb->cts);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		aic_calc_geometry(&ccb->ccg, ahd->flags & AHD_EXTENDED_TRANS_A);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int found;

		found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
					  /*initiate reset*/TRUE);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahd, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahd->features & AHD_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahd->features & AHD_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
		cpi->max_lun = AHD_NUM_LUNS_NONPKT - 1;
		cpi->initiator_id = ahd->our_id;
		if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
			cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 4;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST
						    | SID_SPI_IUS
						    | SID_SPI_QAS;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}

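/*
 * Handle XPT_SET_TRAN_SETTINGS: update the disconnection and tag
 * queueing enables and negotiate bus width, sync period/offset and
 * PPR options for either the goal (current) or user settings, clamped
 * to what the controller supports.
 */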
static void
ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
	struct ahd_devinfo devinfo;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_spi *spi;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	uint16_t *discenable;
	uint16_t *tagenable;
	u_int update_type;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    SIM_CHANNEL(ahd, sim),
			    ROLE_UNKNOWN);
	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
				    devinfo.our_scsiid,
				    devinfo.target, &tstate);
	update_type = 0;
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
		update_type |= AHD_TRANS_GOAL;
		discenable = &tstate->discenable;
		tagenable = &tstate->tagenable;
		tinfo->curr.protocol_version = cts->protocol_version;
		tinfo->curr.transport_version = cts->transport_version;
		tinfo->goal.protocol_version = cts->protocol_version;
		tinfo->goal.transport_version = cts->transport_version;
	} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
		update_type |= AHD_TRANS_USER;
		discenable = &ahd->user_discenable;
		tagenable = &ahd->user_tagenable;
		tinfo->user.protocol_version = cts->protocol_version;
		tinfo->user.transport_version = cts->transport_version;
	} else {
		cts->ccb_h.status = CAM_REQ_INVALID;
		return;
	}

	if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
			*discenable |= devinfo.target_mask;
		else
			*discenable &= ~devinfo.target_mask;
	}

	if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
			*tagenable |= devinfo.target_mask;
		else
			*tagenable &= ~devinfo.target_mask;
	}

	if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
		ahd_validate_width(ahd, /*tinfo limit*/NULL,
				   &spi->bus_width, ROLE_UNKNOWN);
		ahd_set_width(ahd, &devinfo, spi->bus_width,
			      update_type, /*paused*/FALSE);
	}

	if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->ppr_options = tinfo->user.ppr_options;
		else
			spi->ppr_options = tinfo->goal.ppr_options;
	}

	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->sync_offset = tinfo->user.offset;
		else
			spi->sync_offset = tinfo->goal.offset;
	}

	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->sync_period = tinfo->user.period;
		else
			spi->sync_period = tinfo->goal.period;
	}

	if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
	 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
		u_int maxsync;

		maxsync = AHD_SYNCRATE_MAX;

		if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((*discenable & devinfo.target_mask) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		ahd_find_syncrate(ahd, &spi->sync_period,
				  &spi->ppr_options, maxsync);
		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
				    spi->sync_period, &spi->sync_offset,
				    spi->bus_width, ROLE_UNKNOWN);

		/* We use a period of 0 to represent async */
		if (spi->sync_offset == 0) {
			spi->sync_period = 0;
			spi->ppr_options = 0;
		}

		ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
				 spi->sync_offset, spi->ppr_options,
				 update_type, /*paused*/FALSE);
	}
	cts->ccb_h.status = CAM_REQ_CMP;
}

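/*
 * Handle XPT_GET_TRAN_SETTINGS: report the current or user transfer
 * settings (sync rate, offset, width, PPR options, disconnection and
 * tag queueing) for the addressed target.
 */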
static void
ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
	struct ahd_devinfo devinfo;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_spi *spi;
	struct ahd_initiator_tinfo *targ_info;
	struct ahd_tmode_tstate *tstate;
	struct ahd_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahd_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahd->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
}

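/*
 * CAM async event callback.  On AC_LOST_DEVICE, revert the departed
 * device's negotiation back to async, narrow transfers.
 */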
static void
ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ahd_softc *ahd;
	struct cam_sim *sim;

	sim = (struct cam_sim *)callback_arg;
	ahd = (struct ahd_softc *)cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
	{
		struct ahd_devinfo devinfo;

		ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
				    xpt_path_target_id(path),
				    xpt_path_lun_id(path),
				    SIM_CHANNEL(ahd, sim),
				    ROLE_UNKNOWN);

		/*
		 * Revert to async/narrow transfers
		 * for the next device.
		 */
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
				 /*paused*/FALSE);
		break;
	}
	default:
		break;
	}
}

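/*
 * bus_dma callback invoked once the data for an SCB has been mapped.
 * Build the S/G list, sync the buffers, apply disconnect, tagged
 * queueing and negotiation settings to the hardware SCB, and queue it
 * to the controller (or kick the sequencer directly for immediate
 * target mode SCBs).
 */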
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct scb *scb;
	union ccb *ccb;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahd = scb->ahd_softc;

	if (error != 0) {
		if (error == EFBIG)
			aic_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			aic_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}
	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		u_int i;

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
					  dm_segs->ds_len,
					  /*last*/i == 1);
			dm_segs++;
		}

		ahd_sync_ccb(ahd, scb, ccb, false);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;
		}
	}

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->buffer_dmat,
					  scb->dmamap);
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.period != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	aic_scb_timer_start(scb);

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

}

static void
ahd_poll(struct cam_sim *sim)
{
	ahd_intr(cam_sim_softc(sim));
}

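/*
 * Copy the CDB into the hardware SCB (or reference it by physical
 * address) and start the bus_dma mapping of the data buffer; the
 * request is finished off in ahd_execute_scb() once the mapping
 * callback fires.
 */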
static void
ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;
	int error;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
			if (hscb->cdb_len > MAX_CDB_LEN
			 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {
				/*
				 * Should CAM start to support CDB sizes
				 * greater than 16 bytes, we could use
				 * the sense buffer to store the CDB.
				 */
				aic_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahd_free_scb(ahd, scb);
				xpt_done((union ccb *)csio);
				return;
			}
			if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
				hscb->shared_data.idata.cdb_from_host.cdbptr =
				    aic_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
				hscb->shared_data.idata.cdb_from_host.cdblen =
				    csio->cdb_len;
				hscb->cdb_len |= SCB_CDB_LEN_PTR;
			} else {
				memcpy(hscb->shared_data.idata.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			if (hscb->cdb_len > MAX_CDB_LEN) {
				aic_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahd_free_scb(ahd, scb);
				xpt_done((union ccb *)csio);
				return;
			}
			memcpy(hscb->shared_data.idata.cdb,
			       csio->cdb_io.cdb_bytes, hscb->cdb_len);
		}
	}

	error = bus_dmamap_load_ccb(ahd->buffer_dmat,
				    scb->dmamap,
				    (union ccb *)csio,
				    ahd_execute_scb,
				    scb, /*flags*/0);
	if (error == EINPROGRESS) {
		/*
		 * So as to maintain ordering, freeze the controller queue
		 * until our mapping is returned.
		 */
		xpt_freeze_simq(sim, /*count*/1);
		scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
	}
}

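/*
 * Handle XPT_ABORT.  Target mode accept/notify CCBs that are still
 * queued internally are removed and completed with CAM_REQ_ABORTED;
 * aborting an active SCSI I/O is not yet implemented.
 */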
static void
ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
#ifdef AHD_TARGET_MODE
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahd_tmode_tstate *tstate;
		struct ahd_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
#endif
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

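/*
 * Deliver an async event (transfer negotiation change, BDR, or bus
 * reset) to CAM for the given target, building a temporary path and,
 * for AC_TRANSFER_NEG, a ccb_trans_settings describing the newly
 * negotiated parameters.
 */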
void
ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
	       u_int lun, ac_code code, void *opt_arg)
{
	struct ccb_trans_settings cts;
	struct cam_path *path;
	void *arg;
	int error;

	arg = NULL;
	error = ahd_create_path(ahd, channel, target, lun, &path);

	if (error != CAM_REQ_CMP)
		return;

	switch (code) {
	case AC_TRANSFER_NEG:
	{
		struct ccb_trans_settings_scsi *scsi;

		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		scsi = &cts.proto_specific.scsi;
		cts.ccb_h.path = path;
		cts.ccb_h.target_id = target;
		cts.ccb_h.target_lun = lun;
		ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
		arg = &cts;
		scsi->valid &= ~CTS_SCSI_VALID_TQ;
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		if (opt_arg == NULL)
			break;
		if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		scsi->valid |= CTS_SCSI_VALID_TQ;
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
	xpt_async(code, path, arg);
	xpt_free_path(path);
}

void
ahd_platform_set_tags(struct ahd_softc *ahd,
		      struct ahd_devinfo *devinfo, int enable)
{
}

int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (ahd->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

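/*
 * Release all FreeBSD specific resources: register windows, the IRQ,
 * our CAM SIM and path registrations, and the shutdown event handler.
 */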
void
ahd_platform_free(struct ahd_softc *ahd)
{
	struct ahd_platform_data *pdata;

	pdata = ahd->platform_data;
	if (pdata != NULL) {
		if (pdata->regs[0] != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->regs_res_type[0],
					     pdata->regs_res_id[0],
					     pdata->regs[0]);

		if (pdata->regs[1] != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->regs_res_type[1],
					     pdata->regs_res_id[1],
					     pdata->regs[1]);

		if (pdata->irq != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahd->platform_data, M_DEVBUF);
	}
}

int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under FreeBSD, so always report equal. */
	return (0);
}

int
ahd_detach(device_t dev)
{
	struct ahd_softc *ahd;

	device_printf(dev, "detaching device\n");
	ahd = device_get_softc(dev);
	ahd_lock(ahd);
	TAILQ_REMOVE(&ahd_tailq, ahd, links);
	ahd_intr_enable(ahd, FALSE);
	bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
	ahd_unlock(ahd);
	ahd_free(ahd);
	return (0);
}

#if 0
static void
ahd_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *byte;
	uint8_t *last_byte;
	int i;

	byte = &cmd->initiator_channel;
	/* Debugging info for received commands */
	last_byte = &cmd[1].initiator_channel;

	i = 0;
	while (byte < last_byte) {
		if (i == 0)
			printf("\t");
		printf("%#x", *byte++);
		i++;
		if (i == 8) {
			printf("\n");
			i = 0;
		} else {
			printf(", ");
		}
	}
}
#endif

static int
ahd_modevent(module_t mod, int type, void *data)
{
	/* XXX Deal with busy status on unload. */
	/* XXX Deal with unknown events */
	return 0;
}

static moduledata_t ahd_mod = {
	"ahd",
	ahd_modevent,
	NULL
};

/********************************** DDB Hooks *********************************/
#ifdef DDB
static struct ahd_softc *ahd_ddb_softc;
static int ahd_ddb_paused;
static int ahd_ddb_paused_on_entry;
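/*
 * The ddb commands below operate on a single controller selected with
 * "ahd_sunit <unit>".  ahd_pause/ahd_unpause stop and restart the
 * sequencer, ahd_in/ahd_out read and write chip registers, and
 * ahd_dump prints the full card state.
 */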
DB_COMMAND(ahd_sunit, ahd_ddb_sunit)
{
	struct ahd_softc *list_ahd;

	ahd_ddb_softc = NULL;
	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
		if (list_ahd->unit == addr)
			ahd_ddb_softc = list_ahd;
	}
	if (ahd_ddb_softc == NULL)
		db_error("No matching softc found!\n");
}

DB_COMMAND(ahd_pause, ahd_ddb_pause)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (ahd_ddb_paused == 0) {
		ahd_ddb_paused++;
		if (ahd_is_paused(ahd_ddb_softc)) {
			ahd_ddb_paused_on_entry++;
			return;
		}
		ahd_pause(ahd_ddb_softc);
	}
}

DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (ahd_ddb_paused != 0) {
		ahd_ddb_paused = 0;
		if (ahd_ddb_paused_on_entry)
			return;
		ahd_unpause(ahd_ddb_softc);
	} else if (ahd_ddb_paused_on_entry != 0) {
		/* Two unpauses to clear a paused on entry. */
		ahd_ddb_paused_on_entry = 0;
		ahd_unpause(ahd_ddb_softc);
	}
}

DB_COMMAND(ahd_in, ahd_ddb_in)
{
	int c;
	int size;

	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (have_addr == 0)
		return;

	size = 1;
	while ((c = *modif++) != '\0') {
		switch (c) {
		case 'b':
			size = 1;
			break;
		case 'w':
			size = 2;
			break;
		case 'l':
			size = 4;
			break;
		}
	}

	if (count <= 0)
		count = 1;
	while (--count >= 0) {
		db_printf("%04lx (M)%x: \t", (u_long)addr,
			  ahd_inb(ahd_ddb_softc, MODE_PTR));
		switch (size) {
		case 1:
			db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
			break;
		case 2:
			db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
			break;
		case 4:
			db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
			break;
		}
	}
}

DB_COMMAND_FLAGS(ahd_out, ahd_ddb_out, CS_MORE)
{
	db_expr_t old_value;
	db_expr_t new_value;
	int size;

	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}

	switch (modif[0]) {
	case '\0':
	case 'b':
		size = 1;
		break;
	case 'h':
		size = 2;
		break;
	case 'l':
		size = 4;
		break;
	default:
		db_error("Unknown size\n");
		return;
	}

	while (db_expression(&new_value)) {
		switch (size) {
		default:
		case 1:
			old_value = ahd_inb(ahd_ddb_softc, addr);
			ahd_outb(ahd_ddb_softc, addr, new_value);
			break;
		case 2:
			old_value = ahd_inw(ahd_ddb_softc, addr);
			ahd_outw(ahd_ddb_softc, addr, new_value);
			break;
		case 4:
			old_value = ahd_inl(ahd_ddb_softc, addr);
			ahd_outl(ahd_ddb_softc, addr, new_value);
			break;
		}
		db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx",
			  (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
			  (u_long)old_value, (u_long)new_value);
		addr += size;
	}
	db_skip_to_eol();
}

DB_COMMAND(ahd_dump, ahd_ddb_dump)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	ahd_dump_card_state(ahd_ddb_softc);
}

#endif

DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(ahd, cam, 1, 1, 1);
MODULE_VERSION(ahd, 1);