Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/aic7xxx/aic7xxx.c
39507 views
1
/*-
2
* Core routines and tables shareable across OS platforms.
3
*
4
* SPDX-License-Identifier: BSD-3-Clause
5
*
6
* Copyright (c) 1994-2002 Justin T. Gibbs.
7
* Copyright (c) 2000-2002 Adaptec Inc.
8
* All rights reserved.
9
*
10
* Redistribution and use in source and binary forms, with or without
11
* modification, are permitted provided that the following conditions
12
* are met:
13
* 1. Redistributions of source code must retain the above copyright
14
* notice, this list of conditions, and the following disclaimer,
15
* without modification.
16
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
17
* substantially similar to the "NO WARRANTY" disclaimer below
18
* ("Disclaimer") and any redistribution must be conditioned upon
19
* including a substantially similar Disclaimer requirement for further
20
* binary redistribution.
21
* 3. Neither the names of the above-listed copyright holders nor the names
22
* of any contributors may be used to endorse or promote products derived
23
* from this software without specific prior written permission.
24
*
25
* Alternatively, this software may be distributed under the terms of the
26
* GNU General Public License ("GPL") version 2 as published by the Free
27
* Software Foundation.
28
*
29
* NO WARRANTY
30
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
33
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
34
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
39
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40
* POSSIBILITY OF SUCH DAMAGES.
41
*
42
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
43
*/
44
45
#include <dev/aic7xxx/aic7xxx_osm.h>
46
#include <dev/aic7xxx/aic7xxx_inline.h>
47
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
48
49
/****************************** Softc Data ************************************/
50
struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
51
52
/***************************** Lookup Tables **********************************/
53
/*
 * Printable controller chip names.
 * NOTE(review): entries appear to be indexed by the softc's chip type
 * code, so the order must match the chip enumeration in the aic7xxx
 * headers — confirm before adding or reordering entries.
 */
char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};
70
71
/*
 * Hardware error codes.
 * ahc_handle_brkadrint() shifts the ERROR register right until it hits
 * the set bit, so table order presumably matches ERROR bit positions —
 * confirm against the register definitions before reordering.
 */
struct ahc_hard_error_entry {
	uint8_t errno;	/* error code; NOTE(review): name shadows libc
			 * 'errno' — safe only where errno is not a macro
			 * (kernel builds); verify before userland reuse. */
	char *errmesg;	/* printable description of the error */
};

static struct ahc_hard_error_entry ahc_hard_errors[] = {
	{ ILLHADDR, "Illegal Host Access" },
	{ ILLSADDR, "Illegal Sequencer Address referrenced" },
	{ ILLOPCODE, "Illegal Opcode in sequencer program" },
	{ SQPARERR, "Sequencer Parity Error" },
	{ DPARERR, "Data-path Parity Error" },
	{ MPARERR, "Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT, "PCI Error detected" },
	{ CIOPARERR, "CIOBUS Parity Error" },
};
/* Number of entries in ahc_hard_errors. */
static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
90
91
/*
 * SCSI bus phase lookup table: maps each phase value to a printable
 * description used in diagnostics.
 * NOTE(review): the middle column looks like the message to issue when
 * an error is detected in that phase (MSG_INITIATOR_DET_ERR for
 * inbound data/status, MSG_PARITY_ERROR for message-in) — confirm
 * against the ahc_phase_table_entry definition.
 */
static struct ahc_phase_table_entry ahc_phase_table[] =
{
	{ P_DATAOUT, MSG_NOOP, "in Data-out phase" },
	{ P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
	{ P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
	{ P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
	{ P_COMMAND, MSG_NOOP, "in Command phase" },
	{ P_MESGOUT, MSG_NOOP, "in Message-out phase" },
	{ P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
	{ P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
	{ P_BUSFREE, MSG_NOOP, "while idle" },
	{ 0, MSG_NOOP, "in unknown phase" }
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last (catch-all "unknown phase") element from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
110
111
/*
 * Valid SCSIRATE values. (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.
 * NOTE(review): columns are presumably the Ultra2 rate code, the
 * fast/ultra rate code, the period factor, and the printable transfer
 * rate — confirm against struct ahc_syncrate.  The final all-zero/NULL
 * entry terminates the table.
 */
static struct ahc_syncrate ahc_syncrates[] =
{
	/* ultra2 fast/ultra period rate */
	{ 0x42, 0x000, 9, "80.0" },
	{ 0x03, 0x000, 10, "40.0" },
	{ 0x04, 0x000, 11, "33.0" },
	{ 0x05, 0x100, 12, "20.0" },
	{ 0x06, 0x110, 15, "16.0" },
	{ 0x07, 0x120, 18, "13.4" },
	{ 0x08, 0x000, 25, "10.0" },
	{ 0x19, 0x010, 31, "8.0" },
	{ 0x1a, 0x020, 37, "6.67" },
	{ 0x1b, 0x030, 43, "5.7" },
	{ 0x1c, 0x040, 50, "5.0" },
	{ 0x00, 0x050, 56, "4.4" },
	{ 0x00, 0x060, 62, "4.0" },
	{ 0x00, 0x070, 68, "3.6" },
	{ 0x00, 0x000, 0, NULL }
};
135
136
/* Our Sequencer Program */
137
#include "aic7xxx_seq.h"
138
139
/**************************** Function Declarations ***************************/
140
static void ahc_force_renegotiation(struct ahc_softc *ahc,
141
struct ahc_devinfo *devinfo);
142
static struct ahc_tmode_tstate*
143
ahc_alloc_tstate(struct ahc_softc *ahc,
144
u_int scsi_id, char channel);
145
#ifdef AHC_TARGET_MODE
146
static void ahc_free_tstate(struct ahc_softc *ahc,
147
u_int scsi_id, char channel, int force);
148
#endif
149
static struct ahc_syncrate*
150
ahc_devlimited_syncrate(struct ahc_softc *ahc,
151
struct ahc_initiator_tinfo *,
152
u_int *period,
153
u_int *ppr_options,
154
role_t role);
155
static void ahc_update_pending_scbs(struct ahc_softc *ahc);
156
static void ahc_fetch_devinfo(struct ahc_softc *ahc,
157
struct ahc_devinfo *devinfo);
158
static void ahc_scb_devinfo(struct ahc_softc *ahc,
159
struct ahc_devinfo *devinfo,
160
struct scb *scb);
161
static void ahc_assert_atn(struct ahc_softc *ahc);
162
static void ahc_setup_initiator_msgout(struct ahc_softc *ahc,
163
struct ahc_devinfo *devinfo,
164
struct scb *scb);
165
static void ahc_build_transfer_msg(struct ahc_softc *ahc,
166
struct ahc_devinfo *devinfo);
167
static void ahc_construct_sdtr(struct ahc_softc *ahc,
168
struct ahc_devinfo *devinfo,
169
u_int period, u_int offset);
170
static void ahc_construct_wdtr(struct ahc_softc *ahc,
171
struct ahc_devinfo *devinfo,
172
u_int bus_width);
173
static void ahc_construct_ppr(struct ahc_softc *ahc,
174
struct ahc_devinfo *devinfo,
175
u_int period, u_int offset,
176
u_int bus_width, u_int ppr_options);
177
static void ahc_clear_msg_state(struct ahc_softc *ahc);
178
static void ahc_handle_proto_violation(struct ahc_softc *ahc);
179
static void ahc_handle_message_phase(struct ahc_softc *ahc);
180
typedef enum {
181
AHCMSG_1B,
182
AHCMSG_2B,
183
AHCMSG_EXT
184
} ahc_msgtype;
185
static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
186
u_int msgval, int full);
187
static int ahc_parse_msg(struct ahc_softc *ahc,
188
struct ahc_devinfo *devinfo);
189
static int ahc_handle_msg_reject(struct ahc_softc *ahc,
190
struct ahc_devinfo *devinfo);
191
static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
192
struct ahc_devinfo *devinfo);
193
static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
194
static void ahc_handle_devreset(struct ahc_softc *ahc,
195
struct ahc_devinfo *devinfo,
196
cam_status status, char *message,
197
int verbose_level);
198
#ifdef AHC_TARGET_MODE
199
static void ahc_setup_target_msgin(struct ahc_softc *ahc,
200
struct ahc_devinfo *devinfo,
201
struct scb *scb);
202
#endif
203
204
static bus_dmamap_callback_t ahc_dmamap_cb;
205
static void ahc_build_free_scb_list(struct ahc_softc *ahc);
206
static int ahc_init_scbdata(struct ahc_softc *ahc);
207
static void ahc_fini_scbdata(struct ahc_softc *ahc);
208
static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
209
struct scb *prev_scb,
210
struct scb *scb);
211
static int ahc_qinfifo_count(struct ahc_softc *ahc);
212
static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
213
u_int prev, u_int scbptr);
214
static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
215
static u_int ahc_rem_wscb(struct ahc_softc *ahc,
216
u_int scbpos, u_int prev);
217
static void ahc_reset_current_bus(struct ahc_softc *ahc);
218
#ifdef AHC_DUMP_SEQ
219
static void ahc_dumpseq(struct ahc_softc *ahc);
220
#endif
221
static int ahc_loadseq(struct ahc_softc *ahc);
222
static int ahc_check_patch(struct ahc_softc *ahc,
223
struct patch **start_patch,
224
u_int start_instr, u_int *skip_addr);
225
static void ahc_download_instr(struct ahc_softc *ahc,
226
u_int instrptr, uint8_t *dconsts);
227
static int ahc_other_scb_timeout(struct ahc_softc *ahc,
228
struct scb *scb,
229
struct scb *other_scb);
230
#ifdef AHC_TARGET_MODE
231
static void ahc_queue_lstate_event(struct ahc_softc *ahc,
232
struct ahc_tmode_lstate *lstate,
233
u_int initiator_id,
234
u_int event_type,
235
u_int event_arg);
236
static void ahc_update_scsiid(struct ahc_softc *ahc,
237
u_int targid_mask);
238
static int ahc_handle_target_cmd(struct ahc_softc *ahc,
239
struct target_cmd *cmd);
240
#endif
241
/************************* Sequencer Execution Control ************************/
242
/*
 * Restart the sequencer program from address zero.
 *
 * Pauses the sequencer, discards any in-progress host message state,
 * returns the SCSI logic to a quiescent bus-free state, resynchronizes
 * the target-mode command FIFO position, cleans up any half-DMA'd SCB,
 * and finally resets the sequencer program counter before unpausing.
 */
void
ahc_restart(struct ahc_softc *ahc)
{

	ahc_pause(ahc);

	/* No more pending messages. */
	ahc_clear_msg_state(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
	/* Invalidate the saved selection identity. */
	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
	ahc_outb(ahc, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);

	/* Always allow reselection */
	ahc_outb(ahc, SCSISEQ,
		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	if ((ahc->features & AHC_CMD_CHAN) != 0) {
		/* Ensure that no DMA operations are in progress */
		ahc_outb(ahc, CCSCBCNT, 0);
		ahc_outb(ahc, CCSGCTL, 0);
		ahc_outb(ahc, CCSCBCTL, 0);
	}
	/*
	 * If we were in the process of DMA'ing SCB data into
	 * an SCB, replace that SCB on the free list.  This prevents
	 * an SCB leak.
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
		ahc_add_curscb_to_free_list(ahc);
		ahc_outb(ahc, SEQ_FLAGS2,
			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
	}
	/*
	 * Clear any pending sequencer interrupt.  It is no
	 * longer relevant since we're resetting the Program
	 * Counter.
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);

	ahc_outb(ahc, MWI_RESIDUAL, 0);
	ahc_outb(ahc, SEQCTL, ahc->seqctl);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	ahc_unpause(ahc);
}
304
305
/************************* Input/Output Queues ********************************/
306
/*
 * Drain the "queue out" FIFO of completed commands.
 *
 * The sequencer DMAs the tag of each completed SCB into the
 * host-memory qoutfifo array.  For every valid tag we look the SCB up,
 * capture any data residual, and complete the command via ahc_done().
 * Consumed slots are refilled with 0xFF bytes, which the loop condition
 * treats as SCB_LIST_NULL.
 * NOTE(review): the "& 0xFF" on the warning index suggests qoutfifonext
 * is an 8-bit ring position — confirm against the softc definition.
 */
void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int scb_index;

	/* Make the CPU's view of the FIFO coherent with the DMA'd data. */
	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		if ((ahc->qoutfifonext & 0x03) == 0x03) {
			u_int modnext;

			/*
			 * Clear 32bits of QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32bit load and store
			 * operations.
			 */
			modnext = ahc->qoutfifonext & ~0x3;
			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
			aic_dmamap_sync(ahc, ahc->shared_data_dmat,
					ahc->shared_data_dmamap,
					/*offset*/modnext, /*len*/4,
					BUS_DMASYNC_PREREAD);
		}
		ahc->qoutfifonext++;

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahc_name(ahc), scb_index,
			       (ahc->qoutfifonext - 1) & 0xFF);
			continue;
		}

		/*
		 * Save off the residual
		 * if there is one.
		 */
		ahc_update_residual(ahc, scb);
		ahc_done(ahc, scb);
	}
}
351
352
void
353
ahc_run_untagged_queues(struct ahc_softc *ahc)
354
{
355
int i;
356
357
for (i = 0; i < 16; i++)
358
ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
359
}
360
361
void
362
ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
363
{
364
struct scb *scb;
365
366
if (ahc->untagged_queue_lock != 0)
367
return;
368
369
if ((scb = TAILQ_FIRST(queue)) != NULL
370
&& (scb->flags & SCB_ACTIVE) == 0) {
371
scb->flags |= SCB_ACTIVE;
372
/*
373
* Timers are disabled while recovery is in progress.
374
*/
375
aic_scb_timer_start(scb);
376
ahc_queue_scb(ahc, scb);
377
}
378
}
379
380
/************************* Interrupt Handling *********************************/
381
void
382
ahc_handle_brkadrint(struct ahc_softc *ahc)
383
{
384
/*
385
* We upset the sequencer :-(
386
* Lookup the error message
387
*/
388
int i;
389
int error;
390
391
error = ahc_inb(ahc, ERROR);
392
for (i = 0; error != 1 && i < num_errors; i++)
393
error >>= 1;
394
printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
395
ahc_name(ahc), ahc_hard_errors[i].errmesg,
396
ahc_inb(ahc, SEQADDR0) |
397
(ahc_inb(ahc, SEQADDR1) << 8));
398
399
ahc_dump_card_state(ahc);
400
401
/* Tell everyone that this HBA is no longer available */
402
ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
403
CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
404
CAM_NO_HBA);
405
406
/* Disable all interrupt sources by resetting the controller */
407
ahc_shutdown(ahc);
408
}
409
410
/*
 * Service a sequencer interrupt (SEQINT).
 *
 * The sequencer pauses itself when it posts a SEQINT, so this handler
 * runs with the sequencer stopped and restarts it via ahc_unpause() on
 * the way out.  The exceptions are the paths that call ahc_restart()
 * and return directly (MISSED_BUSFREE, and HOST_MSG_LOOP with a bad
 * phase).  The SEQINT reason code is carried in 'intstat' and masked
 * with SEQINT_MASK to select the case below.
 */
void
ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
	struct scb *scb;
	struct ahc_devinfo devinfo;

	ahc_fetch_devinfo(ahc, &devinfo);

	/*
	 * Clear the upper byte that holds SEQINT status
	 * codes and clear the SEQINT bit. We will unpause
	 * the sequencer, if appropriate, after servicing
	 * the request.
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);
	switch (intstat & SEQINT_MASK) {
	case BAD_STATUS:
	{
		u_int scb_index;
		struct hardware_scb *hscb;

		/*
		 * Set the default return value to 0 (don't
		 * send sense). The sense code will change
		 * this if needed.
		 */
		ahc_outb(ahc, RETURN_1, 0);

		/*
		 * The sequencer will notify us when a command
		 * has an error that would be of interest to
		 * the kernel. This allows us to leave the sequencer
		 * running in the common case of command completes
		 * without error. The sequencer will already have
		 * dma'd the SCB back up to us, so we can reference
		 * the in kernel copy directly.
		 */
		scb_index = ahc_inb(ahc, SCB_TAG);
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("ahc_intr - referenced scb "
			       "not valid during seqint 0x%x scb(%d)\n",
			       intstat, scb_index);
			ahc_dump_card_state(ahc);
			panic("for safety");
			goto unpause;
		}

		hscb = scb->hscb;

		/* Don't want to clobber the original sense code */
		if ((scb->flags & SCB_SENSE) != 0) {
			/*
			 * Clear the SCB_SENSE Flag and have
			 * the sequencer do a normal command
			 * complete.
			 */
			scb->flags &= ~SCB_SENSE;
			aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
			break;
		}
		aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
		/* Freeze the queue until the client sees the error. */
		ahc_freeze_devq(ahc, scb);
		aic_freeze_scb(scb);
		aic_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
		switch (hscb->shared_data.status.scsi_status) {
		case SCSI_STATUS_OK:
			printf("%s: Interrupted for staus of 0???\n",
			       ahc_name(ahc));
			break;
		case SCSI_STATUS_CMD_TERMINATED:
		case SCSI_STATUS_CHECK_COND:
		{
			struct ahc_dma_seg *sg;
			struct scsi_sense *sc;
			struct ahc_initiator_tinfo *targ_info;
			struct ahc_tmode_tstate *tstate;
			struct ahc_transinfo *tinfo;
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				ahc_print_path(ahc, scb);
				printf("SCB %d: requests Check Status\n",
				       scb->hscb->tag);
			}
#endif

			if (aic_perform_autosense(scb) == 0)
				break;

			targ_info = ahc_fetch_transinfo(ahc,
							devinfo.channel,
							devinfo.our_scsiid,
							devinfo.target,
							&tstate);
			tinfo = &targ_info->curr;
			sg = scb->sg_list;
			sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
			/*
			 * Save off the residual if there is one.
			 */
			ahc_update_residual(ahc, scb);
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				ahc_print_path(ahc, scb);
				printf("Sending Sense\n");
			}
#endif
			/*
			 * Re-purpose the SCB's single S/G entry to point
			 * at the sense buffer for the REQUEST SENSE.
			 */
			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
			sg->len = aic_get_sense_bufsize(ahc, scb);
			sg->len |= AHC_DMA_LAST_SEG;

			/* Fixup byte order */
			sg->addr = aic_htole32(sg->addr);
			sg->len = aic_htole32(sg->len);

			sc->opcode = REQUEST_SENSE;
			sc->byte2 = 0;
			if (tinfo->protocol_version <= SCSI_REV_2
			 && SCB_GET_LUN(scb) < 8)
				sc->byte2 = SCB_GET_LUN(scb) << 5;
			sc->unused[0] = 0;
			sc->unused[1] = 0;
			sc->length = sg->len;
			sc->control = 0;

			/*
			 * We can't allow the target to disconnect.
			 * This will be an untagged transaction and
			 * having the target disconnect will make this
			 * transaction indestinguishable from outstanding
			 * tagged transactions.
			 */
			hscb->control = 0;

			/*
			 * This request sense could be because the
			 * the device lost power or in some other
			 * way has lost our transfer negotiations.
			 * Renegotiate if appropriate. Unit attention
			 * errors will be reported before any data
			 * phases occur.
			 */
			if (aic_get_residual(scb)
			 == aic_get_transfer_length(scb)) {
				ahc_update_neg_request(ahc, &devinfo,
						       tstate, targ_info,
						       AHC_NEG_IF_NON_ASYNC);
			}
			if (tstate->auto_negotiate & devinfo.target_mask) {
				hscb->control |= MK_MESSAGE;
				scb->flags &= ~SCB_NEGOTIATE;
				scb->flags |= SCB_AUTO_NEGOTIATE;
			}
			hscb->cdb_len = sizeof(*sc);
			hscb->dataptr = sg->addr;
			hscb->datacnt = sg->len;
			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
			hscb->sgptr = aic_htole32(hscb->sgptr);
			scb->sg_count = 1;
			scb->flags |= SCB_SENSE;
			ahc_qinfifo_requeue_tail(ahc, scb);
			ahc_outb(ahc, RETURN_1, SEND_SENSE);
			/*
			 * Ensure we have enough time to actually
			 * retrieve the sense, but only schedule
			 * the timer if we are not in recovery or
			 * this is a recovery SCB that is allowed
			 * to have an active timer.
			 */
			if (ahc->scb_data->recovery_scbs == 0
			 || (scb->flags & SCB_RECOVERY_SCB) != 0)
				aic_scb_timer_reset(scb, 5 * 1000);
			break;
		}
		default:
			break;
		}
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		printf("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target);
		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX));
		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
		ahc_dump_card_state(ahc);
		/* Hand-craft a BUS DEVICE RESET message for this target. */
		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahc->msgout_len = 1;
		ahc->msgout_index = 0;
		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_assert_atn(ahc);
		break;
	}
	case SEND_REJECT:
	{
		u_int rejbyte = ahc_inb(ahc, ACCUM);
		printf("%s:%c:%d: Warning - unknown message received from "
		       "target (0x%x). Rejecting\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahc_handle_proto_violation(ahc);
		break;
	}
	case IGN_WIDE_RES:
		ahc_handle_ign_wide_residue(ahc, &devinfo);
		break;
	case PDATA_REINIT:
		ahc_reinitialize_dataptrs(ahc);
		break;
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printf("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x. Attempting to continue\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printf("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		/* ahc_restart() unpauses the sequencer itself. */
		ahc_restart(ahc);
		return;
	}
	case HOST_MSG_LOOP:
	{
		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		if (ahc->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printf("ahc_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n",
				       bus_phase);
				/*
				 * Probably transitioned to bus free before
				 * we got here. Just punt the message.
				 */
				ahc_clear_intstat(ahc);
				ahc_restart(ahc);
				return;
			}

			scb_index = ahc_inb(ahc, SCB_TAG);
			scb = ahc_lookup_scb(ahc, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				if (scb == NULL)
					panic("HOST_MSG_LOOP with "
					      "invalid SCB %x\n", scb_index);

				if (bus_phase == P_MESGOUT)
					ahc_setup_initiator_msgout(ahc,
								   &devinfo,
								   scb);
				else {
					ahc->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahc->msgin_index = 0;
				}
			}
#ifdef AHC_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahc->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahc->msgin_index = 0;
				}
				else
					ahc_setup_target_msgin(ahc,
							       &devinfo,
							       scb);
			}
#endif
		}

		ahc_handle_message_phase(ahc);
		break;
	}
	case PERR_DETECTED:
	{
		/*
		 * If we've cleared the parity error interrupt
		 * but the sequencer still believes that SCSIPERR
		 * is true, it must be that the parity error is
		 * for the currently presented byte on the bus,
		 * and we are not in a phase (data-in) where we will
		 * eventually ack this byte. Ack the byte and
		 * throw it away in the hope that the target will
		 * take us to message out to deliver the appropriate
		 * error message.
		 */
		if ((intstat & SCSIINT) == 0
		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
			if ((ahc->features & AHC_DT) == 0) {
				u_int curphase;

				/*
				 * The hardware will only let you ack bytes
				 * if the expected phase in SCSISIGO matches
				 * the current phase. Make sure this is
				 * currently the case.
				 */
				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
				ahc_outb(ahc, LASTPHASE, curphase);
				ahc_outb(ahc, SCSISIGO, curphase);
			}
			if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
				int wait;

				/*
				 * In a data phase. Faster to bitbucket
				 * the data than to individually ack each
				 * byte. This is also the only strategy
				 * that will work with AUTOACK enabled.
				 */
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
				wait = 5000;
				while (--wait != 0) {
					if ((ahc_inb(ahc, SCSISIGI)
					   & (CDI|MSGI)) != 0)
						break;
					aic_delay(100);
				}
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
				if (wait == 0) {
					struct scb *scb;
					u_int scb_index;

					ahc_print_devinfo(ahc, &devinfo);
					printf("Unable to clear parity error. "
					       "Resetting bus.\n");
					scb_index = ahc_inb(ahc, SCB_TAG);
					scb = ahc_lookup_scb(ahc, scb_index);
					if (scb != NULL)
						aic_set_transaction_status(scb,
						    CAM_UNCOR_PARITY);
					ahc_reset_channel(ahc, devinfo.channel,
							  /*init reset*/TRUE);
				}
			} else {
				/* Ack and discard the offending byte. */
				ahc_inb(ahc, SCSIDATL);
			}
		}
		break;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		u_int scbindex = ahc_inb(ahc, SCB_TAG);
		u_int lastphase = ahc_inb(ahc, LASTPHASE);
		u_int i;

		scb = ahc_lookup_scb(ahc, scbindex);
		/* Map lastphase to its printable description. */
		for (i = 0; i < num_phases; i++) {
			if (lastphase == ahc_phase_table[i].phase)
				break;
		}
		ahc_print_path(ahc, scb);
		printf("data overrun detected %s."
		       " Tag == 0x%x.\n",
		       ahc_phase_table[i].phasemsg,
		       scb->hscb->tag);
		ahc_print_path(ahc, scb);
		printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
		       aic_get_transfer_length(scb), scb->sg_count);
		if (scb->sg_count > 0) {
			for (i = 0; i < scb->sg_count; i++) {
				printf("sg[%d] - Addr 0x%x%x : Length %d\n",
				       i,
				       (aic_le32toh(scb->sg_list[i].len) >> 24
					& SG_HIGH_ADDR_BITS),
				       aic_le32toh(scb->sg_list[i].addr),
				       aic_le32toh(scb->sg_list[i].len)
					& AHC_SG_LEN_MASK);
			}
		}
		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahc_freeze_devq(ahc, scb);
		if ((scb->flags & SCB_SENSE) == 0) {
			aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		} else {
			scb->flags &= ~SCB_SENSE;
			aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
		}
		aic_freeze_scb(scb);

		if ((ahc->features & AHC_ULTRA2) != 0) {
			/*
			 * Clear the channel in case we return
			 * to data phase later.
			 */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
		}
		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
			u_int dscommand1;

			/* Ensure HHADDR is 0 for future DMA operations. */
			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
			ahc_outb(ahc, HADDR, 0);
			ahc_outb(ahc, DSCOMMAND1, dscommand1);
		}
		break;
	}
	case MKMSG_FAILED:
	{
		u_int scbindex;

		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbindex = ahc_inb(ahc, SCB_TAG);
		scb = ahc_lookup_scb(ahc, scbindex);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
					   SCB_GET_CHANNEL(ahc, scb),
					   SCB_GET_LUN(scb), scb->hscb->tag,
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		break;
	}
	case NO_FREE_SCB:
	{
		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	case SCB_MISMATCH:
	{
		u_int scbptr;

		scbptr = ahc_inb(ahc, SCBPTR);
		printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n",
		       scbptr, ahc_inb(ahc, ARG_1),
		       ahc->scb_data->hscbs[scbptr].tag);
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	case OUT_OF_RANGE:
	{
		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n, A == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX),
		       ahc_inb(ahc, ACCUM));
		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	default:
		printf("ahc_intr: seqint, "
		       "intstat == 0x%x, scsisigi = 0x%x\n",
		       intstat, ahc_inb(ahc, SCSISIGI));
		break;
	}
unpause:
	/*
	 * The sequencer is paused immediately on
	 * a SEQINT, so we should restart it when
	 * we're done.
	 */
	ahc_unpause(ahc);
}
962
963
/*
 * Handle a SCSI interrupt (SCSIINT) from the controller.
 *
 * Decodes which bus condition raised the interrupt (transceiver mode
 * change, external bus reset, parity error, selection timeout, or an
 * unexpected bus free) and recovers from it.  On twin-channel (AHC_TWIN)
 * adapters both channels are probed for the interrupting condition.
 * The sequencer is paused on entry; every branch is responsible for
 * clearing CLRSCSIINT and either unpausing or restarting the sequencer.
 */
void
ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
{
	u_int	scb_index;
	u_int	status0;
	u_int	status;
	struct	scb *scb;
	char	cur_channel;
	char	intr_channel;

	/* Determine which channel the chip currently has selected. */
	if ((ahc->features & AHC_TWIN) != 0
	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
		cur_channel = 'B';
	else
		cur_channel = 'A';
	intr_channel = cur_channel;

	/* IOERR (transceiver mode change) only exists on Ultra2 parts. */
	if ((ahc->features & AHC_ULTRA2) != 0)
		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
	else
		status0 = 0;
	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	if (status == 0 && status0 == 0) {
		if ((ahc->features & AHC_TWIN) != 0) {
			/* Try the other channel */
			ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
			status = ahc_inb(ahc, SSTAT1)
			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
		}
		if (status == 0) {
			/* Neither channel shows a cause; ack and resume. */
			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_unpause(ahc);
			return;
		}
	}

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/* Ignore the SCB if the connection was never fully identified. */
	if (scb != NULL
	 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	if ((ahc->features & AHC_ULTRA2) != 0
	 && (status0 & IOERR) != 0) {
		/* LVD/SE transceiver mode changed under us. */
		int now_lvd;

		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
		printf("%s: Transceiver State Has Changed to %s mode\n",
		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
		ahc_outb(ahc, CLRSINT0, CLRIOERR);
		/*
		 * When transitioning to SE mode, the reset line
		 * glitches, triggering an arbitration bug in some
		 * Ultra2 controllers.  This bug is cleared when we
		 * assert the reset line.  Since a reset glitch has
		 * already occurred with this transition and a
		 * transceiver state change is handled just like
		 * a bus reset anyway, asserting the reset line
		 * ourselves is safe.
		 */
		ahc_reset_channel(ahc, intr_channel,
				  /*Initiate Reset*/now_lvd == 0);
	} else if ((status & SCSIRSTI) != 0) {
		/* Another device asserted SCSI reset on the bus. */
		printf("%s: Someone reset channel %c\n",
		       ahc_name(ahc), intr_channel);
		if (intr_channel != cur_channel)
			ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {
		/*
		 * Determine the bus phase and queue an appropriate message.
		 * SCSIPERR is latched true as soon as a parity error
		 * occurs.  If the sequencer acked the transfer that
		 * caused the parity error and the currently presented
		 * transfer on the bus has correct parity, SCSIPERR will
		 * be cleared by CLRSCSIPERR.  Use this to determine if
		 * we should look at the last phase the sequencer recorded,
		 * or the current phase presented on the bus.
		 */
		struct	ahc_devinfo devinfo;
		u_int	mesg_out;
		u_int	curphase;
		u_int	errorphase;
		u_int	lastphase;
		u_int	scsirate;
		u_int	i;
		u_int	sstat2;
		int	silent;

		lastphase = ahc_inb(ahc, LASTPHASE);
		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
		sstat2 = ahc_inb(ahc, SSTAT2);
		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
		/*
		 * For all phases save DATA, the sequencer won't
		 * automatically ack a byte that has a parity error
		 * in it.  So the only way that the current phase
		 * could be 'data-in' is if the parity error is for
		 * an already acked byte in the data phase.  During
		 * synchronous data-in transfers, we may actually
		 * ack bytes before latching the current phase in
		 * LASTPHASE, leading to the discrepancy between
		 * curphase and lastphase.
		 */
		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
			errorphase = curphase;
		else
			errorphase = lastphase;

		/* Map the phase to its table entry for the message/label. */
		for (i = 0; i < num_phases; i++) {
			if (errorphase == ahc_phase_table[i].phase)
				break;
		}
		mesg_out = ahc_phase_table[i].mesg_out;
		silent = FALSE;
		if (scb != NULL) {
			if (SCB_IS_SILENT(scb))
				silent = TRUE;
			else
				ahc_print_path(ahc, scb);
			scb->flags |= SCB_TRANSMISSION_ERROR;
		} else
			printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
		scsirate = ahc_inb(ahc, SCSIRATE);
		if (silent == FALSE) {
			printf("parity error detected %s. "
			       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
			       ahc_phase_table[i].phasemsg,
			       ahc_inw(ahc, SEQADDR0),
			       scsirate);
			if ((ahc->features & AHC_DT) != 0) {
				/* DT parts report CRC-specific causes. */
				if ((sstat2 & CRCVALERR) != 0)
					printf("\tCRC Value Mismatch\n");
				if ((sstat2 & CRCENDERR) != 0)
					printf("\tNo terminal CRC packet "
					       "received\n");
				if ((sstat2 & CRCREQERR) != 0)
					printf("\tIllegal CRC packet "
					       "request\n");
				if ((sstat2 & DUAL_EDGE_ERR) != 0)
					printf("\tUnexpected %sDT Data Phase\n",
					       (scsirate & SINGLE_EDGE)
					     ? "" : "non-");
			}
		}

		if ((ahc->features & AHC_DT) != 0
		 && (sstat2 & DUAL_EDGE_ERR) != 0) {
			/*
			 * This error applies regardless of
			 * data direction, so ignore the value
			 * in the phase table.
			 */
			mesg_out = MSG_INITIATOR_DET_ERR;
		}

		/*
		 * We've set the hardware to assert ATN if we
		 * get a parity error on "in" phases, so all we
		 * need to do is stuff the message buffer with
		 * the appropriate message.  "In" phases have set
		 * mesg_out to something other than MSG_NOP.
		 */
		if (mesg_out != MSG_NOOP) {
			if (ahc->msg_type != MSG_TYPE_NONE)
				ahc->send_msg_perror = TRUE;
			else
				ahc_outb(ahc, MSG_OUT, mesg_out);
		}
		/*
		 * Force a renegotiation with this target just in
		 * case we are out of sync for some external reason
		 * unknown (or unreported) by the target.
		 */
		ahc_fetch_devinfo(ahc, &devinfo);
		ahc_force_renegotiation(ahc, &devinfo);

		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_unpause(ahc);
	} else if ((status & SELTO) != 0) {
		/* Selection timed out: the target did not respond. */
		u_int scbptr;

		/* Stop the selection */
		ahc_outb(ahc, SCSISEQ, 0);

		/* No more pending messages */
		ahc_clear_msg_state(ahc);

		/* Clear interrupt state */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to insure
		 * the LED turns off just incase no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahc_outb(ahc, CLRSINT0, CLRSELINGO);

		/* The timed-out SCB is at the head of the waiting queue. */
		scbptr = ahc_inb(ahc, WAITING_SCBH);
		ahc_outb(ahc, SCBPTR, scbptr);
		scb_index = ahc_inb(ahc, SCB_TAG);

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("%s: ahc_intr - referenced scb not "
			       "valid during SELTO scb(%d, %d)\n",
			       ahc_name(ahc), scbptr, scb_index);
			ahc_dump_card_state(ahc);
		} else {
			struct ahc_devinfo devinfo;
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
				ahc_print_path(ahc, scb);
				printf("Saw Selection Timeout for SCB 0x%x\n",
				       scb_index);
			}
#endif
			ahc_scb_devinfo(ahc, &devinfo, scb);
			aic_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahc_freeze_devq(ahc, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing.  This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
			 */
			ahc_handle_devreset(ahc, &devinfo,
					    CAM_SEL_TIMEOUT,
					    "Selection Timeout",
					    /*verbose_level*/1);
		}
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_restart(ahc);
	} else if ((status & BUSFREE) != 0
		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
		/* Bus went free while we expected the connection to hold. */
		struct	ahc_devinfo devinfo;
		u_int	lastphase;
		u_int	saved_scsiid;
		u_int	saved_lun;
		u_int	target;
		u_int	initiator_role_id;
		char	channel;
		int	printerror;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		/*
		 * Disable busfree interrupts and clear the busfree
		 * interrupt status.  We do this here so that several
		 * bus transactions occur prior to clearing the SCSIINT
		 * latch.  It can take a bit for the clearing to take effect.
		 */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Look at what phase we were last in.
		 * If its message out, chances are pretty good
		 * that the busfree was in response to one of
		 * our abort requests.
		 */
		lastphase = ahc_inb(ahc, LASTPHASE);
		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
		saved_lun = ahc_inb(ahc, SAVED_LUN);
		target = SCSIID_TARGET(ahc, saved_scsiid);
		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
		ahc_compile_devinfo(&devinfo, initiator_role_id,
				    target, saved_lun, channel, ROLE_INITIATOR);
		printerror = 1;

		if (lastphase == P_MESGOUT) {
			u_int tag;

			tag = SCB_LIST_NULL;
			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
				/*
				 * NOTE(review): scb is dereferenced here
				 * without a NULL check even though it may
				 * have been cleared above when the
				 * connection was NOT_IDENTIFIED.  Presumably
				 * an abort message is only ever sent on a
				 * fully identified connection — confirm.
				 */
				if (ahc->msgout_buf[ahc->msgout_index - 1]
				 == MSG_ABORT_TAG)
					tag = scb->hscb->tag;
				ahc_print_path(ahc, scb);
				printf("SCB %d - Abort%s Completed.\n",
				       scb->hscb->tag, tag == SCB_LIST_NULL ?
				       "" : " Tag");
				ahc_abort_scbs(ahc, target, channel,
					       saved_lun, tag,
					       ROLE_INITIATOR,
					       CAM_REQ_ABORTED);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
						MSG_BUS_DEV_RESET, TRUE)) {
				/*
				 * Don't mark the user's request for this BDR
				 * as completing with CAM_BDR_SENT.  CAM3
				 * specifies CAM_REQ_CMP.
				 */
				if (scb != NULL
				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
				 && ahc_match_scb(ahc, scb, target, channel,
						  CAM_LUN_WILDCARD,
						  SCB_LIST_NULL,
						  ROLE_INITIATOR)) {
					aic_set_transaction_status(scb, CAM_REQ_CMP);
				}
				ahc_compile_devinfo(&devinfo,
						    initiator_role_id,
						    target,
						    CAM_LUN_WILDCARD,
						    channel,
						    ROLE_INITIATOR);
				ahc_handle_devreset(ahc, &devinfo,
						    CAM_BDR_SENT,
						    "Bus Device Reset",
						    /*verbose_level*/0);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_PPR, FALSE)) {
				struct ahc_initiator_tinfo *tinfo;
				struct ahc_tmode_tstate *tstate;

				/*
				 * PPR Rejected.  Try non-ppr negotiation
				 * and retry command.
				 */
				tinfo = ahc_fetch_transinfo(ahc,
							    devinfo.channel,
							    devinfo.our_scsiid,
							    devinfo.target,
							    &tstate);
				tinfo->curr.transport_version = 2;
				tinfo->goal.transport_version = 2;
				tinfo->goal.ppr_options = 0;
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_WDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-narrow and
				 * retry command.
				 */
				ahc_set_width(ahc, &devinfo,
					      MSG_EXT_WDTR_BUS_8_BIT,
					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
					      /*paused*/TRUE);
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_SDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-async and
				 * retry command.
				 */
				ahc_set_syncrate(ahc, &devinfo,
						 /*syncrate*/NULL,
						 /*period*/0, /*offset*/0,
						 /*ppr_options*/0,
						 AHC_TRANS_CUR|AHC_TRANS_GOAL,
						 /*paused*/TRUE);
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			}
		}
		if (printerror != 0) {
			/* The busfree was not one we provoked; report it. */
			u_int i;

			if (scb != NULL) {
				u_int tag;

				if ((scb->hscb->control & TAG_ENB) != 0)
					tag = scb->hscb->tag;
				else
					tag = SCB_LIST_NULL;
				ahc_print_path(ahc, scb);
				ahc_abort_scbs(ahc, target, channel,
					       SCB_GET_LUN(scb), tag,
					       ROLE_INITIATOR,
					       CAM_UNEXP_BUSFREE);
			} else {
				/*
				 * We had not fully identified this connection,
				 * so we cannot abort anything.
				 */
				printf("%s: ", ahc_name(ahc));
			}
			for (i = 0; i < num_phases; i++) {
				if (lastphase == ahc_phase_table[i].phase)
					break;
			}
			if (lastphase != P_BUSFREE) {
				/*
				 * Renegotiate with this device at the
				 * next opportunity just in case this busfree
				 * is due to a negotiation mismatch with the
				 * device.
				 */
				ahc_force_renegotiation(ahc, &devinfo);
			}
			printf("Unexpected busfree %s\n"
			       "SEQADDR == 0x%x\n",
			       ahc_phase_table[i].phasemsg,
			       ahc_inb(ahc, SEQADDR0)
				| (ahc_inb(ahc, SEQADDR1) << 8));
		}
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_restart(ahc);
	} else {
		printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
		       ahc_name(ahc), status);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
	}
}
1393
1394
/*
1395
* Force renegotiation to occur the next time we initiate
1396
* a command to the current device.
1397
*/
1398
static void
1399
ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1400
{
1401
struct ahc_initiator_tinfo *targ_info;
1402
struct ahc_tmode_tstate *tstate;
1403
1404
targ_info = ahc_fetch_transinfo(ahc,
1405
devinfo->channel,
1406
devinfo->our_scsiid,
1407
devinfo->target,
1408
&tstate);
1409
ahc_update_neg_request(ahc, devinfo, tstate,
1410
targ_info, AHC_NEG_IF_NON_ASYNC);
1411
}
1412
1413
/* Upper bound on single-step iterations before declaring a hung sequencer. */
#define AHC_MAX_STEPS 2000
/*
 * Single-step the sequencer out of any firmware critical section.
 *
 * The sequencer is paused on entry.  If its program counter (SEQADDR)
 * falls inside one of the critical-section ranges downloaded with the
 * firmware, we place the chip in single-step mode, mask interrupt
 * sources that could re-pause it, and step it one instruction at a
 * time until the PC leaves all critical sections.  Panics if more than
 * AHC_MAX_STEPS steps are needed.
 */
void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int	stepping;
	int	steps;
	u_int	simode0;
	u_int	simode1;

	/* Nothing to do if this firmware has no critical sections. */
	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		/*
		 * Seqaddr represents the next instruction to execute,
		 * so we are really executing the instruction just
		 * before it.
		 */
		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		/* PC is outside every critical section; we are done. */
		if (i == ahc->num_critical_sections)
			break;

		if (steps > AHC_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n",
			       ahc_name(ahc));
			ahc_dump_card_state(ahc);
			panic("critical section loop");
		}

		steps++;
		if (stepping == FALSE) {
			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			if ((ahc->features & AHC_DT) != 0)
				/*
				 * On DT class controllers, we
				 * use the enhanced busfree logic.
				 * Unfortunately we cannot re-enable
				 * busfree detection within the
				 * current connection, so we must
				 * leave it on while single stepping.
				 */
				ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
			else
				ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
			stepping = TRUE;
		}
		if ((ahc->features & AHC_DT) != 0) {
			/* Keep latched busfree status from re-pausing us. */
			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
		}
		/* Release the sequencer for one instruction and wait. */
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		while (!ahc_is_paused(ahc))
			aic_delay(200);
	}
	if (stepping) {
		/* Restore interrupt masks and leave single-step mode. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc->seqctl);
	}
}
1500
1501
/*
 * Clear any pending interrupt status.
 *
 * Clears the per-condition latches (SSTAT1 then SSTAT0 causes) before
 * clearing the summary SCSIINT latch; each write is flushed so the
 * posted write reaches the chip before the next one is issued.
 */
void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_flush_device_writes(ahc);
	/* Clear the summary latch last, once the causes are gone. */
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	ahc_flush_device_writes(ahc);
}
1517
1518
/**************************** Debugging Routines ******************************/
1519
#ifdef AHC_DEBUG
1520
uint32_t ahc_debug = AHC_DEBUG_OPTS;
1521
#endif
1522
1523
void
1524
ahc_print_scb(struct scb *scb)
1525
{
1526
int i;
1527
1528
struct hardware_scb *hscb = scb->hscb;
1529
1530
printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1531
(void *)scb,
1532
hscb->control,
1533
hscb->scsiid,
1534
hscb->lun,
1535
hscb->cdb_len);
1536
printf("Shared Data: ");
1537
for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
1538
printf("%#02x", hscb->shared_data.cdb[i]);
1539
printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1540
aic_le32toh(hscb->dataptr),
1541
aic_le32toh(hscb->datacnt),
1542
aic_le32toh(hscb->sgptr),
1543
hscb->tag);
1544
if (scb->sg_count > 0) {
1545
for (i = 0; i < scb->sg_count; i++) {
1546
printf("sg[%d] - Addr 0x%x%x : Length %d\n",
1547
i,
1548
(aic_le32toh(scb->sg_list[i].len) >> 24
1549
& SG_HIGH_ADDR_BITS),
1550
aic_le32toh(scb->sg_list[i].addr),
1551
aic_le32toh(scb->sg_list[i].len));
1552
}
1553
}
1554
}
1555
1556
/************************* Transfer Negotiation *******************************/
1557
/*
1558
* Allocate per target mode instance (ID we respond to as a target)
1559
* transfer negotiation data structures.
1560
*/
1561
static struct ahc_tmode_tstate *
1562
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1563
{
1564
struct ahc_tmode_tstate *master_tstate;
1565
struct ahc_tmode_tstate *tstate;
1566
int i;
1567
1568
master_tstate = ahc->enabled_targets[ahc->our_id];
1569
if (channel == 'B') {
1570
scsi_id += 8;
1571
master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1572
}
1573
if (ahc->enabled_targets[scsi_id] != NULL
1574
&& ahc->enabled_targets[scsi_id] != master_tstate)
1575
panic("%s: ahc_alloc_tstate - Target already allocated",
1576
ahc_name(ahc));
1577
tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
1578
M_DEVBUF, M_NOWAIT);
1579
if (tstate == NULL)
1580
return (NULL);
1581
1582
/*
1583
* If we have allocated a master tstate, copy user settings from
1584
* the master tstate (taken from SRAM or the EEPROM) for this
1585
* channel, but reset our current and goal settings to async/narrow
1586
* until an initiator talks to us.
1587
*/
1588
if (master_tstate != NULL) {
1589
memcpy(tstate, master_tstate, sizeof(*tstate));
1590
memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1591
tstate->ultraenb = 0;
1592
for (i = 0; i < AHC_NUM_TARGETS; i++) {
1593
memset(&tstate->transinfo[i].curr, 0,
1594
sizeof(tstate->transinfo[i].curr));
1595
memset(&tstate->transinfo[i].goal, 0,
1596
sizeof(tstate->transinfo[i].goal));
1597
}
1598
} else
1599
memset(tstate, 0, sizeof(*tstate));
1600
ahc->enabled_targets[scsi_id] = tstate;
1601
return (tstate);
1602
}
1603
1604
#ifdef AHC_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (force == FALSE) {
		if ((channel == 'A' && scsi_id == ahc->our_id)
		 || (channel == 'B' && scsi_id == ahc->our_id_b))
			return;
	}

	/* Channel B targets live in the upper half of the array. */
	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif
1631
1632
/*
1633
* Called when we have an active connection to a target on the bus,
1634
* this function finds the nearest syncrate to the input period limited
1635
* by the capabilities of the bus connectivity of and sync settings for
1636
* the target.
1637
*/
1638
struct ahc_syncrate *
1639
ahc_devlimited_syncrate(struct ahc_softc *ahc,
1640
struct ahc_initiator_tinfo *tinfo,
1641
u_int *period, u_int *ppr_options, role_t role)
1642
{
1643
struct ahc_transinfo *transinfo;
1644
u_int maxsync;
1645
1646
if ((ahc->features & AHC_ULTRA2) != 0) {
1647
if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1648
&& (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1649
maxsync = AHC_SYNCRATE_DT;
1650
} else {
1651
maxsync = AHC_SYNCRATE_ULTRA;
1652
/* Can't do DT on an SE bus */
1653
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1654
}
1655
} else if ((ahc->features & AHC_ULTRA) != 0) {
1656
maxsync = AHC_SYNCRATE_ULTRA;
1657
} else {
1658
maxsync = AHC_SYNCRATE_FAST;
1659
}
1660
/*
1661
* Never allow a value higher than our current goal
1662
* period otherwise we may allow a target initiated
1663
* negotiation to go above the limit as set by the
1664
* user. In the case of an initiator initiated
1665
* sync negotiation, we limit based on the user
1666
* setting. This allows the system to still accept
1667
* incoming negotiations even if target initiated
1668
* negotiation is not performed.
1669
*/
1670
if (role == ROLE_TARGET)
1671
transinfo = &tinfo->user;
1672
else
1673
transinfo = &tinfo->goal;
1674
*ppr_options &= transinfo->ppr_options;
1675
if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
1676
maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
1677
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1678
}
1679
if (transinfo->period == 0) {
1680
*period = 0;
1681
*ppr_options = 0;
1682
return (NULL);
1683
}
1684
*period = MAX(*period, transinfo->period);
1685
return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1686
}
1687
1688
/*
1689
* Look up the valid period to SCSIRATE conversion in our table.
1690
* Return the period and offset that should be sent to the target
1691
* if this was the beginning of an SDTR.
1692
*/
1693
struct ahc_syncrate *
1694
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1695
u_int *ppr_options, u_int maxsync)
1696
{
1697
struct ahc_syncrate *syncrate;
1698
1699
if ((ahc->features & AHC_DT) == 0)
1700
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1701
1702
/* Skip all DT only entries if DT is not available */
1703
if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1704
&& maxsync < AHC_SYNCRATE_ULTRA2)
1705
maxsync = AHC_SYNCRATE_ULTRA2;
1706
1707
for (syncrate = &ahc_syncrates[maxsync];
1708
syncrate->rate != NULL;
1709
syncrate++) {
1710
/*
1711
* The Ultra2 table doesn't go as low
1712
* as for the Fast/Ultra cards.
1713
*/
1714
if ((ahc->features & AHC_ULTRA2) != 0
1715
&& (syncrate->sxfr_u2 == 0))
1716
break;
1717
1718
if (*period <= syncrate->period) {
1719
/*
1720
* When responding to a target that requests
1721
* sync, the requested rate may fall between
1722
* two rates that we can output, but still be
1723
* a rate that we can receive. Because of this,
1724
* we want to respond to the target with
1725
* the same rate that it sent to us even
1726
* if the period we use to send data to it
1727
* is lower. Only lower the response period
1728
* if we must.
1729
*/
1730
if (syncrate == &ahc_syncrates[maxsync])
1731
*period = syncrate->period;
1732
1733
/*
1734
* At some speeds, we only support
1735
* ST transfers.
1736
*/
1737
if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1738
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1739
break;
1740
}
1741
}
1742
1743
if ((*period == 0)
1744
|| (syncrate->rate == NULL)
1745
|| ((ahc->features & AHC_ULTRA2) != 0
1746
&& (syncrate->sxfr_u2 == 0))) {
1747
/* Use asynchronous transfers. */
1748
*period = 0;
1749
syncrate = NULL;
1750
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1751
}
1752
return (syncrate);
1753
}
1754
1755
/*
1756
* Convert from an entry in our syncrate table to the SCSI equivalent
1757
* sync "period" factor.
1758
*/
1759
u_int
1760
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1761
{
1762
struct ahc_syncrate *syncrate;
1763
1764
if ((ahc->features & AHC_ULTRA2) != 0)
1765
scsirate &= SXFR_ULTRA2;
1766
else
1767
scsirate &= SXFR;
1768
1769
syncrate = &ahc_syncrates[maxsync];
1770
while (syncrate->rate != NULL) {
1771
if ((ahc->features & AHC_ULTRA2) != 0) {
1772
if (syncrate->sxfr_u2 == 0)
1773
break;
1774
else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1775
return (syncrate->period);
1776
} else if (scsirate == (syncrate->sxfr & SXFR)) {
1777
return (syncrate->period);
1778
}
1779
syncrate++;
1780
}
1781
return (0); /* async */
1782
}
1783
1784
/*
1785
* Truncate the given synchronous offset to a value the
1786
* current adapter type and syncrate are capable of.
1787
*/
1788
void
1789
ahc_validate_offset(struct ahc_softc *ahc,
1790
struct ahc_initiator_tinfo *tinfo,
1791
struct ahc_syncrate *syncrate,
1792
u_int *offset, int wide, role_t role)
1793
{
1794
u_int maxoffset;
1795
1796
/* Limit offset to what we can do */
1797
if (syncrate == NULL) {
1798
maxoffset = 0;
1799
} else if ((ahc->features & AHC_ULTRA2) != 0) {
1800
maxoffset = MAX_OFFSET_ULTRA2;
1801
} else {
1802
if (wide)
1803
maxoffset = MAX_OFFSET_16BIT;
1804
else
1805
maxoffset = MAX_OFFSET_8BIT;
1806
}
1807
*offset = MIN(*offset, maxoffset);
1808
if (tinfo != NULL) {
1809
if (role == ROLE_TARGET)
1810
*offset = MIN(*offset, tinfo->user.offset);
1811
else
1812
*offset = MIN(*offset, tinfo->goal.offset);
1813
}
1814
}
1815
1816
/*
1817
* Truncate the given transfer width parameter to a value the
1818
* current adapter type is capable of.
1819
*/
1820
void
1821
ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1822
u_int *bus_width, role_t role)
1823
{
1824
switch (*bus_width) {
1825
default:
1826
if (ahc->features & AHC_WIDE) {
1827
/* Respond Wide */
1828
*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1829
break;
1830
}
1831
/* FALLTHROUGH */
1832
case MSG_EXT_WDTR_BUS_8_BIT:
1833
*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1834
break;
1835
}
1836
if (tinfo != NULL) {
1837
if (role == ROLE_TARGET)
1838
*bus_width = MIN(tinfo->user.width, *bus_width);
1839
else
1840
*bus_width = MIN(tinfo->goal.width, *bus_width);
1841
}
1842
}
1843
1844
/*
1845
* Update the bitmask of targets for which the controller should
1846
* negotiate with at the next convenient opportunity. This currently
1847
* means the next time we send the initial identify messages for
1848
* a new transaction.
1849
*/
1850
int
1851
ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1852
struct ahc_tmode_tstate *tstate,
1853
struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
1854
{
1855
u_int auto_negotiate_orig;
1856
1857
auto_negotiate_orig = tstate->auto_negotiate;
1858
if (neg_type == AHC_NEG_ALWAYS) {
1859
/*
1860
* Force our "current" settings to be
1861
* unknown so that unless a bus reset
1862
* occurs the need to renegotiate is
1863
* recorded persistently.
1864
*/
1865
if ((ahc->features & AHC_WIDE) != 0)
1866
tinfo->curr.width = AHC_WIDTH_UNKNOWN;
1867
tinfo->curr.period = AHC_PERIOD_UNKNOWN;
1868
tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
1869
}
1870
if (tinfo->curr.period != tinfo->goal.period
1871
|| tinfo->curr.width != tinfo->goal.width
1872
|| tinfo->curr.offset != tinfo->goal.offset
1873
|| tinfo->curr.ppr_options != tinfo->goal.ppr_options
1874
|| (neg_type == AHC_NEG_IF_NON_ASYNC
1875
&& (tinfo->goal.offset != 0
1876
|| tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1877
|| tinfo->goal.ppr_options != 0)))
1878
tstate->auto_negotiate |= devinfo->target_mask;
1879
else
1880
tstate->auto_negotiate &= ~devinfo->target_mask;
1881
1882
return (auto_negotiate_orig != tstate->auto_negotiate);
1883
}
1884
1885
/*
 * Update the user/goal/curr tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;

	/* A NULL syncrate means asynchronous transfers. */
	if (syncrate == NULL) {
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/* Record the new parameters in whichever tables were requested. */
	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr	   = tinfo->curr.ppr_options;

	/* Only touch hardware state when "current" actually changes. */
	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int	scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {
			/* Ultra2: rate bits plus CRC/edge selection. */
			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {
			/* Pre-Ultra2: rate and offset share SCSIRATE. */
			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				/* Reflect the per-target FAST20 enable. */
				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			/* Program the live connection's rate/offset. */
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		/* Notify the OS layer of the transfer-negotiation change. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			if (offset != 0) {
				/* offset != 0 implies syncrate != NULL. */
				printf("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);

	/* Push the new settings into any SCBs already in flight. */
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}
2009
2010
/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	oldwidth;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/* Record the new width in whichever tables were requested. */
	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	/* Only touch hardware state when "current" actually changes. */
	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		u_int	scsirate;

		update_needed++;
		scsirate =  tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		/* Program the live connection immediately if active. */
		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->curr.width = width;

		/* Notify the OS layer of the transfer-negotiation change. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);
	/* Push the new settings into any SCBs already in flight. */
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}
2070
2071
/*
2072
* Update the current state of tagged queuing for a given target.
2073
*/
2074
void
2075
ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2076
ahc_queue_alg alg)
2077
{
2078
ahc_platform_set_tags(ahc, devinfo, alg);
2079
ahc_send_async(ahc, devinfo->channel, devinfo->target,
2080
devinfo->lun, AC_TRANSFER_NEG, &alg);
2081
}
2082
2083
/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 *
 * Walks the driver's pending SCB list first, then mirrors the updated
 * control/rate/offset bytes into the hardware SCBs resident on the card.
 */
static void
ahc_update_pending_scbs(struct ahc_softc *ahc)
{
	struct scb *pending_scb;
	int pending_scb_count;
	int i;
	int paused;
	u_int saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		/* Refresh ULTRAENB from the per-target state. */
		pending_hscb = pending_scb->hscb;
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->curr.offset;
		/*
		 * If auto-negotiation is no longer requested for this
		 * target, drop the MK_MESSAGE request from the SCB.
		 */
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_hscb->control &= ~MK_MESSAGE;
		}
		/* Push the modified hscb back out to the DMA buffers. */
		ahc_sync_scb(ahc, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Pause the sequencer before touching card SCB state (if needed). */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct hardware_scb *pending_hscb;
		u_int control;
		u_int scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		/* Only ULTRAENB and MK_MESSAGE are refreshed here. */
		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~(ULTRAENB|MK_MESSAGE);
		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	ahc_outb(ahc, SCBPTR, saved_scbptr);

	/* Only unpause if we were the ones that paused. */
	if (paused == 0)
		ahc_unpause(ahc);
}
2164
2165
/**************************** Pathing Information *****************************/
/*
 * Populate *devinfo with the addressing information for the current
 * connection, read directly from the controller's registers.
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int saved_scsiid;
	role_t role;
	int our_id;

	/* SSTAT0's TARGET bit tells us which role we are playing. */
	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS)
	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	/* SAVED_SCSIID encodes both the target id and the channel. */
	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}
2197
2198
struct ahc_phase_table_entry*
2199
ahc_lookup_phase_entry(int phase)
2200
{
2201
struct ahc_phase_table_entry *entry;
2202
struct ahc_phase_table_entry *last_entry;
2203
2204
/*
2205
* num_phases doesn't include the default entry which
2206
* will be returned if the phase doesn't match.
2207
*/
2208
last_entry = &ahc_phase_table[num_phases];
2209
for (entry = ahc_phase_table; entry < last_entry; entry++) {
2210
if (phase == entry->phase)
2211
break;
2212
}
2213
return (entry);
2214
}
2215
2216
void
2217
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2218
u_int lun, char channel, role_t role)
2219
{
2220
devinfo->our_scsiid = our_id;
2221
devinfo->target = target;
2222
devinfo->lun = lun;
2223
devinfo->target_offset = target;
2224
devinfo->channel = channel;
2225
devinfo->role = role;
2226
if (channel == 'B')
2227
devinfo->target_offset += 8;
2228
devinfo->target_mask = (0x01 << devinfo->target_offset);
2229
}
2230
2231
/*
 * Print the "adapter:channel:target:lun: " prefix used by diagnostic
 * messages for this connection.  No trailing newline is emitted so the
 * caller can append its own message text.
 */
void
ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
	       devinfo->target, devinfo->lun);
}
2237
2238
static void
2239
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2240
struct scb *scb)
2241
{
2242
role_t role;
2243
int our_id;
2244
2245
our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2246
role = ROLE_INITIATOR;
2247
if ((scb->flags & SCB_TARGET_SCB) != 0)
2248
role = ROLE_TARGET;
2249
ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2250
SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2251
}
2252
2253
/************************ Message Phase Processing ****************************/
2254
static void
2255
ahc_assert_atn(struct ahc_softc *ahc)
2256
{
2257
u_int scsisigo;
2258
2259
scsisigo = ATNO;
2260
if ((ahc->features & AHC_DT) == 0)
2261
scsisigo |= ahc_inb(ahc, SCSISIGI);
2262
ahc_outb(ahc, SCSISIGO, scsisigo);
2263
}
2264
2265
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	/* Lead with IDENTIFY (and any queue tag) unless resetting. */
	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		/* Tagged commands also send the tag type and tag id. */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		/* MK_MESSAGE was set but no reason for it can be found. */
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}
2352
2353
/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	struct ahc_syncrate *rate;
	int dowide;
	int dosync;
	int doppr;
	u_int period;
	u_int ppr_options;
	u_int offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	/*
	 * Nothing differs from current?  Renegotiate whichever setting
	 * the goal actually requests.
	 */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;

		if (bootverbose) {
			ahc_print_devinfo(ahc, devinfo);
			printf("Ensuring async\n");
		}
	}

	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {
		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		/* WDTR first; any needed SDTR follows in a later phase. */
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}
2449
2450
/*
2451
* Build a synchronous negotiation message in our message
2452
* buffer based on the input parameters.
2453
*/
2454
static void
2455
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2456
u_int period, u_int offset)
2457
{
2458
if (offset == 0)
2459
period = AHC_ASYNC_XFER_PERIOD;
2460
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2461
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2462
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2463
ahc->msgout_buf[ahc->msgout_index++] = period;
2464
ahc->msgout_buf[ahc->msgout_index++] = offset;
2465
ahc->msgout_len += 5;
2466
if (bootverbose) {
2467
printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2468
ahc_name(ahc), devinfo->channel, devinfo->target,
2469
devinfo->lun, period, offset);
2470
}
2471
}
2472
2473
/*
2474
* Build a wide negotiation message in our message
2475
* buffer based on the input parameters.
2476
*/
2477
static void
2478
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2479
u_int bus_width)
2480
{
2481
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2482
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2483
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2484
ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2485
ahc->msgout_len += 4;
2486
if (bootverbose) {
2487
printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2488
ahc_name(ahc), devinfo->channel, devinfo->target,
2489
devinfo->lun, bus_width);
2490
}
2491
}
2492
2493
/*
2494
* Build a parallel protocol request message in our message
2495
* buffer based on the input parameters.
2496
*/
2497
static void
2498
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2499
u_int period, u_int offset, u_int bus_width,
2500
u_int ppr_options)
2501
{
2502
if (offset == 0)
2503
period = AHC_ASYNC_XFER_PERIOD;
2504
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2505
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2506
ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2507
ahc->msgout_buf[ahc->msgout_index++] = period;
2508
ahc->msgout_buf[ahc->msgout_index++] = 0;
2509
ahc->msgout_buf[ahc->msgout_index++] = offset;
2510
ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2511
ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2512
ahc->msgout_len += 8;
2513
if (bootverbose) {
2514
printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2515
"offset %x, ppr_options %x\n", ahc_name(ahc),
2516
devinfo->channel, devinfo->target, devinfo->lun,
2517
bus_width, period, offset, ppr_options);
2518
}
2519
}
2520
2521
/*
 * Clear any active message state.
 *
 * Resets the host message loop bookkeeping, drops ATN if it is still
 * asserted, and clears any pending target-mode message indication.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	/* Hand the sequencer back a no-op message. */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}
2541
2542
/*
 * Recover from a SCSI protocol violation flagged by the sequencer.
 * Depending on how far the connection progressed, either attempt to
 * abort the offending transaction with a message, or reset the bus.
 */
static void
ahc_handle_proto_violation(struct ahc_softc *ahc)
{
	struct ahc_devinfo devinfo;
	struct scb *scb;
	u_int scbid;
	u_int seq_flags;
	u_int curphase;
	u_int lastphase;
	int found;

	/* Snapshot the connection state before deciding how to recover. */
	ahc_fetch_devinfo(ahc, &devinfo);
	scbid = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scbid);
	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
	lastphase = ahc_inb(ahc, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {
		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction. Print an error and reset the bus.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahc_print_path(ahc, scb);
			printf("No or incomplete CDB sent to device.\n");
		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command. Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it. Assert ATN and prepare to send an abort
			 * message.
			 */
			ahc_print_path(ahc, scb);
			printf("Completed command without status.\n");
		} else {
			ahc_print_path(ahc, scb);
			printf("Unknown protocol violation.\n");
			ahc_dump_card_state(ahc);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data/command
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahc_reset_channel(ahc, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		ahc_assert_atn(ahc);
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			/* No SCB to flag; queue the abort message directly. */
			ahc_print_devinfo(ahc, &devinfo);
			ahc->msgout_buf[0] = MSG_ABORT_TASK;
			ahc->msgout_len = 1;
			ahc->msgout_index = 0;
			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahc_print_path(ahc, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s. Attempting to abort.\n",
		       ahc_lookup_phase_entry(curphase)->phasemsg);
	}
}
2634
2635
/*
 * Manual message loop handler.
 *
 * Driven by the sequencer's HOST_MSG_LOOP interrupt, this state machine
 * transfers message bytes by hand in all four roles (initiator/target,
 * message-in/message-out), dispatching on ahc->msg_type.  RETURN_1 tells
 * the sequencer whether to re-enter the loop or resume normal operation.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct ahc_devinfo devinfo;
	u_int bus_phase;
	int end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* A pending parity error report takes priority. */
		if (ahc->send_msg_perror) {
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
				printf(" byte 0x%x\n", ahc->send_msg_perror);
#endif
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_assert_atn(ahc);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgout_buf[ahc->msgout_index]);
#endif
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahc->msgin_index = 0;
			/*
			 * If the target moved to message out and we still
			 * have (or need to restart) an outgoing message,
			 * switch to the message out handler.
			 */
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgin_buf[ahc->msgin_index]);
#endif

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0) {
#ifdef AHC_DEBUG
				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
					ahc_print_devinfo(ahc, &devinfo);
					printf("Asserting ATN for response\n");
				}
#endif
				ahc_assert_atn(ahc);
			}
		} else
			ahc->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("TARGET_MSG_IN");
		}
#endif

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ. So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0) {
			/*
			 * Change gears and see if this message is
			 * of interest to us or should be passed back
			 * to the sequencer.
			 */
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
				printf(" Honoring ATN Request.\n");
#endif
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;

			/*
			 * Disable SCSI Programmed I/O during the
			 * phase change so as to avoid phantom REQs.
			 */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);

			/*
			 * Since SPIORDY asserts when ACK is asserted
			 * for P_MSGOUT, and SPIORDY's assertion triggered
			 * our entry into this routine, wait for ACK to
			 * *de-assert* before changing phases.
			 */
			while ((ahc_inb(ahc, SCSISIGI) & ACKI) != 0)
				;

			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);

			/*
			 * All phase line changes require a bus
			 * settle delay before REQ is asserted.
			 * [SCSI SPI4 10.7.1]
			 */
			ahc_flush_device_writes(ahc);
			aic_delay(AHC_BUSSETTLE_DELAY);

			ahc->msgin_index = 0;
			/* Enable SCSI Programmed I/O to REQ for first byte */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/* Message fully sent; stop PIO and end the session. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgout_buf[ahc->msgout_index]);
#endif
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("TARGET_MSG_OUT");
		}
#endif
		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgin_buf[ahc->msgin_index]);
#endif

		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free. The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 * and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
#ifdef AHC_DEBUG
				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
					ahc_print_devinfo(ahc, &devinfo);
					printf(" preparing response.\n");
				}
#endif
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);

				/*
				 * All phase line changes require a bus
				 * settle delay before REQ is asserted.
				 * [SCSI SPI4 10.7.1] When transitioning
				 * from an OUT to an IN phase, we must
				 * also wait a data release delay to allow
				 * the initiator time to release the data
				 * lines. [SCSI SPI4 10.12]
				 */
				ahc_flush_device_writes(ahc);
				aic_delay(AHC_BUSSETTLE_DELAY
					+ AHC_DATARELEASE_DELAY);

				/*
				 * Enable SCSI Programmed I/O. This will
				 * immediately cause SPIORDY to assert,
				 * and the sequencer will call our message
				 * loop again.
				 */
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}
3005
3006
/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message. If "full" is false, return true if the target saw at
 * least the first byte of the message.
 *
 * Works by walking msgout_buf (which may contain several concatenated
 * messages) and comparing each message's position against msgout_index,
 * the count of bytes actually transmitted so far.
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/*
			 * Extended message layout: 0x01, length, code, args.
			 * end_index is the position just past the last byte.
			 */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {
				if (full) {
					/* Entire message must have gone out. */
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}
3053
3054
/*
3055
* Wait for a complete incoming message, parse it, and respond accordingly.
3056
*/
3057
static int
ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	int	reject;		/* TRUE: queue MESSAGE REJECT as our reply */
	int	done;		/* MSGLOOP_* status returned to the msg loop */
	int	response;	/* TRUE: we built an outgoing reply message */
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahc_handle_msg_reject(ahc, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahc->msgin_index < 2)
			break;
		switch (ahc->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	ppr_options;
			u_int	offset;
			u_int	saved_offset;

			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahc->msgin_buf[4];
			/* Clamp the request to what device+controller allow */
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
					    targ_scsirate & WIDEXFER,
					    devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahc->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_sdtr(ahc, devinfo,
						   period, offset);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahc->msgin_buf[3];
			saved_width = bus_width;
			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printf("(%s:%c:%d:%d): requested %dBit "
					       "transfers.  Rejecting...\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_wdtr(ahc, devinfo, bus_width);
				ahc->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			/*
			 * After a wide message, we are async, but
			 * some devices don't seem to honor this portion
			 * of the spec.  Force a renegotiation of the
			 * sync component of our transfer agreement even
			 * if our goal is async.  By updating our width
			 * after forcing the negotiation, we avoid
			 * renegotiating for width.
			 */
			ahc_update_neg_request(ahc, devinfo, tstate,
					       tinfo, AHC_NEG_ALWAYS);
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {
				/*
				 * We will always have an SDTR to send.
				 */
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_build_transfer_msg(ahc, devinfo);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			offset = ahc->msgin_buf[5];
			bus_width = ahc->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahc->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period == 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Mask out any options we don't support
			 * on any controller.  Transfer options are
			 * only available if we are negotiating wide.
			 */
			ppr_options &= MSG_EXT_PPR_DT_REQ;
			if (bus_width == 0)
				ppr_options = 0;

			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate,
					    &offset, bus_width,
					    devinfo->role);

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
					syncrate = NULL;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printf("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printf("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_ppr(ahc, devinfo, period, offset,
						  bus_width, ppr_options);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahc->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			/* Commit the (possibly zeroed) negotiated values */
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHC_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahc_inb(ahc, INITIATOR_TAG);
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/tag);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}
3474
3475
/*
3476
* Process a message reject message.
3477
*/
3478
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 *
	 * Returns non-zero if we queued a response message
	 * for the caller to send.
	 */
	struct scb *scb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int scb_index;
	u_int last_msg;
	int   response = 0;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * NOTE(review): scb is dereferenced below (tagged-command branch)
	 * without a NULL check; presumably a reject can only occur with a
	 * valid active SCB -- confirm against the sequencer's behavior.
	 */
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahc_inb(ahc, LAST_MSG);

	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		/*
		 * Target does not support the PPR message.
		 * Attempt to negotiate SPI-2 style.
		 */
		if (bootverbose) {
			printf("(%s:%c:%d:%d): PPR Rejected. "
			       "Trying WDTR/SDTR\n",
			       ahc_name(ahc), devinfo->channel,
			       devinfo->target, devinfo->lun);
		}
		tinfo->goal.ppr_options = 0;
		tinfo->curr.transport_version = 2;
		tinfo->goal.transport_version = 2;
		ahc->msgout_index = 0;
		ahc->msgout_len = 0;
		ahc_build_transfer_msg(ahc, devinfo);
		ahc->msgout_index = 0;
		response = 1;
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
		/* note 8bit xfers */
		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
		       "8bit transfers\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset != tinfo->curr.offset) {
			/* Start the sync negotiation */
			ahc->msgout_index = 0;
			ahc->msgout_len = 0;
			ahc_build_transfer_msg(ahc, devinfo);
			ahc->msgout_index = 0;
			response = 1;
		}
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				 /*paused*/TRUE);
		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahc_name(ahc), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printf("(%s:%c:%d:%d): refuses tagged commands.  "
			       "Performing non-tagged I/O\n", ahc_name(ahc),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
			       "Performing simple queue tagged I/O only\n",
			       ahc_name(ahc), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahc_outb(ahc, SCB_CONTROL,
			 ahc_inb(ahc, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		aic_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
		ahc_assert_atn(ahc);

		/*
		 * This transaction is now at the head of
		 * the untagged queue for this target.
		 */
		if ((ahc->flags & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;

			untagged_q =
			    &(ahc->untagged_queues[devinfo->target_offset]);
			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
		}
		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     scb->hscb->tag);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}
3623
3624
/*
3625
* Process an ignore wide residue message.
3626
*/
3627
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || aic_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
		/*
		 * The SCB_XFERLEN_ODD flag lives in the SCB's lun byte
		 * (spare bits re-used by the sequencer).
		 */
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			struct ahc_dma_seg *sg;
			uint32_t data_cnt;
			uint32_t sglen;

			/* Pull in all of the sgptr */
			sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
			data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);

			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHC_SG_LEN_MASK;
			}

			/* "Give back" the overrun byte */
			data_cnt += 1;
			sgptr &= SG_PTR_MASK;

			sg = ahc_sg_bus_to_virt(scb, sgptr);

			/*
			 * The residual sg ptr points to the next S/G
			 * to load so we must go back one.
			 */
			sg--;
			sglen = aic_le32toh(sg->len) & AHC_SG_LEN_MASK;
			if (sg != scb->sg_list
			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
				sg--;
				sglen = aic_le32toh(sg->len);
				/*
				 * Preserve High Address and SG_LIST bits
				 * while setting the count to 1.
				 */
				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));

				/*
				 * Increment sg so it points to the
				 * "next" sg.
				 */
				sg++;
				sgptr = ahc_sg_virt_to_bus(scb, sg);
			}
			ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
			ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue.  This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahc_outb(ahc, SCB_LUN,
				 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
		}
	}
}
3723
3724
/*
3725
* Reinitialize the data pointers for the active transfer
3726
* based on its current residual.
3727
*/
3728
static void
ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
{
	struct scb *scb;
	struct ahc_dma_seg *sg;
	u_int scb_index;
	uint32_t sgptr;
	uint32_t resid;
	uint32_t dataptr;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/* Assemble the 32-bit residual S/G pointer byte-by-byte */
	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);

	sgptr &= SG_PTR_MASK;
	sg = ahc_sg_bus_to_virt(scb, sgptr);

	/* The residual sg_ptr always points to the next sg */
	sg--;

	/* Residual count is only 24 bits wide */
	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);

	/* Restart the transfer at (segment end - residual) */
	dataptr = aic_le32toh(sg->addr)
		+ (aic_le32toh(sg->len) & AHC_SG_LEN_MASK)
		- resid;
	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
		u_int dscommand1;

		/*
		 * The high address bits are stored in the
		 * upper byte of the S/G length field.
		 */
		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
		ahc_outb(ahc, HADDR,
			 (aic_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
		ahc_outb(ahc, DSCOMMAND1, dscommand1);
	}
	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
	ahc_outb(ahc, HADDR, dataptr);
	ahc_outb(ahc, HCNT + 2, resid >> 16);
	ahc_outb(ahc, HCNT + 1, resid >> 8);
	ahc_outb(ahc, HCNT, resid);
	if ((ahc->features & AHC_ULTRA2) == 0) {
		/* Pre-Ultra2 chips also need the SCSI transfer count set */
		ahc_outb(ahc, STCNT + 2, resid >> 16);
		ahc_outb(ahc, STCNT + 1, resid >> 8);
		ahc_outb(ahc, STCNT, resid);
	}
}
3780
3781
/*
3782
* Handle the effects of issuing a bus device reset message.
3783
*/
3784
static void
ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		    cam_status status, char *message, int verbose_level)
{
#ifdef AHC_TARGET_MODE
	struct ahc_tmode_tstate* tstate;
	u_int lun;
#endif
	int found;

	/* Abort every SCB outstanding for this target on any lun */
	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
			       status);

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahc->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 */
	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_CUR, /*paused*/TRUE);
	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
			 /*period*/0, /*offset*/0, /*ppr_options*/0,
			 AHC_TRANS_CUR, /*paused*/TRUE);

	if (status != CAM_SEL_TIMEOUT)
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);

	if (message != NULL
	 && (verbose_level <= bootverbose))
		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
		       message, devinfo->channel, devinfo->target, found);
}
3837
3838
#ifdef AHC_TARGET_MODE
3839
static void
ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct scb *scb)
{

	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
		ahc_build_transfer_msg(ahc, devinfo);
	else
		panic("ahc_intr: AWAITING target message with no message");

	/* Rewind so the message loop sends from the start of the buffer */
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
}
3860
#endif
3861
/**************************** Initialization **********************************/
3862
/*
3863
* Allocate a controller structure for a new device
3864
* and perform its initial initialization.
3865
*/
3866
struct ahc_softc *
3867
ahc_alloc(void *platform_arg, char *name)
3868
{
3869
struct ahc_softc *ahc;
3870
int i;
3871
3872
ahc = device_get_softc((device_t)platform_arg);
3873
memset(ahc, 0, sizeof(*ahc));
3874
ahc->seep_config = malloc(sizeof(*ahc->seep_config),
3875
M_DEVBUF, M_NOWAIT);
3876
if (ahc->seep_config == NULL) {
3877
free(name, M_DEVBUF);
3878
return (NULL);
3879
}
3880
LIST_INIT(&ahc->pending_scbs);
3881
LIST_INIT(&ahc->timedout_scbs);
3882
/* We don't know our unit number until the OSM sets it */
3883
ahc->name = name;
3884
ahc->unit = -1;
3885
ahc->description = NULL;
3886
ahc->channel = 'A';
3887
ahc->channel_b = 'B';
3888
ahc->chip = AHC_NONE;
3889
ahc->features = AHC_FENONE;
3890
ahc->bugs = AHC_BUGNONE;
3891
ahc->flags = AHC_FNONE;
3892
/*
3893
* Default to all error reporting enabled with the
3894
* sequencer operating at its fastest speed.
3895
* The bus attach code may modify this.
3896
*/
3897
ahc->seqctl = FASTMODE;
3898
3899
for (i = 0; i < AHC_NUM_TARGETS; i++)
3900
TAILQ_INIT(&ahc->untagged_queues[i]);
3901
if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3902
ahc_free(ahc);
3903
ahc = NULL;
3904
}
3905
ahc_lockinit(ahc);
3906
return (ahc);
3907
}
3908
3909
int
ahc_softc_init(struct ahc_softc *ahc)
{

	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) == 0)
		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
	else
		ahc->unpause = 0;
	/* "pause" is the unpause value with the PAUSE bit asserted */
	ahc->pause = ahc->unpause | PAUSE;
	/* XXX The shared scb data stuff should be deprecated */
	if (ahc->scb_data == NULL) {
		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
				       M_DEVBUF, M_NOWAIT);
		if (ahc->scb_data == NULL)
			return (ENOMEM);
		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
	}

	return (0);
}
3930
3931
void
ahc_softc_insert(struct ahc_softc *ahc)
{
	struct ahc_softc *list_ahc;

#if AIC_PCI_CONFIG > 0
	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
			aic_dev_softc_t list_pci;
			aic_dev_softc_t pci;

			list_pci = list_ahc->dev_softc;
			pci = ahc->dev_softc;
			/* Same physical card == same bus and slot */
			if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci)
			 && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) {
				struct ahc_softc *master;
				struct ahc_softc *slave;

				if (aic_get_pci_function(list_pci) == 0) {
					master = list_ahc;
					slave = ahc;
				} else {
					master = ahc;
					slave = list_ahc;
				}
				/* Inherit BIOS enable and primary channel */
				slave->flags &= ~AHC_BIOS_ENABLED;
				slave->flags |=
				    master->flags & AHC_BIOS_ENABLED;
				slave->flags &= ~AHC_PRIMARY_CHANNEL;
				slave->flags |=
				    master->flags & AHC_PRIMARY_CHANNEL;
				break;
			}
		}
	}
#endif

	/*
	 * Insertion sort into our list of softcs.
	 */
	list_ahc = TAILQ_FIRST(&ahc_tailq);
	while (list_ahc != NULL
	    && ahc_softc_comp(ahc, list_ahc) <= 0)
		list_ahc = TAILQ_NEXT(list_ahc, links);
	if (list_ahc != NULL)
		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
	else
		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
	ahc->init_level++;
}
3986
3987
void
3988
ahc_set_unit(struct ahc_softc *ahc, int unit)
3989
{
3990
ahc->unit = unit;
3991
}
3992
3993
void
3994
ahc_set_name(struct ahc_softc *ahc, char *name)
3995
{
3996
if (ahc->name != NULL)
3997
free(ahc->name, M_DEVBUF);
3998
ahc->name = name;
3999
}
4000
4001
void
ahc_free(struct ahc_softc *ahc)
{
	int i;

	ahc_terminate_recovery_thread(ahc);
	/*
	 * Tear down in reverse order of initialization.  init_level
	 * records how far setup progressed; each case falls through
	 * to undo the earlier stages as well.
	 */
	switch (ahc->init_level) {
	default:
	case 5:
		ahc_shutdown(ahc);
		/* FALLTHROUGH */
	case 4:
		aic_dmamap_unload(ahc, ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 3:
		aic_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 2:
		aic_dma_tag_destroy(ahc, ahc->shared_data_dmat);
		/* FALLTHROUGH */
	case 1:
		aic_dma_tag_destroy(ahc, ahc->buffer_dmat);
		break;
	case 0:
		break;
	}

	aic_dma_tag_destroy(ahc, ahc->parent_dmat);
	ahc_platform_free(ahc);
	ahc_fini_scbdata(ahc);
	/* Release per-target (and, in target mode, per-lun) state */
	for (i = 0; i < AHC_NUM_TARGETS; i++) {
		struct ahc_tmode_tstate *tstate;

		tstate = ahc->enabled_targets[i];
		if (tstate != NULL) {
#ifdef AHC_TARGET_MODE
			int j;

			for (j = 0; j < AHC_NUM_LUNS; j++) {
				struct ahc_tmode_lstate *lstate;

				lstate = tstate->enabled_luns[j];
				if (lstate != NULL) {
					xpt_free_path(lstate->path);
					free(lstate, M_DEVBUF);
				}
			}
#endif
			free(tstate, M_DEVBUF);
		}
	}
#ifdef AHC_TARGET_MODE
	if (ahc->black_hole != NULL) {
		xpt_free_path(ahc->black_hole->path);
		free(ahc->black_hole, M_DEVBUF);
	}
#endif
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	if (ahc->seep_config != NULL)
		free(ahc->seep_config, M_DEVBUF);
	return;
}
4065
4066
void
ahc_shutdown(void *arg)
{
	struct ahc_softc *ahc;
	int i;

	ahc = (struct ahc_softc *)arg;

	/* This will reset most registers to 0, but not all */
	ahc_reset(ahc, /*reinit*/FALSE);
	ahc_outb(ahc, SCSISEQ, 0);
	ahc_outb(ahc, SXFRCTL0, 0);
	ahc_outb(ahc, DSPCISTATUS, 0);

	/* Zero the register range [TARG_SCSIRATE, SCSICONF) as well */
	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
		ahc_outb(ahc, i, 0);
}
4083
4084
/*
4085
* Reset the controller and record some information about it
4086
* that is only available just after a reset. If "reinit" is
4087
* non-zero, this reset occurred after initial configuration
4088
* and the caller requests that the chip be fully reinitialized
4089
* to a runable state. Chip interrupts are *not* enabled after
4090
* a reinitialization. The caller must enable interrupts via
4091
* ahc_intr_enable().
4092
*/
4093
int
ahc_reset(struct ahc_softc *ahc, int reinit)
{
	u_int	sblkctl;
	u_int	sxfrctl1_a, sxfrctl1_b;
	int	error;
	int	wait;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahc_pause(ahc);
	sxfrctl1_b = 0;
	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
		u_int sblkctl;	/* NOTE: shadows the outer sblkctl */

		/*
		 * Save channel B's settings in case this chip
		 * is setup for TWIN channel operation.
		 */
		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);

	/* Kick off the chip reset while keeping the sequencer paused */
	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		aic_delay(1000);
	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahc_name(ahc));
	}
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/* Determine channel configuration */
	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
	/* No Twin Channel PCI cards */
	if ((ahc->chip & AHC_PCI) != 0)
		sblkctl &= ~SELBUSB;
	switch (sblkctl) {
	case 0:
		/* Single Narrow Channel */
		break;
	case 2:
		/* Wide Channel */
		ahc->features |= AHC_WIDE;
		break;
	case 8:
		/* Twin Channel */
		ahc->features |= AHC_TWIN;
		break;
	default:
		printf(" Unsupported adapter type.  Ignoring\n");
		return(-1);
	}

	/*
	 * Reload sxfrctl1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	if ((ahc->features & AHC_TWIN) != 0) {
		u_int sblkctl;	/* NOTE: shadows the outer sblkctl */

		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);

	error = 0;
	if (reinit != 0)
		/*
		 * If a recovery action has forced a chip reset,
		 * re-initialize the chip to our liking.
		 */
		error = ahc->bus_chip_init(ahc);
#ifdef AHC_DUMP_SEQ
	else
		ahc_dumpseq(ahc);
#endif

	return (error);
}
4195
4196
/*
4197
* Determine the number of SCBs available on the controller
4198
*/
4199
int
4200
ahc_probe_scbs(struct ahc_softc *ahc) {
4201
int i;
4202
4203
for (i = 0; i < AHC_SCB_MAX; i++) {
4204
ahc_outb(ahc, SCBPTR, i);
4205
ahc_outb(ahc, SCB_BASE, i);
4206
if (ahc_inb(ahc, SCB_BASE) != i)
4207
break;
4208
ahc_outb(ahc, SCBPTR, 0);
4209
if (ahc_inb(ahc, SCB_BASE) != 0)
4210
break;
4211
}
4212
return (i);
4213
}
4214
4215
static void
4216
ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
4217
{
4218
bus_addr_t *baddr;
4219
4220
baddr = (bus_addr_t *)arg;
4221
*baddr = segs->ds_addr;
4222
}
4223
4224
static void
ahc_build_free_scb_list(struct ahc_softc *ahc)
{
	int scbsize;
	int i;

	/* Large SCBs are 64 bytes; standard SCBs are 32 */
	scbsize = 32;
	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
		scbsize = 64;

	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		int j;

		ahc_outb(ahc, SCBPTR, i);

		/*
		 * Touch all SCB bytes to avoid parity errors
		 * should one of our debugging routines read
		 * an otherwise uninitialized byte.
		 */
		for (j = 0; j < scbsize; j++)
			ahc_outb(ahc, SCB_BASE+j, 0xFF);

		/* Clear the control byte. */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		if ((ahc->flags & AHC_PAGESCBS) != 0)
			ahc_outb(ahc, SCB_NEXT, i+1);
		else
			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

		/* Make the tag number, SCSIID, and lun invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
		ahc_outb(ahc, SCB_SCSIID, 0xFF);
		ahc_outb(ahc, SCB_LUN, 0xFF);
	}

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list. */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		/* No free list. */
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}
4274
4275
/*
 * Allocate and initialize per-controller SCB bookkeeping: the kernel
 * SCB array, the DMA tags/memory/maps for hardware SCBs, sense
 * buffers and S/G lists, and the initial pool of SCBs.
 *
 * Returns 0 on success or an errno on failure.  scb_data->init_level
 * counts how far setup progressed so that ahc_fini_scbdata() can
 * unwind a partial initialization; the increments below must stay in
 * lock-step with the teardown cases there.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if (ahc->scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (aic_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			     &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	aic_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (aic_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	aic_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0,
	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
	/* ahc_alloc_scbs() grows the pool a page of S/G lists at a time. */
	while (ahc_alloc_scbs(ahc) != 0)
		;

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Reserve the next queued SCB.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:
	/* Partial allocations are reclaimed later via ahc_fini_scbdata(). */
	return (ENOMEM);
}
/*
 * Tear down everything set up by ahc_init_scbdata().  The switch on
 * init_level unwinds in the exact reverse order of construction; each
 * case deliberately falls through into the next lower level so that a
 * partially-initialized controller is cleaned up correctly.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		/* Release each S/G page chunk added by ahc_alloc_scbs(). */
		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			aic_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			aic_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		aic_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		aic_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
	/* FALLTHROUGH */
	case 5:
		aic_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
	/* FALLTHROUGH */
	case 4:
		aic_dma_tag_destroy(ahc, scb_data->sense_dmat);
	/* FALLTHROUGH */
	case 3:
		aic_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
	/* FALLTHROUGH */
	case 2:
		aic_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
	/* FALLTHROUGH */
	case 1:
		aic_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}
int
4468
ahc_alloc_scbs(struct ahc_softc *ahc)
4469
{
4470
struct scb_data *scb_data;
4471
struct scb *next_scb;
4472
struct sg_map_node *sg_map;
4473
bus_addr_t physaddr;
4474
struct ahc_dma_seg *segs;
4475
int newcount;
4476
int i;
4477
4478
scb_data = ahc->scb_data;
4479
if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
4480
/* Can't allocate any more */
4481
return (0);
4482
4483
next_scb = &scb_data->scbarray[scb_data->numscbs];
4484
4485
sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
4486
4487
if (sg_map == NULL)
4488
return (0);
4489
4490
/* Allocate S/G space for the next batch of SCBS */
4491
if (aic_dmamem_alloc(ahc, scb_data->sg_dmat,
4492
(void **)&sg_map->sg_vaddr,
4493
BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
4494
&sg_map->sg_dmamap) != 0) {
4495
free(sg_map, M_DEVBUF);
4496
return (0);
4497
}
4498
4499
SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
4500
4501
aic_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
4502
sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
4503
&sg_map->sg_physaddr, /*flags*/0);
4504
4505
segs = sg_map->sg_vaddr;
4506
physaddr = sg_map->sg_physaddr;
4507
4508
newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
4509
newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
4510
for (i = 0; i < newcount; i++) {
4511
struct scb_platform_data *pdata;
4512
int error;
4513
pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
4514
M_DEVBUF, M_NOWAIT);
4515
if (pdata == NULL)
4516
break;
4517
next_scb->platform_data = pdata;
4518
next_scb->sg_map = sg_map;
4519
next_scb->sg_list = segs;
4520
/*
4521
* The sequencer always starts with the second entry.
4522
* The first entry is embedded in the scb.
4523
*/
4524
next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
4525
next_scb->ahc_softc = ahc;
4526
next_scb->flags = SCB_FLAG_NONE;
4527
error = aic_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
4528
&next_scb->dmamap);
4529
if (error != 0)
4530
break;
4531
4532
next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
4533
next_scb->hscb->tag = ahc->scb_data->numscbs;
4534
aic_timer_init(&next_scb->io_timer);
4535
SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
4536
next_scb, links.sle);
4537
segs += AHC_NSEG;
4538
physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
4539
next_scb++;
4540
ahc->scb_data->numscbs++;
4541
}
4542
return (i);
4543
}
4544
4545
void
4546
ahc_controller_info(struct ahc_softc *ahc, char *buf)
4547
{
4548
int len;
4549
4550
len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
4551
buf += len;
4552
if ((ahc->features & AHC_TWIN) != 0)
4553
len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
4554
"B SCSI Id=%d, primary %c, ",
4555
ahc->our_id, ahc->our_id_b,
4556
(ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
4557
else {
4558
const char *speed;
4559
const char *type;
4560
4561
speed = "";
4562
if ((ahc->features & AHC_ULTRA) != 0) {
4563
speed = "Ultra ";
4564
} else if ((ahc->features & AHC_DT) != 0) {
4565
speed = "Ultra160 ";
4566
} else if ((ahc->features & AHC_ULTRA2) != 0) {
4567
speed = "Ultra2 ";
4568
}
4569
if ((ahc->features & AHC_WIDE) != 0) {
4570
type = "Wide";
4571
} else {
4572
type = "Single";
4573
}
4574
len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
4575
speed, type, ahc->channel, ahc->our_id);
4576
}
4577
buf += len;
4578
4579
if ((ahc->flags & AHC_PAGESCBS) != 0)
4580
sprintf(buf, "%d/%d SCBs",
4581
ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
4582
else
4583
sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
4584
}
4585
4586
/*
 * Program the chip into its operational state: SCSI ids, transfer
 * control registers, busy-target table, queue pointers, sequencer
 * array addresses, and finally the sequencer program itself.  Called
 * at attach and again after any full chip reset.  Returns 0 on
 * success or the error from ahc_loadseq().  The register write order
 * below is deliberate — do not reorder.
 */
int
ahc_chip_init(struct ahc_softc *ahc)
{
	int	 term;
	int	 error;
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 scsiseq_template;
	uint32_t physaddr;

	ahc_outb(ahc, SEQ_FLAGS, 0);
	ahc_outb(ahc, SEQ_FLAGS2, 0);

	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
	if (ahc->features & AHC_TWIN) {
		/*
		 * Setup Channel B first.
		 */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
		ahc_outb(ahc, SCSIID, ahc->our_id_b);
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
		if ((ahc->features & AHC_ULTRA2) != 0)
			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

		/* Select Channel A */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
	else
		ahc_outb(ahc, SCSIID, ahc->our_id);
	scsi_conf = ahc_inb(ahc, SCSICONF);
	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
				|term|ahc->seltime
				|ENSTIMER|ACTNEGEN);
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

	/* There are no untagged SCBs active yet. */
	for (i = 0; i < 16; i++) {
		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
		if ((ahc->flags & AHC_SCB_BTT) != 0) {
			int lun;

			/*
			 * The SCB based BTT allows an entry per
			 * target and lun pair.
			 */
			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
		}
	}

	/* All of our queues are empty */
	for (i = 0; i < 256; i++)
		ahc->qoutfifo[i] = SCB_LIST_NULL;
	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);

	for (i = 0; i < 256; i++)
		ahc->qinfifo[i] = SCB_LIST_NULL;

	if ((ahc->features & AHC_MULTI_TID) != 0) {
		ahc_outb(ahc, TARGID, 0);
		ahc_outb(ahc, TARGID + 1, 0);
	}

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	physaddr = ahc->scb_data->hscb_busaddr;
	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);

	physaddr = ahc->shared_data_busaddr;
	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);

	/*
	 * Initialize the group code to command length table.
	 * This overrides the values in TARG_SCSIRATE, so only
	 * setup the table after we have processed that information.
	 */
	ahc_outb(ahc, CMDSIZE_TABLE, 5);
	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);

	if ((ahc->features & AHC_HS_MAILBOX) != 0)
		ahc_outb(ahc, HS_MAILBOX, 0);

	/* Tell the sequencer of our initial queue positions */
	if ((ahc->features & AHC_TARGETMODE) != 0) {
		ahc->tqinfifonext = 1;
		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
	}
	ahc->qinfifonext = 0;
	ahc->qoutfifonext = 0;
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SDSCB_QOFF, 0);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
	}

	/* We don't have any waiting selections */
	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);

	/* Our disconnection list is empty too */
	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);

	/* Message out buffer starts empty */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
	if ((ahc->flags & AHC_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);

	/* Initialize our list of free SCBs. */
	ahc_build_free_scb_list(ahc);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Load the Sequencer program and Enable the adapter
	 * in "fast" mode.
	 */
	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahc_name(ahc));

	error = ahc_loadseq(ahc);
	if (error != 0)
		return (error);

	if ((ahc->features & AHC_ULTRA2) != 0) {
		int wait;

		/*
		 * Wait for up to 500ms for our transceivers
		 * to settle.  If the adapter does not have
		 * a cable attached, the transceivers may
		 * never settle, so don't complain if we
		 * fail here.
		 */
		for (wait = 5000;
		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
		     wait--)
			aic_delay(100);
	}
	ahc_restart(ahc);
	return (0);
}
/*
 * Start the board, ready for normal operation.
 *
 * One-time attach path: creates the buffer and shared-data DMA tags
 * and mappings, allocates SCB data and transfer-negotiation state,
 * spawns the recovery thread, reads BIOS-left settings out of scratch
 * RAM into per-target "user" settings, and finally hands off to the
 * bus-specific chip init.  Returns 0 or an errno.
 */
int
ahc_init(struct ahc_softc *ahc)
{
	int	 max_targ;
	int	 error;
#ifdef AHC_TARGET_MODE
	int	 tmode_enable;
#endif
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 ultraenb;
	u_int	 discenable;
	u_int	 tagenable;
	size_t	 driver_data_size;

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
		ahc->flags |= AHC_SEQUENCER_DEBUG;
#endif

#ifdef AHC_PRINT_SRAM
	printf("Scratch Ram:");
	for (i = 0x20; i < 0x5f; i++) {
		if (((i % 8) == 0) && (i != 0)) {
			printf ("\n ");
		}
		printf (" 0x%x", ahc_inb(ahc, i));
	}
	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0x70; i < 0x7f; i++) {
			if (((i % 8) == 0) && (i != 0)) {
				printf ("\n ");
			}
			printf (" 0x%x", ahc_inb(ahc, i));
		}
	}
	printf ("\n");
	/*
	 * Reading uninitialized scratch ram may
	 * generate parity errors.
	 */
	ahc_outb(ahc, CLRINT, CLRPARERR);
	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
#endif
	max_targ = 15;

	/*
	 * Assume we have a board at this stage and it has been reset.
	 */
	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
		ahc->our_id = ahc->our_id_b = 7;

	/*
	 * Default to allowing initiator operations.
	 */
	ahc->flags |= AHC_INITIATORROLE;

	/*
	 * Only allow target mode features if this unit has them enabled.
	 */
#ifdef AHC_TARGET_MODE
	tmode_enable = ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) != 0);
	/* Allow a per-device hint to override the compile-time default. */
	resource_int_value(device_get_name(ahc->dev_softc),
			   device_get_unit(ahc->dev_softc),
			   "tmode_enable", &tmode_enable);

	if (tmode_enable == 0) {
		ahc->features &= ~AHC_TARGETMODE;
	} else {
		if (bootverbose && ((ahc->features & AHC_TARGETMODE) != 0))
			printf("%s: enabling target mode\n", ahc_name(ahc));
	}

#else
	ahc->features &= ~AHC_TARGETMODE;
#endif

	/* DMA tag for mapping buffers into device visible space. */
	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
					? (bus_addr_t)0x7FFFFFFFFFULL
					: BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
			       /*nsegments*/AHC_NSEG,
			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &ahc->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/*
	 * DMA tag for our command fifos and other data in system memory
	 * the card's sequencer must be able to access.  For initiator
	 * roles, we need to allocate space for the qinfifo and qoutfifo.
	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
	 * When providing for the target mode role, we must additionally
	 * provide space for the incoming target command fifo and an extra
	 * byte to deal with a dma bug in some chip versions.
	 */
	driver_data_size = 2 * 256 * sizeof(uint8_t);
	if ((ahc->features & AHC_TARGETMODE) != 0)
		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
				 + /*DMA WideOdd Bug Buffer*/1;
	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       driver_data_size,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* Allocation of driver data */
	if (aic_dmamem_alloc(ahc, ahc->shared_data_dmat,
			     (void **)&ahc->qoutfifo,
			     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			     &ahc->shared_data_dmamap) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* And permanently map it in */
	aic_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
			&ahc->shared_data_busaddr, /*flags*/0);

	/*
	 * Shared-data layout: [target cmds (tmode only)][qoutfifo][qinfifo]
	 * with the WideOdd bug byte at the very end (tmode only).
	 */
	if ((ahc->features & AHC_TARGETMODE) != 0) {
		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
		ahc->dma_bug_buf = ahc->shared_data_busaddr
				 + driver_data_size - 1;
		/* All target command blocks start out invalid. */
		for (i = 0; i < AHC_TMODE_CMDS; i++)
			ahc->targetcmds[i].cmd_valid = 0;
		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
	}
	ahc->qinfifo = &ahc->qoutfifo[256];

	ahc->init_level++;

	/* Allocate SCB data now that buffer_dmat is initialized */
	if (ahc->scb_data->maxhscbs == 0)
		if (ahc_init_scbdata(ahc) != 0)
			return (ENOMEM);

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
		printf("%s: unable to allocate ahc_tmode_tstate.  "
		       "Failing attach\n", ahc_name(ahc));
		return (ENOMEM);
	}

	if ((ahc->features & AHC_TWIN) != 0) {
		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
			printf("%s: unable to allocate ahc_tmode_tstate.  "
			       "Failing attach\n", ahc_name(ahc));
			return (ENOMEM);
		}
	}

	/*
	 * Fire up a recovery thread for this controller.
	 */
	error = ahc_spawn_recovery_thread(ahc);
	if (error != 0)
		return (error);

	/* Page SCBs whenever we can queue more commands than fit on chip. */
	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
		ahc->flags |= AHC_PAGESCBS;
	} else {
		ahc->flags &= ~AHC_PAGESCBS;
	}

#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_MISC) {
		printf("%s: hardware scb %u bytes; kernel scb %u bytes; "
		       "ahc_dma %u bytes\n",
			ahc_name(ahc),
			(u_int)sizeof(struct hardware_scb),
			(u_int)sizeof(struct scb),
			(u_int)sizeof(struct ahc_dma_seg));
	}
#endif /* AHC_DEBUG */

	/*
	 * Look at the information that board initialization or
	 * the board bios has left us.
	 */
	if (ahc->features & AHC_TWIN) {
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		if ((scsi_conf & RESET_SCSI) != 0
		 && (ahc->flags & AHC_INITIATORROLE) != 0)
			ahc->flags |= AHC_RESET_BUS_B;
	}

	scsi_conf = ahc_inb(ahc, SCSICONF);
	if ((scsi_conf & RESET_SCSI) != 0
	 && (ahc->flags & AHC_INITIATORROLE) != 0)
		ahc->flags |= AHC_RESET_BUS_A;

	ultraenb = 0;
	tagenable = ALL_TARGETS_MASK;

	/* Grab the disconnection disable table and invert it for our needs */
	if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
			"device parameters\n", ahc_name(ahc));
		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
		discenable = ALL_TARGETS_MASK;
		if ((ahc->features & AHC_ULTRA) != 0)
			ultraenb = ALL_TARGETS_MASK;
	} else {
		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
			   | ahc_inb(ahc, DISC_DSB));
		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
				 | ahc_inb(ahc, ULTRA_ENB);
	}

	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
		max_targ = 7;

	for (i = 0; i <= max_targ; i++) {
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		u_int our_id;
		u_int target_id;
		char channel;

		channel = 'A';
		our_id = ahc->our_id;
		target_id = i;
		/* Targets 8-15 live on channel B of a twin adapter. */
		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
			channel = 'B';
			our_id = ahc->our_id_b;
			target_id = i % 8;
		}
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
					    target_id, &tstate);
		/* Default to async narrow across the board */
		memset(tinfo, 0, sizeof(*tinfo));
		if (ahc->flags & AHC_USEDEFAULTS) {
			if ((ahc->features & AHC_WIDE) != 0)
				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;

			/*
			 * These will be truncated when we determine the
			 * connection type we have with the target.
			 */
			tinfo->user.period = ahc_syncrates->period;
			tinfo->user.offset = MAX_OFFSET;
		} else {
			u_int scsirate;
			uint16_t mask;

			/* Take the settings leftover in scratch RAM. */
			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
			mask = (0x01 << i);
			if ((ahc->features & AHC_ULTRA2) != 0) {
				u_int offset;
				u_int maxsync;

				if ((scsirate & SOFS) == 0x0F) {
					/*
					 * Haven't negotiated yet,
					 * so the format is different.
					 */
					/*
					 * NOTE(review): `?:` binds lower than
					 * `>>` and `|`, so everything before
					 * the `?` is the condition here; the
					 * comment layout suggests the intent
					 * was ((ultraenb & mask) ? 0x08 : 0)
					 * ORed into the rate.  Verify against
					 * the chip's scratch-RAM rate format
					 * before changing behavior.
					 */
					scsirate = (scsirate & SXFR) >> 4
						 | (ultraenb & mask)
						  ? 0x08 : 0x0
						 | (scsirate & WIDEXFER);
					offset = MAX_OFFSET_ULTRA2;
				} else
					offset = ahc_inb(ahc, TARG_OFFSET + i);
				if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
					/* Set to the lowest sync rate, 5MHz */
					scsirate |= 0x1c;
				maxsync = AHC_SYNCRATE_ULTRA2;
				if ((ahc->features & AHC_DT) != 0)
					maxsync = AHC_SYNCRATE_DT;
				tinfo->user.period =
				    ahc_find_period(ahc, scsirate, maxsync);
				if (offset == 0)
					tinfo->user.period = 0;
				else
					tinfo->user.offset = MAX_OFFSET;
				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
				 && (ahc->features & AHC_DT) != 0)
					tinfo->user.ppr_options =
					    MSG_EXT_PPR_DT_REQ;
			} else if ((scsirate & SOFS) != 0) {
				if ((scsirate & SXFR) == 0x40
				 && (ultraenb & mask) != 0) {
					/* Treat 10MHz as a non-ultra speed */
					scsirate &= ~SXFR;
					ultraenb &= ~mask;
				}
				tinfo->user.period =
				    ahc_find_period(ahc, scsirate,
						    (ultraenb & mask)
						   ? AHC_SYNCRATE_ULTRA
						   : AHC_SYNCRATE_FAST);
				if (tinfo->user.period != 0)
					tinfo->user.offset = MAX_OFFSET;
			}
			if (tinfo->user.period == 0)
				tinfo->user.offset = 0;
			if ((scsirate & WIDEXFER) != 0
			 && (ahc->features & AHC_WIDE) != 0)
				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
			tinfo->user.protocol_version = 4;
			if ((ahc->features & AHC_DT) != 0)
				tinfo->user.transport_version = 3;
			else
				tinfo->user.transport_version = 2;
			tinfo->goal.protocol_version = 2;
			tinfo->goal.transport_version = 2;
			tinfo->curr.protocol_version = 2;
			tinfo->curr.transport_version = 2;
		}
		tstate->ultraenb = 0;
	}
	ahc->user_discenable = discenable;
	ahc->user_tagenable = tagenable;

	return (ahc->bus_chip_init(ahc));
}
void
5117
ahc_intr_enable(struct ahc_softc *ahc, int enable)
5118
{
5119
u_int hcntrl;
5120
5121
hcntrl = ahc_inb(ahc, HCNTRL);
5122
hcntrl &= ~INTEN;
5123
ahc->pause &= ~INTEN;
5124
ahc->unpause &= ~INTEN;
5125
if (enable) {
5126
hcntrl |= INTEN;
5127
ahc->pause |= INTEN;
5128
ahc->unpause |= INTEN;
5129
}
5130
ahc_outb(ahc, HCNTRL, hcntrl);
5131
}
5132
5133
/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahc_pause_and_flushwork(struct ahc_softc *ahc)
{
	int intstat;
	int maxloops;
	int paused;

	maxloops = 1000;
	ahc->flags |= AHC_ALL_INTERRUPTS;
	paused = FALSE;
	do {
		if (paused) {
			ahc_unpause(ahc);
			/*
			 * Give the sequencer some time to service
			 * any active selections.
			 */
			aic_delay(500);
		}
		ahc_intr(ahc);
		ahc_pause(ahc);
		paused = TRUE;
		/* Stop initiating new selections while we drain. */
		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		intstat = ahc_inb(ahc, INTSTAT);
		if ((intstat & INT_PEND) == 0) {
			ahc_clear_critical_section(ahc);
			intstat = ahc_inb(ahc, INTSTAT);
		}
	/*
	 * Loop until: interrupts are quiet and no selection is in
	 * progress, the loop budget is exhausted, or (on removable
	 * adapters) INTSTAT reads 0xFF, indicating the card is gone.
	 */
	} while (--maxloops
	      && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
	      && ((intstat & INT_PEND) != 0
	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
	if (maxloops == 0) {
		printf("Infinite interrupt loop, INTSTAT = %x",
		       ahc_inb(ahc, INTSTAT));
	}
	ahc_platform_flushwork(ahc);
	ahc->flags &= ~AHC_ALL_INTERRUPTS;
}
int
5181
ahc_suspend(struct ahc_softc *ahc)
5182
{
5183
5184
ahc_pause_and_flushwork(ahc);
5185
5186
if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
5187
ahc_unpause(ahc);
5188
return (EBUSY);
5189
}
5190
5191
#ifdef AHC_TARGET_MODE
5192
/*
5193
* XXX What about ATIOs that have not yet been serviced?
5194
* Perhaps we should just refuse to be suspended if we
5195
* are acting in a target role.
5196
*/
5197
if (ahc->pending_device != NULL) {
5198
ahc_unpause(ahc);
5199
return (EBUSY);
5200
}
5201
#endif
5202
ahc_shutdown(ahc);
5203
return (0);
5204
}
5205
5206
/*
 * Bring the controller back after a suspend: fully reset and
 * re-initialize the chip, re-enable interrupt delivery, and restart
 * the sequencer.  Always returns 0.
 */
int
ahc_resume(struct ahc_softc *ahc)
{

	ahc_reset(ahc, /*reinit*/TRUE);
	ahc_intr_enable(ahc, TRUE);
	ahc_restart(ahc);
	return (0);
}
/************************** Busy Target Table *********************************/
5217
/*
5218
* Return the untagged transaction id for a given target/channel lun.
5219
* Optionally, clear the entry.
5220
*/
5221
u_int
5222
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5223
{
5224
u_int scbid;
5225
u_int target_offset;
5226
5227
if ((ahc->flags & AHC_SCB_BTT) != 0) {
5228
u_int saved_scbptr;
5229
5230
saved_scbptr = ahc_inb(ahc, SCBPTR);
5231
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5232
scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
5233
ahc_outb(ahc, SCBPTR, saved_scbptr);
5234
} else {
5235
target_offset = TCL_TARGET_OFFSET(tcl);
5236
scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
5237
}
5238
5239
return (scbid);
5240
}
5241
5242
void
5243
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5244
{
5245
u_int target_offset;
5246
5247
if ((ahc->flags & AHC_SCB_BTT) != 0) {
5248
u_int saved_scbptr;
5249
5250
saved_scbptr = ahc_inb(ahc, SCBPTR);
5251
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5252
ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
5253
ahc_outb(ahc, SCBPTR, saved_scbptr);
5254
} else {
5255
target_offset = TCL_TARGET_OFFSET(tcl);
5256
ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
5257
}
5258
}
5259
5260
void
5261
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
5262
{
5263
u_int target_offset;
5264
5265
if ((ahc->flags & AHC_SCB_BTT) != 0) {
5266
u_int saved_scbptr;
5267
5268
saved_scbptr = ahc_inb(ahc, SCBPTR);
5269
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5270
ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
5271
ahc_outb(ahc, SCBPTR, saved_scbptr);
5272
} else {
5273
target_offset = TCL_TARGET_OFFSET(tcl);
5274
ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
5275
}
5276
}
5277
5278
/************************** SCB and SCB queue management **********************/
5279
int
5280
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
5281
char channel, int lun, u_int tag, role_t role)
5282
{
5283
int targ = SCB_GET_TARGET(ahc, scb);
5284
char chan = SCB_GET_CHANNEL(ahc, scb);
5285
int slun = SCB_GET_LUN(scb);
5286
int match;
5287
5288
match = ((chan == channel) || (channel == ALL_CHANNELS));
5289
if (match != 0)
5290
match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
5291
if (match != 0)
5292
match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
5293
if (match != 0) {
5294
#ifdef AHC_TARGET_MODE
5295
int group;
5296
5297
group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
5298
if (role == ROLE_INITIATOR) {
5299
match = (group != XPT_FC_GROUP_TMODE)
5300
&& ((tag == scb->hscb->tag)
5301
|| (tag == SCB_LIST_NULL));
5302
} else if (role == ROLE_TARGET) {
5303
match = (group == XPT_FC_GROUP_TMODE)
5304
&& ((tag == scb->io_ctx->csio.tag_id)
5305
|| (tag == SCB_LIST_NULL));
5306
}
5307
#else /* !AHC_TARGET_MODE */
5308
match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
5309
#endif /* AHC_TARGET_MODE */
5310
}
5311
5312
return match;
5313
}
5314
5315
void
5316
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
5317
{
5318
int target;
5319
char channel;
5320
int lun;
5321
5322
target = SCB_GET_TARGET(ahc, scb);
5323
lun = SCB_GET_LUN(scb);
5324
channel = SCB_GET_CHANNEL(ahc, scb);
5325
5326
ahc_search_qinfifo(ahc, target, channel, lun,
5327
/*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
5328
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5329
5330
ahc_platform_freeze_devq(ahc, scb);
5331
}
5332
5333
/*
 * Append an SCB to the tail of the qinfifo, linking it behind the
 * current last entry (if any), and notify the sequencer of the new
 * queue position via the appropriate queue-in register.
 */
void
ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
{
	struct scb *prev_scb;

	prev_scb = NULL;
	if (ahc_qinfifo_count(ahc) != 0) {
		u_int prev_tag;
		uint8_t prev_pos;

		/* Locate the SCB currently at the tail of the queue. */
		prev_pos = ahc->qinfifonext - 1;
		prev_tag = ahc->qinfifo[prev_pos];
		prev_scb = ahc_lookup_scb(ahc, prev_tag);
	}
	ahc_qinfifo_requeue(ahc, prev_scb, scb);
	/*
	 * Chips with queue registers take the producer index through
	 * HNSCB_QOFF; older chips use KERNEL_QINPOS.
	 */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}
}
5354
5355
/*
 * Link an SCB into the qinfifo after prev_scb.  When prev_scb is NULL
 * the queue was empty, so NEXT_QUEUED_SCB is pointed directly at the
 * new SCB.  The new SCB's hardware "next" pointer is set to the
 * reserved next_queued_scb so the sequencer's DMA chain stays valid,
 * and both touched hardware SCBs are synced for device access.
 */
static void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		/* Queue head: make this SCB the next one fetched. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	/* Terminate the chain at the always-reserved next SCB. */
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
5370
5371
/*
 * Return the number of entries currently in the qinfifo: the distance
 * between our producer index (qinfifonext) and the sequencer's
 * consumer index.  The uint8_t subtraction handles index wraparound.
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	uint8_t qinpos;
	uint8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		/*
		 * Write the value back after reading it.
		 * NOTE(review): presumably the SNSCB_QOFF read latches or
		 * disturbs the register, requiring this restore — confirm
		 * against the chip documentation.
		 */
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}
5385
5386
/*
 * Search the queue-in FIFO, the waiting-for-selection list, and the
 * untagged queues for SCBs matching target/channel/lun/tag/role.
 * Depending on "action" each match is counted (SEARCH_COUNT), removed
 * (SEARCH_REMOVE), or removed and completed back to CAM with the given
 * status (SEARCH_COMPLETE).  Returns the number of matches found.
 * The qinfifo is rebuilt in place: it is emptied and non-matching
 * entries are re-queued as the scan proceeds.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahc_search_action action)
{
	struct scb *scb;
	struct scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int found;
	int have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		/* Restore the register after the (latching) read. */
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			/* An untracked tag in the FIFO is unrecoverable. */
			printf("qinpos = %d, SCB index = %d\n",
				qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = aic_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					aic_set_transaction_status(scb, status);
				cstat = aic_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					aic_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Simply not re-queued == removed. */
				break;
			case SEARCH_COUNT:
				/* Counting only; keep it in the queue. */
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Not a match: put it back in the rebuilt queue. */
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	/* Publish the rebuilt queue's producer index to the chip. */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer. */
		qintail = ahc->qinfifonext - 1;
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	}

	/*
	 * Search waiting for selection list.
	 */
	curscbptr = ahc_inb(ahc, SCBPTR);
	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
	prev = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		uint8_t scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Waiting List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("scb_index = %d, next = %d\n",
				scb_index, next);
			panic("Waiting List traversal\n");
		}
		if (ahc_match_scb(ahc, scb, target, channel,
				  lun, SCB_LIST_NULL, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = aic_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					aic_set_transaction_status(scb,
								   status);
				cstat = aic_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					aic_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in Wait List\n");
				ahc_done(ahc, scb);
				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				next = ahc_rem_wscb(ahc, next, prev);
				break;
			case SEARCH_COUNT:
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
				break;
			}
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	/* Restore the SCB selection we found on entry. */
	ahc_outb(ahc, SCBPTR, curscbptr);

	found += ahc_search_untagged_queues(ahc, /*aic_io_ctx_t*/NULL, target,
					    channel, lun, status, action);

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}
5594
5595
/*
 * Search the driver's per-target untagged transaction queues for SCBs
 * matching target/channel/lun (and, optionally, a specific io context
 * ctx).  Matches are counted, removed, or completed with the given
 * status per "action".  Only commands that have not yet started
 * (SCB_ACTIVE clear) are considered.  Returns the match count.
 */
int
ahc_search_untagged_queues(struct ahc_softc *ahc, aic_io_ctx_t ctx,
			   int target, char channel, int lun, uint32_t status,
			   ahc_search_action action)
{
	struct scb *scb;
	int maxtarget;
	int found;
	int i;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	found = 0;
	i = 0;
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		/* One untagged queue per target; channel B uses slots 8-15. */
		maxtarget = 16;
		if (target != CAM_TARGET_WILDCARD) {
			i = target;
			if (channel == 'B')
				i += 8;
			maxtarget = i + 1;
		}
	} else {
		/* SCB-based busy tables: no untagged queues to scan. */
		maxtarget = 0;
	}

	for (; i < maxtarget; i++) {
		struct scb_tailq *untagged_q;
		struct scb *next_scb;

		untagged_q = &(ahc->untagged_queues[i]);
		next_scb = TAILQ_FIRST(untagged_q);
		while (next_scb != NULL) {
			scb = next_scb;
			/* Fetch the successor first; we may unlink scb. */
			next_scb = TAILQ_NEXT(scb, links.tqe);

			/*
			 * The head of the list may be the currently
			 * active untagged command for a device.
			 * We're only searching for commands that
			 * have not been started.  A transaction
			 * marked active but still in the qinfifo
			 * is removed by the qinfifo scanning code
			 * above.
			 */
			if ((scb->flags & SCB_ACTIVE) != 0)
				continue;

			if (ahc_match_scb(ahc, scb, target, channel, lun,
					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
			 || (ctx != NULL && ctx != scb->io_ctx))
				continue;

			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = aic_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					aic_set_transaction_status(scb, status);
				cstat = aic_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					aic_freeze_scb(scb);
				ahc_done(ahc, scb);
				break;
			}
			case SEARCH_REMOVE:
				scb->flags &= ~SCB_UNTAGGEDQ;
				TAILQ_REMOVE(untagged_q, scb, links.tqe);
				break;
			case SEARCH_COUNT:
				break;
			}
		}
	}

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}
5687
5688
/*
 * Walk the on-chip disconnected-SCB list looking for entries matching
 * target/channel/lun/tag.  Matching entries are counted and, when
 * "remove" is set, unlinked and returned to the free list.  When
 * "stop_on_first" is set the walk ends at the first match.  SCBPTR is
 * saved/restored only if "save_state" is set (callers that manage
 * SCBPTR themselves pass FALSE).  Returns the number of matches.
 */
int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct scb *scbp;
	u_int next;
	u_int prev;
	u_int count;
	u_int active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Disconnected List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}

		/* A self-referencing link means the list is corrupt. */
		if (next == prev) {
			panic("Disconnected List Loop. "
			      "cur SCBPTR == %x, prev SCBPTR == %x.",
			      next, prev);
		}
		scbp = ahc_lookup_scb(ahc, scb_index);
		if (ahc_match_scb(ahc, scbp, target, channel, lun,
				  tag, ROLE_INITIATOR)) {
			count++;
			if (remove) {
				next =
				    ahc_rem_scb_from_disc_list(ahc, prev, next);
			} else {
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
			if (stop_on_first)
				break;
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	if (save_state)
		ahc_outb(ahc, SCBPTR, active_scb);
	return (count);
}
5750
5751
/*
 * Remove an SCB from the on chip list of disconnected transactions.
 * This is empty/unused if we are not performing SCB paging.
 *
 * "prev" is the list predecessor of "scbptr" (SCB_LIST_NULL when
 * scbptr is the head).  The removed entry's control byte is cleared
 * and it is placed on the free list.  Returns the successor so the
 * caller's traversal can continue.
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	if (prev != SCB_LIST_NULL) {
		/* Splice the predecessor past the removed entry. */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		/* Removing the head: repoint the list-head register. */
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}
5775
5776
/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* Push the current SCB onto the head of the free list. */
		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
	}
}
5795
5796
/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 *
 * "scbpos" is the hardware SCB slot to unlink; "prev" is its list
 * predecessor (SCB_LIST_NULL when removing the head).  The removed
 * SCB is cleared and placed on the free list, and SCBPTR is restored
 * to its value on entry.
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}
5843
5844
/******************************** Error Handling ******************************/
5845
/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 *
 * Every place a matching command can live is scrubbed in turn: the
 * qinfifo/waiting list (via ahc_search_qinfifo), the busy-target
 * table, the disconnected list, the hardware SCB array, and finally
 * the software pending list.  Returns the number of aborted SCBs.
 */
int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct scb *scbp;
	struct scb *scbp_next;
	u_int active_scb;
	int i, j;
	int maxtarget;
	int minlun;
	int maxlun;

	int found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
	 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {
		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				scbid = ahc_index_busy_tcl(ahc, tcl);
				scbp = ahc_lookup_scb(ahc, scbid);
				/* Skip entries that don't match our criteria. */
				if (scbp == NULL
				 || ahc_match_scb(ahc, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
			}
		}

		/*
		 * Go through the disconnected list and remove any entries we
		 * have queued for completion, 0'ing their control byte too.
		 * We save the active SCB and restore it ourselves, so there
		 * is no reason for this search to restore it too.
		 */
		ahc_search_disc_list(ahc, target, channel, lun, tag,
				     /*stop_on_first*/FALSE, /*remove*/TRUE,
				     /*save_state*/FALSE);
	}

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.  In some cases, these remnants
	 * might not still have mappings in the scbindex array (e.g. unexpected
	 * bus free with the same scb queued for an abort).  Don't hold this
	 * against them.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if ((scbp == NULL && scbid != SCB_LIST_NULL)
		 || (scbp != NULL
		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Fetch the successor first; ahc_done unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = aic_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				aic_set_transaction_status(scbp, status);
			if (aic_get_transaction_status(scbp) != CAM_REQ_CMP)
				aic_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}
5981
5982
/*
 * Assert SCSI bus reset on the currently selected bus: mask the
 * reset interrupt, drive SCSIRSTO for AHC_BUSRESET_DELAY, deassert
 * it, clear latched interrupt status, and re-enable the reset
 * interrupt.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	/* Don't take an interrupt for the reset we cause ourselves. */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	/* Ensure the write reaches the chip before timing the pulse. */
	ahc_flush_device_writes(ahc);
	aic_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}
6000
6001
/*
 * Reset the named channel ('A' or 'B'), optionally driving the bus
 * reset ourselves (initiate_reset).  Completed commands are drained
 * first, then the bus is reset (stealthily switching banks on twin
 * channel chips if the other bus is currently selected), all pending
 * transactions on the channel are aborted with CAM_SCSI_BUS_RESET,
 * the XPT is notified, and transfer negotiations are reverted to
 * async/narrow.  Returns the number of aborted SCBs.
 */
int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct ahc_devinfo devinfo;
	u_int initiator, target, max_scsiid;
	u_int sblkctl;
	u_int scsiseq;
	u_int simode1;
	int found;
	int restart_needed;
	char cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahc_pause(ahc);

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
	/*
	 * XXX - In Twin mode, the tqinfifo may have commands
	 *	 for an unaffected channel in it.  However, if
	 *	 we have run out of ATIO resources to drain that
	 *	 queue, we may not get them all out here.  Further,
	 *	 the blocked transactions for the reset channel
	 *	 should just be killed off, irrespective of whether
	 *	 we are blocked on ATIO resources.  Write a routine
	 *	 to compact the tqinfifo appropriately.
	 */
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}
#endif

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
		cur_channel = 'B';
	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		/* Switch back to the bus that was active on entry. */
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahc_tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	return found;
}
6165
6166
/***************************** Residual Processing ****************************/
6167
/*
 * Calculate the residual for a just completed SCB.
 *
 * Reads the hardware SCB's sgptr and the status packet's residual
 * fields, and records the resulting byte count via aic_set_residual
 * (or aic_set_sense_residual for sense transfers).
 */
void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = aic_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = aic_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = aic_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		/* Any stray flag bits indicate firmware/driver corruption. */
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
		/* NOTREACHED */
		return;
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = aic_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((aic_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += aic_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	if ((scb->flags & SCB_SENSE) == 0)
		aic_set_residual(scb, resid);
	else
		aic_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
		ahc_print_path(ahc, scb);
		printf("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
	}
#endif
}
6254
6255
/******************************* Target Mode **********************************/
6256
#ifdef AHC_TARGET_MODE
6257
/*
 * Add a target mode event to this lun's queue
 *
 * Events are held in a fixed-size ring buffer (event_r_idx/event_w_idx)
 * until an immediate-notify CCB is available to deliver them; the
 * lun's devq is frozen once per queued event and released as events
 * drain or are discarded.
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Compute the ring occupancy, accounting for index wrap. */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Ring is full: drop the oldest event to make room. */
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}
6306
6307
/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 *
 * Drains the lun's event ring for as long as both a queued event and
 * an immediate-notify CCB are available, completing one CCB per
 * event via xpt_done().
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immediate_notify *inot;

	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immediate_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			/* All other events are delivered as received messages. */
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->arg = event->event_type;
			inot->seq_id = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		xpt_done((union ccb *)inot);
		/* Advance the ring's read index, wrapping at the end. */
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
6341
#endif
6342
6343
/******************** Sequencer Program Patching/Download *********************/
6344
6345
#ifdef AHC_DUMP_SEQ
6346
/*
 * Debug aid: dump the sequencer's instruction RAM to the console,
 * one 32-bit instruction per line.  Puts the chip in LOADRAM mode
 * and reads from address 0 through instruction_ram_size.
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < ahc->instruction_ram_size; i++) {
		uint8_t ins_bytes[4];

		/* SEQRAM auto-increments, yielding 4 bytes per instruction. */
		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
6364
#endif
6365
6366
/*
 * Download the sequencer firmware into the controller's instruction
 * RAM, applying conditional patches and recording the post-patch
 * addresses of the program's critical sections.
 *
 * Returns 0 on success or ENOMEM if the patched program does not fit
 * in this chip's instruction memory.
 */
static int
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	int	downloaded;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	/* The S/G prefetch window must cover at least two S/G elements. */
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	/* Enter LOADRAM mode and rewind the sequencer's address counter. */
	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}

		if (downloaded == ahc->instruction_ram_size) {
			/*
			 * We're about to exceed the instruction
			 * storage capacity for this chip.  Fail
			 * the load.
			 */
			printf("\n%s: Program too large for instruction memory "
			       "size of %d!\n", ahc_name(ahc),
			       ahc->instruction_ram_size);
			return (ENOMEM);
		}

		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					/*
					 * Close out the current section at
					 * its downloaded (post-patch)
					 * address.
					 */
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	/* Preserve the (possibly empty) set of applicable sections. */
	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {
		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	/* Leave LOADRAM mode. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printf(" %d instructions downloaded\n", downloaded);
		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
	}
	return (0);
}
6474
6475
static int
6476
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6477
u_int start_instr, u_int *skip_addr)
6478
{
6479
struct patch *cur_patch;
6480
struct patch *last_patch;
6481
u_int num_patches;
6482
6483
num_patches = sizeof(patches)/sizeof(struct patch);
6484
last_patch = &patches[num_patches];
6485
cur_patch = *start_patch;
6486
6487
while (cur_patch < last_patch && start_instr == cur_patch->begin) {
6488
if (cur_patch->patch_func(ahc) == 0) {
6489
/* Start rejecting code */
6490
*skip_addr = start_instr + cur_patch->skip_instr;
6491
cur_patch += cur_patch->skip_patch;
6492
} else {
6493
/* Accepted this patch. Advance to the next
6494
* one and wait for our instruction pointer to
6495
* hit this point.
6496
*/
6497
cur_patch++;
6498
}
6499
}
6500
6501
*start_patch = cur_patch;
6502
if (start_instr < *skip_addr)
6503
/* Still skipping */
6504
return (0);
6505
6506
return (1);
6507
}
6508
6509
/*
 * Emit a single sequencer instruction to SEQRAM: substitute any
 * downloadable constant referenced by the immediate, relocate branch
 * targets to account for removed patches, and either compute odd
 * parity (Ultra2 parts) or compress the encoding (older sequencers).
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count the instructions below the branch target that
		 * were removed by rejected patches so the target can be
		 * relocated to its post-patch address.
		 */
		for (i = 0; i < address;) {
			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * A set parity bit marks an immediate that is an index
		 * into the downloaded-constants table.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.  Verify that
			 * this is only a move of a single element
			 * and convert the BMOV to a MOV
			 * (AND with an immediate of FF).
			 */
			if (fmt1_ins->immediate != 1)
				panic("%s: BMOV not supported\n",
				      ahc_name(ahc));
			fmt1_ins->opcode = AIC_OP_AND;
			fmt1_ins->immediate = 0xff;
		}
		/* FALLTHROUGH */
	case AIC_OP_ROL:
		if ((ahc->features & AHC_ULTRA2) != 0) {
			int i, count;

			/* Calculate odd parity for the instruction */
			for (i = 0, count = 0; i < 31; i++) {
				uint32_t mask;

				mask = 0x01 << i;
				if ((instr.integer & mask) != 0)
					count++;
			}
			/* Set bit 31 so the 32-bit word has odd parity. */
			if ((count & 0x01) == 0)
				instr.format1.parity = 1;
		} else {
			/* Compress the instruction for older sequencers */
			if (fmt3_ins != NULL) {
				instr.integer =
					fmt3_ins->immediate
				      | (fmt3_ins->source << 8)
				      | (fmt3_ins->address << 16)
				      |	(fmt3_ins->opcode << 25);
			} else {
				instr.integer =
					fmt1_ins->immediate
				      | (fmt1_ins->source << 8)
				      | (fmt1_ins->destination << 16)
				      |	(fmt1_ins->ret << 24)
				      |	(fmt1_ins->opcode << 25);
			}
		}
		/* The sequencer is a little endian cpu */
		instr.integer = aic_htole32(instr.integer);
		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
		break;
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}
6632
6633
int
6634
ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
6635
const char *name, u_int address, u_int value,
6636
u_int *cur_column, u_int wrap_point)
6637
{
6638
int printed;
6639
u_int printed_mask;
6640
u_int dummy_column;
6641
6642
if (cur_column == NULL) {
6643
dummy_column = 0;
6644
cur_column = &dummy_column;
6645
}
6646
6647
if (*cur_column >= wrap_point) {
6648
printf("\n");
6649
*cur_column = 0;
6650
}
6651
printed = printf("%s[0x%x]", name, value);
6652
if (table == NULL) {
6653
printed += printf(" ");
6654
*cur_column += printed;
6655
return (printed);
6656
}
6657
printed_mask = 0;
6658
while (printed_mask != 0xFF) {
6659
int entry;
6660
6661
for (entry = 0; entry < num_entries; entry++) {
6662
if (((value & table[entry].mask)
6663
!= table[entry].value)
6664
|| ((printed_mask & table[entry].mask)
6665
== table[entry].mask))
6666
continue;
6667
6668
printed += printf("%s%s",
6669
printed_mask == 0 ? ":(" : "|",
6670
table[entry].name);
6671
printed_mask |= table[entry].mask;
6672
6673
break;
6674
}
6675
if (entry >= num_entries)
6676
break;
6677
}
6678
if (printed_mask != 0)
6679
printed += printf(") ");
6680
else
6681
printed += printf(" ");
6682
if (cur_column != NULL)
6683
*cur_column += printed;
6684
return (printed);
6685
}
6686
6687
/*
 * Dump a formatted snapshot of controller and driver state to the
 * console for debugging: key registers, the sequencer stack, every
 * SCB queue the driver and the card maintain, and the per-target
 * untagged queues.  The chip is paused for the duration (if it was
 * not already) and SCBPTR is restored before returning.
 */
void
ahc_dump_card_state(struct ahc_softc *ahc)
{
	struct	scb *scb;
	struct	scb_tailq *untagged_q;
	u_int	cur_col;
	int	paused;
	int	target;
	int	maxtarget;
	int	i;
	uint8_t last_phase;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t qoutpos;
	uint8_t scb_index;
	uint8_t saved_scbptr;

	/* Pause the chip only if the caller hasn't already. */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	last_phase = ahc_inb(ahc, LASTPHASE);
	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
	       "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
	if (paused)
		printf("Card was paused\n");
	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
	       ahc_inb(ahc, ARG_2));
	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
	       ahc_inb(ahc, SCBPTR));
	/* Decode the interesting status/control registers bit-by-bit. */
	cur_col = 0;
	if ((ahc->features & AHC_DT) != 0)
		ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
	ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
	ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
	ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
	ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
	ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
	ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
	ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
	ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
	ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
	ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
	ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
	ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
	ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
	ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
	ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
	ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
	ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
	ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
	if (cur_col != 0)
		printf("\n");
	printf("STACK:");
	for (i = 0; i < STACK_SIZE; i++)
		/*
		 * NOTE(review): each 16-bit entry is assembled from two
		 * successive reads of STACK -- presumably the register
		 * yields consecutive bytes; confirm against chip docs.
		 */
		printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
	printf("\nSCB count = %d\n", ahc->scb_data->numscbs);
	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
	/* QINFIFO */
	printf("QINFIFO entries: ");
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	qintail = ahc->qinfifonext;
	while (qinpos != qintail) {
		printf("%d ", ahc->qinfifo[qinpos]);
		qinpos++;
	}
	printf("\n");

	/* Card's waiting-for-selection list (i++ < 256 bounds a bad list). */
	printf("Waiting Queue entries: ");
	scb_index = ahc_inb(ahc, WAITING_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Disconnected Queue entries: ");
	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	/* Sync the DMA'd completion queue before walking it. */
	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	printf("QOUTFIFO entries: ");
	qoutpos = ahc->qoutfifonext;
	i = 0;
	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
		printf("%d ", ahc->qoutfifo[qoutpos]);
		qoutpos++;
	}
	printf("\n");

	printf("Sequencer Free SCB List: ");
	scb_index = ahc_inb(ahc, FREE_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d ", scb_index);
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Sequencer SCB Info: ");
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);
		cur_col = printf("\n%3d ", i);

		ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
		ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
		ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
		ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
	}
	printf("\n");

	printf("Pending list: ");
	i = 0;
	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
		if (i++ > 256)
			break;
		cur_col = printf("\n%3d ", scb->hscb->tag);
		ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
		ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
		ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			/* Also show the card's copy of this SCB. */
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
			printf("(");
			ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
					      &cur_col, 60);
			ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
			printf(")");
		}
	}
	printf("\n");

	printf("Kernel Free SCB list: ");
	i = 0;
	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
		if (i++ > 256)
			break;
		printf("%d ", scb->hscb->tag);
	}
	printf("\n");

	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
	for (target = 0; target <= maxtarget; target++) {
		untagged_q = &ahc->untagged_queues[target];
		if (TAILQ_FIRST(untagged_q) == NULL)
			continue;
		printf("Untagged Q(%d): ", target);
		i = 0;
		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
			if (i++ > 256)
				break;
			printf("%d ", scb->hscb->tag);
		}
		printf("\n");
	}

	ahc_platform_dump_card_state(ahc);
	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
	ahc_outb(ahc, SCBPTR, saved_scbptr);
	if (paused == 0)
		ahc_unpause(ahc);
}
6869
6870
/*************************** Timeout Handling *********************************/
6871
void
6872
ahc_timeout(struct scb *scb)
6873
{
6874
struct ahc_softc *ahc;
6875
6876
ahc = scb->ahc_softc;
6877
if ((scb->flags & SCB_ACTIVE) != 0) {
6878
if ((scb->flags & SCB_TIMEDOUT) == 0) {
6879
LIST_INSERT_HEAD(&ahc->timedout_scbs, scb,
6880
timedout_links);
6881
scb->flags |= SCB_TIMEDOUT;
6882
}
6883
ahc_wakeup_recovery_thread(ahc);
6884
}
6885
}
6886
6887
/*
6888
* Re-schedule a timeout for the passed in SCB if we determine that some
6889
* other SCB is in the process of recovery or an SCB with a longer
6890
* timeout is still pending. Limit our search to just "other_scb"
6891
* if it is non-NULL.
6892
*/
6893
static int
ahc_other_scb_timeout(struct ahc_softc *ahc, struct scb *scb,
		      struct scb *other_scb)
{
	u_int	timeout;
	int	candidates;

	ahc_print_path(ahc, scb);
	printf("Other SCB Timeout%s",
	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
	       ? " again\n" : "\n");

	timeout = aic_get_timeout(scb);
	scb->flags |= SCB_OTHERTCL_TIMEOUT;
	candidates = 0;
	if (other_scb != NULL) {
		/* Caller restricted the search to a single SCB. */
		if ((other_scb->flags
		    & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
		 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
			candidates++;
			timeout = MAX(aic_get_timeout(other_scb), timeout);
		}
	} else {
		/* Scan every pending SCB for a reason to keep waiting. */
		LIST_FOREACH(other_scb, &ahc->pending_scbs, pending_links) {
			if ((other_scb->flags
			    & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
			 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
				candidates++;
				timeout = MAX(aic_get_timeout(other_scb),
					      timeout);
			}
		}
	}

	if (candidates != 0) {
		/* Wait at least as long as the longest candidate. */
		aic_scb_timer_reset(scb, timeout);
	} else {
		ahc_print_path(ahc, scb);
		printf("No other SCB worth waiting for...\n");
	}

	return (candidates != 0);
}
6938
6939
/*
6940
* ahc_recover_commands determines if any of the commands that have currently
6941
* timedout are the root cause for this timeout. Innocent commands are given
6942
* a new timeout while we wait for the command executing on the bus to timeout.
6943
* This routine is invoked from a thread context so we are allowed to sleep.
6944
* Our lock is not held on entry.
6945
*/
6946
void
ahc_recover_commands(struct ahc_softc *ahc)
{
	struct	scb *scb;
	int	found;
	int	restart_needed;
	u_int	last_phase;

	/*
	 * Pause the controller and manually flush any
	 * commands that have just completed but that our
	 * interrupt handler has yet to see.
	 */
	ahc_pause_and_flushwork(ahc);

	if (LIST_EMPTY(&ahc->timedout_scbs) != 0) {
		/*
		 * The timedout commands have already
		 * completed.  This typically means
		 * that either the timeout value was on
		 * the hairy edge of what the device
		 * requires or - more likely - interrupts
		 * are not happening.
		 */
		printf("%s: Timedout SCBs already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		return;
	}

	restart_needed = 0;
	printf("%s: Recovery Initiated\n", ahc_name(ahc));
	ahc_dump_card_state(ahc);

	last_phase = ahc_inb(ahc, LASTPHASE);
	/* Examine each timed-out SCB until one is handled or requeued. */
	while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) {
		u_int	active_scb_index;
		u_int	saved_scbptr;
		int	target;
		int	lun;
		int	i;
		char	channel;

		target = SCB_GET_TARGET(ahc, scb);
		channel = SCB_GET_CHANNEL(ahc, scb);
		lun = SCB_GET_LUN(scb);

		ahc_print_path(ahc, scb);
		printf("SCB 0x%x - timed out\n", scb->hscb->tag);
		/* Log the S/G list of the victim for postmortem analysis. */
		if (scb->sg_count > 0) {
			for (i = 0; i < scb->sg_count; i++) {
				printf("sg[%d] - Addr 0x%x : Length %d\n",
				       i,
				       scb->sg_list[i].addr,
				       scb->sg_list[i].len & AHC_SG_LEN_MASK);
			}
		}
		if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
			/*
			 * Been down this road before.
			 * Do a full bus reset.
			 */
			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
bus_reset:
			found = ahc_reset_channel(ahc, channel,
						  /*Initiate Reset*/TRUE);
			printf("%s: Issued Channel %c Bus Reset. "
			       "%d SCBs aborted\n", ahc_name(ahc), channel,
			       found);
			continue;
		}

		/*
		 * Remove the command from the timedout list in
		 * preparation for requeing it.
		 */
		LIST_REMOVE(scb, timedout_links);
		scb->flags &= ~SCB_TIMEDOUT;

		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are actiing as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				if (ahc_other_scb_timeout(ahc, scb,
							  active_scb) == 0)
					goto bus_reset;
				continue;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {
				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				restart_needed = 1;
				break;
			}

			/* Ask the target to release the bus for a BDR. */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			aic_scb_timer_reset(scb, 2 * 1000);
		} else if (last_phase != P_BUSFREE
			&& (ahc_inb(ahc, SSTAT1) & REQINIT) == 0) {
			/*
			 * SCB is not identified, there
			 * is no pending REQ, and the sequencer
			 * has not seen a busfree.  Looks like
			 * a stuck connection waiting to
			 * go busfree.  Reset the bus.
			 */
			printf("%s: Connection stuck awaiting busfree or "
			       "Identify Msg.\n", ahc_name(ahc));
			goto bus_reset;
		} else {
			int disconnected;

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				restart_needed = 1;
				break;
			}

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			/* Still in the qinfifo means never disconnected. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {
				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				aic_scb_timer_reset(scb, 2 * 1000);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
				       "Flags = 0x%x\n", scb->hscb->tag,
				       scb->flags);
				goto bus_reset;
			}
		}
		break;
	}

	/*
	 * Any remaining SCBs were not the "culprit", so remove
	 * them from the timeout list.  The timer for these commands
	 * will be reset once the recovery SCB completes.
	 */
	while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) {
		LIST_REMOVE(scb, timedout_links);
		scb->flags &= ~SCB_TIMEDOUT;
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
}
7223
7224
/************************* Target Mode ****************************************/
7225
#ifdef AHC_TARGET_MODE
7226
cam_status
7227
ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
7228
struct ahc_tmode_tstate **tstate,
7229
struct ahc_tmode_lstate **lstate,
7230
int notfound_failure)
7231
{
7232
7233
if ((ahc->features & AHC_TARGETMODE) == 0)
7234
return (CAM_REQ_INVALID);
7235
7236
/*
7237
* Handle the 'black hole' device that sucks up
7238
* requests to unattached luns on enabled targets.
7239
*/
7240
if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
7241
&& ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
7242
*tstate = NULL;
7243
*lstate = ahc->black_hole;
7244
} else {
7245
u_int max_id;
7246
7247
max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
7248
if (ccb->ccb_h.target_id > max_id)
7249
return (CAM_TID_INVALID);
7250
7251
if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
7252
return (CAM_LUN_INVALID);
7253
7254
*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
7255
*lstate = NULL;
7256
if (*tstate != NULL)
7257
*lstate =
7258
(*tstate)->enabled_luns[ccb->ccb_h.target_lun];
7259
}
7260
7261
if (notfound_failure != 0 && *lstate == NULL)
7262
return (CAM_PATH_INVALID);
7263
7264
return (CAM_REQ_CMP);
7265
}
7266
7267
void
7268
ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7269
{
7270
struct ahc_tmode_tstate *tstate;
7271
struct ahc_tmode_lstate *lstate;
7272
struct ccb_en_lun *cel;
7273
union ccb *cancel_ccb;
7274
cam_status status;
7275
u_int target;
7276
u_int lun;
7277
u_int target_mask;
7278
u_int our_id;
7279
int error;
7280
char channel;
7281
7282
status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
7283
/*notfound_failure*/FALSE);
7284
7285
if (status != CAM_REQ_CMP) {
7286
ccb->ccb_h.status = status;
7287
return;
7288
}
7289
7290
if (cam_sim_bus(sim) == 0)
7291
our_id = ahc->our_id;
7292
else
7293
our_id = ahc->our_id_b;
7294
7295
if (ccb->ccb_h.target_id != our_id
7296
&& ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
7297
/*
7298
* our_id represents our initiator ID, or
7299
* the ID of the first target to have an
7300
* enabled lun in target mode. There are
7301
* two cases that may preclude enabling a
7302
* target id other than our_id.
7303
*
7304
* o our_id is for an active initiator role.
7305
* Since the hardware does not support
7306
* reselections to the initiator role at
7307
* anything other than our_id, and our_id
7308
* is used by the hardware to indicate the
7309
* ID to use for both select-out and
7310
* reselect-out operations, the only target
7311
* ID we can support in this mode is our_id.
7312
*
7313
* o The MULTARGID feature is not available and
7314
* a previous target mode ID has been enabled.
7315
*/
7316
if ((ahc->features & AHC_MULTIROLE) != 0) {
7317
if ((ahc->features & AHC_MULTI_TID) != 0
7318
&& (ahc->flags & AHC_INITIATORROLE) != 0) {
7319
/*
7320
* Only allow additional targets if
7321
* the initiator role is disabled.
7322
* The hardware cannot handle a re-select-in
7323
* on the initiator id during a re-select-out
7324
* on a different target id.
7325
*/
7326
status = CAM_TID_INVALID;
7327
} else if ((ahc->flags & AHC_INITIATORROLE) != 0
7328
|| ahc->enabled_luns > 0) {
7329
/*
7330
* Only allow our target id to change
7331
* if the initiator role is not configured
7332
* and there are no enabled luns which
7333
* are attached to the currently registered
7334
* scsi id.
7335
*/
7336
status = CAM_TID_INVALID;
7337
}
7338
} else if ((ahc->features & AHC_MULTI_TID) == 0
7339
&& ahc->enabled_luns > 0) {
7340
status = CAM_TID_INVALID;
7341
}
7342
}
7343
7344
if (status != CAM_REQ_CMP) {
7345
ccb->ccb_h.status = status;
7346
return;
7347
}
7348
7349
/*
7350
* We now have an id that is valid.
7351
* If we aren't in target mode, switch modes.
7352
*/
7353
if ((ahc->flags & AHC_TARGETROLE) == 0
7354
&& ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
7355
ahc_flag saved_flags;
7356
7357
printf("Configuring Target Mode\n");
7358
if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
7359
ccb->ccb_h.status = CAM_BUSY;
7360
return;
7361
}
7362
saved_flags = ahc->flags;
7363
ahc->flags |= AHC_TARGETROLE;
7364
if ((ahc->features & AHC_MULTIROLE) == 0)
7365
ahc->flags &= ~AHC_INITIATORROLE;
7366
ahc_pause(ahc);
7367
error = ahc_loadseq(ahc);
7368
if (error != 0) {
7369
/*
7370
* Restore original configuration and notify
7371
* the caller that we cannot support target mode.
7372
* Since the adapter started out in this
7373
* configuration, the firmware load will succeed,
7374
* so there is no point in checking ahc_loadseq's
7375
* return value.
7376
*/
7377
ahc->flags = saved_flags;
7378
(void)ahc_loadseq(ahc);
7379
ahc_restart(ahc);
7380
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
7381
return;
7382
}
7383
ahc_restart(ahc);
7384
}
7385
cel = &ccb->cel;
7386
target = ccb->ccb_h.target_id;
7387
lun = ccb->ccb_h.target_lun;
7388
channel = SIM_CHANNEL(ahc, sim);
7389
target_mask = 0x01 << target;
7390
if (channel == 'B')
7391
target_mask <<= 8;
7392
7393
if (cel->enable != 0) {
7394
u_int scsiseq;
7395
7396
/* Are we already enabled?? */
7397
if (lstate != NULL) {
7398
xpt_print_path(ccb->ccb_h.path);
7399
printf("Lun already enabled\n");
7400
ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
7401
return;
7402
}
7403
7404
if (cel->grp6_len != 0
7405
|| cel->grp7_len != 0) {
7406
/*
7407
* Don't (yet?) support vendor
7408
* specific commands.
7409
*/
7410
ccb->ccb_h.status = CAM_REQ_INVALID;
7411
printf("Non-zero Group Codes\n");
7412
return;
7413
}
7414
7415
/*
7416
* Seems to be okay.
7417
* Setup our data structures.
7418
*/
7419
if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
7420
tstate = ahc_alloc_tstate(ahc, target, channel);
7421
if (tstate == NULL) {
7422
xpt_print_path(ccb->ccb_h.path);
7423
printf("Couldn't allocate tstate\n");
7424
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7425
return;
7426
}
7427
}
7428
lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
7429
if (lstate == NULL) {
7430
xpt_print_path(ccb->ccb_h.path);
7431
printf("Couldn't allocate lstate\n");
7432
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7433
return;
7434
}
7435
memset(lstate, 0, sizeof(*lstate));
7436
status = xpt_create_path(&lstate->path, /*periph*/NULL,
7437
xpt_path_path_id(ccb->ccb_h.path),
7438
xpt_path_target_id(ccb->ccb_h.path),
7439
xpt_path_lun_id(ccb->ccb_h.path));
7440
if (status != CAM_REQ_CMP) {
7441
free(lstate, M_DEVBUF);
7442
xpt_print_path(ccb->ccb_h.path);
7443
printf("Couldn't allocate path\n");
7444
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7445
return;
7446
}
7447
SLIST_INIT(&lstate->accept_tios);
7448
SLIST_INIT(&lstate->immed_notifies);
7449
ahc_pause(ahc);
7450
if (target != CAM_TARGET_WILDCARD) {
7451
tstate->enabled_luns[lun] = lstate;
7452
ahc->enabled_luns++;
7453
7454
if ((ahc->features & AHC_MULTI_TID) != 0) {
7455
u_int targid_mask;
7456
7457
targid_mask = ahc_inb(ahc, TARGID)
7458
| (ahc_inb(ahc, TARGID + 1) << 8);
7459
7460
targid_mask |= target_mask;
7461
ahc_outb(ahc, TARGID, targid_mask);
7462
ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
7463
7464
ahc_update_scsiid(ahc, targid_mask);
7465
} else {
7466
u_int our_id;
7467
char channel;
7468
7469
channel = SIM_CHANNEL(ahc, sim);
7470
our_id = SIM_SCSI_ID(ahc, sim);
7471
7472
/*
7473
* This can only happen if selections
7474
* are not enabled
7475
*/
7476
if (target != our_id) {
7477
u_int sblkctl;
7478
char cur_channel;
7479
int swap;
7480
7481
sblkctl = ahc_inb(ahc, SBLKCTL);
7482
cur_channel = (sblkctl & SELBUSB)
7483
? 'B' : 'A';
7484
if ((ahc->features & AHC_TWIN) == 0)
7485
cur_channel = 'A';
7486
swap = cur_channel != channel;
7487
if (channel == 'A')
7488
ahc->our_id = target;
7489
else
7490
ahc->our_id_b = target;
7491
7492
if (swap)
7493
ahc_outb(ahc, SBLKCTL,
7494
sblkctl ^ SELBUSB);
7495
7496
ahc_outb(ahc, SCSIID, target);
7497
7498
if (swap)
7499
ahc_outb(ahc, SBLKCTL, sblkctl);
7500
}
7501
}
7502
} else
7503
ahc->black_hole = lstate;
7504
/* Allow select-in operations */
7505
if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
7506
scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7507
scsiseq |= ENSELI;
7508
ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7509
scsiseq = ahc_inb(ahc, SCSISEQ);
7510
scsiseq |= ENSELI;
7511
ahc_outb(ahc, SCSISEQ, scsiseq);
7512
}
7513
ahc_unpause(ahc);
7514
ccb->ccb_h.status = CAM_REQ_CMP;
7515
xpt_print_path(ccb->ccb_h.path);
7516
printf("Lun now enabled for target mode\n");
7517
} else {
7518
struct scb *scb;
7519
int i, empty;
7520
7521
if (lstate == NULL) {
7522
ccb->ccb_h.status = CAM_LUN_INVALID;
7523
return;
7524
}
7525
7526
ccb->ccb_h.status = CAM_REQ_CMP;
7527
LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
7528
struct ccb_hdr *ccbh;
7529
7530
ccbh = &scb->io_ctx->ccb_h;
7531
if (ccbh->func_code == XPT_CONT_TARGET_IO
7532
&& !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
7533
printf("CTIO pending\n");
7534
ccb->ccb_h.status = CAM_REQ_INVALID;
7535
return;
7536
}
7537
}
7538
7539
if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
7540
printf("ATIOs pending\n");
7541
while ((cancel_ccb = (union ccb *)SLIST_FIRST(&lstate->accept_tios)) != NULL) {
7542
SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
7543
cancel_ccb->ccb_h.status = CAM_REQ_ABORTED;
7544
xpt_done(cancel_ccb);
7545
};
7546
}
7547
7548
if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
7549
printf("INOTs pending\n");
7550
while ((cancel_ccb = (union ccb *)SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
7551
SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
7552
cancel_ccb->ccb_h.status = CAM_REQ_ABORTED;
7553
xpt_done(cancel_ccb);
7554
};
7555
}
7556
7557
if (ccb->ccb_h.status != CAM_REQ_CMP) {
7558
return;
7559
}
7560
7561
xpt_print_path(ccb->ccb_h.path);
7562
printf("Target mode disabled\n");
7563
xpt_free_path(lstate->path);
7564
free(lstate, M_DEVBUF);
7565
7566
ahc_pause(ahc);
7567
/* Can we clean up the target too? */
7568
if (target != CAM_TARGET_WILDCARD) {
7569
tstate->enabled_luns[lun] = NULL;
7570
ahc->enabled_luns--;
7571
for (empty = 1, i = 0; i < 8; i++)
7572
if (tstate->enabled_luns[i] != NULL) {
7573
empty = 0;
7574
break;
7575
}
7576
7577
if (empty) {
7578
ahc_free_tstate(ahc, target, channel,
7579
/*force*/FALSE);
7580
if (ahc->features & AHC_MULTI_TID) {
7581
u_int targid_mask;
7582
7583
targid_mask = ahc_inb(ahc, TARGID)
7584
| (ahc_inb(ahc, TARGID + 1)
7585
<< 8);
7586
7587
targid_mask &= ~target_mask;
7588
ahc_outb(ahc, TARGID, targid_mask);
7589
ahc_outb(ahc, TARGID+1,
7590
(targid_mask >> 8));
7591
ahc_update_scsiid(ahc, targid_mask);
7592
}
7593
}
7594
} else {
7595
ahc->black_hole = NULL;
7596
7597
/*
7598
* We can't allow selections without
7599
* our black hole device.
7600
*/
7601
empty = TRUE;
7602
}
7603
if (ahc->enabled_luns == 0) {
7604
/* Disallow select-in */
7605
u_int scsiseq;
7606
7607
scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7608
scsiseq &= ~ENSELI;
7609
ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7610
scsiseq = ahc_inb(ahc, SCSISEQ);
7611
scsiseq &= ~ENSELI;
7612
ahc_outb(ahc, SCSISEQ, scsiseq);
7613
7614
if ((ahc->features & AHC_MULTIROLE) == 0) {
7615
printf("Configuring Initiator Mode\n");
7616
ahc->flags &= ~AHC_TARGETROLE;
7617
ahc->flags |= AHC_INITIATORROLE;
7618
/*
7619
* Returning to a configuration that
7620
* fit previously will always succeed.
7621
*/
7622
(void)ahc_loadseq(ahc);
7623
ahc_restart(ahc);
7624
/*
7625
* Unpaused. The extra unpause
7626
* that follows is harmless.
7627
*/
7628
}
7629
}
7630
ahc_unpause(ahc);
7631
}
7632
}
7633
7634
static void
7635
ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
7636
{
7637
u_int scsiid_mask;
7638
u_int scsiid;
7639
7640
if ((ahc->features & AHC_MULTI_TID) == 0)
7641
panic("ahc_update_scsiid called on non-multitid unit\n");
7642
7643
/*
7644
* Since we will rely on the TARGID mask
7645
* for selection enables, ensure that OID
7646
* in SCSIID is not set to some other ID
7647
* that we don't want to allow selections on.
7648
*/
7649
if ((ahc->features & AHC_ULTRA2) != 0)
7650
scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
7651
else
7652
scsiid = ahc_inb(ahc, SCSIID);
7653
scsiid_mask = 0x1 << (scsiid & OID);
7654
if ((targid_mask & scsiid_mask) == 0) {
7655
u_int our_id;
7656
7657
/* ffs counts from 1 */
7658
our_id = ffs(targid_mask);
7659
if (our_id == 0)
7660
our_id = ahc->our_id;
7661
else
7662
our_id--;
7663
scsiid &= TID;
7664
scsiid |= our_id;
7665
}
7666
if ((ahc->features & AHC_ULTRA2) != 0)
7667
ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
7668
else
7669
ahc_outb(ahc, SCSIID, scsiid);
7670
}
7671
7672
/*
 * Drain the target-mode incoming command FIFO (tqinfifo), handing each
 * valid entry to ahc_handle_target_cmd().  Stops early if a command
 * cannot be processed (e.g. no ATIOs available), leaving it queued.
 *
 * 'paused' indicates whether the sequencer is already paused; it is
 * forced TRUE on AHC_AUTOPAUSE parts, which allow register access
 * without an explicit pause.
 */
void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
	struct target_cmd *cmd;

	/*
	 * If the card supports auto-access pause,
	 * we can access the card directly regardless
	 * of whether it is paused or not.
	 */
	if ((ahc->features & AHC_AUTOPAUSE) != 0)
		paused = TRUE;

	/* Pull in any command entries the controller DMA'd to host memory. */
	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahc_handle_target_cmd(ahc, cmd) != 0)
			break;

		/*
		 * Mark the slot free and hand it back to the controller
		 * before advancing our consumer index.
		 */
		cmd->cmd_valid = 0;
		aic_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahc->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		/*
		 * NOTE(review): the modulus against (HOST_TQINPOS - 1)
		 * batches the position update so the sequencer is only
		 * told once per HOST_TQINPOS entries — assumes
		 * HOST_TQINPOS is a power-of-two bit mask; confirm
		 * against the register definitions.
		 */
		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
				u_int hs_mailbox;

				/*
				 * HS_MAILBOX parts can be updated without
				 * pausing the sequencer.
				 */
				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
				hs_mailbox &= ~HOST_TQINPOS;
				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
			} else {
				/*
				 * Older parts require the sequencer to be
				 * paused while KERNEL_TQINPOS is written.
				 */
				if (!paused)
					ahc_pause(ahc);
				ahc_outb(ahc, KERNEL_TQINPOS,
					 ahc->tqinfifonext & HOST_TQINPOS);
				if (!paused)
					ahc_unpause(ahc);
			}
		}
	}
}
7725
7726
/*
 * Translate one raw target command entry from the tqinfifo into a CAM
 * accept-TIO (ATIO) and deliver it to the peripheral driver that has
 * the addressed lun enabled (or to the black hole driver for disabled
 * luns).
 *
 * Returns 0 on success, or 1 when no ATIOs are available for this lun,
 * in which case the caller must leave the command queued and retry
 * later (AHC_TQINFIFO_BLOCKED is set to record this state).
 */
static int
ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
{
	struct	  ahc_tmode_tstate *tstate;
	struct	  ahc_tmode_lstate *lstate;
	struct	  ccb_accept_tio *atio;
	uint8_t *byte;
	int	  initiator;
	int	  target;
	int	  lun;

	/* Decode who selected us, which of our IDs, and the lun. */
	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahc->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	/*
	 * NOTE(review): if no black hole driver is registered,
	 * lstate is NULL and the SLIST_FIRST below dereferences it.
	 * Presumably selections are never enabled without a black
	 * hole device (see the ENSELI handling in the lun
	 * enable/disable path) — confirm that invariant holds.
	 */
	if (lstate == NULL)
		lstate = ahc->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahc->flags |= AHC_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		if (bootverbose)
			printf("%s: ATIOs exhausted\n", ahc_name(ahc));
		return (1);
	} else
		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_TQIN) {
		printf("Incoming command from %d for %d:%d%s\n",
		       initiator, target, lun,
		       lstate == ahc->black_hole ? "(Black Holed)" : "");
	}
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahc->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	/*
	 * The firmware stores 0xFF in the first byte when the
	 * initiator sent no tag message; otherwise the tag action
	 * and tag id precede the CDB.
	 */
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags |= CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	}
	/* Skip past the tag-presence marker to the CDB itself. */
	byte++;

	/* Okay.  Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
#ifdef AHC_DEBUG
		if (ahc_debug & AHC_SHOW_TQIN) {
			printf("Received Immediate Command %d:%d:%d - %p\n",
			       initiator, target, lun, ahc->pending_device);
		}
#endif
		ahc->pending_device = lstate;
		aic_freeze_ccb((union ccb *)atio);
		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
	} else {
		atio->ccb_h.flags &= ~CAM_DIS_DISCONNECT;
	}

	/* Hand the completed ATIO up to CAM. */
	xpt_done((union ccb*)atio);
	return (0);
}
7845
7846
#endif
7847
7848