/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;


#define SG_ALL	SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

/**
 * enum scsi_timeout_action - How to handle a command that timed out.
 * @SCSI_EH_DONE: The command has already been completed.
 * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion.
 * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command.
 */
enum scsi_timeout_action {
	SCSI_EH_DONE,
	SCSI_EH_RESET_TIMER,
	SCSI_EH_NOT_HANDLED,
};

struct scsi_host_template {
	/*
	 * Put fields referenced in IO submission path together in
	 * same cacheline
	 */

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD. When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the driver has accepted the
	 * command. It must also push it to the HBA if the scsi_cmnd
	 * flag SCMD_LAST is set, or if the driver does not implement
	 * commit_rqs. The done() function must be called on the command
	 * when the driver has finished with it. (You may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand.)
	 *
	 * Queuecommand may also reject the command, in which case it must
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);

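	/*
	 * Example: a minimal queuecommand() sketch for a hypothetical
	 * "mydrv" driver. Illustrative only; the mydrv_* names are
	 * assumptions, not part of this interface.
	 *
	 *	static int mydrv_queuecommand(struct Scsi_Host *shost,
	 *				      struct scsi_cmnd *cmd)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(shost);
	 *
	 *		// Reject and let the midlayer retry once the host drains.
	 *		if (mydrv_ring_full(mh))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		// Accepted: hand the command to the HBA; scsi_done(cmd)
	 *		// is then called from the completion path.
	 *		mydrv_post_command(mh, cmd);
	 *		return 0;
	 *	}
	 */
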
	/*
	 * The commit_rqs function is used to trigger a hardware
	 * doorbell after some requests have been queued with
	 * queuecommand, when an error is encountered before sending
	 * the request with SCMD_LAST set.
	 *
	 * STATUS: OPTIONAL
	 */
	void (*commit_rqs)(struct Scsi_Host *, u16);

	struct module *module;
	const char *name;

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit. If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(*info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
		     void __user *arg);

#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handles the 32-bit ABI.
	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg);
#endif

	int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
	int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

	/*
	 * This is an error handling strategy routine. You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases. For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified. Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread. Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine. When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED (at least one of them)
	 */
	int (*eh_abort_handler)(struct scsi_cmnd *);
	int (*eh_device_reset_handler)(struct scsi_cmnd *);
	int (*eh_target_reset_handler)(struct scsi_cmnd *);
	int (*eh_bus_reset_handler)(struct scsi_cmnd *);
	int (*eh_host_reset_handler)(struct scsi_cmnd *);

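	/*
	 * Example: a minimal eh_abort_handler() sketch for a hypothetical
	 * driver; SUCCESS and FAILED are the midlayer's error-handling
	 * return codes, and the mydrv_* helpers are assumptions.
	 *
	 *	static int mydrv_eh_abort(struct scsi_cmnd *cmd)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(cmd->device->host);
	 *
	 *		// Runs from the EH thread: sleeping is allowed and no
	 *		// new commands are queued while we are here.
	 *		if (mydrv_abort_in_hw(mh, cmd))
	 *			return SUCCESS;
	 *		return FAILED;
	 *	}
	 */
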
	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver. Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations. This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation: If we didn't find any devices at this ID, you will
	 * get an immediate call to sdev_destroy(). If we find something
	 * here then you will get a call to sdev_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to sdev_destroy(). This is
	 * assuming you implement sdev_configure and sdev_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the sdev_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (*sdev_init)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *. If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device. All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1. Setting the device queue depth. Proper setting of this is
	 *    described in the comments for scsi_change_queue_depth.
	 * 2. Determining if the device supports the various synchronous
	 *    negotiation protocols. The device struct will already have
	 *    responded to INQUIRY and the results of the standard items
	 *    will have been shoved into the various device flag bits, eg.
	 *    device->sdtr will be true if the device supports SDTR messages.
	 * 3. Allocating command structs that the device will need.
	 * 4. Setting the default timeout on this device (if needed).
	 * 5. Anything else the low level driver might want to do on a device
	 *    specific setup basis...
	 * 6. Return 0 on success, non-0 on error. The device will be marked
	 *    as offline on error so that no access will occur. If you return
	 *    non-0, your sdev_destroy routine will never get called for this
	 *    device, so don't leave any loose memory hanging around; clean
	 *    up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (*sdev_configure)(struct scsi_device *, struct queue_limits *lim);

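	/*
	 * Example: a minimal sdev_configure() sketch for a hypothetical
	 * driver that only sets the queue depth (the depth value and the
	 * mydrv naming are assumptions):
	 *
	 *	static int mydrv_sdev_configure(struct scsi_device *sdev,
	 *					struct queue_limits *lim)
	 *	{
	 *		scsi_change_queue_depth(sdev, 32);
	 *		return 0;
	 *	}
	 */
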
	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa. The low level driver is responsible for freeing any memory
	 * it allocated in the sdev_init or sdev_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (*sdev_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver. Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (*target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Note: This callback is called with the host lock held and hence
	 * must not sleep.
	 *
	 * Status: OPTIONAL
	 */
	void (*target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host(). This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (*scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (*scan_start)(struct Scsi_Host *);

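	/*
	 * Example: the asynchronous-scan pair in a hypothetical driver.
	 * scan_start() kicks off discovery; scan_finished() is polled with
	 * the elapsed time in jiffies until it returns 1. The mydrv_*
	 * helpers and the 10-second cap are assumptions for illustration.
	 *
	 *	static void mydrv_scan_start(struct Scsi_Host *shost)
	 *	{
	 *		mydrv_start_discovery(shost_priv(shost));
	 *	}
	 *
	 *	static int mydrv_scan_finished(struct Scsi_Host *shost,
	 *				       unsigned long elapsed)
	 *	{
	 *		return mydrv_discovery_done(shost_priv(shost)) ||
	 *		       elapsed > 10 * HZ;
	 *	}
	 */
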
	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis). Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error. An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it. If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (*change_queue_depth)(struct scsi_device *, int);

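	/*
	 * Example: a change_queue_depth() sketch that clamps the requested
	 * depth to a hypothetical hardware limit before applying it (the
	 * MYDRV_MAX_QD limit is an assumption):
	 *
	 *	static int mydrv_change_queue_depth(struct scsi_device *sdev,
	 *					    int depth)
	 *	{
	 *		if (depth > MYDRV_MAX_QD)
	 *			depth = MYDRV_MAX_QD;
	 *		return scsi_change_queue_depth(sdev, depth);
	 *	}
	 */
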
	/*
	 * This function lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	void (*map_queues)(struct Scsi_Host *shost);

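	/*
	 * Example: a map_queues() sketch that falls back to the block
	 * layer's default CPU-to-queue spreading; drivers with IRQ
	 * affinity information would map from that instead. A sketch
	 * only, assuming the shared tag_set below.
	 *
	 *	static void mydrv_map_queues(struct Scsi_Host *shost)
	 *	{
	 *		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
	 *	}
	 */
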
	/*
	 * SCSI interface of blk_poll - poll for IO completions.
	 * Only applicable if the SCSI LLD exposes multiple h/w queues.
	 *
	 * Return value: Number of completed entries found.
	 *
	 * Status: OPTIONAL
	 */
	int (*mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);

	/*
	 * Check if scatterlists need to be padded for DMA draining.
	 *
	 * Status: OPTIONAL
	 */
	bool (*dma_need_drain)(struct request *rq);

	/*
	 * This function determines the BIOS parameters for a given
	 * hard disk. These tend to be numbers that are made up by
	 * the host adapter. Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (*bios_param)(struct scsi_device *, struct block_device *,
			  sector_t, int []);

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel, i.e. userspace, and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling.
	 *
	 * Status: OPTIONAL
	 */
	enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *);
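
	/*
	 * Example: an eh_timed_out() sketch that buys more time while a
	 * hypothetical firmware recovery is in flight and otherwise lets
	 * normal abort handling proceed (mydrv_* names are assumptions):
	 *
	 *	static enum scsi_timeout_action
	 *	mydrv_eh_timed_out(struct scsi_cmnd *cmd)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(cmd->device->host);
	 *
	 *		if (mydrv_fw_recovering(mh))
	 *			return SCSI_EH_RESET_TIMER;
	 *		return SCSI_EH_NOT_HANDLED;
	 *	}
	 */
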
	/*
	 * Optional routine that allows the transport to decide if a cmd
	 * is retryable. Return true if the transport is in a state in
	 * which the cmd should be retried.
	 */
	bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);

	/*
	 * This is an optional routine that allows the transport to initiate
	 * an LLD adapter or firmware reset using a sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */
	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET 1
#define SCSI_FIRMWARE_RESET 2

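	/*
	 * Example: a host_reset() sketch dispatching on the reset_type
	 * values defined above (the mydrv_* helpers are assumptions):
	 *
	 *	static int mydrv_host_reset(struct Scsi_Host *shost, int reset_type)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(shost);
	 *
	 *		switch (reset_type) {
	 *		case SCSI_ADAPTER_RESET:
	 *			return mydrv_reset_adapter(mh);
	 *		case SCSI_FIRMWARE_RESET:
	 *			return mydrv_reset_firmware(mh);
	 *		default:
	 *			return -EINVAL;
	 *		}
	 *	}
	 */
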
	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme. It is set to the maximum number
	 * of simultaneous commands a single hw queue in the HBA will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus. If this is
	 * the case, then it must be reserved. Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations besides segment count.
	 */
	unsigned int max_sectors;

	/*
	 * Maximum size in bytes of a single segment.
	 */
	unsigned int max_segment_size;

	unsigned int dma_alignment;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	unsigned long virt_boundary_mask;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size. Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS 1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host. Set this to the maximum number of command
	 * blocks to be provided for each device. Set this to 1 for one
	 * command block per lun, 2 for two, etc. Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * Allocate tags starting from last allocated tag.
	 */
	bool tag_alloc_policy_rr : 1;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
	unsigned queuecommand_may_block:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking. If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED 7

	/*
	 * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
	 */
	const struct attribute_group **shost_groups;

	/*
	 * Pointer to the SCSI device attribute groups for this host,
	 * NULL terminated.
	 */
	const struct attribute_group **sdev_groups;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 * Vendor Type and ID formatting requirements specified in
	 * scsi_netlink.h
	 */
	u64 vendor_id;
};

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \
	{ \
		unsigned long irq_flags; \
		int rc; \
		spin_lock_irqsave(shost->host_lock, irq_flags); \
		rc = func_name##_lck(cmd); \
		spin_unlock_irqrestore(shost->host_lock, irq_flags); \
		return rc; \
	}

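/*
 * Example: how a legacy driver uses DEF_SCSI_QCMD. The driver writes
 * the locked variant with an "_lck" suffix and the macro emits the
 * unlocked wrapper named in the template (mydrv_* is hypothetical):
 *
 *	static int mydrv_queuecommand_lck(struct scsi_cmnd *cmd)
 *	{
 *		// runs with shost->host_lock held
 *		return mydrv_issue(cmd);
 *	}
 *
 *	static DEF_SCSI_QCMD(mydrv_queuecommand)
 */
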
/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head __devices;
	struct list_head __targets;

	struct list_head starved_list;

	spinlock_t default_lock;
	spinlock_t *host_lock;

	struct mutex scan_mutex;	/* serialize scanning activity */

	struct list_head eh_abort_list;
	struct list_head eh_cmd_q;
	struct task_struct *ehandler;	/* Error recovery thread. */
	struct completion *eh_action;	/* Wait for specific actions on the
					   host. */
	wait_queue_head_t host_wait;
	const struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	struct kref tagset_refcnt;
	struct completion tagset_freed;
	/* Area to keep a shared tag map */
	struct blk_mq_tag_set tag_set;

	atomic_t host_blocked;

	unsigned int host_failed;	/* commands that failed.
					   protected by host_lock */
	unsigned int host_eh_scheduled;	/* EH scheduled without command */

	unsigned int host_no;	/* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;

	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely. For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set. It is
	 * initialized to 0 in scsi_host_alloc.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int opt_sectors;
	unsigned int max_segment_size;
	unsigned int dma_alignment;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue. However, when host_tagset is set,
	 * the total queue depth is can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned nr_maps;
	unsigned active_mode:2;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
	unsigned queuecommand_may_block:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int irq;

	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device shost_gendev, shost_dev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/* Delay for runtime autosuspend */
	int rpm_autosuspend_delay;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[]	/* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

#define class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
	       shost->shost_state == SHOST_CANCEL_RECOVERY ||
	       shost->shost_state == SHOST_DEL_RECOVERY ||
	       shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
#if defined(CONFIG_SCSI_PROC_FS)
struct proc_dir_entry *
scsi_template_proc_dir(const struct scsi_host_template *sht);
#else
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
extern int scsi_resume_device(struct scsi_device *sdev);
extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
					    enum scsi_host_status status);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}

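/*
 * Example: typical host registration in a hypothetical PCI driver's
 * probe routine (error handling abbreviated; the template, limits and
 * mydrv names are assumptions). Teardown in remove() reverses this
 * with scsi_remove_host() followed by scsi_host_put().
 *
 *	struct Scsi_Host *shost;
 *	int err;
 *
 *	shost = scsi_host_alloc(&mydrv_template, sizeof(struct mydrv_host));
 *	if (!shost)
 *		return -ENOMEM;
 *	shost->max_id = 16;
 *	shost->can_queue = MYDRV_MAX_QD;
 *	err = scsi_add_host(shost, &pdev->dev);
 *	if (err) {
 *		scsi_host_put(shost);
 *		return err;
 *	}
 *	scsi_scan_host(shost);
 *	return 0;
 */
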
static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost: Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING ||
	       shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
extern int scsi_host_block(struct Scsi_Host *shost);
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);

void scsi_host_busy_iter(struct Scsi_Host *,
			 bool (*fn)(struct scsi_cmnd *, void *), void *priv);

struct class_container;

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
	static unsigned char cap[] = { 0,
				       SHOST_DIF_TYPE1_PROTECTION,
				       SHOST_DIF_TYPE2_PROTECTION,
				       SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum. Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance. Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data. Detecting bit errors is a job for ECC memory
 * and buses.
 */
enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,
	SHOST_DIX_GUARD_IP = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}

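/*
 * Example: advertising protection support before scsi_add_host() in a
 * hypothetical DIF/DIX-capable driver (the capability mix shown is an
 * assumption; set whatever the hardware actually supports):
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */
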
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */